00001
00002 #ifndef TABLETOP_COLLISION_MAP_PROCESSING_SERVICE_TABLETOPCOLLISIONMAPPROCESSING_H
00003 #define TABLETOP_COLLISION_MAP_PROCESSING_SERVICE_TABLETOPCOLLISIONMAPPROCESSING_H
00004 #include <string>
00005 #include <vector>
00006 #include <map>
00007 #include <ostream>
00008 #include "ros/serialization.h"
00009 #include "ros/builtin_message_traits.h"
00010 #include "ros/message_operations.h"
00011 #include "ros/time.h"
00012
00013 #include "ros/macros.h"
00014
00015 #include "ros/assert.h"
00016
00017 #include "ros/service_traits.h"
00018
00019 #include "tabletop_object_detector/TabletopDetectionResult.h"
00020
00021
00022 #include "object_manipulation_msgs/GraspableObject.h"
00023
00024 namespace tabletop_collision_map_processing
00025 {
// Request half of the TabletopCollisionMapProcessing service.
// NOTE(review): this header appears to be machine-generated ROS message code
// (rosbuild-era message_gen); hand edits are normally overwritten when the
// .srv file is regenerated. The MD5 strings and the embedded message
// definition below are wire-compatibility constants and must not be altered.
template <class ContainerAllocator>
struct TabletopCollisionMapProcessingRequest_ {
typedef TabletopCollisionMapProcessingRequest_<ContainerAllocator> Type;

// Default constructor: value-initializes all fields (bools default to false).
TabletopCollisionMapProcessingRequest_()
: detection_result()
, reset_collision_models(false)
, reset_attached_models(false)
, desired_frame()
{
}

// Allocator-aware constructor: forwards the allocator to every
// allocator-taking member (message and string fields).
TabletopCollisionMapProcessingRequest_(const ContainerAllocator& _alloc)
: detection_result(_alloc)
, reset_collision_models(false)
, reset_attached_models(false)
, desired_frame(_alloc)
{
}

// Result of a tabletop detection run to be processed (see the embedded
// TabletopDetectionResult definition below for its contents).
typedef ::tabletop_object_detector::TabletopDetectionResult_<ContainerAllocator> _detection_result_type;
::tabletop_object_detector::TabletopDetectionResult_<ContainerAllocator> detection_result;

// ROS 'bool' fields are generated as uint8_t (0 = false, non-zero = true).
typedef uint8_t _reset_collision_models_type;
uint8_t reset_collision_models;

typedef uint8_t _reset_attached_models_type;
uint8_t reset_attached_models;

// Frame the results should be expressed in; generated ROS strings are
// std::basic_string rebound onto the message's ContainerAllocator.
typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _desired_frame_type;
std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > desired_frame;


// ---- Legacy introspection API (deprecated) --------------------------------
// These __-prefixed accessors predate the ros::message_traits /
// ros::service_traits mechanism; they are kept for backward compatibility.
private:
static const char* __s_getDataType_() { return "tabletop_collision_map_processing/TabletopCollisionMapProcessingRequest"; }
public:
ROS_DEPRECATED static const std::string __s_getDataType() { return __s_getDataType_(); }

ROS_DEPRECATED const std::string __getDataType() const { return __s_getDataType_(); }

private:
// MD5 of the request message definition — must match the generator's output
// exactly or publishers/subscribers will refuse to connect.
static const char* __s_getMD5Sum_() { return "fe986e22908c7a2936a0cdf9f240d4a3"; }
public:
ROS_DEPRECATED static const std::string __s_getMD5Sum() { return __s_getMD5Sum_(); }

ROS_DEPRECATED const std::string __getMD5Sum() const { return __s_getMD5Sum_(); }

private:
// MD5 of the full service (request + response) definition.
static const char* __s_getServerMD5Sum_() { return "58e439dda25eed20079051e6af1b5eaa"; }
public:
ROS_DEPRECATED static const std::string __s_getServerMD5Sum() { return __s_getServerMD5Sum_(); }

ROS_DEPRECATED const std::string __getServerMD5Sum() const { return __s_getServerMD5Sum_(); }

private:
// Full text of the request .srv definition plus every transitively embedded
// message definition, exactly as emitted by the message generator. Sent on
// the wire for introspection; do not edit (it is hashed into the MD5s above).
static const char* __s_getMessageDefinition_() { return "\n\
\n\
\n\
\n\
\n\
\n\
\n\
tabletop_object_detector/TabletopDetectionResult detection_result\n\
\n\
\n\
bool reset_collision_models\n\
\n\
\n\
bool reset_attached_models\n\
\n\
\n\
\n\
string desired_frame\n\
\n\
\n\
================================================================================\n\
MSG: tabletop_object_detector/TabletopDetectionResult\n\
# Contains all the information from one run of the tabletop detection node\n\
\n\
# The information for the plane that has been detected\n\
Table table\n\
\n\
# The raw clusters detected in the scan \n\
sensor_msgs/PointCloud[] clusters\n\
\n\
# The list of potential models that have been detected for each cluster\n\
# An empty list will be returned for a cluster that has no recognition results at all\n\
household_objects_database_msgs/DatabaseModelPoseList[] models\n\
\n\
# For each cluster, the index of the list of models that was fit to that cluster\n\
# keep in mind that multiple raw clusters can correspond to a single fit\n\
int32[] cluster_model_indices\n\
\n\
# Whether the detection has succeeded or failed\n\
int32 NO_CLOUD_RECEIVED = 1\n\
int32 NO_TABLE = 2\n\
int32 OTHER_ERROR = 3\n\
int32 SUCCESS = 4\n\
int32 result\n\
\n\
================================================================================\n\
MSG: tabletop_object_detector/Table\n\
# Informs that a planar table has been detected at a given location\n\
\n\
# The pose gives you the transform that take you to the coordinate system\n\
# of the table, with the origin somewhere in the table plane and the \n\
# z axis normal to the plane\n\
geometry_msgs/PoseStamped pose\n\
\n\
# These values give you the observed extents of the table, along x and y,\n\
# in the table's own coordinate system (above)\n\
# there is no guarantee that the origin of the table coordinate system is\n\
# inside the boundary defined by these values. \n\
float32 x_min\n\
float32 x_max\n\
float32 y_min\n\
float32 y_max\n\
\n\
# There is no guarantee that the table does NOT extend further than these \n\
# values; this is just as far as we've observed it.\n\
\n\
\n\
# Newer table definition as triangle mesh of convex hull (relative to pose)\n\
arm_navigation_msgs/Shape convex_hull\n\
\n\
================================================================================\n\
MSG: geometry_msgs/PoseStamped\n\
# A Pose with reference coordinate frame and timestamp\n\
Header header\n\
Pose pose\n\
\n\
================================================================================\n\
MSG: std_msgs/Header\n\
# Standard metadata for higher-level stamped data types.\n\
# This is generally used to communicate timestamped data \n\
# in a particular coordinate frame.\n\
# \n\
# sequence ID: consecutively increasing ID \n\
uint32 seq\n\
#Two-integer timestamp that is expressed as:\n\
# * stamp.secs: seconds (stamp_secs) since epoch\n\
# * stamp.nsecs: nanoseconds since stamp_secs\n\
# time-handling sugar is provided by the client library\n\
time stamp\n\
#Frame this data is associated with\n\
# 0: no frame\n\
# 1: global frame\n\
string frame_id\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Pose\n\
# A representation of pose in free space, composed of postion and orientation. \n\
Point position\n\
Quaternion orientation\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point\n\
# This contains the position of a point in free space\n\
float64 x\n\
float64 y\n\
float64 z\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Quaternion\n\
# This represents an orientation in free space in quaternion form.\n\
\n\
float64 x\n\
float64 y\n\
float64 z\n\
float64 w\n\
\n\
================================================================================\n\
MSG: arm_navigation_msgs/Shape\n\
byte SPHERE=0\n\
byte BOX=1\n\
byte CYLINDER=2\n\
byte MESH=3\n\
\n\
byte type\n\
\n\
\n\
#### define sphere, box, cylinder ####\n\
# the origin of each shape is considered at the shape's center\n\
\n\
# for sphere\n\
# radius := dimensions[0]\n\
\n\
# for cylinder\n\
# radius := dimensions[0]\n\
# length := dimensions[1]\n\
# the length is along the Z axis\n\
\n\
# for box\n\
# size_x := dimensions[0]\n\
# size_y := dimensions[1]\n\
# size_z := dimensions[2]\n\
float64[] dimensions\n\
\n\
\n\
#### define mesh ####\n\
\n\
# list of triangles; triangle k is defined by tre vertices located\n\
# at indices triangles[3k], triangles[3k+1], triangles[3k+2]\n\
int32[] triangles\n\
geometry_msgs/Point[] vertices\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointCloud\n\
# This message holds a collection of 3d points, plus optional additional\n\
# information about each point.\n\
\n\
# Time of sensor data acquisition, coordinate frame ID.\n\
Header header\n\
\n\
# Array of 3d points. Each Point32 should be interpreted as a 3d point\n\
# in the frame given in the header.\n\
geometry_msgs/Point32[] points\n\
\n\
# Each channel should have the same number of elements as points array,\n\
# and the data in each channel should correspond 1:1 with each point.\n\
# Channel names in common practice are listed in ChannelFloat32.msg.\n\
ChannelFloat32[] channels\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point32\n\
# This contains the position of a point in free space(with 32 bits of precision).\n\
# It is recommeded to use Point wherever possible instead of Point32. \n\
# \n\
# This recommendation is to promote interoperability. \n\
#\n\
# This message is designed to take up less space when sending\n\
# lots of points at once, as in the case of a PointCloud. \n\
\n\
float32 x\n\
float32 y\n\
float32 z\n\
================================================================================\n\
MSG: sensor_msgs/ChannelFloat32\n\
# This message is used by the PointCloud message to hold optional data\n\
# associated with each point in the cloud. The length of the values\n\
# array should be the same as the length of the points array in the\n\
# PointCloud, and each value should be associated with the corresponding\n\
# point.\n\
\n\
# Channel names in existing practice include:\n\
# \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\
# This is opposite to usual conventions but remains for\n\
# historical reasons. The newer PointCloud2 message has no\n\
# such problem.\n\
# \"rgb\" - For point clouds produced by color stereo cameras. uint8\n\
# (R,G,B) values packed into the least significant 24 bits,\n\
# in order.\n\
# \"intensity\" - laser or pixel intensity.\n\
# \"distance\"\n\
\n\
# The channel name should give semantics of the channel (e.g.\n\
# \"intensity\" instead of \"value\").\n\
string name\n\
\n\
# The values array should be 1-1 with the elements of the associated\n\
# PointCloud.\n\
float32[] values\n\
\n\
================================================================================\n\
MSG: household_objects_database_msgs/DatabaseModelPoseList\n\
# stores a list of possible database models recognition results\n\
DatabaseModelPose[] model_list\n\
================================================================================\n\
MSG: household_objects_database_msgs/DatabaseModelPose\n\
# Informs that a specific model from the Model Database has been \n\
# identified at a certain location\n\
\n\
# the database id of the model\n\
int32 model_id\n\
\n\
# the pose that it can be found in\n\
geometry_msgs/PoseStamped pose\n\
\n\
# a measure of the confidence level in this detection result\n\
float32 confidence\n\
\n\
# the name of the object detector that generated this detection result\n\
string detector_name\n\
\n\
"; }
public:
ROS_DEPRECATED static const std::string __s_getMessageDefinition() { return __s_getMessageDefinition_(); }

ROS_DEPRECATED const std::string __getMessageDefinition() const { return __s_getMessageDefinition_(); }

// Deprecated manual serialization: writes each field in declaration order.
// The 1000000000 is the generator's "effectively unbounded" stream length;
// the caller is responsible for providing a sufficiently large buffer.
ROS_DEPRECATED virtual uint8_t *serialize(uint8_t *write_ptr, uint32_t seq) const
{
ros::serialization::OStream stream(write_ptr, 1000000000);
ros::serialization::serialize(stream, detection_result);
ros::serialization::serialize(stream, reset_collision_models);
ros::serialization::serialize(stream, reset_attached_models);
ros::serialization::serialize(stream, desired_frame);
return stream.getData();
}

// Deprecated manual deserialization: field order must mirror serialize().
ROS_DEPRECATED virtual uint8_t *deserialize(uint8_t *read_ptr)
{
ros::serialization::IStream stream(read_ptr, 1000000000);
ros::serialization::deserialize(stream, detection_result);
ros::serialization::deserialize(stream, reset_collision_models);
ros::serialization::deserialize(stream, reset_attached_models);
ros::serialization::deserialize(stream, desired_frame);
return stream.getData();
}

// Deprecated: byte count of the serialized form of the current field values.
ROS_DEPRECATED virtual uint32_t serializationLength() const
{
uint32_t size = 0;
size += ros::serialization::serializationLength(detection_result);
size += ros::serialization::serializationLength(reset_collision_models);
size += ros::serialization::serializationLength(reset_attached_models);
size += ros::serialization::serializationLength(desired_frame);
return size;
}

// Shared-pointer aliases used throughout the ROS C++ API.
typedef boost::shared_ptr< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> > Ptr;
typedef boost::shared_ptr< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> const> ConstPtr;
// Transport connection metadata (e.g. callerid), filled in on receipt.
boost::shared_ptr<std::map<std::string, std::string> > __connection_header;
};
// Convenience alias instantiating the request template with the default
// std::allocator, plus the standard Ptr/ConstPtr shared-pointer aliases.
typedef ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<std::allocator<void> > TabletopCollisionMapProcessingRequest;

typedef boost::shared_ptr< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest> TabletopCollisionMapProcessingRequestPtr;
typedef boost::shared_ptr< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest const> TabletopCollisionMapProcessingRequestConstPtr;
00354
00355
00356 template <class ContainerAllocator>
00357 struct TabletopCollisionMapProcessingResponse_ {
00358 typedef TabletopCollisionMapProcessingResponse_<ContainerAllocator> Type;
00359
00360 TabletopCollisionMapProcessingResponse_()
00361 : graspable_objects()
00362 , collision_object_names()
00363 , collision_support_surface_name()
00364 {
00365 }
00366
00367 TabletopCollisionMapProcessingResponse_(const ContainerAllocator& _alloc)
00368 : graspable_objects(_alloc)
00369 , collision_object_names(_alloc)
00370 , collision_support_surface_name(_alloc)
00371 {
00372 }
00373
00374 typedef std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > _graspable_objects_type;
00375 std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > graspable_objects;
00376
00377 typedef std::vector<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > , typename ContainerAllocator::template rebind<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::other > _collision_object_names_type;
00378 std::vector<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > , typename ContainerAllocator::template rebind<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::other > collision_object_names;
00379
00380 typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _collision_support_surface_name_type;
00381 std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > collision_support_surface_name;
00382
00383
00384 ROS_DEPRECATED uint32_t get_graspable_objects_size() const { return (uint32_t)graspable_objects.size(); }
00385 ROS_DEPRECATED void set_graspable_objects_size(uint32_t size) { graspable_objects.resize((size_t)size); }
00386 ROS_DEPRECATED void get_graspable_objects_vec(std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > & vec) const { vec = this->graspable_objects; }
00387 ROS_DEPRECATED void set_graspable_objects_vec(const std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > & vec) { this->graspable_objects = vec; }
00388 ROS_DEPRECATED uint32_t get_collision_object_names_size() const { return (uint32_t)collision_object_names.size(); }
00389 ROS_DEPRECATED void set_collision_object_names_size(uint32_t size) { collision_object_names.resize((size_t)size); }
00390 ROS_DEPRECATED void get_collision_object_names_vec(std::vector<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > , typename ContainerAllocator::template rebind<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::other > & vec) const { vec = this->collision_object_names; }
00391 ROS_DEPRECATED void set_collision_object_names_vec(const std::vector<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > , typename ContainerAllocator::template rebind<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::other > & vec) { this->collision_object_names = vec; }
00392 private:
00393 static const char* __s_getDataType_() { return "tabletop_collision_map_processing/TabletopCollisionMapProcessingResponse"; }
00394 public:
00395 ROS_DEPRECATED static const std::string __s_getDataType() { return __s_getDataType_(); }
00396
00397 ROS_DEPRECATED const std::string __getDataType() const { return __s_getDataType_(); }
00398
00399 private:
00400 static const char* __s_getMD5Sum_() { return "684fb6e369cb671a9c4149f683c75e88"; }
00401 public:
00402 ROS_DEPRECATED static const std::string __s_getMD5Sum() { return __s_getMD5Sum_(); }
00403
00404 ROS_DEPRECATED const std::string __getMD5Sum() const { return __s_getMD5Sum_(); }
00405
00406 private:
00407 static const char* __s_getServerMD5Sum_() { return "58e439dda25eed20079051e6af1b5eaa"; }
00408 public:
00409 ROS_DEPRECATED static const std::string __s_getServerMD5Sum() { return __s_getServerMD5Sum_(); }
00410
00411 ROS_DEPRECATED const std::string __getServerMD5Sum() const { return __s_getServerMD5Sum_(); }
00412
00413 private:
00414 static const char* __s_getMessageDefinition_() { return "\n\
00415 \n\
00416 object_manipulation_msgs/GraspableObject[] graspable_objects\n\
00417 \n\
00418 \n\
00419 \n\
00420 string[] collision_object_names\n\
00421 \n\
00422 \n\
00423 string collision_support_surface_name\n\
00424 \n\
00425 \n\
00426 ================================================================================\n\
00427 MSG: object_manipulation_msgs/GraspableObject\n\
00428 # an object that the object_manipulator can work on\n\
00429 \n\
00430 # a graspable object can be represented in multiple ways. This message\n\
00431 # can contain all of them. Which one is actually used is up to the receiver\n\
00432 # of this message. When adding new representations, one must be careful that\n\
00433 # they have reasonable lightweight defaults indicating that that particular\n\
00434 # representation is not available.\n\
00435 \n\
00436 # the tf frame to be used as a reference frame when combining information from\n\
00437 # the different representations below\n\
00438 string reference_frame_id\n\
00439 \n\
00440 # potential recognition results from a database of models\n\
00441 # all poses are relative to the object reference pose\n\
00442 household_objects_database_msgs/DatabaseModelPose[] potential_models\n\
00443 \n\
00444 # the point cloud itself\n\
00445 sensor_msgs/PointCloud cluster\n\
00446 \n\
00447 # a region of a PointCloud2 of interest\n\
00448 object_manipulation_msgs/SceneRegion region\n\
00449 \n\
00450 # the name that this object has in the collision environment\n\
00451 string collision_name\n\
00452 ================================================================================\n\
00453 MSG: household_objects_database_msgs/DatabaseModelPose\n\
00454 # Informs that a specific model from the Model Database has been \n\
00455 # identified at a certain location\n\
00456 \n\
00457 # the database id of the model\n\
00458 int32 model_id\n\
00459 \n\
00460 # the pose that it can be found in\n\
00461 geometry_msgs/PoseStamped pose\n\
00462 \n\
00463 # a measure of the confidence level in this detection result\n\
00464 float32 confidence\n\
00465 \n\
00466 # the name of the object detector that generated this detection result\n\
00467 string detector_name\n\
00468 \n\
00469 ================================================================================\n\
00470 MSG: geometry_msgs/PoseStamped\n\
00471 # A Pose with reference coordinate frame and timestamp\n\
00472 Header header\n\
00473 Pose pose\n\
00474 \n\
00475 ================================================================================\n\
00476 MSG: std_msgs/Header\n\
00477 # Standard metadata for higher-level stamped data types.\n\
00478 # This is generally used to communicate timestamped data \n\
00479 # in a particular coordinate frame.\n\
00480 # \n\
00481 # sequence ID: consecutively increasing ID \n\
00482 uint32 seq\n\
00483 #Two-integer timestamp that is expressed as:\n\
00484 # * stamp.secs: seconds (stamp_secs) since epoch\n\
00485 # * stamp.nsecs: nanoseconds since stamp_secs\n\
00486 # time-handling sugar is provided by the client library\n\
00487 time stamp\n\
00488 #Frame this data is associated with\n\
00489 # 0: no frame\n\
00490 # 1: global frame\n\
00491 string frame_id\n\
00492 \n\
00493 ================================================================================\n\
00494 MSG: geometry_msgs/Pose\n\
00495 # A representation of pose in free space, composed of postion and orientation. \n\
00496 Point position\n\
00497 Quaternion orientation\n\
00498 \n\
00499 ================================================================================\n\
00500 MSG: geometry_msgs/Point\n\
00501 # This contains the position of a point in free space\n\
00502 float64 x\n\
00503 float64 y\n\
00504 float64 z\n\
00505 \n\
00506 ================================================================================\n\
00507 MSG: geometry_msgs/Quaternion\n\
00508 # This represents an orientation in free space in quaternion form.\n\
00509 \n\
00510 float64 x\n\
00511 float64 y\n\
00512 float64 z\n\
00513 float64 w\n\
00514 \n\
00515 ================================================================================\n\
00516 MSG: sensor_msgs/PointCloud\n\
00517 # This message holds a collection of 3d points, plus optional additional\n\
00518 # information about each point.\n\
00519 \n\
00520 # Time of sensor data acquisition, coordinate frame ID.\n\
00521 Header header\n\
00522 \n\
00523 # Array of 3d points. Each Point32 should be interpreted as a 3d point\n\
00524 # in the frame given in the header.\n\
00525 geometry_msgs/Point32[] points\n\
00526 \n\
00527 # Each channel should have the same number of elements as points array,\n\
00528 # and the data in each channel should correspond 1:1 with each point.\n\
00529 # Channel names in common practice are listed in ChannelFloat32.msg.\n\
00530 ChannelFloat32[] channels\n\
00531 \n\
00532 ================================================================================\n\
00533 MSG: geometry_msgs/Point32\n\
00534 # This contains the position of a point in free space(with 32 bits of precision).\n\
00535 # It is recommeded to use Point wherever possible instead of Point32. \n\
00536 # \n\
00537 # This recommendation is to promote interoperability. \n\
00538 #\n\
00539 # This message is designed to take up less space when sending\n\
00540 # lots of points at once, as in the case of a PointCloud. \n\
00541 \n\
00542 float32 x\n\
00543 float32 y\n\
00544 float32 z\n\
00545 ================================================================================\n\
00546 MSG: sensor_msgs/ChannelFloat32\n\
00547 # This message is used by the PointCloud message to hold optional data\n\
00548 # associated with each point in the cloud. The length of the values\n\
00549 # array should be the same as the length of the points array in the\n\
00550 # PointCloud, and each value should be associated with the corresponding\n\
00551 # point.\n\
00552 \n\
00553 # Channel names in existing practice include:\n\
00554 # \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\
00555 # This is opposite to usual conventions but remains for\n\
00556 # historical reasons. The newer PointCloud2 message has no\n\
00557 # such problem.\n\
00558 # \"rgb\" - For point clouds produced by color stereo cameras. uint8\n\
00559 # (R,G,B) values packed into the least significant 24 bits,\n\
00560 # in order.\n\
00561 # \"intensity\" - laser or pixel intensity.\n\
00562 # \"distance\"\n\
00563 \n\
00564 # The channel name should give semantics of the channel (e.g.\n\
00565 # \"intensity\" instead of \"value\").\n\
00566 string name\n\
00567 \n\
00568 # The values array should be 1-1 with the elements of the associated\n\
00569 # PointCloud.\n\
00570 float32[] values\n\
00571 \n\
00572 ================================================================================\n\
00573 MSG: object_manipulation_msgs/SceneRegion\n\
00574 # Point cloud\n\
00575 sensor_msgs/PointCloud2 cloud\n\
00576 \n\
00577 # Indices for the region of interest\n\
00578 int32[] mask\n\
00579 \n\
00580 # One of the corresponding 2D images, if applicable\n\
00581 sensor_msgs/Image image\n\
00582 \n\
00583 # The disparity image, if applicable\n\
00584 sensor_msgs/Image disparity_image\n\
00585 \n\
00586 # Camera info for the camera that took the image\n\
00587 sensor_msgs/CameraInfo cam_info\n\
00588 \n\
00589 # a 3D region of interest for grasp planning\n\
00590 geometry_msgs/PoseStamped roi_box_pose\n\
00591 geometry_msgs/Vector3 roi_box_dims\n\
00592 \n\
00593 ================================================================================\n\
00594 MSG: sensor_msgs/PointCloud2\n\
00595 # This message holds a collection of N-dimensional points, which may\n\
00596 # contain additional information such as normals, intensity, etc. The\n\
00597 # point data is stored as a binary blob, its layout described by the\n\
00598 # contents of the \"fields\" array.\n\
00599 \n\
00600 # The point cloud data may be organized 2d (image-like) or 1d\n\
00601 # (unordered). Point clouds organized as 2d images may be produced by\n\
00602 # camera depth sensors such as stereo or time-of-flight.\n\
00603 \n\
00604 # Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\
00605 # points).\n\
00606 Header header\n\
00607 \n\
00608 # 2D structure of the point cloud. If the cloud is unordered, height is\n\
00609 # 1 and width is the length of the point cloud.\n\
00610 uint32 height\n\
00611 uint32 width\n\
00612 \n\
00613 # Describes the channels and their layout in the binary data blob.\n\
00614 PointField[] fields\n\
00615 \n\
00616 bool is_bigendian # Is this data bigendian?\n\
00617 uint32 point_step # Length of a point in bytes\n\
00618 uint32 row_step # Length of a row in bytes\n\
00619 uint8[] data # Actual point data, size is (row_step*height)\n\
00620 \n\
00621 bool is_dense # True if there are no invalid points\n\
00622 \n\
00623 ================================================================================\n\
00624 MSG: sensor_msgs/PointField\n\
00625 # This message holds the description of one point entry in the\n\
00626 # PointCloud2 message format.\n\
00627 uint8 INT8 = 1\n\
00628 uint8 UINT8 = 2\n\
00629 uint8 INT16 = 3\n\
00630 uint8 UINT16 = 4\n\
00631 uint8 INT32 = 5\n\
00632 uint8 UINT32 = 6\n\
00633 uint8 FLOAT32 = 7\n\
00634 uint8 FLOAT64 = 8\n\
00635 \n\
00636 string name # Name of field\n\
00637 uint32 offset # Offset from start of point struct\n\
00638 uint8 datatype # Datatype enumeration, see above\n\
00639 uint32 count # How many elements in the field\n\
00640 \n\
00641 ================================================================================\n\
00642 MSG: sensor_msgs/Image\n\
00643 # This message contains an uncompressed image\n\
00644 # (0, 0) is at top-left corner of image\n\
00645 #\n\
00646 \n\
00647 Header header # Header timestamp should be acquisition time of image\n\
00648 # Header frame_id should be optical frame of camera\n\
00649 # origin of frame should be optical center of cameara\n\
00650 # +x should point to the right in the image\n\
00651 # +y should point down in the image\n\
00652 # +z should point into to plane of the image\n\
00653 # If the frame_id here and the frame_id of the CameraInfo\n\
00654 # message associated with the image conflict\n\
00655 # the behavior is undefined\n\
00656 \n\
00657 uint32 height # image height, that is, number of rows\n\
00658 uint32 width # image width, that is, number of columns\n\
00659 \n\
00660 # The legal values for encoding are in file src/image_encodings.cpp\n\
00661 # If you want to standardize a new string format, join\n\
00662 # ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\
00663 \n\
00664 string encoding # Encoding of pixels -- channel meaning, ordering, size\n\
00665 # taken from the list of strings in src/image_encodings.cpp\n\
00666 \n\
00667 uint8 is_bigendian # is this data bigendian?\n\
00668 uint32 step # Full row length in bytes\n\
00669 uint8[] data # actual matrix data, size is (step * rows)\n\
00670 \n\
00671 ================================================================================\n\
00672 MSG: sensor_msgs/CameraInfo\n\
00673 # This message defines meta information for a camera. It should be in a\n\
00674 # camera namespace on topic \"camera_info\" and accompanied by up to five\n\
00675 # image topics named:\n\
00676 #\n\
00677 # image_raw - raw data from the camera driver, possibly Bayer encoded\n\
00678 # image - monochrome, distorted\n\
00679 # image_color - color, distorted\n\
00680 # image_rect - monochrome, rectified\n\
00681 # image_rect_color - color, rectified\n\
00682 #\n\
00683 # The image_pipeline contains packages (image_proc, stereo_image_proc)\n\
00684 # for producing the four processed image topics from image_raw and\n\
00685 # camera_info. The meaning of the camera parameters are described in\n\
00686 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\
00687 #\n\
00688 # The image_geometry package provides a user-friendly interface to\n\
00689 # common operations using this meta information. If you want to, e.g.,\n\
00690 # project a 3d point into image coordinates, we strongly recommend\n\
00691 # using image_geometry.\n\
00692 #\n\
00693 # If the camera is uncalibrated, the matrices D, K, R, P should be left\n\
00694 # zeroed out. In particular, clients may assume that K[0] == 0.0\n\
00695 # indicates an uncalibrated camera.\n\
00696 \n\
00697 #######################################################################\n\
00698 # Image acquisition info #\n\
00699 #######################################################################\n\
00700 \n\
00701 # Time of image acquisition, camera coordinate frame ID\n\
00702 Header header # Header timestamp should be acquisition time of image\n\
00703 # Header frame_id should be optical frame of camera\n\
00704 # origin of frame should be optical center of camera\n\
00705 # +x should point to the right in the image\n\
00706 # +y should point down in the image\n\
00707 # +z should point into the plane of the image\n\
00708 \n\
00709 \n\
00710 #######################################################################\n\
00711 # Calibration Parameters #\n\
00712 #######################################################################\n\
00713 # These are fixed during camera calibration. Their values will be the #\n\
00714 # same in all messages until the camera is recalibrated. Note that #\n\
00715 # self-calibrating systems may \"recalibrate\" frequently. #\n\
00716 # #\n\
00717 # The internal parameters can be used to warp a raw (distorted) image #\n\
00718 # to: #\n\
00719 # 1. An undistorted image (requires D and K) #\n\
00720 # 2. A rectified image (requires D, K, R) #\n\
00721 # The projection matrix P projects 3D points into the rectified image.#\n\
00722 #######################################################################\n\
00723 \n\
00724 # The image dimensions with which the camera was calibrated. Normally\n\
00725 # this will be the full camera resolution in pixels.\n\
00726 uint32 height\n\
00727 uint32 width\n\
00728 \n\
00729 # The distortion model used. Supported models are listed in\n\
00730 # sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\
00731 # simple model of radial and tangential distortion - is sufficent.\n\
00732 string distortion_model\n\
00733 \n\
00734 # The distortion parameters, size depending on the distortion model.\n\
00735 # For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\
00736 float64[] D\n\
00737 \n\
00738 # Intrinsic camera matrix for the raw (distorted) images.\n\
00739 # [fx 0 cx]\n\
00740 # K = [ 0 fy cy]\n\
00741 # [ 0 0 1]\n\
00742 # Projects 3D points in the camera coordinate frame to 2D pixel\n\
00743 # coordinates using the focal lengths (fx, fy) and principal point\n\
00744 # (cx, cy).\n\
00745 float64[9] K # 3x3 row-major matrix\n\
00746 \n\
00747 # Rectification matrix (stereo cameras only)\n\
00748 # A rotation matrix aligning the camera coordinate system to the ideal\n\
00749 # stereo image plane so that epipolar lines in both stereo images are\n\
00750 # parallel.\n\
00751 float64[9] R # 3x3 row-major matrix\n\
00752 \n\
00753 # Projection/camera matrix\n\
00754 # [fx' 0 cx' Tx]\n\
00755 # P = [ 0 fy' cy' Ty]\n\
00756 # [ 0 0 1 0]\n\
00757 # By convention, this matrix specifies the intrinsic (camera) matrix\n\
00758 # of the processed (rectified) image. That is, the left 3x3 portion\n\
00759 # is the normal camera intrinsic matrix for the rectified image.\n\
00760 # It projects 3D points in the camera coordinate frame to 2D pixel\n\
00761 # coordinates using the focal lengths (fx', fy') and principal point\n\
00762 # (cx', cy') - these may differ from the values in K.\n\
00763 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\
00764 # also have R = the identity and P[1:3,1:3] = K.\n\
00765 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\
00766 # position of the optical center of the second camera in the first\n\
00767 # camera's frame. We assume Tz = 0 so both cameras are in the same\n\
00768 # stereo image plane. The first camera always has Tx = Ty = 0. For\n\
00769 # the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\
00770 # Tx = -fx' * B, where B is the baseline between the cameras.\n\
00771 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\
00772 # the rectified image is given by:\n\
00773 # [u v w]' = P * [X Y Z 1]'\n\
00774 # x = u / w\n\
00775 # y = v / w\n\
00776 # This holds for both images of a stereo pair.\n\
00777 float64[12] P # 3x4 row-major matrix\n\
00778 \n\
00779 \n\
00780 #######################################################################\n\
00781 # Operational Parameters #\n\
00782 #######################################################################\n\
00783 # These define the image region actually captured by the camera #\n\
00784 # driver. Although they affect the geometry of the output image, they #\n\
00785 # may be changed freely without recalibrating the camera. #\n\
00786 #######################################################################\n\
00787 \n\
00788 # Binning refers here to any camera setting which combines rectangular\n\
00789 # neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\
00790 # resolution of the output image to\n\
00791 # (width / binning_x) x (height / binning_y).\n\
00792 # The default values binning_x = binning_y = 0 is considered the same\n\
00793 # as binning_x = binning_y = 1 (no subsampling).\n\
00794 uint32 binning_x\n\
00795 uint32 binning_y\n\
00796 \n\
00797 # Region of interest (subwindow of full camera resolution), given in\n\
00798 # full resolution (unbinned) image coordinates. A particular ROI\n\
00799 # always denotes the same window of pixels on the camera sensor,\n\
00800 # regardless of binning settings.\n\
00801 # The default setting of roi (all values 0) is considered the same as\n\
00802 # full resolution (roi.width = width, roi.height = height).\n\
00803 RegionOfInterest roi\n\
00804 \n\
00805 ================================================================================\n\
00806 MSG: sensor_msgs/RegionOfInterest\n\
00807 # This message is used to specify a region of interest within an image.\n\
00808 #\n\
00809 # When used to specify the ROI setting of the camera when the image was\n\
00810 # taken, the height and width fields should either match the height and\n\
00811 # width fields for the associated image; or height = width = 0\n\
00812 # indicates that the full resolution image was captured.\n\
00813 \n\
00814 uint32 x_offset # Leftmost pixel of the ROI\n\
00815 # (0 if the ROI includes the left edge of the image)\n\
00816 uint32 y_offset # Topmost pixel of the ROI\n\
00817 # (0 if the ROI includes the top edge of the image)\n\
00818 uint32 height # Height of ROI\n\
00819 uint32 width # Width of ROI\n\
00820 \n\
00821 # True if a distinct rectified ROI should be calculated from the \"raw\"\n\
00822 # ROI in this message. Typically this should be False if the full image\n\
00823 # is captured (ROI not used), and True if a subwindow is captured (ROI\n\
00824 # used).\n\
00825 bool do_rectify\n\
00826 \n\
00827 ================================================================================\n\
00828 MSG: geometry_msgs/Vector3\n\
00829 # This represents a vector in free space. \n\
00830 \n\
00831 float64 x\n\
00832 float64 y\n\
00833 float64 z\n\
00834 "; }
public:
// Deprecated (pre-Fuerte API): static accessor for the full, flattened
// message-definition text of this response type.
ROS_DEPRECATED static const std::string __s_getMessageDefinition() { return __s_getMessageDefinition_(); }

// Deprecated (pre-Fuerte API): instance accessor; simply forwards to the
// static definition getter above.
ROS_DEPRECATED const std::string __getMessageDefinition() const { return __s_getMessageDefinition_(); }
00839
00840 ROS_DEPRECATED virtual uint8_t *serialize(uint8_t *write_ptr, uint32_t seq) const
00841 {
00842 ros::serialization::OStream stream(write_ptr, 1000000000);
00843 ros::serialization::serialize(stream, graspable_objects);
00844 ros::serialization::serialize(stream, collision_object_names);
00845 ros::serialization::serialize(stream, collision_support_surface_name);
00846 return stream.getData();
00847 }
00848
00849 ROS_DEPRECATED virtual uint8_t *deserialize(uint8_t *read_ptr)
00850 {
00851 ros::serialization::IStream stream(read_ptr, 1000000000);
00852 ros::serialization::deserialize(stream, graspable_objects);
00853 ros::serialization::deserialize(stream, collision_object_names);
00854 ros::serialization::deserialize(stream, collision_support_surface_name);
00855 return stream.getData();
00856 }
00857
00858 ROS_DEPRECATED virtual uint32_t serializationLength() const
00859 {
00860 uint32_t size = 0;
00861 size += ros::serialization::serializationLength(graspable_objects);
00862 size += ros::serialization::serializationLength(collision_object_names);
00863 size += ros::serialization::serializationLength(collision_support_surface_name);
00864 return size;
00865 }
00866
// Boost shared-pointer convenience aliases for this response message type.
typedef boost::shared_ptr< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> > Ptr;
typedef boost::shared_ptr< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> const> ConstPtr;
// Transport connection header attached when this message instance is received.
boost::shared_ptr<std::map<std::string, std::string> > __connection_header;
00870 };
// Default-allocator instantiation — the type user code normally refers to.
typedef ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<std::allocator<void> > TabletopCollisionMapProcessingResponse;

// Shared-pointer aliases for the default-allocator instantiation.
typedef boost::shared_ptr< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse> TabletopCollisionMapProcessingResponsePtr;
typedef boost::shared_ptr< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse const> TabletopCollisionMapProcessingResponseConstPtr;
00875
00876 struct TabletopCollisionMapProcessing
00877 {
00878
00879 typedef TabletopCollisionMapProcessingRequest Request;
00880 typedef TabletopCollisionMapProcessingResponse Response;
00881 Request request;
00882 Response response;
00883
00884 typedef Request RequestType;
00885 typedef Response ResponseType;
00886 };
00887 }
00888
00889 namespace ros
00890 {
00891 namespace message_traits
00892 {
// Register the request (const and non-const) as a ROS message type with the
// message-traits system.
template<class ContainerAllocator> struct IsMessage< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> > : public TrueType {};
template<class ContainerAllocator> struct IsMessage< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> const> : public TrueType {};
template<class ContainerAllocator>
struct MD5Sum< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> > {
// Generated MD5 checksum of the request definition; the build tooling
// recomputes it, so it must never be edited by hand.
static const char* value()
{
return "fe986e22908c7a2936a0cdf9f240d4a3";
}

static const char* value(const ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> &) { return value(); }
// The same checksum split into two 64-bit halves for compile-time checks.
static const uint64_t static_value1 = 0xfe986e22908c7a29ULL;
static const uint64_t static_value2 = 0x36a0cdf9f240d4a3ULL;
};
00906
template<class ContainerAllocator>
struct DataType< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> > {
// Fully-qualified ROS datatype name sent on the wire for this request.
static const char* value()
{
return "tabletop_collision_map_processing/TabletopCollisionMapProcessingRequest";
}

static const char* value(const ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> &) { return value(); }
};
00916
template<class ContainerAllocator>
struct Definition< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> > {
// Full flattened message-definition text for the request, including every
// transitively referenced message type. Generated by the ROS message
// tooling and covered by the MD5 checksum — do not edit by hand.
static const char* value()
{
return "\n\
\n\
\n\
\n\
\n\
\n\
\n\
tabletop_object_detector/TabletopDetectionResult detection_result\n\
\n\
\n\
bool reset_collision_models\n\
\n\
\n\
bool reset_attached_models\n\
\n\
\n\
\n\
string desired_frame\n\
\n\
\n\
================================================================================\n\
MSG: tabletop_object_detector/TabletopDetectionResult\n\
# Contains all the information from one run of the tabletop detection node\n\
\n\
# The information for the plane that has been detected\n\
Table table\n\
\n\
# The raw clusters detected in the scan \n\
sensor_msgs/PointCloud[] clusters\n\
\n\
# The list of potential models that have been detected for each cluster\n\
# An empty list will be returned for a cluster that has no recognition results at all\n\
household_objects_database_msgs/DatabaseModelPoseList[] models\n\
\n\
# For each cluster, the index of the list of models that was fit to that cluster\n\
# keep in mind that multiple raw clusters can correspond to a single fit\n\
int32[] cluster_model_indices\n\
\n\
# Whether the detection has succeeded or failed\n\
int32 NO_CLOUD_RECEIVED = 1\n\
int32 NO_TABLE = 2\n\
int32 OTHER_ERROR = 3\n\
int32 SUCCESS = 4\n\
int32 result\n\
\n\
================================================================================\n\
MSG: tabletop_object_detector/Table\n\
# Informs that a planar table has been detected at a given location\n\
\n\
# The pose gives you the transform that take you to the coordinate system\n\
# of the table, with the origin somewhere in the table plane and the \n\
# z axis normal to the plane\n\
geometry_msgs/PoseStamped pose\n\
\n\
# These values give you the observed extents of the table, along x and y,\n\
# in the table's own coordinate system (above)\n\
# there is no guarantee that the origin of the table coordinate system is\n\
# inside the boundary defined by these values. \n\
float32 x_min\n\
float32 x_max\n\
float32 y_min\n\
float32 y_max\n\
\n\
# There is no guarantee that the table does NOT extend further than these \n\
# values; this is just as far as we've observed it.\n\
\n\
\n\
# Newer table definition as triangle mesh of convex hull (relative to pose)\n\
arm_navigation_msgs/Shape convex_hull\n\
\n\
================================================================================\n\
MSG: geometry_msgs/PoseStamped\n\
# A Pose with reference coordinate frame and timestamp\n\
Header header\n\
Pose pose\n\
\n\
================================================================================\n\
MSG: std_msgs/Header\n\
# Standard metadata for higher-level stamped data types.\n\
# This is generally used to communicate timestamped data \n\
# in a particular coordinate frame.\n\
# \n\
# sequence ID: consecutively increasing ID \n\
uint32 seq\n\
#Two-integer timestamp that is expressed as:\n\
# * stamp.secs: seconds (stamp_secs) since epoch\n\
# * stamp.nsecs: nanoseconds since stamp_secs\n\
# time-handling sugar is provided by the client library\n\
time stamp\n\
#Frame this data is associated with\n\
# 0: no frame\n\
# 1: global frame\n\
string frame_id\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Pose\n\
# A representation of pose in free space, composed of postion and orientation. \n\
Point position\n\
Quaternion orientation\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point\n\
# This contains the position of a point in free space\n\
float64 x\n\
float64 y\n\
float64 z\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Quaternion\n\
# This represents an orientation in free space in quaternion form.\n\
\n\
float64 x\n\
float64 y\n\
float64 z\n\
float64 w\n\
\n\
================================================================================\n\
MSG: arm_navigation_msgs/Shape\n\
byte SPHERE=0\n\
byte BOX=1\n\
byte CYLINDER=2\n\
byte MESH=3\n\
\n\
byte type\n\
\n\
\n\
#### define sphere, box, cylinder ####\n\
# the origin of each shape is considered at the shape's center\n\
\n\
# for sphere\n\
# radius := dimensions[0]\n\
\n\
# for cylinder\n\
# radius := dimensions[0]\n\
# length := dimensions[1]\n\
# the length is along the Z axis\n\
\n\
# for box\n\
# size_x := dimensions[0]\n\
# size_y := dimensions[1]\n\
# size_z := dimensions[2]\n\
float64[] dimensions\n\
\n\
\n\
#### define mesh ####\n\
\n\
# list of triangles; triangle k is defined by tre vertices located\n\
# at indices triangles[3k], triangles[3k+1], triangles[3k+2]\n\
int32[] triangles\n\
geometry_msgs/Point[] vertices\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointCloud\n\
# This message holds a collection of 3d points, plus optional additional\n\
# information about each point.\n\
\n\
# Time of sensor data acquisition, coordinate frame ID.\n\
Header header\n\
\n\
# Array of 3d points. Each Point32 should be interpreted as a 3d point\n\
# in the frame given in the header.\n\
geometry_msgs/Point32[] points\n\
\n\
# Each channel should have the same number of elements as points array,\n\
# and the data in each channel should correspond 1:1 with each point.\n\
# Channel names in common practice are listed in ChannelFloat32.msg.\n\
ChannelFloat32[] channels\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point32\n\
# This contains the position of a point in free space(with 32 bits of precision).\n\
# It is recommeded to use Point wherever possible instead of Point32. \n\
# \n\
# This recommendation is to promote interoperability. \n\
#\n\
# This message is designed to take up less space when sending\n\
# lots of points at once, as in the case of a PointCloud. \n\
\n\
float32 x\n\
float32 y\n\
float32 z\n\
================================================================================\n\
MSG: sensor_msgs/ChannelFloat32\n\
# This message is used by the PointCloud message to hold optional data\n\
# associated with each point in the cloud. The length of the values\n\
# array should be the same as the length of the points array in the\n\
# PointCloud, and each value should be associated with the corresponding\n\
# point.\n\
\n\
# Channel names in existing practice include:\n\
# \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\
# This is opposite to usual conventions but remains for\n\
# historical reasons. The newer PointCloud2 message has no\n\
# such problem.\n\
# \"rgb\" - For point clouds produced by color stereo cameras. uint8\n\
# (R,G,B) values packed into the least significant 24 bits,\n\
# in order.\n\
# \"intensity\" - laser or pixel intensity.\n\
# \"distance\"\n\
\n\
# The channel name should give semantics of the channel (e.g.\n\
# \"intensity\" instead of \"value\").\n\
string name\n\
\n\
# The values array should be 1-1 with the elements of the associated\n\
# PointCloud.\n\
float32[] values\n\
\n\
================================================================================\n\
MSG: household_objects_database_msgs/DatabaseModelPoseList\n\
# stores a list of possible database models recognition results\n\
DatabaseModelPose[] model_list\n\
================================================================================\n\
MSG: household_objects_database_msgs/DatabaseModelPose\n\
# Informs that a specific model from the Model Database has been \n\
# identified at a certain location\n\
\n\
# the database id of the model\n\
int32 model_id\n\
\n\
# the pose that it can be found in\n\
geometry_msgs/PoseStamped pose\n\
\n\
# a measure of the confidence level in this detection result\n\
float32 confidence\n\
\n\
# the name of the object detector that generated this detection result\n\
string detector_name\n\
\n\
";
}

static const char* value(const ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> &) { return value(); }
};
01155
01156 }
01157 }
01158
01159
01160 namespace ros
01161 {
01162 namespace message_traits
01163 {
// Register the response (const and non-const) as a ROS message type with the
// message-traits system.
template<class ContainerAllocator> struct IsMessage< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> > : public TrueType {};
template<class ContainerAllocator> struct IsMessage< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> const> : public TrueType {};
template<class ContainerAllocator>
struct MD5Sum< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> > {
// Generated MD5 checksum of the response definition; the build tooling
// recomputes it, so it must never be edited by hand.
static const char* value()
{
return "684fb6e369cb671a9c4149f683c75e88";
}

static const char* value(const ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> &) { return value(); }
// The same checksum split into two 64-bit halves for compile-time checks.
static const uint64_t static_value1 = 0x684fb6e369cb671aULL;
static const uint64_t static_value2 = 0x9c4149f683c75e88ULL;
};
01177
template<class ContainerAllocator>
struct DataType< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> > {
// Fully-qualified ROS datatype name sent on the wire for this response.
static const char* value()
{
return "tabletop_collision_map_processing/TabletopCollisionMapProcessingResponse";
}

static const char* value(const ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> &) { return value(); }
};
01187
01188 template<class ContainerAllocator>
01189 struct Definition< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> > {
01190 static const char* value()
01191 {
01192 return "\n\
01193 \n\
01194 object_manipulation_msgs/GraspableObject[] graspable_objects\n\
01195 \n\
01196 \n\
01197 \n\
01198 string[] collision_object_names\n\
01199 \n\
01200 \n\
01201 string collision_support_surface_name\n\
01202 \n\
01203 \n\
01204 ================================================================================\n\
01205 MSG: object_manipulation_msgs/GraspableObject\n\
01206 # an object that the object_manipulator can work on\n\
01207 \n\
01208 # a graspable object can be represented in multiple ways. This message\n\
01209 # can contain all of them. Which one is actually used is up to the receiver\n\
01210 # of this message. When adding new representations, one must be careful that\n\
01211 # they have reasonable lightweight defaults indicating that that particular\n\
01212 # representation is not available.\n\
01213 \n\
01214 # the tf frame to be used as a reference frame when combining information from\n\
01215 # the different representations below\n\
01216 string reference_frame_id\n\
01217 \n\
01218 # potential recognition results from a database of models\n\
01219 # all poses are relative to the object reference pose\n\
01220 household_objects_database_msgs/DatabaseModelPose[] potential_models\n\
01221 \n\
01222 # the point cloud itself\n\
01223 sensor_msgs/PointCloud cluster\n\
01224 \n\
01225 # a region of a PointCloud2 of interest\n\
01226 object_manipulation_msgs/SceneRegion region\n\
01227 \n\
01228 # the name that this object has in the collision environment\n\
01229 string collision_name\n\
01230 ================================================================================\n\
01231 MSG: household_objects_database_msgs/DatabaseModelPose\n\
01232 # Informs that a specific model from the Model Database has been \n\
01233 # identified at a certain location\n\
01234 \n\
01235 # the database id of the model\n\
01236 int32 model_id\n\
01237 \n\
01238 # the pose that it can be found in\n\
01239 geometry_msgs/PoseStamped pose\n\
01240 \n\
01241 # a measure of the confidence level in this detection result\n\
01242 float32 confidence\n\
01243 \n\
01244 # the name of the object detector that generated this detection result\n\
01245 string detector_name\n\
01246 \n\
01247 ================================================================================\n\
01248 MSG: geometry_msgs/PoseStamped\n\
01249 # A Pose with reference coordinate frame and timestamp\n\
01250 Header header\n\
01251 Pose pose\n\
01252 \n\
01253 ================================================================================\n\
01254 MSG: std_msgs/Header\n\
01255 # Standard metadata for higher-level stamped data types.\n\
01256 # This is generally used to communicate timestamped data \n\
01257 # in a particular coordinate frame.\n\
01258 # \n\
01259 # sequence ID: consecutively increasing ID \n\
01260 uint32 seq\n\
01261 #Two-integer timestamp that is expressed as:\n\
01262 # * stamp.secs: seconds (stamp_secs) since epoch\n\
01263 # * stamp.nsecs: nanoseconds since stamp_secs\n\
01264 # time-handling sugar is provided by the client library\n\
01265 time stamp\n\
01266 #Frame this data is associated with\n\
01267 # 0: no frame\n\
01268 # 1: global frame\n\
01269 string frame_id\n\
01270 \n\
01271 ================================================================================\n\
01272 MSG: geometry_msgs/Pose\n\
01273 # A representation of pose in free space, composed of postion and orientation. \n\
01274 Point position\n\
01275 Quaternion orientation\n\
01276 \n\
01277 ================================================================================\n\
01278 MSG: geometry_msgs/Point\n\
01279 # This contains the position of a point in free space\n\
01280 float64 x\n\
01281 float64 y\n\
01282 float64 z\n\
01283 \n\
01284 ================================================================================\n\
01285 MSG: geometry_msgs/Quaternion\n\
01286 # This represents an orientation in free space in quaternion form.\n\
01287 \n\
01288 float64 x\n\
01289 float64 y\n\
01290 float64 z\n\
01291 float64 w\n\
01292 \n\
01293 ================================================================================\n\
01294 MSG: sensor_msgs/PointCloud\n\
01295 # This message holds a collection of 3d points, plus optional additional\n\
01296 # information about each point.\n\
01297 \n\
01298 # Time of sensor data acquisition, coordinate frame ID.\n\
01299 Header header\n\
01300 \n\
01301 # Array of 3d points. Each Point32 should be interpreted as a 3d point\n\
01302 # in the frame given in the header.\n\
01303 geometry_msgs/Point32[] points\n\
01304 \n\
01305 # Each channel should have the same number of elements as points array,\n\
01306 # and the data in each channel should correspond 1:1 with each point.\n\
01307 # Channel names in common practice are listed in ChannelFloat32.msg.\n\
01308 ChannelFloat32[] channels\n\
01309 \n\
01310 ================================================================================\n\
01311 MSG: geometry_msgs/Point32\n\
01312 # This contains the position of a point in free space(with 32 bits of precision).\n\
01313 # It is recommeded to use Point wherever possible instead of Point32. \n\
01314 # \n\
01315 # This recommendation is to promote interoperability. \n\
01316 #\n\
01317 # This message is designed to take up less space when sending\n\
01318 # lots of points at once, as in the case of a PointCloud. \n\
01319 \n\
01320 float32 x\n\
01321 float32 y\n\
01322 float32 z\n\
01323 ================================================================================\n\
01324 MSG: sensor_msgs/ChannelFloat32\n\
01325 # This message is used by the PointCloud message to hold optional data\n\
01326 # associated with each point in the cloud. The length of the values\n\
01327 # array should be the same as the length of the points array in the\n\
01328 # PointCloud, and each value should be associated with the corresponding\n\
01329 # point.\n\
01330 \n\
01331 # Channel names in existing practice include:\n\
01332 # \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\
01333 # This is opposite to usual conventions but remains for\n\
01334 # historical reasons. The newer PointCloud2 message has no\n\
01335 # such problem.\n\
01336 # \"rgb\" - For point clouds produced by color stereo cameras. uint8\n\
01337 # (R,G,B) values packed into the least significant 24 bits,\n\
01338 # in order.\n\
01339 # \"intensity\" - laser or pixel intensity.\n\
01340 # \"distance\"\n\
01341 \n\
01342 # The channel name should give semantics of the channel (e.g.\n\
01343 # \"intensity\" instead of \"value\").\n\
01344 string name\n\
01345 \n\
01346 # The values array should be 1-1 with the elements of the associated\n\
01347 # PointCloud.\n\
01348 float32[] values\n\
01349 \n\
01350 ================================================================================\n\
01351 MSG: object_manipulation_msgs/SceneRegion\n\
01352 # Point cloud\n\
01353 sensor_msgs/PointCloud2 cloud\n\
01354 \n\
01355 # Indices for the region of interest\n\
01356 int32[] mask\n\
01357 \n\
01358 # One of the corresponding 2D images, if applicable\n\
01359 sensor_msgs/Image image\n\
01360 \n\
01361 # The disparity image, if applicable\n\
01362 sensor_msgs/Image disparity_image\n\
01363 \n\
01364 # Camera info for the camera that took the image\n\
01365 sensor_msgs/CameraInfo cam_info\n\
01366 \n\
01367 # a 3D region of interest for grasp planning\n\
01368 geometry_msgs/PoseStamped roi_box_pose\n\
01369 geometry_msgs/Vector3 roi_box_dims\n\
01370 \n\
01371 ================================================================================\n\
01372 MSG: sensor_msgs/PointCloud2\n\
01373 # This message holds a collection of N-dimensional points, which may\n\
01374 # contain additional information such as normals, intensity, etc. The\n\
01375 # point data is stored as a binary blob, its layout described by the\n\
01376 # contents of the \"fields\" array.\n\
01377 \n\
01378 # The point cloud data may be organized 2d (image-like) or 1d\n\
01379 # (unordered). Point clouds organized as 2d images may be produced by\n\
01380 # camera depth sensors such as stereo or time-of-flight.\n\
01381 \n\
01382 # Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\
01383 # points).\n\
01384 Header header\n\
01385 \n\
01386 # 2D structure of the point cloud. If the cloud is unordered, height is\n\
01387 # 1 and width is the length of the point cloud.\n\
01388 uint32 height\n\
01389 uint32 width\n\
01390 \n\
01391 # Describes the channels and their layout in the binary data blob.\n\
01392 PointField[] fields\n\
01393 \n\
01394 bool is_bigendian # Is this data bigendian?\n\
01395 uint32 point_step # Length of a point in bytes\n\
01396 uint32 row_step # Length of a row in bytes\n\
01397 uint8[] data # Actual point data, size is (row_step*height)\n\
01398 \n\
01399 bool is_dense # True if there are no invalid points\n\
01400 \n\
01401 ================================================================================\n\
01402 MSG: sensor_msgs/PointField\n\
01403 # This message holds the description of one point entry in the\n\
01404 # PointCloud2 message format.\n\
01405 uint8 INT8 = 1\n\
01406 uint8 UINT8 = 2\n\
01407 uint8 INT16 = 3\n\
01408 uint8 UINT16 = 4\n\
01409 uint8 INT32 = 5\n\
01410 uint8 UINT32 = 6\n\
01411 uint8 FLOAT32 = 7\n\
01412 uint8 FLOAT64 = 8\n\
01413 \n\
01414 string name # Name of field\n\
01415 uint32 offset # Offset from start of point struct\n\
01416 uint8 datatype # Datatype enumeration, see above\n\
01417 uint32 count # How many elements in the field\n\
01418 \n\
01419 ================================================================================\n\
01420 MSG: sensor_msgs/Image\n\
01421 # This message contains an uncompressed image\n\
01422 # (0, 0) is at top-left corner of image\n\
01423 #\n\
01424 \n\
01425 Header header # Header timestamp should be acquisition time of image\n\
01426 # Header frame_id should be optical frame of camera\n\
01427 # origin of frame should be optical center of cameara\n\
01428 # +x should point to the right in the image\n\
01429 # +y should point down in the image\n\
01430 # +z should point into to plane of the image\n\
01431 # If the frame_id here and the frame_id of the CameraInfo\n\
01432 # message associated with the image conflict\n\
01433 # the behavior is undefined\n\
01434 \n\
01435 uint32 height # image height, that is, number of rows\n\
01436 uint32 width # image width, that is, number of columns\n\
01437 \n\
01438 # The legal values for encoding are in file src/image_encodings.cpp\n\
01439 # If you want to standardize a new string format, join\n\
01440 # ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\
01441 \n\
01442 string encoding # Encoding of pixels -- channel meaning, ordering, size\n\
01443 # taken from the list of strings in src/image_encodings.cpp\n\
01444 \n\
01445 uint8 is_bigendian # is this data bigendian?\n\
01446 uint32 step # Full row length in bytes\n\
01447 uint8[] data # actual matrix data, size is (step * rows)\n\
01448 \n\
01449 ================================================================================\n\
01450 MSG: sensor_msgs/CameraInfo\n\
01451 # This message defines meta information for a camera. It should be in a\n\
01452 # camera namespace on topic \"camera_info\" and accompanied by up to five\n\
01453 # image topics named:\n\
01454 #\n\
01455 # image_raw - raw data from the camera driver, possibly Bayer encoded\n\
01456 # image - monochrome, distorted\n\
01457 # image_color - color, distorted\n\
01458 # image_rect - monochrome, rectified\n\
01459 # image_rect_color - color, rectified\n\
01460 #\n\
01461 # The image_pipeline contains packages (image_proc, stereo_image_proc)\n\
01462 # for producing the four processed image topics from image_raw and\n\
01463 # camera_info. The meaning of the camera parameters are described in\n\
01464 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\
01465 #\n\
01466 # The image_geometry package provides a user-friendly interface to\n\
01467 # common operations using this meta information. If you want to, e.g.,\n\
01468 # project a 3d point into image coordinates, we strongly recommend\n\
01469 # using image_geometry.\n\
01470 #\n\
01471 # If the camera is uncalibrated, the matrices D, K, R, P should be left\n\
01472 # zeroed out. In particular, clients may assume that K[0] == 0.0\n\
01473 # indicates an uncalibrated camera.\n\
01474 \n\
01475 #######################################################################\n\
01476 # Image acquisition info #\n\
01477 #######################################################################\n\
01478 \n\
01479 # Time of image acquisition, camera coordinate frame ID\n\
01480 Header header # Header timestamp should be acquisition time of image\n\
01481 # Header frame_id should be optical frame of camera\n\
01482 # origin of frame should be optical center of camera\n\
01483 # +x should point to the right in the image\n\
01484 # +y should point down in the image\n\
01485 # +z should point into the plane of the image\n\
01486 \n\
01487 \n\
01488 #######################################################################\n\
01489 # Calibration Parameters #\n\
01490 #######################################################################\n\
01491 # These are fixed during camera calibration. Their values will be the #\n\
01492 # same in all messages until the camera is recalibrated. Note that #\n\
01493 # self-calibrating systems may \"recalibrate\" frequently. #\n\
01494 # #\n\
01495 # The internal parameters can be used to warp a raw (distorted) image #\n\
01496 # to: #\n\
01497 # 1. An undistorted image (requires D and K) #\n\
01498 # 2. A rectified image (requires D, K, R) #\n\
01499 # The projection matrix P projects 3D points into the rectified image.#\n\
01500 #######################################################################\n\
01501 \n\
01502 # The image dimensions with which the camera was calibrated. Normally\n\
01503 # this will be the full camera resolution in pixels.\n\
01504 uint32 height\n\
01505 uint32 width\n\
01506 \n\
01507 # The distortion model used. Supported models are listed in\n\
01508 # sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\
01509 # simple model of radial and tangential distortion - is sufficent.\n\
01510 string distortion_model\n\
01511 \n\
01512 # The distortion parameters, size depending on the distortion model.\n\
01513 # For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\
01514 float64[] D\n\
01515 \n\
01516 # Intrinsic camera matrix for the raw (distorted) images.\n\
01517 # [fx 0 cx]\n\
01518 # K = [ 0 fy cy]\n\
01519 # [ 0 0 1]\n\
01520 # Projects 3D points in the camera coordinate frame to 2D pixel\n\
01521 # coordinates using the focal lengths (fx, fy) and principal point\n\
01522 # (cx, cy).\n\
01523 float64[9] K # 3x3 row-major matrix\n\
01524 \n\
01525 # Rectification matrix (stereo cameras only)\n\
01526 # A rotation matrix aligning the camera coordinate system to the ideal\n\
01527 # stereo image plane so that epipolar lines in both stereo images are\n\
01528 # parallel.\n\
01529 float64[9] R # 3x3 row-major matrix\n\
01530 \n\
01531 # Projection/camera matrix\n\
01532 # [fx' 0 cx' Tx]\n\
01533 # P = [ 0 fy' cy' Ty]\n\
01534 # [ 0 0 1 0]\n\
01535 # By convention, this matrix specifies the intrinsic (camera) matrix\n\
01536 # of the processed (rectified) image. That is, the left 3x3 portion\n\
01537 # is the normal camera intrinsic matrix for the rectified image.\n\
01538 # It projects 3D points in the camera coordinate frame to 2D pixel\n\
01539 # coordinates using the focal lengths (fx', fy') and principal point\n\
01540 # (cx', cy') - these may differ from the values in K.\n\
01541 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\
01542 # also have R = the identity and P[1:3,1:3] = K.\n\
01543 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\
01544 # position of the optical center of the second camera in the first\n\
01545 # camera's frame. We assume Tz = 0 so both cameras are in the same\n\
01546 # stereo image plane. The first camera always has Tx = Ty = 0. For\n\
01547 # the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\
01548 # Tx = -fx' * B, where B is the baseline between the cameras.\n\
01549 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\
01550 # the rectified image is given by:\n\
01551 # [u v w]' = P * [X Y Z 1]'\n\
01552 # x = u / w\n\
01553 # y = v / w\n\
01554 # This holds for both images of a stereo pair.\n\
01555 float64[12] P # 3x4 row-major matrix\n\
01556 \n\
01557 \n\
01558 #######################################################################\n\
01559 # Operational Parameters #\n\
01560 #######################################################################\n\
01561 # These define the image region actually captured by the camera #\n\
01562 # driver. Although they affect the geometry of the output image, they #\n\
01563 # may be changed freely without recalibrating the camera. #\n\
01564 #######################################################################\n\
01565 \n\
01566 # Binning refers here to any camera setting which combines rectangular\n\
01567 # neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\
01568 # resolution of the output image to\n\
01569 # (width / binning_x) x (height / binning_y).\n\
01570 # The default values binning_x = binning_y = 0 is considered the same\n\
01571 # as binning_x = binning_y = 1 (no subsampling).\n\
01572 uint32 binning_x\n\
01573 uint32 binning_y\n\
01574 \n\
01575 # Region of interest (subwindow of full camera resolution), given in\n\
01576 # full resolution (unbinned) image coordinates. A particular ROI\n\
01577 # always denotes the same window of pixels on the camera sensor,\n\
01578 # regardless of binning settings.\n\
01579 # The default setting of roi (all values 0) is considered the same as\n\
01580 # full resolution (roi.width = width, roi.height = height).\n\
01581 RegionOfInterest roi\n\
01582 \n\
01583 ================================================================================\n\
01584 MSG: sensor_msgs/RegionOfInterest\n\
01585 # This message is used to specify a region of interest within an image.\n\
01586 #\n\
01587 # When used to specify the ROI setting of the camera when the image was\n\
01588 # taken, the height and width fields should either match the height and\n\
01589 # width fields for the associated image; or height = width = 0\n\
01590 # indicates that the full resolution image was captured.\n\
01591 \n\
01592 uint32 x_offset # Leftmost pixel of the ROI\n\
01593 # (0 if the ROI includes the left edge of the image)\n\
01594 uint32 y_offset # Topmost pixel of the ROI\n\
01595 # (0 if the ROI includes the top edge of the image)\n\
01596 uint32 height # Height of ROI\n\
01597 uint32 width # Width of ROI\n\
01598 \n\
01599 # True if a distinct rectified ROI should be calculated from the \"raw\"\n\
01600 # ROI in this message. Typically this should be False if the full image\n\
01601 # is captured (ROI not used), and True if a subwindow is captured (ROI\n\
01602 # used).\n\
01603 bool do_rectify\n\
01604 \n\
01605 ================================================================================\n\
01606 MSG: geometry_msgs/Vector3\n\
01607 # This represents a vector in free space. \n\
01608 \n\
01609 float64 x\n\
01610 float64 y\n\
01611 float64 z\n\
01612 ";
01613 }
01614
01615 static const char* value(const ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> &) { return value(); }
01616 };
01617
01618 }
01619 }
01620
01621 namespace ros
01622 {
01623 namespace serialization
01624 {
01625
01626 template<class ContainerAllocator> struct Serializer< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> >
01627 {
01628 template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
01629 {
01630 stream.next(m.detection_result);
01631 stream.next(m.reset_collision_models);
01632 stream.next(m.reset_attached_models);
01633 stream.next(m.desired_frame);
01634 }
01635
01636 ROS_DECLARE_ALLINONE_SERIALIZER;
01637 };
01638 }
01639 }
01640
01641
01642 namespace ros
01643 {
01644 namespace serialization
01645 {
01646
01647 template<class ContainerAllocator> struct Serializer< ::tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> >
01648 {
01649 template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
01650 {
01651 stream.next(m.graspable_objects);
01652 stream.next(m.collision_object_names);
01653 stream.next(m.collision_support_surface_name);
01654 }
01655
01656 ROS_DECLARE_ALLINONE_SERIALIZER;
01657 };
01658 }
01659 }
01660
01661 namespace ros
01662 {
01663 namespace service_traits
01664 {
01665 template<>
01666 struct MD5Sum<tabletop_collision_map_processing::TabletopCollisionMapProcessing> {
01667 static const char* value()
01668 {
01669 return "58e439dda25eed20079051e6af1b5eaa";
01670 }
01671
01672 static const char* value(const tabletop_collision_map_processing::TabletopCollisionMapProcessing&) { return value(); }
01673 };
01674
01675 template<>
01676 struct DataType<tabletop_collision_map_processing::TabletopCollisionMapProcessing> {
01677 static const char* value()
01678 {
01679 return "tabletop_collision_map_processing/TabletopCollisionMapProcessing";
01680 }
01681
01682 static const char* value(const tabletop_collision_map_processing::TabletopCollisionMapProcessing&) { return value(); }
01683 };
01684
01685 template<class ContainerAllocator>
01686 struct MD5Sum<tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> > {
01687 static const char* value()
01688 {
01689 return "58e439dda25eed20079051e6af1b5eaa";
01690 }
01691
01692 static const char* value(const tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> &) { return value(); }
01693 };
01694
01695 template<class ContainerAllocator>
01696 struct DataType<tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> > {
01697 static const char* value()
01698 {
01699 return "tabletop_collision_map_processing/TabletopCollisionMapProcessing";
01700 }
01701
01702 static const char* value(const tabletop_collision_map_processing::TabletopCollisionMapProcessingRequest_<ContainerAllocator> &) { return value(); }
01703 };
01704
01705 template<class ContainerAllocator>
01706 struct MD5Sum<tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> > {
01707 static const char* value()
01708 {
01709 return "58e439dda25eed20079051e6af1b5eaa";
01710 }
01711
01712 static const char* value(const tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> &) { return value(); }
01713 };
01714
01715 template<class ContainerAllocator>
01716 struct DataType<tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> > {
01717 static const char* value()
01718 {
01719 return "tabletop_collision_map_processing/TabletopCollisionMapProcessing";
01720 }
01721
01722 static const char* value(const tabletop_collision_map_processing::TabletopCollisionMapProcessingResponse_<ContainerAllocator> &) { return value(); }
01723 };
01724
01725 }
01726 }
01727
01728 #endif // TABLETOP_COLLISION_MAP_PROCESSING_SERVICE_TABLETOPCOLLISIONMAPPROCESSING_H
01729