$search
00001 /* Auto-generated by genmsg_cpp for file /home/rosbuild/hudson/workspace/doc-electric-object_manipulation/doc_stacks/2013-03-01_16-13-18.345538/object_manipulation/object_manipulation_msgs/srv/GraspPlanning.srv */ 00002 #ifndef OBJECT_MANIPULATION_MSGS_SERVICE_GRASPPLANNING_H 00003 #define OBJECT_MANIPULATION_MSGS_SERVICE_GRASPPLANNING_H 00004 #include <string> 00005 #include <vector> 00006 #include <map> 00007 #include <ostream> 00008 #include "ros/serialization.h" 00009 #include "ros/builtin_message_traits.h" 00010 #include "ros/message_operations.h" 00011 #include "ros/time.h" 00012 00013 #include "ros/macros.h" 00014 00015 #include "ros/assert.h" 00016 00017 #include "ros/service_traits.h" 00018 00019 #include "object_manipulation_msgs/GraspableObject.h" 00020 #include "object_manipulation_msgs/Grasp.h" 00021 #include "object_manipulation_msgs/GraspableObject.h" 00022 00023 00024 #include "object_manipulation_msgs/Grasp.h" 00025 #include "object_manipulation_msgs/GraspPlanningErrorCode.h" 00026 00027 namespace object_manipulation_msgs 00028 { 00029 template <class ContainerAllocator> 00030 struct GraspPlanningRequest_ { 00031 typedef GraspPlanningRequest_<ContainerAllocator> Type; 00032 00033 GraspPlanningRequest_() 00034 : arm_name() 00035 , target() 00036 , collision_object_name() 00037 , collision_support_surface_name() 00038 , grasps_to_evaluate() 00039 , movable_obstacles() 00040 { 00041 } 00042 00043 GraspPlanningRequest_(const ContainerAllocator& _alloc) 00044 : arm_name(_alloc) 00045 , target(_alloc) 00046 , collision_object_name(_alloc) 00047 , collision_support_surface_name(_alloc) 00048 , grasps_to_evaluate(_alloc) 00049 , movable_obstacles(_alloc) 00050 { 00051 } 00052 00053 typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _arm_name_type; 00054 std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > arm_name; 00055 00056 
typedef ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> _target_type; 00057 ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> target; 00058 00059 typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _collision_object_name_type; 00060 std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > collision_object_name; 00061 00062 typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _collision_support_surface_name_type; 00063 std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > collision_support_surface_name; 00064 00065 typedef std::vector< ::object_manipulation_msgs::Grasp_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::Grasp_<ContainerAllocator> >::other > _grasps_to_evaluate_type; 00066 std::vector< ::object_manipulation_msgs::Grasp_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::Grasp_<ContainerAllocator> >::other > grasps_to_evaluate; 00067 00068 typedef std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > _movable_obstacles_type; 00069 std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > movable_obstacles; 00070 00071 00072 ROS_DEPRECATED uint32_t get_grasps_to_evaluate_size() const { return (uint32_t)grasps_to_evaluate.size(); } 00073 ROS_DEPRECATED void set_grasps_to_evaluate_size(uint32_t size) { grasps_to_evaluate.resize((size_t)size); } 00074 ROS_DEPRECATED void get_grasps_to_evaluate_vec(std::vector< 
::object_manipulation_msgs::Grasp_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::Grasp_<ContainerAllocator> >::other > & vec) const { vec = this->grasps_to_evaluate; } 00075 ROS_DEPRECATED void set_grasps_to_evaluate_vec(const std::vector< ::object_manipulation_msgs::Grasp_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::Grasp_<ContainerAllocator> >::other > & vec) { this->grasps_to_evaluate = vec; } 00076 ROS_DEPRECATED uint32_t get_movable_obstacles_size() const { return (uint32_t)movable_obstacles.size(); } 00077 ROS_DEPRECATED void set_movable_obstacles_size(uint32_t size) { movable_obstacles.resize((size_t)size); } 00078 ROS_DEPRECATED void get_movable_obstacles_vec(std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > & vec) const { vec = this->movable_obstacles; } 00079 ROS_DEPRECATED void set_movable_obstacles_vec(const std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > & vec) { this->movable_obstacles = vec; } 00080 private: 00081 static const char* __s_getDataType_() { return "object_manipulation_msgs/GraspPlanningRequest"; } 00082 public: 00083 ROS_DEPRECATED static const std::string __s_getDataType() { return __s_getDataType_(); } 00084 00085 ROS_DEPRECATED const std::string __getDataType() const { return __s_getDataType_(); } 00086 00087 private: 00088 static const char* __s_getMD5Sum_() { return "5003fdf2225280c9d81c2bce4e645c20"; } 00089 public: 00090 ROS_DEPRECATED static const std::string __s_getMD5Sum() { return __s_getMD5Sum_(); } 00091 00092 ROS_DEPRECATED const std::string __getMD5Sum() const { return __s_getMD5Sum_(); } 00093 00094 private: 00095 static 
const char* __s_getServerMD5Sum_() { return "01a11c1cdea613dd8705f368e1dc93dc"; } 00096 public: 00097 ROS_DEPRECATED static const std::string __s_getServerMD5Sum() { return __s_getServerMD5Sum_(); } 00098 00099 ROS_DEPRECATED const std::string __getServerMD5Sum() const { return __s_getServerMD5Sum_(); } 00100 00101 private: 00102 static const char* __s_getMessageDefinition_() { return "\n\ 00103 \n\ 00104 \n\ 00105 \n\ 00106 string arm_name\n\ 00107 \n\ 00108 \n\ 00109 GraspableObject target\n\ 00110 \n\ 00111 \n\ 00112 \n\ 00113 string collision_object_name\n\ 00114 \n\ 00115 \n\ 00116 \n\ 00117 string collision_support_surface_name\n\ 00118 \n\ 00119 \n\ 00120 Grasp[] grasps_to_evaluate\n\ 00121 \n\ 00122 \n\ 00123 \n\ 00124 GraspableObject[] movable_obstacles\n\ 00125 \n\ 00126 \n\ 00127 ================================================================================\n\ 00128 MSG: object_manipulation_msgs/GraspableObject\n\ 00129 # an object that the object_manipulator can work on\n\ 00130 \n\ 00131 # a graspable object can be represented in multiple ways. This message\n\ 00132 # can contain all of them. Which one is actually used is up to the receiver\n\ 00133 # of this message. 
When adding new representations, one must be careful that\n\ 00134 # they have reasonable lightweight defaults indicating that that particular\n\ 00135 # representation is not available.\n\ 00136 \n\ 00137 # the tf frame to be used as a reference frame when combining information from\n\ 00138 # the different representations below\n\ 00139 string reference_frame_id\n\ 00140 \n\ 00141 # potential recognition results from a database of models\n\ 00142 # all poses are relative to the object reference pose\n\ 00143 household_objects_database_msgs/DatabaseModelPose[] potential_models\n\ 00144 \n\ 00145 # the point cloud itself\n\ 00146 sensor_msgs/PointCloud cluster\n\ 00147 \n\ 00148 # a region of a PointCloud2 of interest\n\ 00149 object_manipulation_msgs/SceneRegion region\n\ 00150 \n\ 00151 # the name that this object has in the collision environment\n\ 00152 string collision_name\n\ 00153 ================================================================================\n\ 00154 MSG: household_objects_database_msgs/DatabaseModelPose\n\ 00155 # Informs that a specific model from the Model Database has been \n\ 00156 # identified at a certain location\n\ 00157 \n\ 00158 # the database id of the model\n\ 00159 int32 model_id\n\ 00160 \n\ 00161 # the pose that it can be found in\n\ 00162 geometry_msgs/PoseStamped pose\n\ 00163 \n\ 00164 # a measure of the confidence level in this detection result\n\ 00165 float32 confidence\n\ 00166 \n\ 00167 # the name of the object detector that generated this detection result\n\ 00168 string detector_name\n\ 00169 \n\ 00170 ================================================================================\n\ 00171 MSG: geometry_msgs/PoseStamped\n\ 00172 # A Pose with reference coordinate frame and timestamp\n\ 00173 Header header\n\ 00174 Pose pose\n\ 00175 \n\ 00176 ================================================================================\n\ 00177 MSG: std_msgs/Header\n\ 00178 # Standard metadata for higher-level stamped data 
types.\n\ 00179 # This is generally used to communicate timestamped data \n\ 00180 # in a particular coordinate frame.\n\ 00181 # \n\ 00182 # sequence ID: consecutively increasing ID \n\ 00183 uint32 seq\n\ 00184 #Two-integer timestamp that is expressed as:\n\ 00185 # * stamp.secs: seconds (stamp_secs) since epoch\n\ 00186 # * stamp.nsecs: nanoseconds since stamp_secs\n\ 00187 # time-handling sugar is provided by the client library\n\ 00188 time stamp\n\ 00189 #Frame this data is associated with\n\ 00190 # 0: no frame\n\ 00191 # 1: global frame\n\ 00192 string frame_id\n\ 00193 \n\ 00194 ================================================================================\n\ 00195 MSG: geometry_msgs/Pose\n\ 00196 # A representation of pose in free space, composed of postion and orientation. \n\ 00197 Point position\n\ 00198 Quaternion orientation\n\ 00199 \n\ 00200 ================================================================================\n\ 00201 MSG: geometry_msgs/Point\n\ 00202 # This contains the position of a point in free space\n\ 00203 float64 x\n\ 00204 float64 y\n\ 00205 float64 z\n\ 00206 \n\ 00207 ================================================================================\n\ 00208 MSG: geometry_msgs/Quaternion\n\ 00209 # This represents an orientation in free space in quaternion form.\n\ 00210 \n\ 00211 float64 x\n\ 00212 float64 y\n\ 00213 float64 z\n\ 00214 float64 w\n\ 00215 \n\ 00216 ================================================================================\n\ 00217 MSG: sensor_msgs/PointCloud\n\ 00218 # This message holds a collection of 3d points, plus optional additional\n\ 00219 # information about each point.\n\ 00220 \n\ 00221 # Time of sensor data acquisition, coordinate frame ID.\n\ 00222 Header header\n\ 00223 \n\ 00224 # Array of 3d points. 
Each Point32 should be interpreted as a 3d point\n\ 00225 # in the frame given in the header.\n\ 00226 geometry_msgs/Point32[] points\n\ 00227 \n\ 00228 # Each channel should have the same number of elements as points array,\n\ 00229 # and the data in each channel should correspond 1:1 with each point.\n\ 00230 # Channel names in common practice are listed in ChannelFloat32.msg.\n\ 00231 ChannelFloat32[] channels\n\ 00232 \n\ 00233 ================================================================================\n\ 00234 MSG: geometry_msgs/Point32\n\ 00235 # This contains the position of a point in free space(with 32 bits of precision).\n\ 00236 # It is recommeded to use Point wherever possible instead of Point32. \n\ 00237 # \n\ 00238 # This recommendation is to promote interoperability. \n\ 00239 #\n\ 00240 # This message is designed to take up less space when sending\n\ 00241 # lots of points at once, as in the case of a PointCloud. \n\ 00242 \n\ 00243 float32 x\n\ 00244 float32 y\n\ 00245 float32 z\n\ 00246 ================================================================================\n\ 00247 MSG: sensor_msgs/ChannelFloat32\n\ 00248 # This message is used by the PointCloud message to hold optional data\n\ 00249 # associated with each point in the cloud. The length of the values\n\ 00250 # array should be the same as the length of the points array in the\n\ 00251 # PointCloud, and each value should be associated with the corresponding\n\ 00252 # point.\n\ 00253 \n\ 00254 # Channel names in existing practice include:\n\ 00255 # \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\ 00256 # This is opposite to usual conventions but remains for\n\ 00257 # historical reasons. The newer PointCloud2 message has no\n\ 00258 # such problem.\n\ 00259 # \"rgb\" - For point clouds produced by color stereo cameras. 
uint8\n\ 00260 # (R,G,B) values packed into the least significant 24 bits,\n\ 00261 # in order.\n\ 00262 # \"intensity\" - laser or pixel intensity.\n\ 00263 # \"distance\"\n\ 00264 \n\ 00265 # The channel name should give semantics of the channel (e.g.\n\ 00266 # \"intensity\" instead of \"value\").\n\ 00267 string name\n\ 00268 \n\ 00269 # The values array should be 1-1 with the elements of the associated\n\ 00270 # PointCloud.\n\ 00271 float32[] values\n\ 00272 \n\ 00273 ================================================================================\n\ 00274 MSG: object_manipulation_msgs/SceneRegion\n\ 00275 # Point cloud\n\ 00276 sensor_msgs/PointCloud2 cloud\n\ 00277 \n\ 00278 # Indices for the region of interest\n\ 00279 int32[] mask\n\ 00280 \n\ 00281 # One of the corresponding 2D images, if applicable\n\ 00282 sensor_msgs/Image image\n\ 00283 \n\ 00284 # The disparity image, if applicable\n\ 00285 sensor_msgs/Image disparity_image\n\ 00286 \n\ 00287 # Camera info for the camera that took the image\n\ 00288 sensor_msgs/CameraInfo cam_info\n\ 00289 \n\ 00290 # a 3D region of interest for grasp planning\n\ 00291 geometry_msgs/PoseStamped roi_box_pose\n\ 00292 geometry_msgs/Vector3 roi_box_dims\n\ 00293 \n\ 00294 ================================================================================\n\ 00295 MSG: sensor_msgs/PointCloud2\n\ 00296 # This message holds a collection of N-dimensional points, which may\n\ 00297 # contain additional information such as normals, intensity, etc. The\n\ 00298 # point data is stored as a binary blob, its layout described by the\n\ 00299 # contents of the \"fields\" array.\n\ 00300 \n\ 00301 # The point cloud data may be organized 2d (image-like) or 1d\n\ 00302 # (unordered). 
Point clouds organized as 2d images may be produced by\n\ 00303 # camera depth sensors such as stereo or time-of-flight.\n\ 00304 \n\ 00305 # Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\ 00306 # points).\n\ 00307 Header header\n\ 00308 \n\ 00309 # 2D structure of the point cloud. If the cloud is unordered, height is\n\ 00310 # 1 and width is the length of the point cloud.\n\ 00311 uint32 height\n\ 00312 uint32 width\n\ 00313 \n\ 00314 # Describes the channels and their layout in the binary data blob.\n\ 00315 PointField[] fields\n\ 00316 \n\ 00317 bool is_bigendian # Is this data bigendian?\n\ 00318 uint32 point_step # Length of a point in bytes\n\ 00319 uint32 row_step # Length of a row in bytes\n\ 00320 uint8[] data # Actual point data, size is (row_step*height)\n\ 00321 \n\ 00322 bool is_dense # True if there are no invalid points\n\ 00323 \n\ 00324 ================================================================================\n\ 00325 MSG: sensor_msgs/PointField\n\ 00326 # This message holds the description of one point entry in the\n\ 00327 # PointCloud2 message format.\n\ 00328 uint8 INT8 = 1\n\ 00329 uint8 UINT8 = 2\n\ 00330 uint8 INT16 = 3\n\ 00331 uint8 UINT16 = 4\n\ 00332 uint8 INT32 = 5\n\ 00333 uint8 UINT32 = 6\n\ 00334 uint8 FLOAT32 = 7\n\ 00335 uint8 FLOAT64 = 8\n\ 00336 \n\ 00337 string name # Name of field\n\ 00338 uint32 offset # Offset from start of point struct\n\ 00339 uint8 datatype # Datatype enumeration, see above\n\ 00340 uint32 count # How many elements in the field\n\ 00341 \n\ 00342 ================================================================================\n\ 00343 MSG: sensor_msgs/Image\n\ 00344 # This message contains an uncompressed image\n\ 00345 # (0, 0) is at top-left corner of image\n\ 00346 #\n\ 00347 \n\ 00348 Header header # Header timestamp should be acquisition time of image\n\ 00349 # Header frame_id should be optical frame of camera\n\ 00350 # origin of frame should be optical center of 
cameara\n\ 00351 # +x should point to the right in the image\n\ 00352 # +y should point down in the image\n\ 00353 # +z should point into to plane of the image\n\ 00354 # If the frame_id here and the frame_id of the CameraInfo\n\ 00355 # message associated with the image conflict\n\ 00356 # the behavior is undefined\n\ 00357 \n\ 00358 uint32 height # image height, that is, number of rows\n\ 00359 uint32 width # image width, that is, number of columns\n\ 00360 \n\ 00361 # The legal values for encoding are in file src/image_encodings.cpp\n\ 00362 # If you want to standardize a new string format, join\n\ 00363 # ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\ 00364 \n\ 00365 string encoding # Encoding of pixels -- channel meaning, ordering, size\n\ 00366 # taken from the list of strings in src/image_encodings.cpp\n\ 00367 \n\ 00368 uint8 is_bigendian # is this data bigendian?\n\ 00369 uint32 step # Full row length in bytes\n\ 00370 uint8[] data # actual matrix data, size is (step * rows)\n\ 00371 \n\ 00372 ================================================================================\n\ 00373 MSG: sensor_msgs/CameraInfo\n\ 00374 # This message defines meta information for a camera. It should be in a\n\ 00375 # camera namespace on topic \"camera_info\" and accompanied by up to five\n\ 00376 # image topics named:\n\ 00377 #\n\ 00378 # image_raw - raw data from the camera driver, possibly Bayer encoded\n\ 00379 # image - monochrome, distorted\n\ 00380 # image_color - color, distorted\n\ 00381 # image_rect - monochrome, rectified\n\ 00382 # image_rect_color - color, rectified\n\ 00383 #\n\ 00384 # The image_pipeline contains packages (image_proc, stereo_image_proc)\n\ 00385 # for producing the four processed image topics from image_raw and\n\ 00386 # camera_info. 
The meaning of the camera parameters are described in\n\ 00387 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\ 00388 #\n\ 00389 # The image_geometry package provides a user-friendly interface to\n\ 00390 # common operations using this meta information. If you want to, e.g.,\n\ 00391 # project a 3d point into image coordinates, we strongly recommend\n\ 00392 # using image_geometry.\n\ 00393 #\n\ 00394 # If the camera is uncalibrated, the matrices D, K, R, P should be left\n\ 00395 # zeroed out. In particular, clients may assume that K[0] == 0.0\n\ 00396 # indicates an uncalibrated camera.\n\ 00397 \n\ 00398 #######################################################################\n\ 00399 # Image acquisition info #\n\ 00400 #######################################################################\n\ 00401 \n\ 00402 # Time of image acquisition, camera coordinate frame ID\n\ 00403 Header header # Header timestamp should be acquisition time of image\n\ 00404 # Header frame_id should be optical frame of camera\n\ 00405 # origin of frame should be optical center of camera\n\ 00406 # +x should point to the right in the image\n\ 00407 # +y should point down in the image\n\ 00408 # +z should point into the plane of the image\n\ 00409 \n\ 00410 \n\ 00411 #######################################################################\n\ 00412 # Calibration Parameters #\n\ 00413 #######################################################################\n\ 00414 # These are fixed during camera calibration. Their values will be the #\n\ 00415 # same in all messages until the camera is recalibrated. Note that #\n\ 00416 # self-calibrating systems may \"recalibrate\" frequently. #\n\ 00417 # #\n\ 00418 # The internal parameters can be used to warp a raw (distorted) image #\n\ 00419 # to: #\n\ 00420 # 1. An undistorted image (requires D and K) #\n\ 00421 # 2. 
A rectified image (requires D, K, R) #\n\ 00422 # The projection matrix P projects 3D points into the rectified image.#\n\ 00423 #######################################################################\n\ 00424 \n\ 00425 # The image dimensions with which the camera was calibrated. Normally\n\ 00426 # this will be the full camera resolution in pixels.\n\ 00427 uint32 height\n\ 00428 uint32 width\n\ 00429 \n\ 00430 # The distortion model used. Supported models are listed in\n\ 00431 # sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\ 00432 # simple model of radial and tangential distortion - is sufficent.\n\ 00433 string distortion_model\n\ 00434 \n\ 00435 # The distortion parameters, size depending on the distortion model.\n\ 00436 # For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\ 00437 float64[] D\n\ 00438 \n\ 00439 # Intrinsic camera matrix for the raw (distorted) images.\n\ 00440 # [fx 0 cx]\n\ 00441 # K = [ 0 fy cy]\n\ 00442 # [ 0 0 1]\n\ 00443 # Projects 3D points in the camera coordinate frame to 2D pixel\n\ 00444 # coordinates using the focal lengths (fx, fy) and principal point\n\ 00445 # (cx, cy).\n\ 00446 float64[9] K # 3x3 row-major matrix\n\ 00447 \n\ 00448 # Rectification matrix (stereo cameras only)\n\ 00449 # A rotation matrix aligning the camera coordinate system to the ideal\n\ 00450 # stereo image plane so that epipolar lines in both stereo images are\n\ 00451 # parallel.\n\ 00452 float64[9] R # 3x3 row-major matrix\n\ 00453 \n\ 00454 # Projection/camera matrix\n\ 00455 # [fx' 0 cx' Tx]\n\ 00456 # P = [ 0 fy' cy' Ty]\n\ 00457 # [ 0 0 1 0]\n\ 00458 # By convention, this matrix specifies the intrinsic (camera) matrix\n\ 00459 # of the processed (rectified) image. 
That is, the left 3x3 portion\n\ 00460 # is the normal camera intrinsic matrix for the rectified image.\n\ 00461 # It projects 3D points in the camera coordinate frame to 2D pixel\n\ 00462 # coordinates using the focal lengths (fx', fy') and principal point\n\ 00463 # (cx', cy') - these may differ from the values in K.\n\ 00464 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\ 00465 # also have R = the identity and P[1:3,1:3] = K.\n\ 00466 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\ 00467 # position of the optical center of the second camera in the first\n\ 00468 # camera's frame. We assume Tz = 0 so both cameras are in the same\n\ 00469 # stereo image plane. The first camera always has Tx = Ty = 0. For\n\ 00470 # the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\ 00471 # Tx = -fx' * B, where B is the baseline between the cameras.\n\ 00472 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\ 00473 # the rectified image is given by:\n\ 00474 # [u v w]' = P * [X Y Z 1]'\n\ 00475 # x = u / w\n\ 00476 # y = v / w\n\ 00477 # This holds for both images of a stereo pair.\n\ 00478 float64[12] P # 3x4 row-major matrix\n\ 00479 \n\ 00480 \n\ 00481 #######################################################################\n\ 00482 # Operational Parameters #\n\ 00483 #######################################################################\n\ 00484 # These define the image region actually captured by the camera #\n\ 00485 # driver. Although they affect the geometry of the output image, they #\n\ 00486 # may be changed freely without recalibrating the camera. 
#\n\ 00487 #######################################################################\n\ 00488 \n\ 00489 # Binning refers here to any camera setting which combines rectangular\n\ 00490 # neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\ 00491 # resolution of the output image to\n\ 00492 # (width / binning_x) x (height / binning_y).\n\ 00493 # The default values binning_x = binning_y = 0 is considered the same\n\ 00494 # as binning_x = binning_y = 1 (no subsampling).\n\ 00495 uint32 binning_x\n\ 00496 uint32 binning_y\n\ 00497 \n\ 00498 # Region of interest (subwindow of full camera resolution), given in\n\ 00499 # full resolution (unbinned) image coordinates. A particular ROI\n\ 00500 # always denotes the same window of pixels on the camera sensor,\n\ 00501 # regardless of binning settings.\n\ 00502 # The default setting of roi (all values 0) is considered the same as\n\ 00503 # full resolution (roi.width = width, roi.height = height).\n\ 00504 RegionOfInterest roi\n\ 00505 \n\ 00506 ================================================================================\n\ 00507 MSG: sensor_msgs/RegionOfInterest\n\ 00508 # This message is used to specify a region of interest within an image.\n\ 00509 #\n\ 00510 # When used to specify the ROI setting of the camera when the image was\n\ 00511 # taken, the height and width fields should either match the height and\n\ 00512 # width fields for the associated image; or height = width = 0\n\ 00513 # indicates that the full resolution image was captured.\n\ 00514 \n\ 00515 uint32 x_offset # Leftmost pixel of the ROI\n\ 00516 # (0 if the ROI includes the left edge of the image)\n\ 00517 uint32 y_offset # Topmost pixel of the ROI\n\ 00518 # (0 if the ROI includes the top edge of the image)\n\ 00519 uint32 height # Height of ROI\n\ 00520 uint32 width # Width of ROI\n\ 00521 \n\ 00522 # True if a distinct rectified ROI should be calculated from the \"raw\"\n\ 00523 # ROI in this message. 
Typically this should be False if the full image\n\ 00524 # is captured (ROI not used), and True if a subwindow is captured (ROI\n\ 00525 # used).\n\ 00526 bool do_rectify\n\ 00527 \n\ 00528 ================================================================================\n\ 00529 MSG: geometry_msgs/Vector3\n\ 00530 # This represents a vector in free space. \n\ 00531 \n\ 00532 float64 x\n\ 00533 float64 y\n\ 00534 float64 z\n\ 00535 ================================================================================\n\ 00536 MSG: object_manipulation_msgs/Grasp\n\ 00537 \n\ 00538 # The internal posture of the hand for the pre-grasp\n\ 00539 # only positions are used\n\ 00540 sensor_msgs/JointState pre_grasp_posture\n\ 00541 \n\ 00542 # The internal posture of the hand for the grasp\n\ 00543 # positions and efforts are used\n\ 00544 sensor_msgs/JointState grasp_posture\n\ 00545 \n\ 00546 # The position of the end-effector for the grasp relative to a reference frame \n\ 00547 # (that is always specified elsewhere, not in this message)\n\ 00548 geometry_msgs/Pose grasp_pose\n\ 00549 \n\ 00550 # The estimated probability of success for this grasp\n\ 00551 float64 success_probability\n\ 00552 \n\ 00553 # Debug flag to indicate that this grasp would be the best in its cluster\n\ 00554 bool cluster_rep\n\ 00555 \n\ 00556 # how far the pre-grasp should ideally be away from the grasp\n\ 00557 float32 desired_approach_distance\n\ 00558 \n\ 00559 # how much distance between pre-grasp and grasp must actually be feasible \n\ 00560 # for the grasp not to be rejected\n\ 00561 float32 min_approach_distance\n\ 00562 \n\ 00563 # an optional list of obstacles that we have semantic information about\n\ 00564 # and that we expect might move in the course of executing this grasp\n\ 00565 # the grasp planner is expected to make sure they move in an OK way; during\n\ 00566 # execution, grasp executors will not check for collisions against these objects\n\ 00567 GraspableObject[] 
moved_obstacles\n\ 00568 \n\ 00569 ================================================================================\n\ 00570 MSG: sensor_msgs/JointState\n\ 00571 # This is a message that holds data to describe the state of a set of torque controlled joints. \n\ 00572 #\n\ 00573 # The state of each joint (revolute or prismatic) is defined by:\n\ 00574 # * the position of the joint (rad or m),\n\ 00575 # * the velocity of the joint (rad/s or m/s) and \n\ 00576 # * the effort that is applied in the joint (Nm or N).\n\ 00577 #\n\ 00578 # Each joint is uniquely identified by its name\n\ 00579 # The header specifies the time at which the joint states were recorded. All the joint states\n\ 00580 # in one message have to be recorded at the same time.\n\ 00581 #\n\ 00582 # This message consists of a multiple arrays, one for each part of the joint state. \n\ 00583 # The goal is to make each of the fields optional. When e.g. your joints have no\n\ 00584 # effort associated with them, you can leave the effort array empty. 
\n\ 00585 #\n\ 00586 # All arrays in this message should have the same size, or be empty.\n\ 00587 # This is the only way to uniquely associate the joint name with the correct\n\ 00588 # states.\n\ 00589 \n\ 00590 \n\ 00591 Header header\n\ 00592 \n\ 00593 string[] name\n\ 00594 float64[] position\n\ 00595 float64[] velocity\n\ 00596 float64[] effort\n\ 00597 \n\ 00598 "; } 00599 public: 00600 ROS_DEPRECATED static const std::string __s_getMessageDefinition() { return __s_getMessageDefinition_(); } 00601 00602 ROS_DEPRECATED const std::string __getMessageDefinition() const { return __s_getMessageDefinition_(); } 00603 00604 ROS_DEPRECATED virtual uint8_t *serialize(uint8_t *write_ptr, uint32_t seq) const 00605 { 00606 ros::serialization::OStream stream(write_ptr, 1000000000); 00607 ros::serialization::serialize(stream, arm_name); 00608 ros::serialization::serialize(stream, target); 00609 ros::serialization::serialize(stream, collision_object_name); 00610 ros::serialization::serialize(stream, collision_support_surface_name); 00611 ros::serialization::serialize(stream, grasps_to_evaluate); 00612 ros::serialization::serialize(stream, movable_obstacles); 00613 return stream.getData(); 00614 } 00615 00616 ROS_DEPRECATED virtual uint8_t *deserialize(uint8_t *read_ptr) 00617 { 00618 ros::serialization::IStream stream(read_ptr, 1000000000); 00619 ros::serialization::deserialize(stream, arm_name); 00620 ros::serialization::deserialize(stream, target); 00621 ros::serialization::deserialize(stream, collision_object_name); 00622 ros::serialization::deserialize(stream, collision_support_surface_name); 00623 ros::serialization::deserialize(stream, grasps_to_evaluate); 00624 ros::serialization::deserialize(stream, movable_obstacles); 00625 return stream.getData(); 00626 } 00627 00628 ROS_DEPRECATED virtual uint32_t serializationLength() const 00629 { 00630 uint32_t size = 0; 00631 size += ros::serialization::serializationLength(arm_name); 00632 size += 
ros::serialization::serializationLength(target); 00633 size += ros::serialization::serializationLength(collision_object_name); 00634 size += ros::serialization::serializationLength(collision_support_surface_name); 00635 size += ros::serialization::serializationLength(grasps_to_evaluate); 00636 size += ros::serialization::serializationLength(movable_obstacles); 00637 return size; 00638 } 00639 00640 typedef boost::shared_ptr< ::object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> > Ptr; 00641 typedef boost::shared_ptr< ::object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> const> ConstPtr; 00642 boost::shared_ptr<std::map<std::string, std::string> > __connection_header; 00643 }; // struct GraspPlanningRequest 00644 typedef ::object_manipulation_msgs::GraspPlanningRequest_<std::allocator<void> > GraspPlanningRequest; 00645 00646 typedef boost::shared_ptr< ::object_manipulation_msgs::GraspPlanningRequest> GraspPlanningRequestPtr; 00647 typedef boost::shared_ptr< ::object_manipulation_msgs::GraspPlanningRequest const> GraspPlanningRequestConstPtr; 00648 00649 00650 template <class ContainerAllocator> 00651 struct GraspPlanningResponse_ { 00652 typedef GraspPlanningResponse_<ContainerAllocator> Type; 00653 00654 GraspPlanningResponse_() 00655 : grasps() 00656 , error_code() 00657 { 00658 } 00659 00660 GraspPlanningResponse_(const ContainerAllocator& _alloc) 00661 : grasps(_alloc) 00662 , error_code(_alloc) 00663 { 00664 } 00665 00666 typedef std::vector< ::object_manipulation_msgs::Grasp_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::Grasp_<ContainerAllocator> >::other > _grasps_type; 00667 std::vector< ::object_manipulation_msgs::Grasp_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::Grasp_<ContainerAllocator> >::other > grasps; 00668 00669 typedef ::object_manipulation_msgs::GraspPlanningErrorCode_<ContainerAllocator> _error_code_type; 00670 
// GraspPlanningResponse_<> (continued): the `error_code` result field, deprecated
// pre-ROS-Electric vector accessors, and the generated introspection hooks
// (__s_getDataType_/__s_getMD5Sum_/__s_getServerMD5Sum_/__s_getMessageDefinition_)
// followed by serialize/deserialize and the start of serializationLength.
// NOTE(review): the long string literal below is the full recursive message-definition
// text embedded by genmsg_cpp; the MD5 sums in this file are computed from exactly this
// text, so it must never be edited by hand — no comments can be placed inside it.
::object_manipulation_msgs::GraspPlanningErrorCode_<ContainerAllocator> error_code; 00671 00672 00673 ROS_DEPRECATED uint32_t get_grasps_size() const { return (uint32_t)grasps.size(); } 00674 ROS_DEPRECATED void set_grasps_size(uint32_t size) { grasps.resize((size_t)size); } 00675 ROS_DEPRECATED void get_grasps_vec(std::vector< ::object_manipulation_msgs::Grasp_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::Grasp_<ContainerAllocator> >::other > & vec) const { vec = this->grasps; } 00676 ROS_DEPRECATED void set_grasps_vec(const std::vector< ::object_manipulation_msgs::Grasp_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::Grasp_<ContainerAllocator> >::other > & vec) { this->grasps = vec; } 00677 private: 00678 static const char* __s_getDataType_() { return "object_manipulation_msgs/GraspPlanningResponse"; } 00679 public: 00680 ROS_DEPRECATED static const std::string __s_getDataType() { return __s_getDataType_(); } 00681 00682 ROS_DEPRECATED const std::string __getDataType() const { return __s_getDataType_(); } 00683 00684 private: 00685 static const char* __s_getMD5Sum_() { return "998e3d06c509abfc46d7fdf96fe1c188"; } 00686 public: 00687 ROS_DEPRECATED static const std::string __s_getMD5Sum() { return __s_getMD5Sum_(); } 00688 00689 ROS_DEPRECATED const std::string __getMD5Sum() const { return __s_getMD5Sum_(); } 00690 00691 private: 00692 static const char* __s_getServerMD5Sum_() { return "01a11c1cdea613dd8705f368e1dc93dc"; } 00693 public: 00694 ROS_DEPRECATED static const std::string __s_getServerMD5Sum() { return __s_getServerMD5Sum_(); } 00695 00696 ROS_DEPRECATED const std::string __getServerMD5Sum() const { return __s_getServerMD5Sum_(); } 00697 00698 private: 00699 static const char* __s_getMessageDefinition_() { return "\n\ 00700 \n\ 00701 Grasp[] grasps\n\ 00702 \n\ 00703 \n\ 00704 GraspPlanningErrorCode error_code\n\ 00705 \n\ 00706 \n\ 00707 
================================================================================\n\ 00708 MSG: object_manipulation_msgs/Grasp\n\ 00709 \n\ 00710 # The internal posture of the hand for the pre-grasp\n\ 00711 # only positions are used\n\ 00712 sensor_msgs/JointState pre_grasp_posture\n\ 00713 \n\ 00714 # The internal posture of the hand for the grasp\n\ 00715 # positions and efforts are used\n\ 00716 sensor_msgs/JointState grasp_posture\n\ 00717 \n\ 00718 # The position of the end-effector for the grasp relative to a reference frame \n\ 00719 # (that is always specified elsewhere, not in this message)\n\ 00720 geometry_msgs/Pose grasp_pose\n\ 00721 \n\ 00722 # The estimated probability of success for this grasp\n\ 00723 float64 success_probability\n\ 00724 \n\ 00725 # Debug flag to indicate that this grasp would be the best in its cluster\n\ 00726 bool cluster_rep\n\ 00727 \n\ 00728 # how far the pre-grasp should ideally be away from the grasp\n\ 00729 float32 desired_approach_distance\n\ 00730 \n\ 00731 # how much distance between pre-grasp and grasp must actually be feasible \n\ 00732 # for the grasp not to be rejected\n\ 00733 float32 min_approach_distance\n\ 00734 \n\ 00735 # an optional list of obstacles that we have semantic information about\n\ 00736 # and that we expect might move in the course of executing this grasp\n\ 00737 # the grasp planner is expected to make sure they move in an OK way; during\n\ 00738 # execution, grasp executors will not check for collisions against these objects\n\ 00739 GraspableObject[] moved_obstacles\n\ 00740 \n\ 00741 ================================================================================\n\ 00742 MSG: sensor_msgs/JointState\n\ 00743 # This is a message that holds data to describe the state of a set of torque controlled joints. 
\n\ 00744 #\n\ 00745 # The state of each joint (revolute or prismatic) is defined by:\n\ 00746 # * the position of the joint (rad or m),\n\ 00747 # * the velocity of the joint (rad/s or m/s) and \n\ 00748 # * the effort that is applied in the joint (Nm or N).\n\ 00749 #\n\ 00750 # Each joint is uniquely identified by its name\n\ 00751 # The header specifies the time at which the joint states were recorded. All the joint states\n\ 00752 # in one message have to be recorded at the same time.\n\ 00753 #\n\ 00754 # This message consists of a multiple arrays, one for each part of the joint state. \n\ 00755 # The goal is to make each of the fields optional. When e.g. your joints have no\n\ 00756 # effort associated with them, you can leave the effort array empty. \n\ 00757 #\n\ 00758 # All arrays in this message should have the same size, or be empty.\n\ 00759 # This is the only way to uniquely associate the joint name with the correct\n\ 00760 # states.\n\ 00761 \n\ 00762 \n\ 00763 Header header\n\ 00764 \n\ 00765 string[] name\n\ 00766 float64[] position\n\ 00767 float64[] velocity\n\ 00768 float64[] effort\n\ 00769 \n\ 00770 ================================================================================\n\ 00771 MSG: std_msgs/Header\n\ 00772 # Standard metadata for higher-level stamped data types.\n\ 00773 # This is generally used to communicate timestamped data \n\ 00774 # in a particular coordinate frame.\n\ 00775 # \n\ 00776 # sequence ID: consecutively increasing ID \n\ 00777 uint32 seq\n\ 00778 #Two-integer timestamp that is expressed as:\n\ 00779 # * stamp.secs: seconds (stamp_secs) since epoch\n\ 00780 # * stamp.nsecs: nanoseconds since stamp_secs\n\ 00781 # time-handling sugar is provided by the client library\n\ 00782 time stamp\n\ 00783 #Frame this data is associated with\n\ 00784 # 0: no frame\n\ 00785 # 1: global frame\n\ 00786 string frame_id\n\ 00787 \n\ 00788 ================================================================================\n\ 00789 MSG: 
geometry_msgs/Pose\n\ 00790 # A representation of pose in free space, composed of postion and orientation. \n\ 00791 Point position\n\ 00792 Quaternion orientation\n\ 00793 \n\ 00794 ================================================================================\n\ 00795 MSG: geometry_msgs/Point\n\ 00796 # This contains the position of a point in free space\n\ 00797 float64 x\n\ 00798 float64 y\n\ 00799 float64 z\n\ 00800 \n\ 00801 ================================================================================\n\ 00802 MSG: geometry_msgs/Quaternion\n\ 00803 # This represents an orientation in free space in quaternion form.\n\ 00804 \n\ 00805 float64 x\n\ 00806 float64 y\n\ 00807 float64 z\n\ 00808 float64 w\n\ 00809 \n\ 00810 ================================================================================\n\ 00811 MSG: object_manipulation_msgs/GraspableObject\n\ 00812 # an object that the object_manipulator can work on\n\ 00813 \n\ 00814 # a graspable object can be represented in multiple ways. This message\n\ 00815 # can contain all of them. Which one is actually used is up to the receiver\n\ 00816 # of this message. 
When adding new representations, one must be careful that\n\ 00817 # they have reasonable lightweight defaults indicating that that particular\n\ 00818 # representation is not available.\n\ 00819 \n\ 00820 # the tf frame to be used as a reference frame when combining information from\n\ 00821 # the different representations below\n\ 00822 string reference_frame_id\n\ 00823 \n\ 00824 # potential recognition results from a database of models\n\ 00825 # all poses are relative to the object reference pose\n\ 00826 household_objects_database_msgs/DatabaseModelPose[] potential_models\n\ 00827 \n\ 00828 # the point cloud itself\n\ 00829 sensor_msgs/PointCloud cluster\n\ 00830 \n\ 00831 # a region of a PointCloud2 of interest\n\ 00832 object_manipulation_msgs/SceneRegion region\n\ 00833 \n\ 00834 # the name that this object has in the collision environment\n\ 00835 string collision_name\n\ 00836 ================================================================================\n\ 00837 MSG: household_objects_database_msgs/DatabaseModelPose\n\ 00838 # Informs that a specific model from the Model Database has been \n\ 00839 # identified at a certain location\n\ 00840 \n\ 00841 # the database id of the model\n\ 00842 int32 model_id\n\ 00843 \n\ 00844 # the pose that it can be found in\n\ 00845 geometry_msgs/PoseStamped pose\n\ 00846 \n\ 00847 # a measure of the confidence level in this detection result\n\ 00848 float32 confidence\n\ 00849 \n\ 00850 # the name of the object detector that generated this detection result\n\ 00851 string detector_name\n\ 00852 \n\ 00853 ================================================================================\n\ 00854 MSG: geometry_msgs/PoseStamped\n\ 00855 # A Pose with reference coordinate frame and timestamp\n\ 00856 Header header\n\ 00857 Pose pose\n\ 00858 \n\ 00859 ================================================================================\n\ 00860 MSG: sensor_msgs/PointCloud\n\ 00861 # This message holds a collection of 3d 
points, plus optional additional\n\ 00862 # information about each point.\n\ 00863 \n\ 00864 # Time of sensor data acquisition, coordinate frame ID.\n\ 00865 Header header\n\ 00866 \n\ 00867 # Array of 3d points. Each Point32 should be interpreted as a 3d point\n\ 00868 # in the frame given in the header.\n\ 00869 geometry_msgs/Point32[] points\n\ 00870 \n\ 00871 # Each channel should have the same number of elements as points array,\n\ 00872 # and the data in each channel should correspond 1:1 with each point.\n\ 00873 # Channel names in common practice are listed in ChannelFloat32.msg.\n\ 00874 ChannelFloat32[] channels\n\ 00875 \n\ 00876 ================================================================================\n\ 00877 MSG: geometry_msgs/Point32\n\ 00878 # This contains the position of a point in free space(with 32 bits of precision).\n\ 00879 # It is recommeded to use Point wherever possible instead of Point32. \n\ 00880 # \n\ 00881 # This recommendation is to promote interoperability. \n\ 00882 #\n\ 00883 # This message is designed to take up less space when sending\n\ 00884 # lots of points at once, as in the case of a PointCloud. \n\ 00885 \n\ 00886 float32 x\n\ 00887 float32 y\n\ 00888 float32 z\n\ 00889 ================================================================================\n\ 00890 MSG: sensor_msgs/ChannelFloat32\n\ 00891 # This message is used by the PointCloud message to hold optional data\n\ 00892 # associated with each point in the cloud. The length of the values\n\ 00893 # array should be the same as the length of the points array in the\n\ 00894 # PointCloud, and each value should be associated with the corresponding\n\ 00895 # point.\n\ 00896 \n\ 00897 # Channel names in existing practice include:\n\ 00898 # \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\ 00899 # This is opposite to usual conventions but remains for\n\ 00900 # historical reasons. 
The newer PointCloud2 message has no\n\ 00901 # such problem.\n\ 00902 # \"rgb\" - For point clouds produced by color stereo cameras. uint8\n\ 00903 # (R,G,B) values packed into the least significant 24 bits,\n\ 00904 # in order.\n\ 00905 # \"intensity\" - laser or pixel intensity.\n\ 00906 # \"distance\"\n\ 00907 \n\ 00908 # The channel name should give semantics of the channel (e.g.\n\ 00909 # \"intensity\" instead of \"value\").\n\ 00910 string name\n\ 00911 \n\ 00912 # The values array should be 1-1 with the elements of the associated\n\ 00913 # PointCloud.\n\ 00914 float32[] values\n\ 00915 \n\ 00916 ================================================================================\n\ 00917 MSG: object_manipulation_msgs/SceneRegion\n\ 00918 # Point cloud\n\ 00919 sensor_msgs/PointCloud2 cloud\n\ 00920 \n\ 00921 # Indices for the region of interest\n\ 00922 int32[] mask\n\ 00923 \n\ 00924 # One of the corresponding 2D images, if applicable\n\ 00925 sensor_msgs/Image image\n\ 00926 \n\ 00927 # The disparity image, if applicable\n\ 00928 sensor_msgs/Image disparity_image\n\ 00929 \n\ 00930 # Camera info for the camera that took the image\n\ 00931 sensor_msgs/CameraInfo cam_info\n\ 00932 \n\ 00933 # a 3D region of interest for grasp planning\n\ 00934 geometry_msgs/PoseStamped roi_box_pose\n\ 00935 geometry_msgs/Vector3 roi_box_dims\n\ 00936 \n\ 00937 ================================================================================\n\ 00938 MSG: sensor_msgs/PointCloud2\n\ 00939 # This message holds a collection of N-dimensional points, which may\n\ 00940 # contain additional information such as normals, intensity, etc. The\n\ 00941 # point data is stored as a binary blob, its layout described by the\n\ 00942 # contents of the \"fields\" array.\n\ 00943 \n\ 00944 # The point cloud data may be organized 2d (image-like) or 1d\n\ 00945 # (unordered). 
Point clouds organized as 2d images may be produced by\n\ 00946 # camera depth sensors such as stereo or time-of-flight.\n\ 00947 \n\ 00948 # Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\ 00949 # points).\n\ 00950 Header header\n\ 00951 \n\ 00952 # 2D structure of the point cloud. If the cloud is unordered, height is\n\ 00953 # 1 and width is the length of the point cloud.\n\ 00954 uint32 height\n\ 00955 uint32 width\n\ 00956 \n\ 00957 # Describes the channels and their layout in the binary data blob.\n\ 00958 PointField[] fields\n\ 00959 \n\ 00960 bool is_bigendian # Is this data bigendian?\n\ 00961 uint32 point_step # Length of a point in bytes\n\ 00962 uint32 row_step # Length of a row in bytes\n\ 00963 uint8[] data # Actual point data, size is (row_step*height)\n\ 00964 \n\ 00965 bool is_dense # True if there are no invalid points\n\ 00966 \n\ 00967 ================================================================================\n\ 00968 MSG: sensor_msgs/PointField\n\ 00969 # This message holds the description of one point entry in the\n\ 00970 # PointCloud2 message format.\n\ 00971 uint8 INT8 = 1\n\ 00972 uint8 UINT8 = 2\n\ 00973 uint8 INT16 = 3\n\ 00974 uint8 UINT16 = 4\n\ 00975 uint8 INT32 = 5\n\ 00976 uint8 UINT32 = 6\n\ 00977 uint8 FLOAT32 = 7\n\ 00978 uint8 FLOAT64 = 8\n\ 00979 \n\ 00980 string name # Name of field\n\ 00981 uint32 offset # Offset from start of point struct\n\ 00982 uint8 datatype # Datatype enumeration, see above\n\ 00983 uint32 count # How many elements in the field\n\ 00984 \n\ 00985 ================================================================================\n\ 00986 MSG: sensor_msgs/Image\n\ 00987 # This message contains an uncompressed image\n\ 00988 # (0, 0) is at top-left corner of image\n\ 00989 #\n\ 00990 \n\ 00991 Header header # Header timestamp should be acquisition time of image\n\ 00992 # Header frame_id should be optical frame of camera\n\ 00993 # origin of frame should be optical center of 
cameara\n\ 00994 # +x should point to the right in the image\n\ 00995 # +y should point down in the image\n\ 00996 # +z should point into to plane of the image\n\ 00997 # If the frame_id here and the frame_id of the CameraInfo\n\ 00998 # message associated with the image conflict\n\ 00999 # the behavior is undefined\n\ 01000 \n\ 01001 uint32 height # image height, that is, number of rows\n\ 01002 uint32 width # image width, that is, number of columns\n\ 01003 \n\ 01004 # The legal values for encoding are in file src/image_encodings.cpp\n\ 01005 # If you want to standardize a new string format, join\n\ 01006 # ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\ 01007 \n\ 01008 string encoding # Encoding of pixels -- channel meaning, ordering, size\n\ 01009 # taken from the list of strings in src/image_encodings.cpp\n\ 01010 \n\ 01011 uint8 is_bigendian # is this data bigendian?\n\ 01012 uint32 step # Full row length in bytes\n\ 01013 uint8[] data # actual matrix data, size is (step * rows)\n\ 01014 \n\ 01015 ================================================================================\n\ 01016 MSG: sensor_msgs/CameraInfo\n\ 01017 # This message defines meta information for a camera. It should be in a\n\ 01018 # camera namespace on topic \"camera_info\" and accompanied by up to five\n\ 01019 # image topics named:\n\ 01020 #\n\ 01021 # image_raw - raw data from the camera driver, possibly Bayer encoded\n\ 01022 # image - monochrome, distorted\n\ 01023 # image_color - color, distorted\n\ 01024 # image_rect - monochrome, rectified\n\ 01025 # image_rect_color - color, rectified\n\ 01026 #\n\ 01027 # The image_pipeline contains packages (image_proc, stereo_image_proc)\n\ 01028 # for producing the four processed image topics from image_raw and\n\ 01029 # camera_info. 
The meaning of the camera parameters are described in\n\ 01030 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\ 01031 #\n\ 01032 # The image_geometry package provides a user-friendly interface to\n\ 01033 # common operations using this meta information. If you want to, e.g.,\n\ 01034 # project a 3d point into image coordinates, we strongly recommend\n\ 01035 # using image_geometry.\n\ 01036 #\n\ 01037 # If the camera is uncalibrated, the matrices D, K, R, P should be left\n\ 01038 # zeroed out. In particular, clients may assume that K[0] == 0.0\n\ 01039 # indicates an uncalibrated camera.\n\ 01040 \n\ 01041 #######################################################################\n\ 01042 # Image acquisition info #\n\ 01043 #######################################################################\n\ 01044 \n\ 01045 # Time of image acquisition, camera coordinate frame ID\n\ 01046 Header header # Header timestamp should be acquisition time of image\n\ 01047 # Header frame_id should be optical frame of camera\n\ 01048 # origin of frame should be optical center of camera\n\ 01049 # +x should point to the right in the image\n\ 01050 # +y should point down in the image\n\ 01051 # +z should point into the plane of the image\n\ 01052 \n\ 01053 \n\ 01054 #######################################################################\n\ 01055 # Calibration Parameters #\n\ 01056 #######################################################################\n\ 01057 # These are fixed during camera calibration. Their values will be the #\n\ 01058 # same in all messages until the camera is recalibrated. Note that #\n\ 01059 # self-calibrating systems may \"recalibrate\" frequently. #\n\ 01060 # #\n\ 01061 # The internal parameters can be used to warp a raw (distorted) image #\n\ 01062 # to: #\n\ 01063 # 1. An undistorted image (requires D and K) #\n\ 01064 # 2. 
A rectified image (requires D, K, R) #\n\ 01065 # The projection matrix P projects 3D points into the rectified image.#\n\ 01066 #######################################################################\n\ 01067 \n\ 01068 # The image dimensions with which the camera was calibrated. Normally\n\ 01069 # this will be the full camera resolution in pixels.\n\ 01070 uint32 height\n\ 01071 uint32 width\n\ 01072 \n\ 01073 # The distortion model used. Supported models are listed in\n\ 01074 # sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\ 01075 # simple model of radial and tangential distortion - is sufficent.\n\ 01076 string distortion_model\n\ 01077 \n\ 01078 # The distortion parameters, size depending on the distortion model.\n\ 01079 # For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\ 01080 float64[] D\n\ 01081 \n\ 01082 # Intrinsic camera matrix for the raw (distorted) images.\n\ 01083 # [fx 0 cx]\n\ 01084 # K = [ 0 fy cy]\n\ 01085 # [ 0 0 1]\n\ 01086 # Projects 3D points in the camera coordinate frame to 2D pixel\n\ 01087 # coordinates using the focal lengths (fx, fy) and principal point\n\ 01088 # (cx, cy).\n\ 01089 float64[9] K # 3x3 row-major matrix\n\ 01090 \n\ 01091 # Rectification matrix (stereo cameras only)\n\ 01092 # A rotation matrix aligning the camera coordinate system to the ideal\n\ 01093 # stereo image plane so that epipolar lines in both stereo images are\n\ 01094 # parallel.\n\ 01095 float64[9] R # 3x3 row-major matrix\n\ 01096 \n\ 01097 # Projection/camera matrix\n\ 01098 # [fx' 0 cx' Tx]\n\ 01099 # P = [ 0 fy' cy' Ty]\n\ 01100 # [ 0 0 1 0]\n\ 01101 # By convention, this matrix specifies the intrinsic (camera) matrix\n\ 01102 # of the processed (rectified) image. 
That is, the left 3x3 portion\n\ 01103 # is the normal camera intrinsic matrix for the rectified image.\n\ 01104 # It projects 3D points in the camera coordinate frame to 2D pixel\n\ 01105 # coordinates using the focal lengths (fx', fy') and principal point\n\ 01106 # (cx', cy') - these may differ from the values in K.\n\ 01107 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\ 01108 # also have R = the identity and P[1:3,1:3] = K.\n\ 01109 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\ 01110 # position of the optical center of the second camera in the first\n\ 01111 # camera's frame. We assume Tz = 0 so both cameras are in the same\n\ 01112 # stereo image plane. The first camera always has Tx = Ty = 0. For\n\ 01113 # the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\ 01114 # Tx = -fx' * B, where B is the baseline between the cameras.\n\ 01115 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\ 01116 # the rectified image is given by:\n\ 01117 # [u v w]' = P * [X Y Z 1]'\n\ 01118 # x = u / w\n\ 01119 # y = v / w\n\ 01120 # This holds for both images of a stereo pair.\n\ 01121 float64[12] P # 3x4 row-major matrix\n\ 01122 \n\ 01123 \n\ 01124 #######################################################################\n\ 01125 # Operational Parameters #\n\ 01126 #######################################################################\n\ 01127 # These define the image region actually captured by the camera #\n\ 01128 # driver. Although they affect the geometry of the output image, they #\n\ 01129 # may be changed freely without recalibrating the camera. 
#\n\ 01130 #######################################################################\n\ 01131 \n\ 01132 # Binning refers here to any camera setting which combines rectangular\n\ 01133 # neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\ 01134 # resolution of the output image to\n\ 01135 # (width / binning_x) x (height / binning_y).\n\ 01136 # The default values binning_x = binning_y = 0 is considered the same\n\ 01137 # as binning_x = binning_y = 1 (no subsampling).\n\ 01138 uint32 binning_x\n\ 01139 uint32 binning_y\n\ 01140 \n\ 01141 # Region of interest (subwindow of full camera resolution), given in\n\ 01142 # full resolution (unbinned) image coordinates. A particular ROI\n\ 01143 # always denotes the same window of pixels on the camera sensor,\n\ 01144 # regardless of binning settings.\n\ 01145 # The default setting of roi (all values 0) is considered the same as\n\ 01146 # full resolution (roi.width = width, roi.height = height).\n\ 01147 RegionOfInterest roi\n\ 01148 \n\ 01149 ================================================================================\n\ 01150 MSG: sensor_msgs/RegionOfInterest\n\ 01151 # This message is used to specify a region of interest within an image.\n\ 01152 #\n\ 01153 # When used to specify the ROI setting of the camera when the image was\n\ 01154 # taken, the height and width fields should either match the height and\n\ 01155 # width fields for the associated image; or height = width = 0\n\ 01156 # indicates that the full resolution image was captured.\n\ 01157 \n\ 01158 uint32 x_offset # Leftmost pixel of the ROI\n\ 01159 # (0 if the ROI includes the left edge of the image)\n\ 01160 uint32 y_offset # Topmost pixel of the ROI\n\ 01161 # (0 if the ROI includes the top edge of the image)\n\ 01162 uint32 height # Height of ROI\n\ 01163 uint32 width # Width of ROI\n\ 01164 \n\ 01165 # True if a distinct rectified ROI should be calculated from the \"raw\"\n\ 01166 # ROI in this message. 
Typically this should be False if the full image\n\ 01167 # is captured (ROI not used), and True if a subwindow is captured (ROI\n\ 01168 # used).\n\ 01169 bool do_rectify\n\ 01170 \n\ 01171 ================================================================================\n\ 01172 MSG: geometry_msgs/Vector3\n\ 01173 # This represents a vector in free space. \n\ 01174 \n\ 01175 float64 x\n\ 01176 float64 y\n\ 01177 float64 z\n\ 01178 ================================================================================\n\ 01179 MSG: object_manipulation_msgs/GraspPlanningErrorCode\n\ 01180 # Error codes for grasp and place planning\n\ 01181 \n\ 01182 # plan completed as expected\n\ 01183 int32 SUCCESS = 0\n\ 01184 \n\ 01185 # tf error encountered while transforming\n\ 01186 int32 TF_ERROR = 1 \n\ 01187 \n\ 01188 # some other error\n\ 01189 int32 OTHER_ERROR = 2\n\ 01190 \n\ 01191 # the actual value of this error code\n\ 01192 int32 value\n\ 01193 "; } 01194 public: 01195 ROS_DEPRECATED static const std::string __s_getMessageDefinition() { return __s_getMessageDefinition_(); } 01196 01197 ROS_DEPRECATED const std::string __getMessageDefinition() const { return __s_getMessageDefinition_(); } 01198 01199 ROS_DEPRECATED virtual uint8_t *serialize(uint8_t *write_ptr, uint32_t seq) const 01200 { 01201 ros::serialization::OStream stream(write_ptr, 1000000000); 01202 ros::serialization::serialize(stream, grasps); 01203 ros::serialization::serialize(stream, error_code); 01204 return stream.getData(); 01205 } 01206 01207 ROS_DEPRECATED virtual uint8_t *deserialize(uint8_t *read_ptr) 01208 { 01209 ros::serialization::IStream stream(read_ptr, 1000000000); 01210 ros::serialization::deserialize(stream, grasps); 01211 ros::serialization::deserialize(stream, error_code); 01212 return stream.getData(); 01213 } 01214 01215 ROS_DEPRECATED virtual uint32_t serializationLength() const 01216 { 01217 uint32_t size = 0; 01218 size += ros::serialization::serializationLength(grasps); 01219 size += 
// Tail of GraspPlanningResponse_<>::serializationLength() (adds the error_code wire size),
// the Response smart-pointer typedefs, the GraspPlanning service wrapper pairing
// GraspPlanningRequest with GraspPlanningResponse, and the start of the
// ros::message_traits specializations (IsMessage / MD5Sum / DataType / Definition)
// that register GraspPlanningRequest_ with roscpp's serialization machinery.
// NOTE(review): auto-generated — the MD5 literals below must stay in sync with the
// embedded definition strings; regenerate rather than edit.
ros::serialization::serializationLength(error_code); 01220 return size; 01221 } 01222 01223 typedef boost::shared_ptr< ::object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> > Ptr; 01224 typedef boost::shared_ptr< ::object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> const> ConstPtr; 01225 boost::shared_ptr<std::map<std::string, std::string> > __connection_header; 01226 }; // struct GraspPlanningResponse 01227 typedef ::object_manipulation_msgs::GraspPlanningResponse_<std::allocator<void> > GraspPlanningResponse; 01228 01229 typedef boost::shared_ptr< ::object_manipulation_msgs::GraspPlanningResponse> GraspPlanningResponsePtr; 01230 typedef boost::shared_ptr< ::object_manipulation_msgs::GraspPlanningResponse const> GraspPlanningResponseConstPtr; 01231 01232 struct GraspPlanning 01233 { 01234 01235 typedef GraspPlanningRequest Request; 01236 typedef GraspPlanningResponse Response; 01237 Request request; 01238 Response response; 01239 01240 typedef Request RequestType; 01241 typedef Response ResponseType; 01242 }; // struct GraspPlanning 01243 } // namespace object_manipulation_msgs 01244 01245 namespace ros 01246 { 01247 namespace message_traits 01248 { 01249 template<class ContainerAllocator> struct IsMessage< ::object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> > : public TrueType {}; 01250 template<class ContainerAllocator> struct IsMessage< ::object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> const> : public TrueType {}; 01251 template<class ContainerAllocator> 01252 struct MD5Sum< ::object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> > { 01253 static const char* value() 01254 { 01255 return "5003fdf2225280c9d81c2bce4e645c20"; 01256 } 01257 01258 static const char* value(const ::object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> &) { return value(); } 01259 static const uint64_t static_value1 = 0x5003fdf2225280c9ULL; 01260 static const uint64_t static_value2 = 
0xd81c2bce4e645c20ULL; 01261 }; 01262 01263 template<class ContainerAllocator> 01264 struct DataType< ::object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> > { 01265 static const char* value() 01266 { 01267 return "object_manipulation_msgs/GraspPlanningRequest"; 01268 } 01269 01270 static const char* value(const ::object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> &) { return value(); } 01271 }; 01272 01273 template<class ContainerAllocator> 01274 struct Definition< ::object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> > { 01275 static const char* value() 01276 { 01277 return "\n\ 01278 \n\ 01279 \n\ 01280 \n\ 01281 string arm_name\n\ 01282 \n\ 01283 \n\ 01284 GraspableObject target\n\ 01285 \n\ 01286 \n\ 01287 \n\ 01288 string collision_object_name\n\ 01289 \n\ 01290 \n\ 01291 \n\ 01292 string collision_support_surface_name\n\ 01293 \n\ 01294 \n\ 01295 Grasp[] grasps_to_evaluate\n\ 01296 \n\ 01297 \n\ 01298 \n\ 01299 GraspableObject[] movable_obstacles\n\ 01300 \n\ 01301 \n\ 01302 ================================================================================\n\ 01303 MSG: object_manipulation_msgs/GraspableObject\n\ 01304 # an object that the object_manipulator can work on\n\ 01305 \n\ 01306 # a graspable object can be represented in multiple ways. This message\n\ 01307 # can contain all of them. Which one is actually used is up to the receiver\n\ 01308 # of this message. 
When adding new representations, one must be careful that\n\ 01309 # they have reasonable lightweight defaults indicating that that particular\n\ 01310 # representation is not available.\n\ 01311 \n\ 01312 # the tf frame to be used as a reference frame when combining information from\n\ 01313 # the different representations below\n\ 01314 string reference_frame_id\n\ 01315 \n\ 01316 # potential recognition results from a database of models\n\ 01317 # all poses are relative to the object reference pose\n\ 01318 household_objects_database_msgs/DatabaseModelPose[] potential_models\n\ 01319 \n\ 01320 # the point cloud itself\n\ 01321 sensor_msgs/PointCloud cluster\n\ 01322 \n\ 01323 # a region of a PointCloud2 of interest\n\ 01324 object_manipulation_msgs/SceneRegion region\n\ 01325 \n\ 01326 # the name that this object has in the collision environment\n\ 01327 string collision_name\n\ 01328 ================================================================================\n\ 01329 MSG: household_objects_database_msgs/DatabaseModelPose\n\ 01330 # Informs that a specific model from the Model Database has been \n\ 01331 # identified at a certain location\n\ 01332 \n\ 01333 # the database id of the model\n\ 01334 int32 model_id\n\ 01335 \n\ 01336 # the pose that it can be found in\n\ 01337 geometry_msgs/PoseStamped pose\n\ 01338 \n\ 01339 # a measure of the confidence level in this detection result\n\ 01340 float32 confidence\n\ 01341 \n\ 01342 # the name of the object detector that generated this detection result\n\ 01343 string detector_name\n\ 01344 \n\ 01345 ================================================================================\n\ 01346 MSG: geometry_msgs/PoseStamped\n\ 01347 # A Pose with reference coordinate frame and timestamp\n\ 01348 Header header\n\ 01349 Pose pose\n\ 01350 \n\ 01351 ================================================================================\n\ 01352 MSG: std_msgs/Header\n\ 01353 # Standard metadata for higher-level stamped data 
types.\n\ 01354 # This is generally used to communicate timestamped data \n\ 01355 # in a particular coordinate frame.\n\ 01356 # \n\ 01357 # sequence ID: consecutively increasing ID \n\ 01358 uint32 seq\n\ 01359 #Two-integer timestamp that is expressed as:\n\ 01360 # * stamp.secs: seconds (stamp_secs) since epoch\n\ 01361 # * stamp.nsecs: nanoseconds since stamp_secs\n\ 01362 # time-handling sugar is provided by the client library\n\ 01363 time stamp\n\ 01364 #Frame this data is associated with\n\ 01365 # 0: no frame\n\ 01366 # 1: global frame\n\ 01367 string frame_id\n\ 01368 \n\ 01369 ================================================================================\n\ 01370 MSG: geometry_msgs/Pose\n\ 01371 # A representation of pose in free space, composed of postion and orientation. \n\ 01372 Point position\n\ 01373 Quaternion orientation\n\ 01374 \n\ 01375 ================================================================================\n\ 01376 MSG: geometry_msgs/Point\n\ 01377 # This contains the position of a point in free space\n\ 01378 float64 x\n\ 01379 float64 y\n\ 01380 float64 z\n\ 01381 \n\ 01382 ================================================================================\n\ 01383 MSG: geometry_msgs/Quaternion\n\ 01384 # This represents an orientation in free space in quaternion form.\n\ 01385 \n\ 01386 float64 x\n\ 01387 float64 y\n\ 01388 float64 z\n\ 01389 float64 w\n\ 01390 \n\ 01391 ================================================================================\n\ 01392 MSG: sensor_msgs/PointCloud\n\ 01393 # This message holds a collection of 3d points, plus optional additional\n\ 01394 # information about each point.\n\ 01395 \n\ 01396 # Time of sensor data acquisition, coordinate frame ID.\n\ 01397 Header header\n\ 01398 \n\ 01399 # Array of 3d points. 
Each Point32 should be interpreted as a 3d point\n\ 01400 # in the frame given in the header.\n\ 01401 geometry_msgs/Point32[] points\n\ 01402 \n\ 01403 # Each channel should have the same number of elements as points array,\n\ 01404 # and the data in each channel should correspond 1:1 with each point.\n\ 01405 # Channel names in common practice are listed in ChannelFloat32.msg.\n\ 01406 ChannelFloat32[] channels\n\ 01407 \n\ 01408 ================================================================================\n\ 01409 MSG: geometry_msgs/Point32\n\ 01410 # This contains the position of a point in free space(with 32 bits of precision).\n\ 01411 # It is recommeded to use Point wherever possible instead of Point32. \n\ 01412 # \n\ 01413 # This recommendation is to promote interoperability. \n\ 01414 #\n\ 01415 # This message is designed to take up less space when sending\n\ 01416 # lots of points at once, as in the case of a PointCloud. \n\ 01417 \n\ 01418 float32 x\n\ 01419 float32 y\n\ 01420 float32 z\n\ 01421 ================================================================================\n\ 01422 MSG: sensor_msgs/ChannelFloat32\n\ 01423 # This message is used by the PointCloud message to hold optional data\n\ 01424 # associated with each point in the cloud. The length of the values\n\ 01425 # array should be the same as the length of the points array in the\n\ 01426 # PointCloud, and each value should be associated with the corresponding\n\ 01427 # point.\n\ 01428 \n\ 01429 # Channel names in existing practice include:\n\ 01430 # \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\ 01431 # This is opposite to usual conventions but remains for\n\ 01432 # historical reasons. The newer PointCloud2 message has no\n\ 01433 # such problem.\n\ 01434 # \"rgb\" - For point clouds produced by color stereo cameras. 
uint8\n\ 01435 # (R,G,B) values packed into the least significant 24 bits,\n\ 01436 # in order.\n\ 01437 # \"intensity\" - laser or pixel intensity.\n\ 01438 # \"distance\"\n\ 01439 \n\ 01440 # The channel name should give semantics of the channel (e.g.\n\ 01441 # \"intensity\" instead of \"value\").\n\ 01442 string name\n\ 01443 \n\ 01444 # The values array should be 1-1 with the elements of the associated\n\ 01445 # PointCloud.\n\ 01446 float32[] values\n\ 01447 \n\ 01448 ================================================================================\n\ 01449 MSG: object_manipulation_msgs/SceneRegion\n\ 01450 # Point cloud\n\ 01451 sensor_msgs/PointCloud2 cloud\n\ 01452 \n\ 01453 # Indices for the region of interest\n\ 01454 int32[] mask\n\ 01455 \n\ 01456 # One of the corresponding 2D images, if applicable\n\ 01457 sensor_msgs/Image image\n\ 01458 \n\ 01459 # The disparity image, if applicable\n\ 01460 sensor_msgs/Image disparity_image\n\ 01461 \n\ 01462 # Camera info for the camera that took the image\n\ 01463 sensor_msgs/CameraInfo cam_info\n\ 01464 \n\ 01465 # a 3D region of interest for grasp planning\n\ 01466 geometry_msgs/PoseStamped roi_box_pose\n\ 01467 geometry_msgs/Vector3 roi_box_dims\n\ 01468 \n\ 01469 ================================================================================\n\ 01470 MSG: sensor_msgs/PointCloud2\n\ 01471 # This message holds a collection of N-dimensional points, which may\n\ 01472 # contain additional information such as normals, intensity, etc. The\n\ 01473 # point data is stored as a binary blob, its layout described by the\n\ 01474 # contents of the \"fields\" array.\n\ 01475 \n\ 01476 # The point cloud data may be organized 2d (image-like) or 1d\n\ 01477 # (unordered). 
Point clouds organized as 2d images may be produced by\n\ 01478 # camera depth sensors such as stereo or time-of-flight.\n\ 01479 \n\ 01480 # Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\ 01481 # points).\n\ 01482 Header header\n\ 01483 \n\ 01484 # 2D structure of the point cloud. If the cloud is unordered, height is\n\ 01485 # 1 and width is the length of the point cloud.\n\ 01486 uint32 height\n\ 01487 uint32 width\n\ 01488 \n\ 01489 # Describes the channels and their layout in the binary data blob.\n\ 01490 PointField[] fields\n\ 01491 \n\ 01492 bool is_bigendian # Is this data bigendian?\n\ 01493 uint32 point_step # Length of a point in bytes\n\ 01494 uint32 row_step # Length of a row in bytes\n\ 01495 uint8[] data # Actual point data, size is (row_step*height)\n\ 01496 \n\ 01497 bool is_dense # True if there are no invalid points\n\ 01498 \n\ 01499 ================================================================================\n\ 01500 MSG: sensor_msgs/PointField\n\ 01501 # This message holds the description of one point entry in the\n\ 01502 # PointCloud2 message format.\n\ 01503 uint8 INT8 = 1\n\ 01504 uint8 UINT8 = 2\n\ 01505 uint8 INT16 = 3\n\ 01506 uint8 UINT16 = 4\n\ 01507 uint8 INT32 = 5\n\ 01508 uint8 UINT32 = 6\n\ 01509 uint8 FLOAT32 = 7\n\ 01510 uint8 FLOAT64 = 8\n\ 01511 \n\ 01512 string name # Name of field\n\ 01513 uint32 offset # Offset from start of point struct\n\ 01514 uint8 datatype # Datatype enumeration, see above\n\ 01515 uint32 count # How many elements in the field\n\ 01516 \n\ 01517 ================================================================================\n\ 01518 MSG: sensor_msgs/Image\n\ 01519 # This message contains an uncompressed image\n\ 01520 # (0, 0) is at top-left corner of image\n\ 01521 #\n\ 01522 \n\ 01523 Header header # Header timestamp should be acquisition time of image\n\ 01524 # Header frame_id should be optical frame of camera\n\ 01525 # origin of frame should be optical center of 
cameara\n\ 01526 # +x should point to the right in the image\n\ 01527 # +y should point down in the image\n\ 01528 # +z should point into to plane of the image\n\ 01529 # If the frame_id here and the frame_id of the CameraInfo\n\ 01530 # message associated with the image conflict\n\ 01531 # the behavior is undefined\n\ 01532 \n\ 01533 uint32 height # image height, that is, number of rows\n\ 01534 uint32 width # image width, that is, number of columns\n\ 01535 \n\ 01536 # The legal values for encoding are in file src/image_encodings.cpp\n\ 01537 # If you want to standardize a new string format, join\n\ 01538 # ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\ 01539 \n\ 01540 string encoding # Encoding of pixels -- channel meaning, ordering, size\n\ 01541 # taken from the list of strings in src/image_encodings.cpp\n\ 01542 \n\ 01543 uint8 is_bigendian # is this data bigendian?\n\ 01544 uint32 step # Full row length in bytes\n\ 01545 uint8[] data # actual matrix data, size is (step * rows)\n\ 01546 \n\ 01547 ================================================================================\n\ 01548 MSG: sensor_msgs/CameraInfo\n\ 01549 # This message defines meta information for a camera. It should be in a\n\ 01550 # camera namespace on topic \"camera_info\" and accompanied by up to five\n\ 01551 # image topics named:\n\ 01552 #\n\ 01553 # image_raw - raw data from the camera driver, possibly Bayer encoded\n\ 01554 # image - monochrome, distorted\n\ 01555 # image_color - color, distorted\n\ 01556 # image_rect - monochrome, rectified\n\ 01557 # image_rect_color - color, rectified\n\ 01558 #\n\ 01559 # The image_pipeline contains packages (image_proc, stereo_image_proc)\n\ 01560 # for producing the four processed image topics from image_raw and\n\ 01561 # camera_info. 
The meaning of the camera parameters are described in\n\ 01562 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\ 01563 #\n\ 01564 # The image_geometry package provides a user-friendly interface to\n\ 01565 # common operations using this meta information. If you want to, e.g.,\n\ 01566 # project a 3d point into image coordinates, we strongly recommend\n\ 01567 # using image_geometry.\n\ 01568 #\n\ 01569 # If the camera is uncalibrated, the matrices D, K, R, P should be left\n\ 01570 # zeroed out. In particular, clients may assume that K[0] == 0.0\n\ 01571 # indicates an uncalibrated camera.\n\ 01572 \n\ 01573 #######################################################################\n\ 01574 # Image acquisition info #\n\ 01575 #######################################################################\n\ 01576 \n\ 01577 # Time of image acquisition, camera coordinate frame ID\n\ 01578 Header header # Header timestamp should be acquisition time of image\n\ 01579 # Header frame_id should be optical frame of camera\n\ 01580 # origin of frame should be optical center of camera\n\ 01581 # +x should point to the right in the image\n\ 01582 # +y should point down in the image\n\ 01583 # +z should point into the plane of the image\n\ 01584 \n\ 01585 \n\ 01586 #######################################################################\n\ 01587 # Calibration Parameters #\n\ 01588 #######################################################################\n\ 01589 # These are fixed during camera calibration. Their values will be the #\n\ 01590 # same in all messages until the camera is recalibrated. Note that #\n\ 01591 # self-calibrating systems may \"recalibrate\" frequently. #\n\ 01592 # #\n\ 01593 # The internal parameters can be used to warp a raw (distorted) image #\n\ 01594 # to: #\n\ 01595 # 1. An undistorted image (requires D and K) #\n\ 01596 # 2. 
A rectified image (requires D, K, R) #\n\ 01597 # The projection matrix P projects 3D points into the rectified image.#\n\ 01598 #######################################################################\n\ 01599 \n\ 01600 # The image dimensions with which the camera was calibrated. Normally\n\ 01601 # this will be the full camera resolution in pixels.\n\ 01602 uint32 height\n\ 01603 uint32 width\n\ 01604 \n\ 01605 # The distortion model used. Supported models are listed in\n\ 01606 # sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\ 01607 # simple model of radial and tangential distortion - is sufficent.\n\ 01608 string distortion_model\n\ 01609 \n\ 01610 # The distortion parameters, size depending on the distortion model.\n\ 01611 # For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\ 01612 float64[] D\n\ 01613 \n\ 01614 # Intrinsic camera matrix for the raw (distorted) images.\n\ 01615 # [fx 0 cx]\n\ 01616 # K = [ 0 fy cy]\n\ 01617 # [ 0 0 1]\n\ 01618 # Projects 3D points in the camera coordinate frame to 2D pixel\n\ 01619 # coordinates using the focal lengths (fx, fy) and principal point\n\ 01620 # (cx, cy).\n\ 01621 float64[9] K # 3x3 row-major matrix\n\ 01622 \n\ 01623 # Rectification matrix (stereo cameras only)\n\ 01624 # A rotation matrix aligning the camera coordinate system to the ideal\n\ 01625 # stereo image plane so that epipolar lines in both stereo images are\n\ 01626 # parallel.\n\ 01627 float64[9] R # 3x3 row-major matrix\n\ 01628 \n\ 01629 # Projection/camera matrix\n\ 01630 # [fx' 0 cx' Tx]\n\ 01631 # P = [ 0 fy' cy' Ty]\n\ 01632 # [ 0 0 1 0]\n\ 01633 # By convention, this matrix specifies the intrinsic (camera) matrix\n\ 01634 # of the processed (rectified) image. 
That is, the left 3x3 portion\n\ 01635 # is the normal camera intrinsic matrix for the rectified image.\n\ 01636 # It projects 3D points in the camera coordinate frame to 2D pixel\n\ 01637 # coordinates using the focal lengths (fx', fy') and principal point\n\ 01638 # (cx', cy') - these may differ from the values in K.\n\ 01639 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\ 01640 # also have R = the identity and P[1:3,1:3] = K.\n\ 01641 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\ 01642 # position of the optical center of the second camera in the first\n\ 01643 # camera's frame. We assume Tz = 0 so both cameras are in the same\n\ 01644 # stereo image plane. The first camera always has Tx = Ty = 0. For\n\ 01645 # the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\ 01646 # Tx = -fx' * B, where B is the baseline between the cameras.\n\ 01647 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\ 01648 # the rectified image is given by:\n\ 01649 # [u v w]' = P * [X Y Z 1]'\n\ 01650 # x = u / w\n\ 01651 # y = v / w\n\ 01652 # This holds for both images of a stereo pair.\n\ 01653 float64[12] P # 3x4 row-major matrix\n\ 01654 \n\ 01655 \n\ 01656 #######################################################################\n\ 01657 # Operational Parameters #\n\ 01658 #######################################################################\n\ 01659 # These define the image region actually captured by the camera #\n\ 01660 # driver. Although they affect the geometry of the output image, they #\n\ 01661 # may be changed freely without recalibrating the camera. 
#\n\ 01662 #######################################################################\n\ 01663 \n\ 01664 # Binning refers here to any camera setting which combines rectangular\n\ 01665 # neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\ 01666 # resolution of the output image to\n\ 01667 # (width / binning_x) x (height / binning_y).\n\ 01668 # The default values binning_x = binning_y = 0 is considered the same\n\ 01669 # as binning_x = binning_y = 1 (no subsampling).\n\ 01670 uint32 binning_x\n\ 01671 uint32 binning_y\n\ 01672 \n\ 01673 # Region of interest (subwindow of full camera resolution), given in\n\ 01674 # full resolution (unbinned) image coordinates. A particular ROI\n\ 01675 # always denotes the same window of pixels on the camera sensor,\n\ 01676 # regardless of binning settings.\n\ 01677 # The default setting of roi (all values 0) is considered the same as\n\ 01678 # full resolution (roi.width = width, roi.height = height).\n\ 01679 RegionOfInterest roi\n\ 01680 \n\ 01681 ================================================================================\n\ 01682 MSG: sensor_msgs/RegionOfInterest\n\ 01683 # This message is used to specify a region of interest within an image.\n\ 01684 #\n\ 01685 # When used to specify the ROI setting of the camera when the image was\n\ 01686 # taken, the height and width fields should either match the height and\n\ 01687 # width fields for the associated image; or height = width = 0\n\ 01688 # indicates that the full resolution image was captured.\n\ 01689 \n\ 01690 uint32 x_offset # Leftmost pixel of the ROI\n\ 01691 # (0 if the ROI includes the left edge of the image)\n\ 01692 uint32 y_offset # Topmost pixel of the ROI\n\ 01693 # (0 if the ROI includes the top edge of the image)\n\ 01694 uint32 height # Height of ROI\n\ 01695 uint32 width # Width of ROI\n\ 01696 \n\ 01697 # True if a distinct rectified ROI should be calculated from the \"raw\"\n\ 01698 # ROI in this message. 
Typically this should be False if the full image\n\ 01699 # is captured (ROI not used), and True if a subwindow is captured (ROI\n\ 01700 # used).\n\ 01701 bool do_rectify\n\ 01702 \n\ 01703 ================================================================================\n\ 01704 MSG: geometry_msgs/Vector3\n\ 01705 # This represents a vector in free space. \n\ 01706 \n\ 01707 float64 x\n\ 01708 float64 y\n\ 01709 float64 z\n\ 01710 ================================================================================\n\ 01711 MSG: object_manipulation_msgs/Grasp\n\ 01712 \n\ 01713 # The internal posture of the hand for the pre-grasp\n\ 01714 # only positions are used\n\ 01715 sensor_msgs/JointState pre_grasp_posture\n\ 01716 \n\ 01717 # The internal posture of the hand for the grasp\n\ 01718 # positions and efforts are used\n\ 01719 sensor_msgs/JointState grasp_posture\n\ 01720 \n\ 01721 # The position of the end-effector for the grasp relative to a reference frame \n\ 01722 # (that is always specified elsewhere, not in this message)\n\ 01723 geometry_msgs/Pose grasp_pose\n\ 01724 \n\ 01725 # The estimated probability of success for this grasp\n\ 01726 float64 success_probability\n\ 01727 \n\ 01728 # Debug flag to indicate that this grasp would be the best in its cluster\n\ 01729 bool cluster_rep\n\ 01730 \n\ 01731 # how far the pre-grasp should ideally be away from the grasp\n\ 01732 float32 desired_approach_distance\n\ 01733 \n\ 01734 # how much distance between pre-grasp and grasp must actually be feasible \n\ 01735 # for the grasp not to be rejected\n\ 01736 float32 min_approach_distance\n\ 01737 \n\ 01738 # an optional list of obstacles that we have semantic information about\n\ 01739 # and that we expect might move in the course of executing this grasp\n\ 01740 # the grasp planner is expected to make sure they move in an OK way; during\n\ 01741 # execution, grasp executors will not check for collisions against these objects\n\ 01742 GraspableObject[] 
moved_obstacles\n\ 01743 \n\ 01744 ================================================================================\n\ 01745 MSG: sensor_msgs/JointState\n\ 01746 # This is a message that holds data to describe the state of a set of torque controlled joints. \n\ 01747 #\n\ 01748 # The state of each joint (revolute or prismatic) is defined by:\n\ 01749 # * the position of the joint (rad or m),\n\ 01750 # * the velocity of the joint (rad/s or m/s) and \n\ 01751 # * the effort that is applied in the joint (Nm or N).\n\ 01752 #\n\ 01753 # Each joint is uniquely identified by its name\n\ 01754 # The header specifies the time at which the joint states were recorded. All the joint states\n\ 01755 # in one message have to be recorded at the same time.\n\ 01756 #\n\ 01757 # This message consists of a multiple arrays, one for each part of the joint state. \n\ 01758 # The goal is to make each of the fields optional. When e.g. your joints have no\n\ 01759 # effort associated with them, you can leave the effort array empty. 
\n\
#\n\
# All arrays in this message should have the same size, or be empty.\n\
# This is the only way to uniquely associate the joint name with the correct\n\
# states.\n\
\n\
\n\
Header header\n\
\n\
string[] name\n\
float64[] position\n\
float64[] velocity\n\
float64[] effort\n\
\n\
";
  }

  // Convenience overload: ignores the message instance and forwards to the
  // zero-argument value() above.
  static const char* value(const ::object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> &) { return value(); }
};

} // namespace message_traits
} // namespace ros


namespace ros
{
namespace message_traits
{
// Marks GraspPlanningResponse_ (const and non-const) as a full ROS message
// type so the serialization machinery treats it as a message rather than a
// primitive. Auto-generated by genmsg_cpp; do not edit by hand.
template<class ContainerAllocator> struct IsMessage< ::object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> > : public TrueType {};
template<class ContainerAllocator> struct IsMessage< ::object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> const> : public TrueType {};
// MD5 checksum of the GraspPlanningResponse message definition; ROS compares
// it at connection time to verify both sides agree on the message layout.
template<class ContainerAllocator>
struct MD5Sum< ::object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> > {
  static const char* value()
  {
    return "998e3d06c509abfc46d7fdf96fe1c188";
  }

  static const char* value(const ::object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> &) { return value(); }
  // The same MD5, split into two 64-bit halves for compile-time comparison;
  // these constants must stay in sync with the string returned above.
  static const uint64_t static_value1 = 0x998e3d06c509abfcULL;
  static const uint64_t static_value2 = 0x46d7fdf96fe1c188ULL;
};

// Fully-qualified ROS datatype name ("package/MessageName") for the response.
template<class ContainerAllocator>
struct DataType< ::object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> > {
  static const char* value()
  {
    return "object_manipulation_msgs/GraspPlanningResponse";
  }

  static const char* value(const ::object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> &) { return value(); }
};

// Full plain-text definition of GraspPlanningResponse, including the
// definitions of all dependent messages (declaration continues below).
template<class ContainerAllocator>
struct Definition< 
::object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> > { 01813 static const char* value() 01814 { 01815 return "\n\ 01816 \n\ 01817 Grasp[] grasps\n\ 01818 \n\ 01819 \n\ 01820 GraspPlanningErrorCode error_code\n\ 01821 \n\ 01822 \n\ 01823 ================================================================================\n\ 01824 MSG: object_manipulation_msgs/Grasp\n\ 01825 \n\ 01826 # The internal posture of the hand for the pre-grasp\n\ 01827 # only positions are used\n\ 01828 sensor_msgs/JointState pre_grasp_posture\n\ 01829 \n\ 01830 # The internal posture of the hand for the grasp\n\ 01831 # positions and efforts are used\n\ 01832 sensor_msgs/JointState grasp_posture\n\ 01833 \n\ 01834 # The position of the end-effector for the grasp relative to a reference frame \n\ 01835 # (that is always specified elsewhere, not in this message)\n\ 01836 geometry_msgs/Pose grasp_pose\n\ 01837 \n\ 01838 # The estimated probability of success for this grasp\n\ 01839 float64 success_probability\n\ 01840 \n\ 01841 # Debug flag to indicate that this grasp would be the best in its cluster\n\ 01842 bool cluster_rep\n\ 01843 \n\ 01844 # how far the pre-grasp should ideally be away from the grasp\n\ 01845 float32 desired_approach_distance\n\ 01846 \n\ 01847 # how much distance between pre-grasp and grasp must actually be feasible \n\ 01848 # for the grasp not to be rejected\n\ 01849 float32 min_approach_distance\n\ 01850 \n\ 01851 # an optional list of obstacles that we have semantic information about\n\ 01852 # and that we expect might move in the course of executing this grasp\n\ 01853 # the grasp planner is expected to make sure they move in an OK way; during\n\ 01854 # execution, grasp executors will not check for collisions against these objects\n\ 01855 GraspableObject[] moved_obstacles\n\ 01856 \n\ 01857 ================================================================================\n\ 01858 MSG: sensor_msgs/JointState\n\ 01859 # This is a message that holds 
data to describe the state of a set of torque controlled joints. \n\ 01860 #\n\ 01861 # The state of each joint (revolute or prismatic) is defined by:\n\ 01862 # * the position of the joint (rad or m),\n\ 01863 # * the velocity of the joint (rad/s or m/s) and \n\ 01864 # * the effort that is applied in the joint (Nm or N).\n\ 01865 #\n\ 01866 # Each joint is uniquely identified by its name\n\ 01867 # The header specifies the time at which the joint states were recorded. All the joint states\n\ 01868 # in one message have to be recorded at the same time.\n\ 01869 #\n\ 01870 # This message consists of a multiple arrays, one for each part of the joint state. \n\ 01871 # The goal is to make each of the fields optional. When e.g. your joints have no\n\ 01872 # effort associated with them, you can leave the effort array empty. \n\ 01873 #\n\ 01874 # All arrays in this message should have the same size, or be empty.\n\ 01875 # This is the only way to uniquely associate the joint name with the correct\n\ 01876 # states.\n\ 01877 \n\ 01878 \n\ 01879 Header header\n\ 01880 \n\ 01881 string[] name\n\ 01882 float64[] position\n\ 01883 float64[] velocity\n\ 01884 float64[] effort\n\ 01885 \n\ 01886 ================================================================================\n\ 01887 MSG: std_msgs/Header\n\ 01888 # Standard metadata for higher-level stamped data types.\n\ 01889 # This is generally used to communicate timestamped data \n\ 01890 # in a particular coordinate frame.\n\ 01891 # \n\ 01892 # sequence ID: consecutively increasing ID \n\ 01893 uint32 seq\n\ 01894 #Two-integer timestamp that is expressed as:\n\ 01895 # * stamp.secs: seconds (stamp_secs) since epoch\n\ 01896 # * stamp.nsecs: nanoseconds since stamp_secs\n\ 01897 # time-handling sugar is provided by the client library\n\ 01898 time stamp\n\ 01899 #Frame this data is associated with\n\ 01900 # 0: no frame\n\ 01901 # 1: global frame\n\ 01902 string frame_id\n\ 01903 \n\ 01904 
================================================================================\n\ 01905 MSG: geometry_msgs/Pose\n\ 01906 # A representation of pose in free space, composed of postion and orientation. \n\ 01907 Point position\n\ 01908 Quaternion orientation\n\ 01909 \n\ 01910 ================================================================================\n\ 01911 MSG: geometry_msgs/Point\n\ 01912 # This contains the position of a point in free space\n\ 01913 float64 x\n\ 01914 float64 y\n\ 01915 float64 z\n\ 01916 \n\ 01917 ================================================================================\n\ 01918 MSG: geometry_msgs/Quaternion\n\ 01919 # This represents an orientation in free space in quaternion form.\n\ 01920 \n\ 01921 float64 x\n\ 01922 float64 y\n\ 01923 float64 z\n\ 01924 float64 w\n\ 01925 \n\ 01926 ================================================================================\n\ 01927 MSG: object_manipulation_msgs/GraspableObject\n\ 01928 # an object that the object_manipulator can work on\n\ 01929 \n\ 01930 # a graspable object can be represented in multiple ways. This message\n\ 01931 # can contain all of them. Which one is actually used is up to the receiver\n\ 01932 # of this message. 
When adding new representations, one must be careful that\n\ 01933 # they have reasonable lightweight defaults indicating that that particular\n\ 01934 # representation is not available.\n\ 01935 \n\ 01936 # the tf frame to be used as a reference frame when combining information from\n\ 01937 # the different representations below\n\ 01938 string reference_frame_id\n\ 01939 \n\ 01940 # potential recognition results from a database of models\n\ 01941 # all poses are relative to the object reference pose\n\ 01942 household_objects_database_msgs/DatabaseModelPose[] potential_models\n\ 01943 \n\ 01944 # the point cloud itself\n\ 01945 sensor_msgs/PointCloud cluster\n\ 01946 \n\ 01947 # a region of a PointCloud2 of interest\n\ 01948 object_manipulation_msgs/SceneRegion region\n\ 01949 \n\ 01950 # the name that this object has in the collision environment\n\ 01951 string collision_name\n\ 01952 ================================================================================\n\ 01953 MSG: household_objects_database_msgs/DatabaseModelPose\n\ 01954 # Informs that a specific model from the Model Database has been \n\ 01955 # identified at a certain location\n\ 01956 \n\ 01957 # the database id of the model\n\ 01958 int32 model_id\n\ 01959 \n\ 01960 # the pose that it can be found in\n\ 01961 geometry_msgs/PoseStamped pose\n\ 01962 \n\ 01963 # a measure of the confidence level in this detection result\n\ 01964 float32 confidence\n\ 01965 \n\ 01966 # the name of the object detector that generated this detection result\n\ 01967 string detector_name\n\ 01968 \n\ 01969 ================================================================================\n\ 01970 MSG: geometry_msgs/PoseStamped\n\ 01971 # A Pose with reference coordinate frame and timestamp\n\ 01972 Header header\n\ 01973 Pose pose\n\ 01974 \n\ 01975 ================================================================================\n\ 01976 MSG: sensor_msgs/PointCloud\n\ 01977 # This message holds a collection of 3d 
points, plus optional additional\n\ 01978 # information about each point.\n\ 01979 \n\ 01980 # Time of sensor data acquisition, coordinate frame ID.\n\ 01981 Header header\n\ 01982 \n\ 01983 # Array of 3d points. Each Point32 should be interpreted as a 3d point\n\ 01984 # in the frame given in the header.\n\ 01985 geometry_msgs/Point32[] points\n\ 01986 \n\ 01987 # Each channel should have the same number of elements as points array,\n\ 01988 # and the data in each channel should correspond 1:1 with each point.\n\ 01989 # Channel names in common practice are listed in ChannelFloat32.msg.\n\ 01990 ChannelFloat32[] channels\n\ 01991 \n\ 01992 ================================================================================\n\ 01993 MSG: geometry_msgs/Point32\n\ 01994 # This contains the position of a point in free space(with 32 bits of precision).\n\ 01995 # It is recommeded to use Point wherever possible instead of Point32. \n\ 01996 # \n\ 01997 # This recommendation is to promote interoperability. \n\ 01998 #\n\ 01999 # This message is designed to take up less space when sending\n\ 02000 # lots of points at once, as in the case of a PointCloud. \n\ 02001 \n\ 02002 float32 x\n\ 02003 float32 y\n\ 02004 float32 z\n\ 02005 ================================================================================\n\ 02006 MSG: sensor_msgs/ChannelFloat32\n\ 02007 # This message is used by the PointCloud message to hold optional data\n\ 02008 # associated with each point in the cloud. The length of the values\n\ 02009 # array should be the same as the length of the points array in the\n\ 02010 # PointCloud, and each value should be associated with the corresponding\n\ 02011 # point.\n\ 02012 \n\ 02013 # Channel names in existing practice include:\n\ 02014 # \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\ 02015 # This is opposite to usual conventions but remains for\n\ 02016 # historical reasons. 
The newer PointCloud2 message has no\n\ 02017 # such problem.\n\ 02018 # \"rgb\" - For point clouds produced by color stereo cameras. uint8\n\ 02019 # (R,G,B) values packed into the least significant 24 bits,\n\ 02020 # in order.\n\ 02021 # \"intensity\" - laser or pixel intensity.\n\ 02022 # \"distance\"\n\ 02023 \n\ 02024 # The channel name should give semantics of the channel (e.g.\n\ 02025 # \"intensity\" instead of \"value\").\n\ 02026 string name\n\ 02027 \n\ 02028 # The values array should be 1-1 with the elements of the associated\n\ 02029 # PointCloud.\n\ 02030 float32[] values\n\ 02031 \n\ 02032 ================================================================================\n\ 02033 MSG: object_manipulation_msgs/SceneRegion\n\ 02034 # Point cloud\n\ 02035 sensor_msgs/PointCloud2 cloud\n\ 02036 \n\ 02037 # Indices for the region of interest\n\ 02038 int32[] mask\n\ 02039 \n\ 02040 # One of the corresponding 2D images, if applicable\n\ 02041 sensor_msgs/Image image\n\ 02042 \n\ 02043 # The disparity image, if applicable\n\ 02044 sensor_msgs/Image disparity_image\n\ 02045 \n\ 02046 # Camera info for the camera that took the image\n\ 02047 sensor_msgs/CameraInfo cam_info\n\ 02048 \n\ 02049 # a 3D region of interest for grasp planning\n\ 02050 geometry_msgs/PoseStamped roi_box_pose\n\ 02051 geometry_msgs/Vector3 roi_box_dims\n\ 02052 \n\ 02053 ================================================================================\n\ 02054 MSG: sensor_msgs/PointCloud2\n\ 02055 # This message holds a collection of N-dimensional points, which may\n\ 02056 # contain additional information such as normals, intensity, etc. The\n\ 02057 # point data is stored as a binary blob, its layout described by the\n\ 02058 # contents of the \"fields\" array.\n\ 02059 \n\ 02060 # The point cloud data may be organized 2d (image-like) or 1d\n\ 02061 # (unordered). 
Point clouds organized as 2d images may be produced by\n\ 02062 # camera depth sensors such as stereo or time-of-flight.\n\ 02063 \n\ 02064 # Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\ 02065 # points).\n\ 02066 Header header\n\ 02067 \n\ 02068 # 2D structure of the point cloud. If the cloud is unordered, height is\n\ 02069 # 1 and width is the length of the point cloud.\n\ 02070 uint32 height\n\ 02071 uint32 width\n\ 02072 \n\ 02073 # Describes the channels and their layout in the binary data blob.\n\ 02074 PointField[] fields\n\ 02075 \n\ 02076 bool is_bigendian # Is this data bigendian?\n\ 02077 uint32 point_step # Length of a point in bytes\n\ 02078 uint32 row_step # Length of a row in bytes\n\ 02079 uint8[] data # Actual point data, size is (row_step*height)\n\ 02080 \n\ 02081 bool is_dense # True if there are no invalid points\n\ 02082 \n\ 02083 ================================================================================\n\ 02084 MSG: sensor_msgs/PointField\n\ 02085 # This message holds the description of one point entry in the\n\ 02086 # PointCloud2 message format.\n\ 02087 uint8 INT8 = 1\n\ 02088 uint8 UINT8 = 2\n\ 02089 uint8 INT16 = 3\n\ 02090 uint8 UINT16 = 4\n\ 02091 uint8 INT32 = 5\n\ 02092 uint8 UINT32 = 6\n\ 02093 uint8 FLOAT32 = 7\n\ 02094 uint8 FLOAT64 = 8\n\ 02095 \n\ 02096 string name # Name of field\n\ 02097 uint32 offset # Offset from start of point struct\n\ 02098 uint8 datatype # Datatype enumeration, see above\n\ 02099 uint32 count # How many elements in the field\n\ 02100 \n\ 02101 ================================================================================\n\ 02102 MSG: sensor_msgs/Image\n\ 02103 # This message contains an uncompressed image\n\ 02104 # (0, 0) is at top-left corner of image\n\ 02105 #\n\ 02106 \n\ 02107 Header header # Header timestamp should be acquisition time of image\n\ 02108 # Header frame_id should be optical frame of camera\n\ 02109 # origin of frame should be optical center of 
cameara\n\ 02110 # +x should point to the right in the image\n\ 02111 # +y should point down in the image\n\ 02112 # +z should point into to plane of the image\n\ 02113 # If the frame_id here and the frame_id of the CameraInfo\n\ 02114 # message associated with the image conflict\n\ 02115 # the behavior is undefined\n\ 02116 \n\ 02117 uint32 height # image height, that is, number of rows\n\ 02118 uint32 width # image width, that is, number of columns\n\ 02119 \n\ 02120 # The legal values for encoding are in file src/image_encodings.cpp\n\ 02121 # If you want to standardize a new string format, join\n\ 02122 # ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\ 02123 \n\ 02124 string encoding # Encoding of pixels -- channel meaning, ordering, size\n\ 02125 # taken from the list of strings in src/image_encodings.cpp\n\ 02126 \n\ 02127 uint8 is_bigendian # is this data bigendian?\n\ 02128 uint32 step # Full row length in bytes\n\ 02129 uint8[] data # actual matrix data, size is (step * rows)\n\ 02130 \n\ 02131 ================================================================================\n\ 02132 MSG: sensor_msgs/CameraInfo\n\ 02133 # This message defines meta information for a camera. It should be in a\n\ 02134 # camera namespace on topic \"camera_info\" and accompanied by up to five\n\ 02135 # image topics named:\n\ 02136 #\n\ 02137 # image_raw - raw data from the camera driver, possibly Bayer encoded\n\ 02138 # image - monochrome, distorted\n\ 02139 # image_color - color, distorted\n\ 02140 # image_rect - monochrome, rectified\n\ 02141 # image_rect_color - color, rectified\n\ 02142 #\n\ 02143 # The image_pipeline contains packages (image_proc, stereo_image_proc)\n\ 02144 # for producing the four processed image topics from image_raw and\n\ 02145 # camera_info. 
The meaning of the camera parameters are described in\n\ 02146 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\ 02147 #\n\ 02148 # The image_geometry package provides a user-friendly interface to\n\ 02149 # common operations using this meta information. If you want to, e.g.,\n\ 02150 # project a 3d point into image coordinates, we strongly recommend\n\ 02151 # using image_geometry.\n\ 02152 #\n\ 02153 # If the camera is uncalibrated, the matrices D, K, R, P should be left\n\ 02154 # zeroed out. In particular, clients may assume that K[0] == 0.0\n\ 02155 # indicates an uncalibrated camera.\n\ 02156 \n\ 02157 #######################################################################\n\ 02158 # Image acquisition info #\n\ 02159 #######################################################################\n\ 02160 \n\ 02161 # Time of image acquisition, camera coordinate frame ID\n\ 02162 Header header # Header timestamp should be acquisition time of image\n\ 02163 # Header frame_id should be optical frame of camera\n\ 02164 # origin of frame should be optical center of camera\n\ 02165 # +x should point to the right in the image\n\ 02166 # +y should point down in the image\n\ 02167 # +z should point into the plane of the image\n\ 02168 \n\ 02169 \n\ 02170 #######################################################################\n\ 02171 # Calibration Parameters #\n\ 02172 #######################################################################\n\ 02173 # These are fixed during camera calibration. Their values will be the #\n\ 02174 # same in all messages until the camera is recalibrated. Note that #\n\ 02175 # self-calibrating systems may \"recalibrate\" frequently. #\n\ 02176 # #\n\ 02177 # The internal parameters can be used to warp a raw (distorted) image #\n\ 02178 # to: #\n\ 02179 # 1. An undistorted image (requires D and K) #\n\ 02180 # 2. 
A rectified image (requires D, K, R) #\n\ 02181 # The projection matrix P projects 3D points into the rectified image.#\n\ 02182 #######################################################################\n\ 02183 \n\ 02184 # The image dimensions with which the camera was calibrated. Normally\n\ 02185 # this will be the full camera resolution in pixels.\n\ 02186 uint32 height\n\ 02187 uint32 width\n\ 02188 \n\ 02189 # The distortion model used. Supported models are listed in\n\ 02190 # sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\ 02191 # simple model of radial and tangential distortion - is sufficent.\n\ 02192 string distortion_model\n\ 02193 \n\ 02194 # The distortion parameters, size depending on the distortion model.\n\ 02195 # For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\ 02196 float64[] D\n\ 02197 \n\ 02198 # Intrinsic camera matrix for the raw (distorted) images.\n\ 02199 # [fx 0 cx]\n\ 02200 # K = [ 0 fy cy]\n\ 02201 # [ 0 0 1]\n\ 02202 # Projects 3D points in the camera coordinate frame to 2D pixel\n\ 02203 # coordinates using the focal lengths (fx, fy) and principal point\n\ 02204 # (cx, cy).\n\ 02205 float64[9] K # 3x3 row-major matrix\n\ 02206 \n\ 02207 # Rectification matrix (stereo cameras only)\n\ 02208 # A rotation matrix aligning the camera coordinate system to the ideal\n\ 02209 # stereo image plane so that epipolar lines in both stereo images are\n\ 02210 # parallel.\n\ 02211 float64[9] R # 3x3 row-major matrix\n\ 02212 \n\ 02213 # Projection/camera matrix\n\ 02214 # [fx' 0 cx' Tx]\n\ 02215 # P = [ 0 fy' cy' Ty]\n\ 02216 # [ 0 0 1 0]\n\ 02217 # By convention, this matrix specifies the intrinsic (camera) matrix\n\ 02218 # of the processed (rectified) image. 
That is, the left 3x3 portion\n\ 02219 # is the normal camera intrinsic matrix for the rectified image.\n\ 02220 # It projects 3D points in the camera coordinate frame to 2D pixel\n\ 02221 # coordinates using the focal lengths (fx', fy') and principal point\n\ 02222 # (cx', cy') - these may differ from the values in K.\n\ 02223 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\ 02224 # also have R = the identity and P[1:3,1:3] = K.\n\ 02225 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\ 02226 # position of the optical center of the second camera in the first\n\ 02227 # camera's frame. We assume Tz = 0 so both cameras are in the same\n\ 02228 # stereo image plane. The first camera always has Tx = Ty = 0. For\n\ 02229 # the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\ 02230 # Tx = -fx' * B, where B is the baseline between the cameras.\n\ 02231 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\ 02232 # the rectified image is given by:\n\ 02233 # [u v w]' = P * [X Y Z 1]'\n\ 02234 # x = u / w\n\ 02235 # y = v / w\n\ 02236 # This holds for both images of a stereo pair.\n\ 02237 float64[12] P # 3x4 row-major matrix\n\ 02238 \n\ 02239 \n\ 02240 #######################################################################\n\ 02241 # Operational Parameters #\n\ 02242 #######################################################################\n\ 02243 # These define the image region actually captured by the camera #\n\ 02244 # driver. Although they affect the geometry of the output image, they #\n\ 02245 # may be changed freely without recalibrating the camera. 
#\n\ 02246 #######################################################################\n\ 02247 \n\ 02248 # Binning refers here to any camera setting which combines rectangular\n\ 02249 # neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\ 02250 # resolution of the output image to\n\ 02251 # (width / binning_x) x (height / binning_y).\n\ 02252 # The default values binning_x = binning_y = 0 is considered the same\n\ 02253 # as binning_x = binning_y = 1 (no subsampling).\n\ 02254 uint32 binning_x\n\ 02255 uint32 binning_y\n\ 02256 \n\ 02257 # Region of interest (subwindow of full camera resolution), given in\n\ 02258 # full resolution (unbinned) image coordinates. A particular ROI\n\ 02259 # always denotes the same window of pixels on the camera sensor,\n\ 02260 # regardless of binning settings.\n\ 02261 # The default setting of roi (all values 0) is considered the same as\n\ 02262 # full resolution (roi.width = width, roi.height = height).\n\ 02263 RegionOfInterest roi\n\ 02264 \n\ 02265 ================================================================================\n\ 02266 MSG: sensor_msgs/RegionOfInterest\n\ 02267 # This message is used to specify a region of interest within an image.\n\ 02268 #\n\ 02269 # When used to specify the ROI setting of the camera when the image was\n\ 02270 # taken, the height and width fields should either match the height and\n\ 02271 # width fields for the associated image; or height = width = 0\n\ 02272 # indicates that the full resolution image was captured.\n\ 02273 \n\ 02274 uint32 x_offset # Leftmost pixel of the ROI\n\ 02275 # (0 if the ROI includes the left edge of the image)\n\ 02276 uint32 y_offset # Topmost pixel of the ROI\n\ 02277 # (0 if the ROI includes the top edge of the image)\n\ 02278 uint32 height # Height of ROI\n\ 02279 uint32 width # Width of ROI\n\ 02280 \n\ 02281 # True if a distinct rectified ROI should be calculated from the \"raw\"\n\ 02282 # ROI in this message. 
Typically this should be False if the full image\n\ 02283 # is captured (ROI not used), and True if a subwindow is captured (ROI\n\ 02284 # used).\n\ 02285 bool do_rectify\n\ 02286 \n\ 02287 ================================================================================\n\ 02288 MSG: geometry_msgs/Vector3\n\ 02289 # This represents a vector in free space. \n\ 02290 \n\ 02291 float64 x\n\ 02292 float64 y\n\ 02293 float64 z\n\ 02294 ================================================================================\n\ 02295 MSG: object_manipulation_msgs/GraspPlanningErrorCode\n\ 02296 # Error codes for grasp and place planning\n\ 02297 \n\ 02298 # plan completed as expected\n\ 02299 int32 SUCCESS = 0\n\ 02300 \n\ 02301 # tf error encountered while transforming\n\ 02302 int32 TF_ERROR = 1 \n\ 02303 \n\ 02304 # some other error\n\ 02305 int32 OTHER_ERROR = 2\n\ 02306 \n\ 02307 # the actual value of this error code\n\ 02308 int32 value\n\ 02309 "; 02310 } 02311 /* Instance overload: ignores its argument and forwards to the nullary value() above, returning the same full message-definition text. */ 02312 static const char* value(const ::object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> &) { return value(); } 02313 }; 02314 02315 } // namespace message_traits 02316 } // namespace ros 02317 02318 namespace ros 02319 { 02320 namespace serialization 02321 { 02322 /* Serializer specialization for GraspPlanningRequest_: streams every request field in .srv declaration order. NOTE(review): generated by genmsg_cpp — do not hand-edit the field order; it defines the wire format. */ 02323 template<class ContainerAllocator> struct Serializer< ::object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> > 02324 { 02325 template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m) 02326 { 02327 stream.next(m.arm_name); 02328 stream.next(m.target); 02329 stream.next(m.collision_object_name); 02330 stream.next(m.collision_support_surface_name); 02331 stream.next(m.grasps_to_evaluate); 02332 stream.next(m.movable_obstacles); 02333 } 02334 /* Standard ROS macro: generates write/read/serializedLength, all delegating to allInOne. */ 02335 ROS_DECLARE_ALLINONE_SERIALIZER; 02336 }; // struct GraspPlanningRequest_ 02337 } // namespace serialization 02338 } // namespace ros 02339 02340 02341 namespace ros 02342 { 02343 namespace serialization 02344 { 02345 02346 template<class
ContainerAllocator> struct Serializer< ::object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> > 02347 { 02348 template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m) 02349 { 02350 stream.next(m.grasps); 02351 stream.next(m.error_code); 02352 } 02353 /* Standard ROS macro: generates write/read/serializedLength, all delegating to allInOne. */ 02354 ROS_DECLARE_ALLINONE_SERIALIZER; 02355 }; // struct GraspPlanningResponse_ 02356 } // namespace serialization 02357 } // namespace ros 02358 02359 namespace ros 02360 { 02361 namespace service_traits 02362 { /* service_traits: md5sum/datatype advertised in the ROS service connection handshake. The service, its Request, and its Response all report the same service-level md5sum and datatype string — generator-computed values; never edit by hand. */ 02363 template<> 02364 struct MD5Sum<object_manipulation_msgs::GraspPlanning> { 02365 static const char* value() 02366 { 02367 return "01a11c1cdea613dd8705f368e1dc93dc"; 02368 } 02369 02370 static const char* value(const object_manipulation_msgs::GraspPlanning&) { return value(); } 02371 }; 02372 02373 template<> 02374 struct DataType<object_manipulation_msgs::GraspPlanning> { 02375 static const char* value() 02376 { 02377 return "object_manipulation_msgs/GraspPlanning"; 02378 } 02379 02380 static const char* value(const object_manipulation_msgs::GraspPlanning&) { return value(); } 02381 }; 02382 /* Request-side traits deliberately mirror the service-level md5sum/datatype above. */ 02383 template<class ContainerAllocator> 02384 struct MD5Sum<object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> > { 02385 static const char* value() 02386 { 02387 return "01a11c1cdea613dd8705f368e1dc93dc"; 02388 } 02389 02390 static const char* value(const object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> &) { return value(); } 02391 }; 02392 02393 template<class ContainerAllocator> 02394 struct DataType<object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> > { 02395 static const char* value() 02396 { 02397 return "object_manipulation_msgs/GraspPlanning"; 02398 } 02399 02400 static const char* value(const object_manipulation_msgs::GraspPlanningRequest_<ContainerAllocator> &) { return value(); } 02401 }; 02402 02403 template<class ContainerAllocator> 02404 struct
MD5Sum<object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> > { 02405 static const char* value() 02406 { 02407 return "01a11c1cdea613dd8705f368e1dc93dc"; 02408 } 02409 02410 static const char* value(const object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> &) { return value(); } 02411 }; 02412 /* Response-side DataType also reports the service-level datatype, matching the request side. */ 02413 template<class ContainerAllocator> 02414 struct DataType<object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> > { 02415 static const char* value() 02416 { 02417 return "object_manipulation_msgs/GraspPlanning"; 02418 } 02419 02420 static const char* value(const object_manipulation_msgs::GraspPlanningResponse_<ContainerAllocator> &) { return value(); } 02421 }; 02422 02423 } // namespace service_traits 02424 } // namespace ros 02425 /* Closes the include guard opened at the top of this generated header. */ 02426 #endif // OBJECT_MANIPULATION_MSGS_SERVICE_GRASPPLANNING_H 02427