
#ifndef OBJECT_MANIPULATION_MSGS_SERVICE_PLACEPLANNING_H
#define OBJECT_MANIPULATION_MSGS_SERVICE_PLACEPLANNING_H
#include <string>
#include <vector>
#include <ostream>
#include "ros/serialization.h"
#include "ros/builtin_message_traits.h"
#include "ros/message_operations.h"
#include "ros/message.h"
#include "ros/time.h"

#include "ros/service_traits.h"

#include "object_manipulation_msgs/GraspableObject.h"
#include "geometry_msgs/Quaternion.h"
#include "geometry_msgs/Pose.h"


#include "geometry_msgs/PoseStamped.h"
#include "object_manipulation_msgs/GraspPlanningErrorCode.h"

namespace object_manipulation_msgs
{
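// Request half of the PlacePlanning service, auto-generated from the service
// definition embedded in __s_getMessageDefinition_() below. It carries the
// arm name, the graspable target object, a default orientation, the grasp
// pose, and the collision names of the object and of its support surface.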
template <class ContainerAllocator>
struct PlacePlanningRequest_ : public ros::Message
{
  typedef PlacePlanningRequest_<ContainerAllocator> Type;

  PlacePlanningRequest_()
  : arm_name()
  , target()
  , default_orientation()
  , grasp_pose()
  , collision_object_name()
  , collision_support_surface_name()
  {
  }

  PlacePlanningRequest_(const ContainerAllocator& _alloc)
  : arm_name(_alloc)
  , target(_alloc)
  , default_orientation(_alloc)
  , grasp_pose(_alloc)
  , collision_object_name(_alloc)
  , collision_support_surface_name(_alloc)
  {
  }

  typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _arm_name_type;
  std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > arm_name;

  typedef ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> _target_type;
  ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> target;

  typedef ::geometry_msgs::Quaternion_<ContainerAllocator> _default_orientation_type;
  ::geometry_msgs::Quaternion_<ContainerAllocator> default_orientation;

  typedef ::geometry_msgs::Pose_<ContainerAllocator> _grasp_pose_type;
  ::geometry_msgs::Pose_<ContainerAllocator> grasp_pose;

  typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _collision_object_name_type;
  std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > collision_object_name;

  typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _collision_support_surface_name_type;
  std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > collision_support_surface_name;


private:
  static const char* __s_getDataType_() { return "object_manipulation_msgs/PlacePlanningRequest"; }
public:
  ROS_DEPRECATED static const std::string __s_getDataType() { return __s_getDataType_(); }

  ROS_DEPRECATED const std::string __getDataType() const { return __s_getDataType_(); }

private:
  static const char* __s_getMD5Sum_() { return "93595ec6c0f82eb04bf5107b29b21823"; }
public:
  ROS_DEPRECATED static const std::string __s_getMD5Sum() { return __s_getMD5Sum_(); }

  ROS_DEPRECATED const std::string __getMD5Sum() const { return __s_getMD5Sum_(); }

private:
  static const char* __s_getServerMD5Sum_() { return "3f8bf1509af52b03e7578ed04861e277"; }
public:
  ROS_DEPRECATED static const std::string __s_getServerMD5Sum() { return __s_getServerMD5Sum_(); }

  ROS_DEPRECATED const std::string __getServerMD5Sum() const { return __s_getServerMD5Sum_(); }

private:
  static const char* __s_getMessageDefinition_() { return "\n\
\n\
\n\
string arm_name\n\
\n\
\n\
GraspableObject target\n\
\n\
\n\
\n\
\n\
geometry_msgs/Quaternion default_orientation\n\
\n\
\n\
geometry_msgs/Pose grasp_pose\n\
\n\
\n\
\n\
string collision_object_name\n\
\n\
\n\
\n\
string collision_support_surface_name\n\
\n\
\n\
================================================================================\n\
MSG: object_manipulation_msgs/GraspableObject\n\
# an object that the object_manipulator can work on\n\
\n\
# a graspable object can be represented in multiple ways. This message\n\
# can contain all of them. Which one is actually used is up to the receiver\n\
# of this message. When adding new representations, one must be careful that\n\
# they have reasonable lightweight defaults indicating that that particular\n\
# representation is not available.\n\
\n\
# the tf frame to be used as a reference frame when combining information from\n\
# the different representations below\n\
string reference_frame_id\n\
\n\
# potential recognition results from a database of models\n\
# all poses are relative to the object reference pose\n\
household_objects_database_msgs/DatabaseModelPose[] potential_models\n\
\n\
# the point cloud itself\n\
sensor_msgs/PointCloud cluster\n\
\n\
# a region of a PointCloud2 of interest\n\
object_manipulation_msgs/SceneRegion region\n\
\n\
\n\
================================================================================\n\
MSG: household_objects_database_msgs/DatabaseModelPose\n\
# Informs that a specific model from the Model Database has been \n\
# identified at a certain location\n\
\n\
# the database id of the model\n\
int32 model_id\n\
\n\
# the pose that it can be found in\n\
geometry_msgs/PoseStamped pose\n\
\n\
# a measure of the confidence level in this detection result\n\
float32 confidence\n\
================================================================================\n\
MSG: geometry_msgs/PoseStamped\n\
# A Pose with reference coordinate frame and timestamp\n\
Header header\n\
Pose pose\n\
\n\
================================================================================\n\
MSG: std_msgs/Header\n\
# Standard metadata for higher-level stamped data types.\n\
# This is generally used to communicate timestamped data \n\
# in a particular coordinate frame.\n\
# \n\
# sequence ID: consecutively increasing ID \n\
uint32 seq\n\
#Two-integer timestamp that is expressed as:\n\
# * stamp.secs: seconds (stamp_secs) since epoch\n\
# * stamp.nsecs: nanoseconds since stamp_secs\n\
# time-handling sugar is provided by the client library\n\
time stamp\n\
#Frame this data is associated with\n\
# 0: no frame\n\
# 1: global frame\n\
string frame_id\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Pose\n\
# A representation of pose in free space, composed of postion and orientation. \n\
Point position\n\
Quaternion orientation\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point\n\
# This contains the position of a point in free space\n\
float64 x\n\
float64 y\n\
float64 z\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Quaternion\n\
# This represents an orientation in free space in quaternion form.\n\
\n\
float64 x\n\
float64 y\n\
float64 z\n\
float64 w\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointCloud\n\
# This message holds a collection of 3d points, plus optional additional\n\
# information about each point.\n\
\n\
# Time of sensor data acquisition, coordinate frame ID.\n\
Header header\n\
\n\
# Array of 3d points. Each Point32 should be interpreted as a 3d point\n\
# in the frame given in the header.\n\
geometry_msgs/Point32[] points\n\
\n\
# Each channel should have the same number of elements as points array,\n\
# and the data in each channel should correspond 1:1 with each point.\n\
# Channel names in common practice are listed in ChannelFloat32.msg.\n\
ChannelFloat32[] channels\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point32\n\
# This contains the position of a point in free space(with 32 bits of precision).\n\
# It is recommeded to use Point wherever possible instead of Point32. \n\
# \n\
# This recommendation is to promote interoperability. \n\
#\n\
# This message is designed to take up less space when sending\n\
# lots of points at once, as in the case of a PointCloud. \n\
\n\
float32 x\n\
float32 y\n\
float32 z\n\
================================================================================\n\
MSG: sensor_msgs/ChannelFloat32\n\
# This message is used by the PointCloud message to hold optional data\n\
# associated with each point in the cloud. The length of the values\n\
# array should be the same as the length of the points array in the\n\
# PointCloud, and each value should be associated with the corresponding\n\
# point.\n\
\n\
# Channel names in existing practice include:\n\
# \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\
# This is opposite to usual conventions but remains for\n\
# historical reasons. The newer PointCloud2 message has no\n\
# such problem.\n\
# \"rgb\" - For point clouds produced by color stereo cameras. uint8\n\
# (R,G,B) values packed into the least significant 24 bits,\n\
# in order.\n\
# \"intensity\" - laser or pixel intensity.\n\
# \"distance\"\n\
\n\
# The channel name should give semantics of the channel (e.g.\n\
# \"intensity\" instead of \"value\").\n\
string name\n\
\n\
# The values array should be 1-1 with the elements of the associated\n\
# PointCloud.\n\
float32[] values\n\
\n\
================================================================================\n\
MSG: object_manipulation_msgs/SceneRegion\n\
# Point cloud\n\
sensor_msgs/PointCloud2 cloud\n\
\n\
# Indices for the region of interest\n\
int32[] mask\n\
\n\
# One of the corresponding 2D images, if applicable\n\
sensor_msgs/Image image\n\
\n\
# The disparity image, if applicable\n\
sensor_msgs/Image disparity_image\n\
\n\
# Camera info for the camera that took the image\n\
sensor_msgs/CameraInfo cam_info\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointCloud2\n\
# This message holds a collection of N-dimensional points, which may\n\
# contain additional information such as normals, intensity, etc. The\n\
# point data is stored as a binary blob, its layout described by the\n\
# contents of the \"fields\" array.\n\
\n\
# The point cloud data may be organized 2d (image-like) or 1d\n\
# (unordered). Point clouds organized as 2d images may be produced by\n\
# camera depth sensors such as stereo or time-of-flight.\n\
\n\
# Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\
# points).\n\
Header header\n\
\n\
# 2D structure of the point cloud. If the cloud is unordered, height is\n\
# 1 and width is the length of the point cloud.\n\
uint32 height\n\
uint32 width\n\
\n\
# Describes the channels and their layout in the binary data blob.\n\
PointField[] fields\n\
\n\
bool is_bigendian # Is this data bigendian?\n\
uint32 point_step # Length of a point in bytes\n\
uint32 row_step # Length of a row in bytes\n\
uint8[] data # Actual point data, size is (row_step*height)\n\
\n\
bool is_dense # True if there are no invalid points\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointField\n\
# This message holds the description of one point entry in the\n\
# PointCloud2 message format.\n\
uint8 INT8 = 1\n\
uint8 UINT8 = 2\n\
uint8 INT16 = 3\n\
uint8 UINT16 = 4\n\
uint8 INT32 = 5\n\
uint8 UINT32 = 6\n\
uint8 FLOAT32 = 7\n\
uint8 FLOAT64 = 8\n\
\n\
string name # Name of field\n\
uint32 offset # Offset from start of point struct\n\
uint8 datatype # Datatype enumeration, see above\n\
uint32 count # How many elements in the field\n\
\n\
================================================================================\n\
MSG: sensor_msgs/Image\n\
# This message contains an uncompressed image\n\
# (0, 0) is at top-left corner of image\n\
#\n\
\n\
Header header # Header timestamp should be acquisition time of image\n\
# Header frame_id should be optical frame of camera\n\
# origin of frame should be optical center of cameara\n\
# +x should point to the right in the image\n\
# +y should point down in the image\n\
# +z should point into to plane of the image\n\
# If the frame_id here and the frame_id of the CameraInfo\n\
# message associated with the image conflict\n\
# the behavior is undefined\n\
\n\
uint32 height # image height, that is, number of rows\n\
uint32 width # image width, that is, number of columns\n\
\n\
# The legal values for encoding are in file src/image_encodings.cpp\n\
# If you want to standardize a new string format, join\n\
# ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\
\n\
string encoding # Encoding of pixels -- channel meaning, ordering, size\n\
# taken from the list of strings in src/image_encodings.cpp\n\
\n\
uint8 is_bigendian # is this data bigendian?\n\
uint32 step # Full row length in bytes\n\
uint8[] data # actual matrix data, size is (step * rows)\n\
\n\
================================================================================\n\
MSG: sensor_msgs/CameraInfo\n\
# This message defines meta information for a camera. It should be in a\n\
# camera namespace on topic \"camera_info\" and accompanied by up to five\n\
# image topics named:\n\
#\n\
# image_raw - raw data from the camera driver, possibly Bayer encoded\n\
# image - monochrome, distorted\n\
# image_color - color, distorted\n\
# image_rect - monochrome, rectified\n\
# image_rect_color - color, rectified\n\
#\n\
# The image_pipeline contains packages (image_proc, stereo_image_proc)\n\
# for producing the four processed image topics from image_raw and\n\
# camera_info. The meaning of the camera parameters are described in\n\
# detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\
#\n\
# The image_geometry package provides a user-friendly interface to\n\
# common operations using this meta information. If you want to, e.g.,\n\
# project a 3d point into image coordinates, we strongly recommend\n\
# using image_geometry.\n\
#\n\
# If the camera is uncalibrated, the matrices D, K, R, P should be left\n\
# zeroed out. In particular, clients may assume that K[0] == 0.0\n\
# indicates an uncalibrated camera.\n\
\n\
#######################################################################\n\
# Image acquisition info #\n\
#######################################################################\n\
\n\
# Time of image acquisition, camera coordinate frame ID\n\
Header header # Header timestamp should be acquisition time of image\n\
# Header frame_id should be optical frame of camera\n\
# origin of frame should be optical center of camera\n\
# +x should point to the right in the image\n\
# +y should point down in the image\n\
# +z should point into the plane of the image\n\
\n\
\n\
#######################################################################\n\
# Calibration Parameters #\n\
#######################################################################\n\
# These are fixed during camera calibration. Their values will be the #\n\
# same in all messages until the camera is recalibrated. Note that #\n\
# self-calibrating systems may \"recalibrate\" frequently. #\n\
# #\n\
# The internal parameters can be used to warp a raw (distorted) image #\n\
# to: #\n\
# 1. An undistorted image (requires D and K) #\n\
# 2. A rectified image (requires D, K, R) #\n\
# The projection matrix P projects 3D points into the rectified image.#\n\
#######################################################################\n\
\n\
# The image dimensions with which the camera was calibrated. Normally\n\
# this will be the full camera resolution in pixels.\n\
uint32 height\n\
uint32 width\n\
\n\
# The distortion model used. Supported models are listed in\n\
# sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\
# simple model of radial and tangential distortion - is sufficent.\n\
string distortion_model\n\
\n\
# The distortion parameters, size depending on the distortion model.\n\
# For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\
float64[] D\n\
\n\
# Intrinsic camera matrix for the raw (distorted) images.\n\
# [fx 0 cx]\n\
# K = [ 0 fy cy]\n\
# [ 0 0 1]\n\
# Projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx, fy) and principal point\n\
# (cx, cy).\n\
float64[9] K # 3x3 row-major matrix\n\
\n\
# Rectification matrix (stereo cameras only)\n\
# A rotation matrix aligning the camera coordinate system to the ideal\n\
# stereo image plane so that epipolar lines in both stereo images are\n\
# parallel.\n\
float64[9] R # 3x3 row-major matrix\n\
\n\
# Projection/camera matrix\n\
# [fx' 0 cx' Tx]\n\
# P = [ 0 fy' cy' Ty]\n\
# [ 0 0 1 0]\n\
# By convention, this matrix specifies the intrinsic (camera) matrix\n\
# of the processed (rectified) image. That is, the left 3x3 portion\n\
# is the normal camera intrinsic matrix for the rectified image.\n\
# It projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx', fy') and principal point\n\
# (cx', cy') - these may differ from the values in K.\n\
# For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\
# also have R = the identity and P[1:3,1:3] = K.\n\
# For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\
# position of the optical center of the second camera in the first\n\
# camera's frame. We assume Tz = 0 so both cameras are in the same\n\
# stereo image plane. The first camera always has Tx = Ty = 0. For\n\
# the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\
# Tx = -fx' * B, where B is the baseline between the cameras.\n\
# Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\
# the rectified image is given by:\n\
# [u v w]' = P * [X Y Z 1]'\n\
# x = u / w\n\
# y = v / w\n\
# This holds for both images of a stereo pair.\n\
float64[12] P # 3x4 row-major matrix\n\
\n\
\n\
#######################################################################\n\
# Operational Parameters #\n\
#######################################################################\n\
# These define the image region actually captured by the camera #\n\
# driver. Although they affect the geometry of the output image, they #\n\
# may be changed freely without recalibrating the camera. #\n\
#######################################################################\n\
\n\
# Binning refers here to any camera setting which combines rectangular\n\
# neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\
# resolution of the output image to\n\
# (width / binning_x) x (height / binning_y).\n\
# The default values binning_x = binning_y = 0 is considered the same\n\
# as binning_x = binning_y = 1 (no subsampling).\n\
uint32 binning_x\n\
uint32 binning_y\n\
\n\
# Region of interest (subwindow of full camera resolution), given in\n\
# full resolution (unbinned) image coordinates. A particular ROI\n\
# always denotes the same window of pixels on the camera sensor,\n\
# regardless of binning settings.\n\
# The default setting of roi (all values 0) is considered the same as\n\
# full resolution (roi.width = width, roi.height = height).\n\
RegionOfInterest roi\n\
\n\
================================================================================\n\
MSG: sensor_msgs/RegionOfInterest\n\
# This message is used to specify a region of interest within an image.\n\
#\n\
# When used to specify the ROI setting of the camera when the image was\n\
# taken, the height and width fields should either match the height and\n\
# width fields for the associated image; or height = width = 0\n\
# indicates that the full resolution image was captured.\n\
\n\
uint32 x_offset # Leftmost pixel of the ROI\n\
# (0 if the ROI includes the left edge of the image)\n\
uint32 y_offset # Topmost pixel of the ROI\n\
# (0 if the ROI includes the top edge of the image)\n\
uint32 height # Height of ROI\n\
uint32 width # Width of ROI\n\
\n\
# True if a distinct rectified ROI should be calculated from the \"raw\"\n\
# ROI in this message. Typically this should be False if the full image\n\
# is captured (ROI not used), and True if a subwindow is captured (ROI\n\
# used).\n\
bool do_rectify\n\
\n\
"; }
public:
  ROS_DEPRECATED static const std::string __s_getMessageDefinition() { return __s_getMessageDefinition_(); }

  ROS_DEPRECATED const std::string __getMessageDefinition() const { return __s_getMessageDefinition_(); }

  ROS_DEPRECATED virtual uint8_t *serialize(uint8_t *write_ptr, uint32_t seq) const
  {
    ros::serialization::OStream stream(write_ptr, 1000000000);
    ros::serialization::serialize(stream, arm_name);
    ros::serialization::serialize(stream, target);
    ros::serialization::serialize(stream, default_orientation);
    ros::serialization::serialize(stream, grasp_pose);
    ros::serialization::serialize(stream, collision_object_name);
    ros::serialization::serialize(stream, collision_support_surface_name);
    return stream.getData();
  }

  ROS_DEPRECATED virtual uint8_t *deserialize(uint8_t *read_ptr)
  {
    ros::serialization::IStream stream(read_ptr, 1000000000);
    ros::serialization::deserialize(stream, arm_name);
    ros::serialization::deserialize(stream, target);
    ros::serialization::deserialize(stream, default_orientation);
    ros::serialization::deserialize(stream, grasp_pose);
    ros::serialization::deserialize(stream, collision_object_name);
    ros::serialization::deserialize(stream, collision_support_surface_name);
    return stream.getData();
  }

  ROS_DEPRECATED virtual uint32_t serializationLength() const
  {
    uint32_t size = 0;
    size += ros::serialization::serializationLength(arm_name);
    size += ros::serialization::serializationLength(target);
    size += ros::serialization::serializationLength(default_orientation);
    size += ros::serialization::serializationLength(grasp_pose);
    size += ros::serialization::serializationLength(collision_object_name);
    size += ros::serialization::serializationLength(collision_support_surface_name);
    return size;
  }

  typedef boost::shared_ptr< ::object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> > Ptr;
  typedef boost::shared_ptr< ::object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> const> ConstPtr;
};
typedef ::object_manipulation_msgs::PlacePlanningRequest_<std::allocator<void> > PlacePlanningRequest;

typedef boost::shared_ptr< ::object_manipulation_msgs::PlacePlanningRequest> PlacePlanningRequestPtr;
typedef boost::shared_ptr< ::object_manipulation_msgs::PlacePlanningRequest const> PlacePlanningRequestConstPtr;

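// Response half of the PlacePlanning service: candidate place locations as
// stamped poses, plus a GraspPlanningErrorCode reporting SUCCESS, TF_ERROR
// or OTHER_ERROR.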
template <class ContainerAllocator>
struct PlacePlanningResponse_ : public ros::Message
{
  typedef PlacePlanningResponse_<ContainerAllocator> Type;

  PlacePlanningResponse_()
  : place_locations()
  , error_code()
  {
  }

  PlacePlanningResponse_(const ContainerAllocator& _alloc)
  : place_locations(_alloc)
  , error_code(_alloc)
  {
  }

  typedef std::vector< ::geometry_msgs::PoseStamped_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::geometry_msgs::PoseStamped_<ContainerAllocator> >::other > _place_locations_type;
  std::vector< ::geometry_msgs::PoseStamped_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::geometry_msgs::PoseStamped_<ContainerAllocator> >::other > place_locations;

  typedef ::object_manipulation_msgs::GraspPlanningErrorCode_<ContainerAllocator> _error_code_type;
  ::object_manipulation_msgs::GraspPlanningErrorCode_<ContainerAllocator> error_code;


  ROS_DEPRECATED uint32_t get_place_locations_size() const { return (uint32_t)place_locations.size(); }
  ROS_DEPRECATED void set_place_locations_size(uint32_t size) { place_locations.resize((size_t)size); }
  ROS_DEPRECATED void get_place_locations_vec(std::vector< ::geometry_msgs::PoseStamped_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::geometry_msgs::PoseStamped_<ContainerAllocator> >::other > & vec) const { vec = this->place_locations; }
  ROS_DEPRECATED void set_place_locations_vec(const std::vector< ::geometry_msgs::PoseStamped_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::geometry_msgs::PoseStamped_<ContainerAllocator> >::other > & vec) { this->place_locations = vec; }
private:
  static const char* __s_getDataType_() { return "object_manipulation_msgs/PlacePlanningResponse"; }
public:
  ROS_DEPRECATED static const std::string __s_getDataType() { return __s_getDataType_(); }

  ROS_DEPRECATED const std::string __getDataType() const { return __s_getDataType_(); }

private:
  static const char* __s_getMD5Sum_() { return "0382b328d7a72bb56384d8d5a71b04a1"; }
public:
  ROS_DEPRECATED static const std::string __s_getMD5Sum() { return __s_getMD5Sum_(); }

  ROS_DEPRECATED const std::string __getMD5Sum() const { return __s_getMD5Sum_(); }

private:
  static const char* __s_getServerMD5Sum_() { return "3f8bf1509af52b03e7578ed04861e277"; }
public:
  ROS_DEPRECATED static const std::string __s_getServerMD5Sum() { return __s_getServerMD5Sum_(); }

  ROS_DEPRECATED const std::string __getServerMD5Sum() const { return __s_getServerMD5Sum_(); }

private:
  static const char* __s_getMessageDefinition_() { return "\n\
\n\
geometry_msgs/PoseStamped[] place_locations\n\
\n\
\n\
GraspPlanningErrorCode error_code\n\
\n\
\n\
================================================================================\n\
MSG: geometry_msgs/PoseStamped\n\
# A Pose with reference coordinate frame and timestamp\n\
Header header\n\
Pose pose\n\
\n\
================================================================================\n\
MSG: std_msgs/Header\n\
# Standard metadata for higher-level stamped data types.\n\
# This is generally used to communicate timestamped data \n\
# in a particular coordinate frame.\n\
# \n\
# sequence ID: consecutively increasing ID \n\
uint32 seq\n\
#Two-integer timestamp that is expressed as:\n\
# * stamp.secs: seconds (stamp_secs) since epoch\n\
# * stamp.nsecs: nanoseconds since stamp_secs\n\
# time-handling sugar is provided by the client library\n\
time stamp\n\
#Frame this data is associated with\n\
# 0: no frame\n\
# 1: global frame\n\
string frame_id\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Pose\n\
# A representation of pose in free space, composed of postion and orientation. \n\
Point position\n\
Quaternion orientation\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point\n\
# This contains the position of a point in free space\n\
float64 x\n\
float64 y\n\
float64 z\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Quaternion\n\
# This represents an orientation in free space in quaternion form.\n\
\n\
float64 x\n\
float64 y\n\
float64 z\n\
float64 w\n\
\n\
================================================================================\n\
MSG: object_manipulation_msgs/GraspPlanningErrorCode\n\
# Error codes for grasp and place planning\n\
\n\
# plan completed as expected\n\
int32 SUCCESS = 0\n\
\n\
# tf error encountered while transforming\n\
int32 TF_ERROR = 1 \n\
\n\
# some other error\n\
int32 OTHER_ERROR = 2\n\
\n\
# the actual value of this error code\n\
int32 value\n\
"; }
public:
  ROS_DEPRECATED static const std::string __s_getMessageDefinition() { return __s_getMessageDefinition_(); }

  ROS_DEPRECATED const std::string __getMessageDefinition() const { return __s_getMessageDefinition_(); }

  ROS_DEPRECATED virtual uint8_t *serialize(uint8_t *write_ptr, uint32_t seq) const
  {
    ros::serialization::OStream stream(write_ptr, 1000000000);
    ros::serialization::serialize(stream, place_locations);
    ros::serialization::serialize(stream, error_code);
    return stream.getData();
  }

  ROS_DEPRECATED virtual uint8_t *deserialize(uint8_t *read_ptr)
  {
    ros::serialization::IStream stream(read_ptr, 1000000000);
    ros::serialization::deserialize(stream, place_locations);
    ros::serialization::deserialize(stream, error_code);
    return stream.getData();
  }

  ROS_DEPRECATED virtual uint32_t serializationLength() const
  {
    uint32_t size = 0;
    size += ros::serialization::serializationLength(place_locations);
    size += ros::serialization::serializationLength(error_code);
    return size;
  }

  typedef boost::shared_ptr< ::object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> > Ptr;
  typedef boost::shared_ptr< ::object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> const> ConstPtr;
};
typedef ::object_manipulation_msgs::PlacePlanningResponse_<std::allocator<void> > PlacePlanningResponse;

typedef boost::shared_ptr< ::object_manipulation_msgs::PlacePlanningResponse> PlacePlanningResponsePtr;
typedef boost::shared_ptr< ::object_manipulation_msgs::PlacePlanningResponse const> PlacePlanningResponseConstPtr;

struct PlacePlanning
{

  typedef PlacePlanningRequest Request;
  typedef PlacePlanningResponse Response;
  Request request;
  Response response;

  typedef Request RequestType;
  typedef Response ResponseType;
};
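// Minimal usage sketch (not part of the generated header; the service name
// "place_planning" and the arm name are placeholders, and a running roscpp
// node with ros::init() already called is assumed):
//
//   #include <ros/ros.h>
//   #include <object_manipulation_msgs/PlacePlanning.h>
//
//   ros::NodeHandle nh;
//   ros::ServiceClient client =
//     nh.serviceClient<object_manipulation_msgs::PlacePlanning>("place_planning");
//
//   object_manipulation_msgs::PlacePlanning srv;
//   srv.request.arm_name = "right_arm";
//   // ... fill in srv.request.target, default_orientation, grasp_pose, etc.
//
//   if (client.call(srv) &&
//       srv.response.error_code.value ==
//         object_manipulation_msgs::GraspPlanningErrorCode::SUCCESS)
//   {
//     ROS_INFO("Planner returned %zu place locations",
//              srv.response.place_locations.size());
//   }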
}

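// The message_traits specializations below expose the MD5 sum, data type name
// and full text definition of the request and response to roscpp's
// introspection and serialization machinery.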
namespace ros
{
namespace message_traits
{
template<class ContainerAllocator>
struct MD5Sum< ::object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> > {
  static const char* value()
  {
    return "93595ec6c0f82eb04bf5107b29b21823";
  }

  static const char* value(const ::object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> &) { return value(); }
  static const uint64_t static_value1 = 0x93595ec6c0f82eb0ULL;
  static const uint64_t static_value2 = 0x4bf5107b29b21823ULL;
};

template<class ContainerAllocator>
struct DataType< ::object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> > {
  static const char* value()
  {
    return "object_manipulation_msgs/PlacePlanningRequest";
  }

  static const char* value(const ::object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> &) { return value(); }
};

template<class ContainerAllocator>
struct Definition< ::object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> > {
  static const char* value()
  {
    return "\n\
\n\
\n\
string arm_name\n\
\n\
\n\
GraspableObject target\n\
\n\
\n\
\n\
\n\
geometry_msgs/Quaternion default_orientation\n\
\n\
\n\
geometry_msgs/Pose grasp_pose\n\
\n\
\n\
\n\
string collision_object_name\n\
\n\
\n\
\n\
string collision_support_surface_name\n\
\n\
\n\
================================================================================\n\
MSG: object_manipulation_msgs/GraspableObject\n\
# an object that the object_manipulator can work on\n\
\n\
# a graspable object can be represented in multiple ways. This message\n\
# can contain all of them. Which one is actually used is up to the receiver\n\
# of this message. When adding new representations, one must be careful that\n\
# they have reasonable lightweight defaults indicating that that particular\n\
# representation is not available.\n\
\n\
# the tf frame to be used as a reference frame when combining information from\n\
# the different representations below\n\
string reference_frame_id\n\
\n\
# potential recognition results from a database of models\n\
# all poses are relative to the object reference pose\n\
household_objects_database_msgs/DatabaseModelPose[] potential_models\n\
\n\
# the point cloud itself\n\
sensor_msgs/PointCloud cluster\n\
\n\
# a region of a PointCloud2 of interest\n\
object_manipulation_msgs/SceneRegion region\n\
\n\
\n\
================================================================================\n\
MSG: household_objects_database_msgs/DatabaseModelPose\n\
# Informs that a specific model from the Model Database has been \n\
# identified at a certain location\n\
\n\
# the database id of the model\n\
int32 model_id\n\
\n\
# the pose that it can be found in\n\
geometry_msgs/PoseStamped pose\n\
\n\
# a measure of the confidence level in this detection result\n\
float32 confidence\n\
================================================================================\n\
MSG: geometry_msgs/PoseStamped\n\
# A Pose with reference coordinate frame and timestamp\n\
Header header\n\
Pose pose\n\
\n\
================================================================================\n\
MSG: std_msgs/Header\n\
# Standard metadata for higher-level stamped data types.\n\
# This is generally used to communicate timestamped data \n\
# in a particular coordinate frame.\n\
# \n\
# sequence ID: consecutively increasing ID \n\
uint32 seq\n\
#Two-integer timestamp that is expressed as:\n\
# * stamp.secs: seconds (stamp_secs) since epoch\n\
# * stamp.nsecs: nanoseconds since stamp_secs\n\
# time-handling sugar is provided by the client library\n\
time stamp\n\
#Frame this data is associated with\n\
# 0: no frame\n\
# 1: global frame\n\
string frame_id\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Pose\n\
# A representation of pose in free space, composed of postion and orientation. \n\
Point position\n\
Quaternion orientation\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point\n\
# This contains the position of a point in free space\n\
float64 x\n\
float64 y\n\
float64 z\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Quaternion\n\
# This represents an orientation in free space in quaternion form.\n\
\n\
float64 x\n\
float64 y\n\
float64 z\n\
float64 w\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointCloud\n\
# This message holds a collection of 3d points, plus optional additional\n\
# information about each point.\n\
\n\
# Time of sensor data acquisition, coordinate frame ID.\n\
Header header\n\
\n\
# Array of 3d points. Each Point32 should be interpreted as a 3d point\n\
# in the frame given in the header.\n\
geometry_msgs/Point32[] points\n\
\n\
# Each channel should have the same number of elements as points array,\n\
# and the data in each channel should correspond 1:1 with each point.\n\
# Channel names in common practice are listed in ChannelFloat32.msg.\n\
ChannelFloat32[] channels\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point32\n\
# This contains the position of a point in free space(with 32 bits of precision).\n\
# It is recommeded to use Point wherever possible instead of Point32. \n\
# \n\
# This recommendation is to promote interoperability. \n\
#\n\
# This message is designed to take up less space when sending\n\
# lots of points at once, as in the case of a PointCloud. \n\
\n\
float32 x\n\
float32 y\n\
float32 z\n\
================================================================================\n\
MSG: sensor_msgs/ChannelFloat32\n\
# This message is used by the PointCloud message to hold optional data\n\
# associated with each point in the cloud. The length of the values\n\
# array should be the same as the length of the points array in the\n\
# PointCloud, and each value should be associated with the corresponding\n\
# point.\n\
\n\
# Channel names in existing practice include:\n\
# \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\
# This is opposite to usual conventions but remains for\n\
# historical reasons. The newer PointCloud2 message has no\n\
# such problem.\n\
# \"rgb\" - For point clouds produced by color stereo cameras. uint8\n\
# (R,G,B) values packed into the least significant 24 bits,\n\
# in order.\n\
# \"intensity\" - laser or pixel intensity.\n\
# \"distance\"\n\
\n\
# The channel name should give semantics of the channel (e.g.\n\
# \"intensity\" instead of \"value\").\n\
string name\n\
\n\
# The values array should be 1-1 with the elements of the associated\n\
# PointCloud.\n\
float32[] values\n\
\n\
================================================================================\n\
MSG: object_manipulation_msgs/SceneRegion\n\
# Point cloud\n\
sensor_msgs/PointCloud2 cloud\n\
\n\
# Indices for the region of interest\n\
int32[] mask\n\
\n\
# One of the corresponding 2D images, if applicable\n\
sensor_msgs/Image image\n\
\n\
# The disparity image, if applicable\n\
sensor_msgs/Image disparity_image\n\
\n\
# Camera info for the camera that took the image\n\
sensor_msgs/CameraInfo cam_info\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointCloud2\n\
# This message holds a collection of N-dimensional points, which may\n\
# contain additional information such as normals, intensity, etc. The\n\
# point data is stored as a binary blob, its layout described by the\n\
# contents of the \"fields\" array.\n\
\n\
# The point cloud data may be organized 2d (image-like) or 1d\n\
# (unordered). Point clouds organized as 2d images may be produced by\n\
# camera depth sensors such as stereo or time-of-flight.\n\
\n\
# Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\
# points).\n\
Header header\n\
\n\
# 2D structure of the point cloud. If the cloud is unordered, height is\n\
# 1 and width is the length of the point cloud.\n\
uint32 height\n\
uint32 width\n\
\n\
# Describes the channels and their layout in the binary data blob.\n\
PointField[] fields\n\
\n\
bool is_bigendian # Is this data bigendian?\n\
uint32 point_step # Length of a point in bytes\n\
uint32 row_step # Length of a row in bytes\n\
uint8[] data # Actual point data, size is (row_step*height)\n\
\n\
bool is_dense # True if there are no invalid points\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointField\n\
# This message holds the description of one point entry in the\n\
# PointCloud2 message format.\n\
uint8 INT8 = 1\n\
uint8 UINT8 = 2\n\
uint8 INT16 = 3\n\
uint8 UINT16 = 4\n\
uint8 INT32 = 5\n\
uint8 UINT32 = 6\n\
uint8 FLOAT32 = 7\n\
uint8 FLOAT64 = 8\n\
\n\
string name # Name of field\n\
uint32 offset # Offset from start of point struct\n\
uint8 datatype # Datatype enumeration, see above\n\
uint32 count # How many elements in the field\n\
\n\
================================================================================\n\
MSG: sensor_msgs/Image\n\
# This message contains an uncompressed image\n\
# (0, 0) is at top-left corner of image\n\
#\n\
\n\
Header header # Header timestamp should be acquisition time of image\n\
# Header frame_id should be optical frame of camera\n\
# origin of frame should be optical center of cameara\n\
# +x should point to the right in the image\n\
# +y should point down in the image\n\
# +z should point into to plane of the image\n\
# If the frame_id here and the frame_id of the CameraInfo\n\
# message associated with the image conflict\n\
# the behavior is undefined\n\
\n\
uint32 height # image height, that is, number of rows\n\
uint32 width # image width, that is, number of columns\n\
\n\
# The legal values for encoding are in file src/image_encodings.cpp\n\
# If you want to standardize a new string format, join\n\
# ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\
\n\
string encoding # Encoding of pixels -- channel meaning, ordering, size\n\
# taken from the list of strings in src/image_encodings.cpp\n\
\n\
uint8 is_bigendian # is this data bigendian?\n\
uint32 step # Full row length in bytes\n\
uint8[] data # actual matrix data, size is (step * rows)\n\
\n\
================================================================================\n\
MSG: sensor_msgs/CameraInfo\n\
# This message defines meta information for a camera. It should be in a\n\
# camera namespace on topic \"camera_info\" and accompanied by up to five\n\
# image topics named:\n\
#\n\
# image_raw - raw data from the camera driver, possibly Bayer encoded\n\
# image - monochrome, distorted\n\
# image_color - color, distorted\n\
# image_rect - monochrome, rectified\n\
# image_rect_color - color, rectified\n\
#\n\
# The image_pipeline contains packages (image_proc, stereo_image_proc)\n\
# for producing the four processed image topics from image_raw and\n\
# camera_info. The meaning of the camera parameters are described in\n\
# detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\
#\n\
# The image_geometry package provides a user-friendly interface to\n\
# common operations using this meta information. If you want to, e.g.,\n\
# project a 3d point into image coordinates, we strongly recommend\n\
# using image_geometry.\n\
#\n\
# If the camera is uncalibrated, the matrices D, K, R, P should be left\n\
# zeroed out. In particular, clients may assume that K[0] == 0.0\n\
# indicates an uncalibrated camera.\n\
\n\
#######################################################################\n\
# Image acquisition info #\n\
#######################################################################\n\
\n\
# Time of image acquisition, camera coordinate frame ID\n\
Header header # Header timestamp should be acquisition time of image\n\
# Header frame_id should be optical frame of camera\n\
# origin of frame should be optical center of camera\n\
# +x should point to the right in the image\n\
# +y should point down in the image\n\
# +z should point into the plane of the image\n\
\n\
\n\
#######################################################################\n\
# Calibration Parameters #\n\
#######################################################################\n\
# These are fixed during camera calibration. Their values will be the #\n\
# same in all messages until the camera is recalibrated. Note that #\n\
# self-calibrating systems may \"recalibrate\" frequently. #\n\
# #\n\
# The internal parameters can be used to warp a raw (distorted) image #\n\
# to: #\n\
# 1. An undistorted image (requires D and K) #\n\
# 2. A rectified image (requires D, K, R) #\n\
# The projection matrix P projects 3D points into the rectified image.#\n\
#######################################################################\n\
\n\
# The image dimensions with which the camera was calibrated. Normally\n\
# this will be the full camera resolution in pixels.\n\
uint32 height\n\
uint32 width\n\
\n\
# The distortion model used. Supported models are listed in\n\
# sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\
# simple model of radial and tangential distortion - is sufficent.\n\
string distortion_model\n\
\n\
# The distortion parameters, size depending on the distortion model.\n\
# For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\
float64[] D\n\
\n\
# Intrinsic camera matrix for the raw (distorted) images.\n\
# [fx 0 cx]\n\
# K = [ 0 fy cy]\n\
# [ 0 0 1]\n\
# Projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx, fy) and principal point\n\
# (cx, cy).\n\
float64[9] K # 3x3 row-major matrix\n\
\n\
# Rectification matrix (stereo cameras only)\n\
# A rotation matrix aligning the camera coordinate system to the ideal\n\
# stereo image plane so that epipolar lines in both stereo images are\n\
# parallel.\n\
float64[9] R # 3x3 row-major matrix\n\
\n\
# Projection/camera matrix\n\
# [fx' 0 cx' Tx]\n\
# P = [ 0 fy' cy' Ty]\n\
# [ 0 0 1 0]\n\
# By convention, this matrix specifies the intrinsic (camera) matrix\n\
# of the processed (rectified) image. That is, the left 3x3 portion\n\
# is the normal camera intrinsic matrix for the rectified image.\n\
# It projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx', fy') and principal point\n\
# (cx', cy') - these may differ from the values in K.\n\
# For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\
# also have R = the identity and P[1:3,1:3] = K.\n\
# For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\
# position of the optical center of the second camera in the first\n\
# camera's frame. We assume Tz = 0 so both cameras are in the same\n\
# stereo image plane. The first camera always has Tx = Ty = 0. For\n\
# the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\
# Tx = -fx' * B, where B is the baseline between the cameras.\n\
# Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\
# the rectified image is given by:\n\
# [u v w]' = P * [X Y Z 1]'\n\
# x = u / w\n\
# y = v / w\n\
# This holds for both images of a stereo pair.\n\
float64[12] P # 3x4 row-major matrix\n\
\n\
\n\
#######################################################################\n\
# Operational Parameters #\n\
#######################################################################\n\
# These define the image region actually captured by the camera #\n\
# driver. Although they affect the geometry of the output image, they #\n\
# may be changed freely without recalibrating the camera. #\n\
#######################################################################\n\
\n\
# Binning refers here to any camera setting which combines rectangular\n\
# neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\
# resolution of the output image to\n\
# (width / binning_x) x (height / binning_y).\n\
# The default values binning_x = binning_y = 0 is considered the same\n\
# as binning_x = binning_y = 1 (no subsampling).\n\
uint32 binning_x\n\
uint32 binning_y\n\
\n\
# Region of interest (subwindow of full camera resolution), given in\n\
# full resolution (unbinned) image coordinates. A particular ROI\n\
# always denotes the same window of pixels on the camera sensor,\n\
# regardless of binning settings.\n\
# The default setting of roi (all values 0) is considered the same as\n\
# full resolution (roi.width = width, roi.height = height).\n\
RegionOfInterest roi\n\
\n\
================================================================================\n\
MSG: sensor_msgs/RegionOfInterest\n\
# This message is used to specify a region of interest within an image.\n\
#\n\
# When used to specify the ROI setting of the camera when the image was\n\
# taken, the height and width fields should either match the height and\n\
# width fields for the associated image; or height = width = 0\n\
# indicates that the full resolution image was captured.\n\
\n\
uint32 x_offset # Leftmost pixel of the ROI\n\
# (0 if the ROI includes the left edge of the image)\n\
uint32 y_offset # Topmost pixel of the ROI\n\
# (0 if the ROI includes the top edge of the image)\n\
uint32 height # Height of ROI\n\
uint32 width # Width of ROI\n\
\n\
# True if a distinct rectified ROI should be calculated from the \"raw\"\n\
# ROI in this message. Typically this should be False if the full image\n\
# is captured (ROI not used), and True if a subwindow is captured (ROI\n\
# used).\n\
bool do_rectify\n\
\n\
";
  }

  static const char* value(const ::object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> &) { return value(); }
};

}
}


namespace ros
{
namespace message_traits
{
template<class ContainerAllocator>
struct MD5Sum< ::object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> > {
  static const char* value()
  {
    return "0382b328d7a72bb56384d8d5a71b04a1";
  }

  static const char* value(const ::object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> &) { return value(); }
  static const uint64_t static_value1 = 0x0382b328d7a72bb5ULL;
  static const uint64_t static_value2 = 0x6384d8d5a71b04a1ULL;
};

template<class ContainerAllocator>
struct DataType< ::object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> > {
  static const char* value()
  {
    return "object_manipulation_msgs/PlacePlanningResponse";
  }

  static const char* value(const ::object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> &) { return value(); }
};

template<class ContainerAllocator>
struct Definition< ::object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> > {
  static const char* value()
  {
    return "\n\
\n\
geometry_msgs/PoseStamped[] place_locations\n\
\n\
\n\
GraspPlanningErrorCode error_code\n\
\n\
\n\
================================================================================\n\
MSG: geometry_msgs/PoseStamped\n\
# A Pose with reference coordinate frame and timestamp\n\
Header header\n\
Pose pose\n\
\n\
================================================================================\n\
MSG: std_msgs/Header\n\
# Standard metadata for higher-level stamped data types.\n\
# This is generally used to communicate timestamped data \n\
# in a particular coordinate frame.\n\
# \n\
# sequence ID: consecutively increasing ID \n\
uint32 seq\n\
#Two-integer timestamp that is expressed as:\n\
# * stamp.secs: seconds (stamp_secs) since epoch\n\
# * stamp.nsecs: nanoseconds since stamp_secs\n\
# time-handling sugar is provided by the client library\n\
time stamp\n\
#Frame this data is associated with\n\
# 0: no frame\n\
# 1: global frame\n\
string frame_id\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Pose\n\
# A representation of pose in free space, composed of postion and orientation. \n\
Point position\n\
Quaternion orientation\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point\n\
# This contains the position of a point in free space\n\
float64 x\n\
float64 y\n\
float64 z\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Quaternion\n\
# This represents an orientation in free space in quaternion form.\n\
\n\
float64 x\n\
float64 y\n\
float64 z\n\
float64 w\n\
\n\
================================================================================\n\
MSG: object_manipulation_msgs/GraspPlanningErrorCode\n\
# Error codes for grasp and place planning\n\
\n\
# plan completed as expected\n\
int32 SUCCESS = 0\n\
\n\
# tf error encountered while transforming\n\
int32 TF_ERROR = 1 \n\
\n\
# some other error\n\
int32 OTHER_ERROR = 2\n\
\n\
# the actual value of this error code\n\
int32 value\n\
";
  }

  static const char* value(const ::object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> &) { return value(); }
};

}
}

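// Serializer specializations: member-by-member serialization for the request
// and response, generated via the all-in-one serializer pattern.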
namespace ros
{
namespace serialization
{

template<class ContainerAllocator> struct Serializer< ::object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> >
{
  template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
  {
    stream.next(m.arm_name);
    stream.next(m.target);
    stream.next(m.default_orientation);
    stream.next(m.grasp_pose);
    stream.next(m.collision_object_name);
    stream.next(m.collision_support_surface_name);
  }

  ROS_DECLARE_ALLINONE_SERIALIZER;
};
}
}


namespace ros
{
namespace serialization
{

template<class ContainerAllocator> struct Serializer< ::object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> >
{
  template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
  {
    stream.next(m.place_locations);
    stream.next(m.error_code);
  }

  ROS_DECLARE_ALLINONE_SERIALIZER;
};
}
}

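// service_traits specializations: the service-level MD5 sum and data type
// name used by roscpp when matching clients against servers.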
namespace ros
{
namespace service_traits
{
template<>
struct MD5Sum<object_manipulation_msgs::PlacePlanning> {
  static const char* value()
  {
    return "3f8bf1509af52b03e7578ed04861e277";
  }

  static const char* value(const object_manipulation_msgs::PlacePlanning&) { return value(); }
};

template<>
struct DataType<object_manipulation_msgs::PlacePlanning> {
  static const char* value()
  {
    return "object_manipulation_msgs/PlacePlanning";
  }

  static const char* value(const object_manipulation_msgs::PlacePlanning&) { return value(); }
};

template<class ContainerAllocator>
struct MD5Sum<object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> > {
  static const char* value()
  {
    return "3f8bf1509af52b03e7578ed04861e277";
  }

  static const char* value(const object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> &) { return value(); }
};

template<class ContainerAllocator>
struct DataType<object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> > {
  static const char* value()
  {
    return "object_manipulation_msgs/PlacePlanning";
  }

  static const char* value(const object_manipulation_msgs::PlacePlanningRequest_<ContainerAllocator> &) { return value(); }
};

template<class ContainerAllocator>
struct MD5Sum<object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> > {
  static const char* value()
  {
    return "3f8bf1509af52b03e7578ed04861e277";
  }

  static const char* value(const object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> &) { return value(); }
};

template<class ContainerAllocator>
struct DataType<object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> > {
  static const char* value()
  {
    return "object_manipulation_msgs/PlacePlanning";
  }

  static const char* value(const object_manipulation_msgs::PlacePlanningResponse_<ContainerAllocator> &) { return value(); }
};

}
}

#endif // OBJECT_MANIPULATION_MSGS_SERVICE_PLACEPLANNING_H