00001 /* Auto-generated by genmsg_cpp for file /home/rosbuild/hudson/workspace/doc-electric-object_manipulation/doc_stacks/2013-03-01_16-13-18.345538/object_manipulation/object_manipulation_msgs/msg/ReactiveGraspGoal.msg */ 00002 #ifndef OBJECT_MANIPULATION_MSGS_MESSAGE_REACTIVEGRASPGOAL_H 00003 #define OBJECT_MANIPULATION_MSGS_MESSAGE_REACTIVEGRASPGOAL_H 00004 #include <string> 00005 #include <vector> 00006 #include <map> 00007 #include <ostream> 00008 #include "ros/serialization.h" 00009 #include "ros/builtin_message_traits.h" 00010 #include "ros/message_operations.h" 00011 #include "ros/time.h" 00012 00013 #include "ros/macros.h" 00014 00015 #include "ros/assert.h" 00016 00017 #include "object_manipulation_msgs/GraspableObject.h" 00018 #include "geometry_msgs/PoseStamped.h" 00019 #include "trajectory_msgs/JointTrajectory.h" 00020 #include "sensor_msgs/JointState.h" 00021 #include "sensor_msgs/JointState.h" 00022 00023 namespace object_manipulation_msgs 00024 { 00025 template <class ContainerAllocator> 00026 struct ReactiveGraspGoal_ { 00027 typedef ReactiveGraspGoal_<ContainerAllocator> Type; 00028 00029 ReactiveGraspGoal_() 00030 : arm_name() 00031 , target() 00032 , final_grasp_pose() 00033 , trajectory() 00034 , collision_support_surface_name() 00035 , pre_grasp_posture() 00036 , grasp_posture() 00037 , max_contact_force(0.0) 00038 { 00039 } 00040 00041 ReactiveGraspGoal_(const ContainerAllocator& _alloc) 00042 : arm_name(_alloc) 00043 , target(_alloc) 00044 , final_grasp_pose(_alloc) 00045 , trajectory(_alloc) 00046 , collision_support_surface_name(_alloc) 00047 , pre_grasp_posture(_alloc) 00048 , grasp_posture(_alloc) 00049 , max_contact_force(0.0) 00050 { 00051 } 00052 00053 typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _arm_name_type; 00054 std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > arm_name; 00055 00056 typedef ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> _target_type; 00057 ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> target; 00058 00059 typedef ::geometry_msgs::PoseStamped_<ContainerAllocator> _final_grasp_pose_type; 00060 ::geometry_msgs::PoseStamped_<ContainerAllocator> final_grasp_pose; 00061 00062 typedef ::trajectory_msgs::JointTrajectory_<ContainerAllocator> _trajectory_type; 00063 ::trajectory_msgs::JointTrajectory_<ContainerAllocator> trajectory; 00064 00065 typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _collision_support_surface_name_type; 00066 std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > collision_support_surface_name; 00067 00068 typedef ::sensor_msgs::JointState_<ContainerAllocator> _pre_grasp_posture_type; 00069 ::sensor_msgs::JointState_<ContainerAllocator> pre_grasp_posture; 00070 00071 typedef ::sensor_msgs::JointState_<ContainerAllocator> _grasp_posture_type; 00072 ::sensor_msgs::JointState_<ContainerAllocator> grasp_posture; 00073 00074 typedef float _max_contact_force_type; 00075 float max_contact_force; 00076 00077 00078 private: 00079 static const char* __s_getDataType_() { return "object_manipulation_msgs/ReactiveGraspGoal"; } 00080 public: 00081 ROS_DEPRECATED static const std::string __s_getDataType() { return __s_getDataType_(); } 00082 00083 ROS_DEPRECATED const std::string __getDataType() const { return __s_getDataType_(); } 00084 00085 private: 
00086 static const char* __s_getMD5Sum_() { return "f48f8da93655260d66ea21ae9aaef12f"; } 00087 public: 00088 ROS_DEPRECATED static const std::string __s_getMD5Sum() { return __s_getMD5Sum_(); } 00089 00090 ROS_DEPRECATED const std::string __getMD5Sum() const { return __s_getMD5Sum_(); } 00091 00092 private: 00093 static const char* __s_getMessageDefinition_() { return "# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n\ 00094 # an action for reactive grasping\n\ 00095 # a reactive grasp starts from the current pose of the gripper and ends\n\ 00096 # at a desired grasp pose, presumably using the touch sensors along the way\n\ 00097 \n\ 00098 # the name of the arm being used\n\ 00099 string arm_name\n\ 00100 \n\ 00101 # the object to be grasped\n\ 00102 GraspableObject target\n\ 00103 \n\ 00104 # the desired grasp pose for the hand\n\ 00105 geometry_msgs/PoseStamped final_grasp_pose\n\ 00106 \n\ 00107 # the joint trajectory to use for the approach (if available)\n\ 00108 # this trajectory is expected to start at the current pose of the gripper\n\ 00109 # and end at the desired grasp pose\n\ 00110 trajectory_msgs/JointTrajectory trajectory\n\ 00111 \n\ 00112 # the name of the support surface in the collision environment, if any\n\ 00113 string collision_support_surface_name\n\ 00114 \n\ 00115 # The internal posture of the hand for the pre-grasp\n\ 00116 # only positions are used\n\ 00117 sensor_msgs/JointState pre_grasp_posture\n\ 00118 \n\ 00119 # The internal posture of the hand for the grasp\n\ 00120 # positions and efforts are used\n\ 00121 sensor_msgs/JointState grasp_posture\n\ 00122 \n\ 00123 # The max contact force to use while grasping (<=0 to disable)\n\ 00124 float32 max_contact_force\n\ 00125 \n\ 00126 \n\ 00127 ================================================================================\n\ 00128 MSG: object_manipulation_msgs/GraspableObject\n\ 00129 # an object that the object_manipulator can work on\n\ 00130 \n\ 00131 # a graspable object can be represented in multiple ways. This message\n\ 00132 # can contain all of them. Which one is actually used is up to the receiver\n\ 00133 # of this message. 
When adding new representations, one must be careful that\n\ 00134 # they have reasonable lightweight defaults indicating that that particular\n\ 00135 # representation is not available.\n\ 00136 \n\ 00137 # the tf frame to be used as a reference frame when combining information from\n\ 00138 # the different representations below\n\ 00139 string reference_frame_id\n\ 00140 \n\ 00141 # potential recognition results from a database of models\n\ 00142 # all poses are relative to the object reference pose\n\ 00143 household_objects_database_msgs/DatabaseModelPose[] potential_models\n\ 00144 \n\ 00145 # the point cloud itself\n\ 00146 sensor_msgs/PointCloud cluster\n\ 00147 \n\ 00148 # a region of a PointCloud2 of interest\n\ 00149 object_manipulation_msgs/SceneRegion region\n\ 00150 \n\ 00151 # the name that this object has in the collision environment\n\ 00152 string collision_name\n\ 00153 ================================================================================\n\ 00154 MSG: household_objects_database_msgs/DatabaseModelPose\n\ 00155 # Informs that a specific model from the Model Database has been \n\ 00156 # identified at a certain location\n\ 00157 \n\ 00158 # the database id of the model\n\ 00159 int32 model_id\n\ 00160 \n\ 00161 # the pose that it can be found in\n\ 00162 geometry_msgs/PoseStamped pose\n\ 00163 \n\ 00164 # a measure of the confidence level in this detection result\n\ 00165 float32 confidence\n\ 00166 \n\ 00167 # the name of the object detector that generated this detection result\n\ 00168 string detector_name\n\ 00169 \n\ 00170 ================================================================================\n\ 00171 MSG: geometry_msgs/PoseStamped\n\ 00172 # A Pose with reference coordinate frame and timestamp\n\ 00173 Header header\n\ 00174 Pose pose\n\ 00175 \n\ 00176 ================================================================================\n\ 00177 MSG: std_msgs/Header\n\ 00178 # Standard metadata for higher-level stamped data types.\n\ 00179 # This is generally used to communicate timestamped data \n\ 00180 # in a particular coordinate frame.\n\ 00181 # \n\ 00182 # sequence ID: consecutively increasing ID \n\ 00183 uint32 seq\n\ 00184 #Two-integer timestamp that is expressed as:\n\ 00185 # * stamp.secs: seconds (stamp_secs) since epoch\n\ 00186 # * stamp.nsecs: nanoseconds since stamp_secs\n\ 00187 # time-handling sugar is provided by the client library\n\ 00188 time stamp\n\ 00189 #Frame this data is associated with\n\ 00190 # 0: no frame\n\ 00191 # 1: global frame\n\ 00192 string frame_id\n\ 00193 \n\ 00194 ================================================================================\n\ 00195 MSG: geometry_msgs/Pose\n\ 00196 # A representation of pose in free space, composed of postion and orientation. 
\n\ 00197 Point position\n\ 00198 Quaternion orientation\n\ 00199 \n\ 00200 ================================================================================\n\ 00201 MSG: geometry_msgs/Point\n\ 00202 # This contains the position of a point in free space\n\ 00203 float64 x\n\ 00204 float64 y\n\ 00205 float64 z\n\ 00206 \n\ 00207 ================================================================================\n\ 00208 MSG: geometry_msgs/Quaternion\n\ 00209 # This represents an orientation in free space in quaternion form.\n\ 00210 \n\ 00211 float64 x\n\ 00212 float64 y\n\ 00213 float64 z\n\ 00214 float64 w\n\ 00215 \n\ 00216 ================================================================================\n\ 00217 MSG: sensor_msgs/PointCloud\n\ 00218 # This message holds a collection of 3d points, plus optional additional\n\ 00219 # information about each point.\n\ 00220 \n\ 00221 # Time of sensor data acquisition, coordinate frame ID.\n\ 00222 Header header\n\ 00223 \n\ 00224 # Array of 3d points. Each Point32 should be interpreted as a 3d point\n\ 00225 # in the frame given in the header.\n\ 00226 geometry_msgs/Point32[] points\n\ 00227 \n\ 00228 # Each channel should have the same number of elements as points array,\n\ 00229 # and the data in each channel should correspond 1:1 with each point.\n\ 00230 # Channel names in common practice are listed in ChannelFloat32.msg.\n\ 00231 ChannelFloat32[] channels\n\ 00232 \n\ 00233 ================================================================================\n\ 00234 MSG: geometry_msgs/Point32\n\ 00235 # This contains the position of a point in free space(with 32 bits of precision).\n\ 00236 # It is recommeded to use Point wherever possible instead of Point32. \n\ 00237 # \n\ 00238 # This recommendation is to promote interoperability. \n\ 00239 #\n\ 00240 # This message is designed to take up less space when sending\n\ 00241 # lots of points at once, as in the case of a PointCloud. \n\ 00242 \n\ 00243 float32 x\n\ 00244 float32 y\n\ 00245 float32 z\n\ 00246 ================================================================================\n\ 00247 MSG: sensor_msgs/ChannelFloat32\n\ 00248 # This message is used by the PointCloud message to hold optional data\n\ 00249 # associated with each point in the cloud. The length of the values\n\ 00250 # array should be the same as the length of the points array in the\n\ 00251 # PointCloud, and each value should be associated with the corresponding\n\ 00252 # point.\n\ 00253 \n\ 00254 # Channel names in existing practice include:\n\ 00255 # \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\ 00256 # This is opposite to usual conventions but remains for\n\ 00257 # historical reasons. The newer PointCloud2 message has no\n\ 00258 # such problem.\n\ 00259 # \"rgb\" - For point clouds produced by color stereo cameras. 
uint8\n\ 00260 # (R,G,B) values packed into the least significant 24 bits,\n\ 00261 # in order.\n\ 00262 # \"intensity\" - laser or pixel intensity.\n\ 00263 # \"distance\"\n\ 00264 \n\ 00265 # The channel name should give semantics of the channel (e.g.\n\ 00266 # \"intensity\" instead of \"value\").\n\ 00267 string name\n\ 00268 \n\ 00269 # The values array should be 1-1 with the elements of the associated\n\ 00270 # PointCloud.\n\ 00271 float32[] values\n\ 00272 \n\ 00273 ================================================================================\n\ 00274 MSG: object_manipulation_msgs/SceneRegion\n\ 00275 # Point cloud\n\ 00276 sensor_msgs/PointCloud2 cloud\n\ 00277 \n\ 00278 # Indices for the region of interest\n\ 00279 int32[] mask\n\ 00280 \n\ 00281 # One of the corresponding 2D images, if applicable\n\ 00282 sensor_msgs/Image image\n\ 00283 \n\ 00284 # The disparity image, if applicable\n\ 00285 sensor_msgs/Image disparity_image\n\ 00286 \n\ 00287 # Camera info for the camera that took the image\n\ 00288 sensor_msgs/CameraInfo cam_info\n\ 00289 \n\ 00290 # a 3D region of interest for grasp planning\n\ 00291 geometry_msgs/PoseStamped roi_box_pose\n\ 00292 geometry_msgs/Vector3 roi_box_dims\n\ 00293 \n\ 00294 ================================================================================\n\ 00295 MSG: sensor_msgs/PointCloud2\n\ 00296 # This message holds a collection of N-dimensional points, which may\n\ 00297 # contain additional information such as normals, intensity, etc. The\n\ 00298 # point data is stored as a binary blob, its layout described by the\n\ 00299 # contents of the \"fields\" array.\n\ 00300 \n\ 00301 # The point cloud data may be organized 2d (image-like) or 1d\n\ 00302 # (unordered). Point clouds organized as 2d images may be produced by\n\ 00303 # camera depth sensors such as stereo or time-of-flight.\n\ 00304 \n\ 00305 # Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\ 00306 # points).\n\ 00307 Header header\n\ 00308 \n\ 00309 # 2D structure of the point cloud. 
If the cloud is unordered, height is\n\ 00310 # 1 and width is the length of the point cloud.\n\ 00311 uint32 height\n\ 00312 uint32 width\n\ 00313 \n\ 00314 # Describes the channels and their layout in the binary data blob.\n\ 00315 PointField[] fields\n\ 00316 \n\ 00317 bool is_bigendian # Is this data bigendian?\n\ 00318 uint32 point_step # Length of a point in bytes\n\ 00319 uint32 row_step # Length of a row in bytes\n\ 00320 uint8[] data # Actual point data, size is (row_step*height)\n\ 00321 \n\ 00322 bool is_dense # True if there are no invalid points\n\ 00323 \n\ 00324 ================================================================================\n\ 00325 MSG: sensor_msgs/PointField\n\ 00326 # This message holds the description of one point entry in the\n\ 00327 # PointCloud2 message format.\n\ 00328 uint8 INT8 = 1\n\ 00329 uint8 UINT8 = 2\n\ 00330 uint8 INT16 = 3\n\ 00331 uint8 UINT16 = 4\n\ 00332 uint8 INT32 = 5\n\ 00333 uint8 UINT32 = 6\n\ 00334 uint8 FLOAT32 = 7\n\ 00335 uint8 FLOAT64 = 8\n\ 00336 \n\ 00337 string name # Name of field\n\ 00338 uint32 offset # Offset from start of point struct\n\ 00339 uint8 datatype # Datatype enumeration, see above\n\ 00340 uint32 count # How many elements in the field\n\ 00341 \n\ 00342 ================================================================================\n\ 00343 MSG: sensor_msgs/Image\n\ 00344 # This message contains an uncompressed image\n\ 00345 # (0, 0) is at top-left corner of image\n\ 00346 #\n\ 00347 \n\ 00348 Header header # Header timestamp should be acquisition time of image\n\ 00349 # Header frame_id should be optical frame of camera\n\ 00350 # origin of frame should be optical center of cameara\n\ 00351 # +x should point to the right in the image\n\ 00352 # +y should point down in the image\n\ 00353 # +z should point into to plane of the image\n\ 00354 # If the frame_id here and the frame_id of the CameraInfo\n\ 00355 # message associated with the image conflict\n\ 00356 # the behavior is undefined\n\ 00357 \n\ 00358 uint32 height # image height, that is, number of rows\n\ 00359 uint32 width # image width, that is, number of columns\n\ 00360 \n\ 00361 # The legal values for encoding are in file src/image_encodings.cpp\n\ 00362 # If you want to standardize a new string format, join\n\ 00363 # ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\ 00364 \n\ 00365 string encoding # Encoding of pixels -- channel meaning, ordering, size\n\ 00366 # taken from the list of strings in src/image_encodings.cpp\n\ 00367 \n\ 00368 uint8 is_bigendian # is this data bigendian?\n\ 00369 uint32 step # Full row length in bytes\n\ 00370 uint8[] data # actual matrix data, size is (step * rows)\n\ 00371 \n\ 00372 ================================================================================\n\ 00373 MSG: sensor_msgs/CameraInfo\n\ 00374 # This message defines meta information for a camera. It should be in a\n\ 00375 # camera namespace on topic \"camera_info\" and accompanied by up to five\n\ 00376 # image topics named:\n\ 00377 #\n\ 00378 # image_raw - raw data from the camera driver, possibly Bayer encoded\n\ 00379 # image - monochrome, distorted\n\ 00380 # image_color - color, distorted\n\ 00381 # image_rect - monochrome, rectified\n\ 00382 # image_rect_color - color, rectified\n\ 00383 #\n\ 00384 # The image_pipeline contains packages (image_proc, stereo_image_proc)\n\ 00385 # for producing the four processed image topics from image_raw and\n\ 00386 # camera_info. 
The meaning of the camera parameters are described in\n\ 00387 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\ 00388 #\n\ 00389 # The image_geometry package provides a user-friendly interface to\n\ 00390 # common operations using this meta information. If you want to, e.g.,\n\ 00391 # project a 3d point into image coordinates, we strongly recommend\n\ 00392 # using image_geometry.\n\ 00393 #\n\ 00394 # If the camera is uncalibrated, the matrices D, K, R, P should be left\n\ 00395 # zeroed out. In particular, clients may assume that K[0] == 0.0\n\ 00396 # indicates an uncalibrated camera.\n\ 00397 \n\ 00398 #######################################################################\n\ 00399 # Image acquisition info #\n\ 00400 #######################################################################\n\ 00401 \n\ 00402 # Time of image acquisition, camera coordinate frame ID\n\ 00403 Header header # Header timestamp should be acquisition time of image\n\ 00404 # Header frame_id should be optical frame of camera\n\ 00405 # origin of frame should be optical center of camera\n\ 00406 # +x should point to the right in the image\n\ 00407 # +y should point down in the image\n\ 00408 # +z should point into the plane of the image\n\ 00409 \n\ 00410 \n\ 00411 #######################################################################\n\ 00412 # Calibration Parameters #\n\ 00413 #######################################################################\n\ 00414 # These are fixed during camera calibration. Their values will be the #\n\ 00415 # same in all messages until the camera is recalibrated. Note that #\n\ 00416 # self-calibrating systems may \"recalibrate\" frequently. #\n\ 00417 # #\n\ 00418 # The internal parameters can be used to warp a raw (distorted) image #\n\ 00419 # to: #\n\ 00420 # 1. An undistorted image (requires D and K) #\n\ 00421 # 2. A rectified image (requires D, K, R) #\n\ 00422 # The projection matrix P projects 3D points into the rectified image.#\n\ 00423 #######################################################################\n\ 00424 \n\ 00425 # The image dimensions with which the camera was calibrated. Normally\n\ 00426 # this will be the full camera resolution in pixels.\n\ 00427 uint32 height\n\ 00428 uint32 width\n\ 00429 \n\ 00430 # The distortion model used. Supported models are listed in\n\ 00431 # sensor_msgs/distortion_models.h. 
For most cameras, \"plumb_bob\" - a\n\ 00432 # simple model of radial and tangential distortion - is sufficent.\n\ 00433 string distortion_model\n\ 00434 \n\ 00435 # The distortion parameters, size depending on the distortion model.\n\ 00436 # For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\ 00437 float64[] D\n\ 00438 \n\ 00439 # Intrinsic camera matrix for the raw (distorted) images.\n\ 00440 # [fx 0 cx]\n\ 00441 # K = [ 0 fy cy]\n\ 00442 # [ 0 0 1]\n\ 00443 # Projects 3D points in the camera coordinate frame to 2D pixel\n\ 00444 # coordinates using the focal lengths (fx, fy) and principal point\n\ 00445 # (cx, cy).\n\ 00446 float64[9] K # 3x3 row-major matrix\n\ 00447 \n\ 00448 # Rectification matrix (stereo cameras only)\n\ 00449 # A rotation matrix aligning the camera coordinate system to the ideal\n\ 00450 # stereo image plane so that epipolar lines in both stereo images are\n\ 00451 # parallel.\n\ 00452 float64[9] R # 3x3 row-major matrix\n\ 00453 \n\ 00454 # Projection/camera matrix\n\ 00455 # [fx' 0 cx' Tx]\n\ 00456 # P = [ 0 fy' cy' Ty]\n\ 00457 # [ 0 0 1 0]\n\ 00458 # By convention, this matrix specifies the intrinsic (camera) matrix\n\ 00459 # of the processed (rectified) image. That is, the left 3x3 portion\n\ 00460 # is the normal camera intrinsic matrix for the rectified image.\n\ 00461 # It projects 3D points in the camera coordinate frame to 2D pixel\n\ 00462 # coordinates using the focal lengths (fx', fy') and principal point\n\ 00463 # (cx', cy') - these may differ from the values in K.\n\ 00464 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\ 00465 # also have R = the identity and P[1:3,1:3] = K.\n\ 00466 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\ 00467 # position of the optical center of the second camera in the first\n\ 00468 # camera's frame. We assume Tz = 0 so both cameras are in the same\n\ 00469 # stereo image plane. The first camera always has Tx = Ty = 0. For\n\ 00470 # the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\ 00471 # Tx = -fx' * B, where B is the baseline between the cameras.\n\ 00472 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\ 00473 # the rectified image is given by:\n\ 00474 # [u v w]' = P * [X Y Z 1]'\n\ 00475 # x = u / w\n\ 00476 # y = v / w\n\ 00477 # This holds for both images of a stereo pair.\n\ 00478 float64[12] P # 3x4 row-major matrix\n\ 00479 \n\ 00480 \n\ 00481 #######################################################################\n\ 00482 # Operational Parameters #\n\ 00483 #######################################################################\n\ 00484 # These define the image region actually captured by the camera #\n\ 00485 # driver. Although they affect the geometry of the output image, they #\n\ 00486 # may be changed freely without recalibrating the camera. 
#\n\ 00487 #######################################################################\n\ 00488 \n\ 00489 # Binning refers here to any camera setting which combines rectangular\n\ 00490 # neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\ 00491 # resolution of the output image to\n\ 00492 # (width / binning_x) x (height / binning_y).\n\ 00493 # The default values binning_x = binning_y = 0 is considered the same\n\ 00494 # as binning_x = binning_y = 1 (no subsampling).\n\ 00495 uint32 binning_x\n\ 00496 uint32 binning_y\n\ 00497 \n\ 00498 # Region of interest (subwindow of full camera resolution), given in\n\ 00499 # full resolution (unbinned) image coordinates. A particular ROI\n\ 00500 # always denotes the same window of pixels on the camera sensor,\n\ 00501 # regardless of binning settings.\n\ 00502 # The default setting of roi (all values 0) is considered the same as\n\ 00503 # full resolution (roi.width = width, roi.height = height).\n\ 00504 RegionOfInterest roi\n\ 00505 \n\ 00506 ================================================================================\n\ 00507 MSG: sensor_msgs/RegionOfInterest\n\ 00508 # This message is used to specify a region of interest within an image.\n\ 00509 #\n\ 00510 # When used to specify the ROI setting of the camera when the image was\n\ 00511 # taken, the height and width fields should either match the height and\n\ 00512 # width fields for the associated image; or height = width = 0\n\ 00513 # indicates that the full resolution image was captured.\n\ 00514 \n\ 00515 uint32 x_offset # Leftmost pixel of the ROI\n\ 00516 # (0 if the ROI includes the left edge of the image)\n\ 00517 uint32 y_offset # Topmost pixel of the ROI\n\ 00518 # (0 if the ROI includes the top edge of the image)\n\ 00519 uint32 height # Height of ROI\n\ 00520 uint32 width # Width of ROI\n\ 00521 \n\ 00522 # True if a distinct rectified ROI should be calculated from the \"raw\"\n\ 00523 # ROI in this message. Typically this should be False if the full image\n\ 00524 # is captured (ROI not used), and True if a subwindow is captured (ROI\n\ 00525 # used).\n\ 00526 bool do_rectify\n\ 00527 \n\ 00528 ================================================================================\n\ 00529 MSG: geometry_msgs/Vector3\n\ 00530 # This represents a vector in free space. \n\ 00531 \n\ 00532 float64 x\n\ 00533 float64 y\n\ 00534 float64 z\n\ 00535 ================================================================================\n\ 00536 MSG: trajectory_msgs/JointTrajectory\n\ 00537 Header header\n\ 00538 string[] joint_names\n\ 00539 JointTrajectoryPoint[] points\n\ 00540 ================================================================================\n\ 00541 MSG: trajectory_msgs/JointTrajectoryPoint\n\ 00542 float64[] positions\n\ 00543 float64[] velocities\n\ 00544 float64[] accelerations\n\ 00545 duration time_from_start\n\ 00546 ================================================================================\n\ 00547 MSG: sensor_msgs/JointState\n\ 00548 # This is a message that holds data to describe the state of a set of torque controlled joints. \n\ 00549 #\n\ 00550 # The state of each joint (revolute or prismatic) is defined by:\n\ 00551 # * the position of the joint (rad or m),\n\ 00552 # * the velocity of the joint (rad/s or m/s) and \n\ 00553 # * the effort that is applied in the joint (Nm or N).\n\ 00554 #\n\ 00555 # Each joint is uniquely identified by its name\n\ 00556 # The header specifies the time at which the joint states were recorded. 
All the joint states\n\ 00557 # in one message have to be recorded at the same time.\n\ 00558 #\n\ 00559 # This message consists of a multiple arrays, one for each part of the joint state. \n\ 00560 # The goal is to make each of the fields optional. When e.g. your joints have no\n\ 00561 # effort associated with them, you can leave the effort array empty. \n\ 00562 #\n\ 00563 # All arrays in this message should have the same size, or be empty.\n\ 00564 # This is the only way to uniquely associate the joint name with the correct\n\ 00565 # states.\n\ 00566 \n\ 00567 \n\ 00568 Header header\n\ 00569 \n\ 00570 string[] name\n\ 00571 float64[] position\n\ 00572 float64[] velocity\n\ 00573 float64[] effort\n\ 00574 \n\ 00575 "; } 00576 public: 00577 ROS_DEPRECATED static const std::string __s_getMessageDefinition() { return __s_getMessageDefinition_(); } 00578 00579 ROS_DEPRECATED const std::string __getMessageDefinition() const { return __s_getMessageDefinition_(); } 00580 00581 ROS_DEPRECATED virtual uint8_t *serialize(uint8_t *write_ptr, uint32_t seq) const 00582 { 00583 ros::serialization::OStream stream(write_ptr, 1000000000); 00584 ros::serialization::serialize(stream, arm_name); 00585 ros::serialization::serialize(stream, target); 00586 ros::serialization::serialize(stream, final_grasp_pose); 00587 ros::serialization::serialize(stream, trajectory); 00588 ros::serialization::serialize(stream, collision_support_surface_name); 00589 ros::serialization::serialize(stream, pre_grasp_posture); 00590 ros::serialization::serialize(stream, grasp_posture); 00591 ros::serialization::serialize(stream, max_contact_force); 00592 return stream.getData(); 00593 } 00594 00595 ROS_DEPRECATED virtual uint8_t *deserialize(uint8_t *read_ptr) 00596 { 00597 ros::serialization::IStream stream(read_ptr, 1000000000); 00598 ros::serialization::deserialize(stream, arm_name); 00599 ros::serialization::deserialize(stream, target); 00600 ros::serialization::deserialize(stream, final_grasp_pose); 00601 ros::serialization::deserialize(stream, trajectory); 00602 ros::serialization::deserialize(stream, collision_support_surface_name); 00603 ros::serialization::deserialize(stream, pre_grasp_posture); 00604 ros::serialization::deserialize(stream, grasp_posture); 00605 ros::serialization::deserialize(stream, max_contact_force); 00606 return stream.getData(); 00607 } 00608 00609 ROS_DEPRECATED virtual uint32_t serializationLength() const 00610 { 00611 uint32_t size = 0; 00612 size += ros::serialization::serializationLength(arm_name); 00613 size += ros::serialization::serializationLength(target); 00614 size += ros::serialization::serializationLength(final_grasp_pose); 00615 size += ros::serialization::serializationLength(trajectory); 00616 size += ros::serialization::serializationLength(collision_support_surface_name); 00617 size += ros::serialization::serializationLength(pre_grasp_posture); 00618 size += ros::serialization::serializationLength(grasp_posture); 00619 size += ros::serialization::serializationLength(max_contact_force); 00620 return size; 00621 } 00622 00623 typedef boost::shared_ptr< ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> > Ptr; 00624 typedef boost::shared_ptr< ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> const> ConstPtr; 00625 boost::shared_ptr<std::map<std::string, std::string> > __connection_header; 00626 }; // struct ReactiveGraspGoal 00627 typedef ::object_manipulation_msgs::ReactiveGraspGoal_<std::allocator<void> > ReactiveGraspGoal; 00628 00629 typedef 
boost::shared_ptr< ::object_manipulation_msgs::ReactiveGraspGoal> ReactiveGraspGoalPtr; 00630 typedef boost::shared_ptr< ::object_manipulation_msgs::ReactiveGraspGoal const> ReactiveGraspGoalConstPtr; 00631 00632 00633 template<typename ContainerAllocator> 00634 std::ostream& operator<<(std::ostream& s, const ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> & v) 00635 { 00636 ros::message_operations::Printer< ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> >::stream(s, "", v); 00637 return s;} 00638 00639 } // namespace object_manipulation_msgs 00640 00641 namespace ros 00642 { 00643 namespace message_traits 00644 { 00645 template<class ContainerAllocator> struct IsMessage< ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> > : public TrueType {}; 00646 template<class ContainerAllocator> struct IsMessage< ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> const> : public TrueType {}; 00647 template<class ContainerAllocator> 00648 struct MD5Sum< ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> > { 00649 static const char* value() 00650 { 00651 return "f48f8da93655260d66ea21ae9aaef12f"; 00652 } 00653 00654 static const char* value(const ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> &) { return value(); } 00655 static const uint64_t static_value1 = 0xf48f8da93655260dULL; 00656 static const uint64_t static_value2 = 0x66ea21ae9aaef12fULL; 00657 }; 00658 00659 template<class ContainerAllocator> 00660 struct DataType< ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> > { 00661 static const char* value() 00662 { 00663 return "object_manipulation_msgs/ReactiveGraspGoal"; 00664 } 00665 00666 static const char* value(const ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> &) { return value(); } 00667 }; 00668 00669 template<class ContainerAllocator> 00670 struct Definition< ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> > { 00671 static const char* value() 00672 { 00673 return "# ====== DO NOT MODIFY! 
AUTOGENERATED FROM AN ACTION DEFINITION ======\n\ 00674 # an action for reactive grasping\n\ 00675 # a reactive grasp starts from the current pose of the gripper and ends\n\ 00676 # at a desired grasp pose, presumably using the touch sensors along the way\n\ 00677 \n\ 00678 # the name of the arm being used\n\ 00679 string arm_name\n\ 00680 \n\ 00681 # the object to be grasped\n\ 00682 GraspableObject target\n\ 00683 \n\ 00684 # the desired grasp pose for the hand\n\ 00685 geometry_msgs/PoseStamped final_grasp_pose\n\ 00686 \n\ 00687 # the joint trajectory to use for the approach (if available)\n\ 00688 # this trajectory is expected to start at the current pose of the gripper\n\ 00689 # and end at the desired grasp pose\n\ 00690 trajectory_msgs/JointTrajectory trajectory\n\ 00691 \n\ 00692 # the name of the support surface in the collision environment, if any\n\ 00693 string collision_support_surface_name\n\ 00694 \n\ 00695 # The internal posture of the hand for the pre-grasp\n\ 00696 # only positions are used\n\ 00697 sensor_msgs/JointState pre_grasp_posture\n\ 00698 \n\ 00699 # The internal posture of the hand for the grasp\n\ 00700 # positions and efforts are used\n\ 00701 sensor_msgs/JointState grasp_posture\n\ 00702 \n\ 00703 # The max contact force to use while grasping (<=0 to disable)\n\ 00704 float32 max_contact_force\n\ 00705 \n\ 00706 \n\ 00707 ================================================================================\n\ 00708 MSG: object_manipulation_msgs/GraspableObject\n\ 00709 # an object that the object_manipulator can work on\n\ 00710 \n\ 00711 # a graspable object can be represented in multiple ways. This message\n\ 00712 # can contain all of them. Which one is actually used is up to the receiver\n\ 00713 # of this message. When adding new representations, one must be careful that\n\ 00714 # they have reasonable lightweight defaults indicating that that particular\n\ 00715 # representation is not available.\n\ 00716 \n\ 00717 # the tf frame to be used as a reference frame when combining information from\n\ 00718 # the different representations below\n\ 00719 string reference_frame_id\n\ 00720 \n\ 00721 # potential recognition results from a database of models\n\ 00722 # all poses are relative to the object reference pose\n\ 00723 household_objects_database_msgs/DatabaseModelPose[] potential_models\n\ 00724 \n\ 00725 # the point cloud itself\n\ 00726 sensor_msgs/PointCloud cluster\n\ 00727 \n\ 00728 # a region of a PointCloud2 of interest\n\ 00729 object_manipulation_msgs/SceneRegion region\n\ 00730 \n\ 00731 # the name that this object has in the collision environment\n\ 00732 string collision_name\n\ 00733 ================================================================================\n\ 00734 MSG: household_objects_database_msgs/DatabaseModelPose\n\ 00735 # Informs that a specific model from the Model Database has been \n\ 00736 # identified at a certain location\n\ 00737 \n\ 00738 # the database id of the model\n\ 00739 int32 model_id\n\ 00740 \n\ 00741 # the pose that it can be found in\n\ 00742 geometry_msgs/PoseStamped pose\n\ 00743 \n\ 00744 # a measure of the confidence level in this detection result\n\ 00745 float32 confidence\n\ 00746 \n\ 00747 # the name of the object detector that generated this detection result\n\ 00748 string detector_name\n\ 00749 \n\ 00750 ================================================================================\n\ 00751 MSG: geometry_msgs/PoseStamped\n\ 00752 # A Pose with reference coordinate frame and timestamp\n\ 00753 
Header header\n\ 00754 Pose pose\n\ 00755 \n\ 00756 ================================================================================\n\ 00757 MSG: std_msgs/Header\n\ 00758 # Standard metadata for higher-level stamped data types.\n\ 00759 # This is generally used to communicate timestamped data \n\ 00760 # in a particular coordinate frame.\n\ 00761 # \n\ 00762 # sequence ID: consecutively increasing ID \n\ 00763 uint32 seq\n\ 00764 #Two-integer timestamp that is expressed as:\n\ 00765 # * stamp.secs: seconds (stamp_secs) since epoch\n\ 00766 # * stamp.nsecs: nanoseconds since stamp_secs\n\ 00767 # time-handling sugar is provided by the client library\n\ 00768 time stamp\n\ 00769 #Frame this data is associated with\n\ 00770 # 0: no frame\n\ 00771 # 1: global frame\n\ 00772 string frame_id\n\ 00773 \n\ 00774 ================================================================================\n\ 00775 MSG: geometry_msgs/Pose\n\ 00776 # A representation of pose in free space, composed of postion and orientation. \n\ 00777 Point position\n\ 00778 Quaternion orientation\n\ 00779 \n\ 00780 ================================================================================\n\ 00781 MSG: geometry_msgs/Point\n\ 00782 # This contains the position of a point in free space\n\ 00783 float64 x\n\ 00784 float64 y\n\ 00785 float64 z\n\ 00786 \n\ 00787 ================================================================================\n\ 00788 MSG: geometry_msgs/Quaternion\n\ 00789 # This represents an orientation in free space in quaternion form.\n\ 00790 \n\ 00791 float64 x\n\ 00792 float64 y\n\ 00793 float64 z\n\ 00794 float64 w\n\ 00795 \n\ 00796 ================================================================================\n\ 00797 MSG: sensor_msgs/PointCloud\n\ 00798 # This message holds a collection of 3d points, plus optional additional\n\ 00799 # information about each point.\n\ 00800 \n\ 00801 # Time of sensor data acquisition, coordinate frame ID.\n\ 00802 Header header\n\ 00803 \n\ 00804 # Array of 3d points. Each Point32 should be interpreted as a 3d point\n\ 00805 # in the frame given in the header.\n\ 00806 geometry_msgs/Point32[] points\n\ 00807 \n\ 00808 # Each channel should have the same number of elements as points array,\n\ 00809 # and the data in each channel should correspond 1:1 with each point.\n\ 00810 # Channel names in common practice are listed in ChannelFloat32.msg.\n\ 00811 ChannelFloat32[] channels\n\ 00812 \n\ 00813 ================================================================================\n\ 00814 MSG: geometry_msgs/Point32\n\ 00815 # This contains the position of a point in free space(with 32 bits of precision).\n\ 00816 # It is recommeded to use Point wherever possible instead of Point32. \n\ 00817 # \n\ 00818 # This recommendation is to promote interoperability. \n\ 00819 #\n\ 00820 # This message is designed to take up less space when sending\n\ 00821 # lots of points at once, as in the case of a PointCloud. \n\ 00822 \n\ 00823 float32 x\n\ 00824 float32 y\n\ 00825 float32 z\n\ 00826 ================================================================================\n\ 00827 MSG: sensor_msgs/ChannelFloat32\n\ 00828 # This message is used by the PointCloud message to hold optional data\n\ 00829 # associated with each point in the cloud. 
The length of the values\n\ 00830 # array should be the same as the length of the points array in the\n\ 00831 # PointCloud, and each value should be associated with the corresponding\n\ 00832 # point.\n\ 00833 \n\ 00834 # Channel names in existing practice include:\n\ 00835 # \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\ 00836 # This is opposite to usual conventions but remains for\n\ 00837 # historical reasons. The newer PointCloud2 message has no\n\ 00838 # such problem.\n\ 00839 # \"rgb\" - For point clouds produced by color stereo cameras. uint8\n\ 00840 # (R,G,B) values packed into the least significant 24 bits,\n\ 00841 # in order.\n\ 00842 # \"intensity\" - laser or pixel intensity.\n\ 00843 # \"distance\"\n\ 00844 \n\ 00845 # The channel name should give semantics of the channel (e.g.\n\ 00846 # \"intensity\" instead of \"value\").\n\ 00847 string name\n\ 00848 \n\ 00849 # The values array should be 1-1 with the elements of the associated\n\ 00850 # PointCloud.\n\ 00851 float32[] values\n\ 00852 \n\ 00853 ================================================================================\n\ 00854 MSG: object_manipulation_msgs/SceneRegion\n\ 00855 # Point cloud\n\ 00856 sensor_msgs/PointCloud2 cloud\n\ 00857 \n\ 00858 # Indices for the region of interest\n\ 00859 int32[] mask\n\ 00860 \n\ 00861 # One of the corresponding 2D images, if applicable\n\ 00862 sensor_msgs/Image image\n\ 00863 \n\ 00864 # The disparity image, if applicable\n\ 00865 sensor_msgs/Image disparity_image\n\ 00866 \n\ 00867 # Camera info for the camera that took the image\n\ 00868 sensor_msgs/CameraInfo cam_info\n\ 00869 \n\ 00870 # a 3D region of interest for grasp planning\n\ 00871 geometry_msgs/PoseStamped roi_box_pose\n\ 00872 geometry_msgs/Vector3 roi_box_dims\n\ 00873 \n\ 00874 ================================================================================\n\ 00875 MSG: sensor_msgs/PointCloud2\n\ 00876 # This message holds a collection of N-dimensional points, which may\n\ 00877 # contain additional information such as normals, intensity, etc. The\n\ 00878 # point data is stored as a binary blob, its layout described by the\n\ 00879 # contents of the \"fields\" array.\n\ 00880 \n\ 00881 # The point cloud data may be organized 2d (image-like) or 1d\n\ 00882 # (unordered). Point clouds organized as 2d images may be produced by\n\ 00883 # camera depth sensors such as stereo or time-of-flight.\n\ 00884 \n\ 00885 # Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\ 00886 # points).\n\ 00887 Header header\n\ 00888 \n\ 00889 # 2D structure of the point cloud. 
If the cloud is unordered, height is\n\ 00890 # 1 and width is the length of the point cloud.\n\ 00891 uint32 height\n\ 00892 uint32 width\n\ 00893 \n\ 00894 # Describes the channels and their layout in the binary data blob.\n\ 00895 PointField[] fields\n\ 00896 \n\ 00897 bool is_bigendian # Is this data bigendian?\n\ 00898 uint32 point_step # Length of a point in bytes\n\ 00899 uint32 row_step # Length of a row in bytes\n\ 00900 uint8[] data # Actual point data, size is (row_step*height)\n\ 00901 \n\ 00902 bool is_dense # True if there are no invalid points\n\ 00903 \n\ 00904 ================================================================================\n\ 00905 MSG: sensor_msgs/PointField\n\ 00906 # This message holds the description of one point entry in the\n\ 00907 # PointCloud2 message format.\n\ 00908 uint8 INT8 = 1\n\ 00909 uint8 UINT8 = 2\n\ 00910 uint8 INT16 = 3\n\ 00911 uint8 UINT16 = 4\n\ 00912 uint8 INT32 = 5\n\ 00913 uint8 UINT32 = 6\n\ 00914 uint8 FLOAT32 = 7\n\ 00915 uint8 FLOAT64 = 8\n\ 00916 \n\ 00917 string name # Name of field\n\ 00918 uint32 offset # Offset from start of point struct\n\ 00919 uint8 datatype # Datatype enumeration, see above\n\ 00920 uint32 count # How many elements in the field\n\ 00921 \n\ 00922 ================================================================================\n\ 00923 MSG: sensor_msgs/Image\n\ 00924 # This message contains an uncompressed image\n\ 00925 # (0, 0) is at top-left corner of image\n\ 00926 #\n\ 00927 \n\ 00928 Header header # Header timestamp should be acquisition time of image\n\ 00929 # Header frame_id should be optical frame of camera\n\ 00930 # origin of frame should be optical center of cameara\n\ 00931 # +x should point to the right in the image\n\ 00932 # +y should point down in the image\n\ 00933 # +z should point into to plane of the image\n\ 00934 # If the frame_id here and the frame_id of the CameraInfo\n\ 00935 # message associated with the image conflict\n\ 00936 # the behavior is undefined\n\ 00937 \n\ 00938 uint32 height # image height, that is, number of rows\n\ 00939 uint32 width # image width, that is, number of columns\n\ 00940 \n\ 00941 # The legal values for encoding are in file src/image_encodings.cpp\n\ 00942 # If you want to standardize a new string format, join\n\ 00943 # ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\ 00944 \n\ 00945 string encoding # Encoding of pixels -- channel meaning, ordering, size\n\ 00946 # taken from the list of strings in src/image_encodings.cpp\n\ 00947 \n\ 00948 uint8 is_bigendian # is this data bigendian?\n\ 00949 uint32 step # Full row length in bytes\n\ 00950 uint8[] data # actual matrix data, size is (step * rows)\n\ 00951 \n\ 00952 ================================================================================\n\ 00953 MSG: sensor_msgs/CameraInfo\n\ 00954 # This message defines meta information for a camera. It should be in a\n\ 00955 # camera namespace on topic \"camera_info\" and accompanied by up to five\n\ 00956 # image topics named:\n\ 00957 #\n\ 00958 # image_raw - raw data from the camera driver, possibly Bayer encoded\n\ 00959 # image - monochrome, distorted\n\ 00960 # image_color - color, distorted\n\ 00961 # image_rect - monochrome, rectified\n\ 00962 # image_rect_color - color, rectified\n\ 00963 #\n\ 00964 # The image_pipeline contains packages (image_proc, stereo_image_proc)\n\ 00965 # for producing the four processed image topics from image_raw and\n\ 00966 # camera_info. 
The meaning of the camera parameters are described in\n\ 00967 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\ 00968 #\n\ 00969 # The image_geometry package provides a user-friendly interface to\n\ 00970 # common operations using this meta information. If you want to, e.g.,\n\ 00971 # project a 3d point into image coordinates, we strongly recommend\n\ 00972 # using image_geometry.\n\ 00973 #\n\ 00974 # If the camera is uncalibrated, the matrices D, K, R, P should be left\n\ 00975 # zeroed out. In particular, clients may assume that K[0] == 0.0\n\ 00976 # indicates an uncalibrated camera.\n\ 00977 \n\ 00978 #######################################################################\n\ 00979 # Image acquisition info #\n\ 00980 #######################################################################\n\ 00981 \n\ 00982 # Time of image acquisition, camera coordinate frame ID\n\ 00983 Header header # Header timestamp should be acquisition time of image\n\ 00984 # Header frame_id should be optical frame of camera\n\ 00985 # origin of frame should be optical center of camera\n\ 00986 # +x should point to the right in the image\n\ 00987 # +y should point down in the image\n\ 00988 # +z should point into the plane of the image\n\ 00989 \n\ 00990 \n\ 00991 #######################################################################\n\ 00992 # Calibration Parameters #\n\ 00993 #######################################################################\n\ 00994 # These are fixed during camera calibration. Their values will be the #\n\ 00995 # same in all messages until the camera is recalibrated. Note that #\n\ 00996 # self-calibrating systems may \"recalibrate\" frequently. #\n\ 00997 # #\n\ 00998 # The internal parameters can be used to warp a raw (distorted) image #\n\ 00999 # to: #\n\ 01000 # 1. An undistorted image (requires D and K) #\n\ 01001 # 2. A rectified image (requires D, K, R) #\n\ 01002 # The projection matrix P projects 3D points into the rectified image.#\n\ 01003 #######################################################################\n\ 01004 \n\ 01005 # The image dimensions with which the camera was calibrated. Normally\n\ 01006 # this will be the full camera resolution in pixels.\n\ 01007 uint32 height\n\ 01008 uint32 width\n\ 01009 \n\ 01010 # The distortion model used. Supported models are listed in\n\ 01011 # sensor_msgs/distortion_models.h. 
For most cameras, \"plumb_bob\" - a\n\ 01012 # simple model of radial and tangential distortion - is sufficent.\n\ 01013 string distortion_model\n\ 01014 \n\ 01015 # The distortion parameters, size depending on the distortion model.\n\ 01016 # For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\ 01017 float64[] D\n\ 01018 \n\ 01019 # Intrinsic camera matrix for the raw (distorted) images.\n\ 01020 # [fx 0 cx]\n\ 01021 # K = [ 0 fy cy]\n\ 01022 # [ 0 0 1]\n\ 01023 # Projects 3D points in the camera coordinate frame to 2D pixel\n\ 01024 # coordinates using the focal lengths (fx, fy) and principal point\n\ 01025 # (cx, cy).\n\ 01026 float64[9] K # 3x3 row-major matrix\n\ 01027 \n\ 01028 # Rectification matrix (stereo cameras only)\n\ 01029 # A rotation matrix aligning the camera coordinate system to the ideal\n\ 01030 # stereo image plane so that epipolar lines in both stereo images are\n\ 01031 # parallel.\n\ 01032 float64[9] R # 3x3 row-major matrix\n\ 01033 \n\ 01034 # Projection/camera matrix\n\ 01035 # [fx' 0 cx' Tx]\n\ 01036 # P = [ 0 fy' cy' Ty]\n\ 01037 # [ 0 0 1 0]\n\ 01038 # By convention, this matrix specifies the intrinsic (camera) matrix\n\ 01039 # of the processed (rectified) image. That is, the left 3x3 portion\n\ 01040 # is the normal camera intrinsic matrix for the rectified image.\n\ 01041 # It projects 3D points in the camera coordinate frame to 2D pixel\n\ 01042 # coordinates using the focal lengths (fx', fy') and principal point\n\ 01043 # (cx', cy') - these may differ from the values in K.\n\ 01044 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\ 01045 # also have R = the identity and P[1:3,1:3] = K.\n\ 01046 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\ 01047 # position of the optical center of the second camera in the first\n\ 01048 # camera's frame. We assume Tz = 0 so both cameras are in the same\n\ 01049 # stereo image plane. The first camera always has Tx = Ty = 0. For\n\ 01050 # the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\ 01051 # Tx = -fx' * B, where B is the baseline between the cameras.\n\ 01052 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\ 01053 # the rectified image is given by:\n\ 01054 # [u v w]' = P * [X Y Z 1]'\n\ 01055 # x = u / w\n\ 01056 # y = v / w\n\ 01057 # This holds for both images of a stereo pair.\n\ 01058 float64[12] P # 3x4 row-major matrix\n\ 01059 \n\ 01060 \n\ 01061 #######################################################################\n\ 01062 # Operational Parameters #\n\ 01063 #######################################################################\n\ 01064 # These define the image region actually captured by the camera #\n\ 01065 # driver. Although they affect the geometry of the output image, they #\n\ 01066 # may be changed freely without recalibrating the camera. 
#\n\ 01067 #######################################################################\n\ 01068 \n\ 01069 # Binning refers here to any camera setting which combines rectangular\n\ 01070 # neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\ 01071 # resolution of the output image to\n\ 01072 # (width / binning_x) x (height / binning_y).\n\ 01073 # The default values binning_x = binning_y = 0 is considered the same\n\ 01074 # as binning_x = binning_y = 1 (no subsampling).\n\ 01075 uint32 binning_x\n\ 01076 uint32 binning_y\n\ 01077 \n\ 01078 # Region of interest (subwindow of full camera resolution), given in\n\ 01079 # full resolution (unbinned) image coordinates. A particular ROI\n\ 01080 # always denotes the same window of pixels on the camera sensor,\n\ 01081 # regardless of binning settings.\n\ 01082 # The default setting of roi (all values 0) is considered the same as\n\ 01083 # full resolution (roi.width = width, roi.height = height).\n\ 01084 RegionOfInterest roi\n\ 01085 \n\ 01086 ================================================================================\n\ 01087 MSG: sensor_msgs/RegionOfInterest\n\ 01088 # This message is used to specify a region of interest within an image.\n\ 01089 #\n\ 01090 # When used to specify the ROI setting of the camera when the image was\n\ 01091 # taken, the height and width fields should either match the height and\n\ 01092 # width fields for the associated image; or height = width = 0\n\ 01093 # indicates that the full resolution image was captured.\n\ 01094 \n\ 01095 uint32 x_offset # Leftmost pixel of the ROI\n\ 01096 # (0 if the ROI includes the left edge of the image)\n\ 01097 uint32 y_offset # Topmost pixel of the ROI\n\ 01098 # (0 if the ROI includes the top edge of the image)\n\ 01099 uint32 height # Height of ROI\n\ 01100 uint32 width # Width of ROI\n\ 01101 \n\ 01102 # True if a distinct rectified ROI should be calculated from the \"raw\"\n\ 01103 # ROI in this message. Typically this should be False if the full image\n\ 01104 # is captured (ROI not used), and True if a subwindow is captured (ROI\n\ 01105 # used).\n\ 01106 bool do_rectify\n\ 01107 \n\ 01108 ================================================================================\n\ 01109 MSG: geometry_msgs/Vector3\n\ 01110 # This represents a vector in free space. \n\ 01111 \n\ 01112 float64 x\n\ 01113 float64 y\n\ 01114 float64 z\n\ 01115 ================================================================================\n\ 01116 MSG: trajectory_msgs/JointTrajectory\n\ 01117 Header header\n\ 01118 string[] joint_names\n\ 01119 JointTrajectoryPoint[] points\n\ 01120 ================================================================================\n\ 01121 MSG: trajectory_msgs/JointTrajectoryPoint\n\ 01122 float64[] positions\n\ 01123 float64[] velocities\n\ 01124 float64[] accelerations\n\ 01125 duration time_from_start\n\ 01126 ================================================================================\n\ 01127 MSG: sensor_msgs/JointState\n\ 01128 # This is a message that holds data to describe the state of a set of torque controlled joints. \n\ 01129 #\n\ 01130 # The state of each joint (revolute or prismatic) is defined by:\n\ 01131 # * the position of the joint (rad or m),\n\ 01132 # * the velocity of the joint (rad/s or m/s) and \n\ 01133 # * the effort that is applied in the joint (Nm or N).\n\ 01134 #\n\ 01135 # Each joint is uniquely identified by its name\n\ 01136 # The header specifies the time at which the joint states were recorded. 
All the joint states\n\ 01137 # in one message have to be recorded at the same time.\n\ 01138 #\n\ 01139 # This message consists of a multiple arrays, one for each part of the joint state. \n\ 01140 # The goal is to make each of the fields optional. When e.g. your joints have no\n\ 01141 # effort associated with them, you can leave the effort array empty. \n\ 01142 #\n\ 01143 # All arrays in this message should have the same size, or be empty.\n\ 01144 # This is the only way to uniquely associate the joint name with the correct\n\ 01145 # states.\n\ 01146 \n\ 01147 \n\ 01148 Header header\n\ 01149 \n\ 01150 string[] name\n\ 01151 float64[] position\n\ 01152 float64[] velocity\n\ 01153 float64[] effort\n\ 01154 \n\ 01155 "; 01156 } 01157 01158 static const char* value(const ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> &) { return value(); } 01159 }; 01160 01161 } // namespace message_traits 01162 } // namespace ros 01163 01164 namespace ros 01165 { 01166 namespace serialization 01167 { 01168 01169 template<class ContainerAllocator> struct Serializer< ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> > 01170 { 01171 template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m) 01172 { 01173 stream.next(m.arm_name); 01174 stream.next(m.target); 01175 stream.next(m.final_grasp_pose); 01176 stream.next(m.trajectory); 01177 stream.next(m.collision_support_surface_name); 01178 stream.next(m.pre_grasp_posture); 01179 stream.next(m.grasp_posture); 01180 stream.next(m.max_contact_force); 01181 } 01182 01183 ROS_DECLARE_ALLINONE_SERIALIZER; 01184 }; // struct ReactiveGraspGoal_ 01185 } // namespace serialization 01186 } // namespace ros 01187 01188 namespace ros 01189 { 01190 namespace message_operations 01191 { 01192 01193 template<class ContainerAllocator> 01194 struct Printer< ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> > 01195 { 01196 template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::object_manipulation_msgs::ReactiveGraspGoal_<ContainerAllocator> & v) 01197 { 01198 s << indent << "arm_name: "; 01199 Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + " ", v.arm_name); 01200 s << indent << "target: "; 01201 s << std::endl; 01202 Printer< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::stream(s, indent + " ", v.target); 01203 s << indent << "final_grasp_pose: "; 01204 s << std::endl; 01205 Printer< ::geometry_msgs::PoseStamped_<ContainerAllocator> >::stream(s, indent + " ", v.final_grasp_pose); 01206 s << indent << "trajectory: "; 01207 s << std::endl; 01208 Printer< ::trajectory_msgs::JointTrajectory_<ContainerAllocator> >::stream(s, indent + " ", v.trajectory); 01209 s << indent << "collision_support_surface_name: "; 01210 Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + " ", v.collision_support_surface_name); 01211 s << indent << "pre_grasp_posture: "; 01212 s << std::endl; 01213 Printer< ::sensor_msgs::JointState_<ContainerAllocator> >::stream(s, indent + " ", v.pre_grasp_posture); 01214 s << indent << "grasp_posture: "; 01215 s << std::endl; 01216 Printer< ::sensor_msgs::JointState_<ContainerAllocator> >::stream(s, indent + " ", v.grasp_posture); 01217 s << indent << "max_contact_force: "; 01218 Printer<float>::stream(s, indent + " ", v.max_contact_force); 01219 } 01220 }; 
01221 01222 01223 } // namespace message_operations 01224 } // namespace ros 01225 01226 #endif // OBJECT_MANIPULATION_MSGS_MESSAGE_REACTIVEGRASPGOAL_H 01227
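A rough usage sketch follows, showing how a client might fill in a ReactiveGraspGoal and query the traits defined in this header. Only the message type, its field names, and the message_traits/stream operators come from this file; the arm name, joint names, pose values, and forces below are illustrative assumptions, not part of the generated code.

#include <iostream>
#include <object_manipulation_msgs/ReactiveGraspGoal.h>

int main()
{
  object_manipulation_msgs::ReactiveGraspGoal goal;

  // Illustrative values only -- real arm/joint names depend on the robot setup.
  goal.arm_name = "right_arm";
  goal.final_grasp_pose.header.frame_id = "base_link";
  goal.final_grasp_pose.pose.position.x = 0.5;
  goal.final_grasp_pose.pose.orientation.w = 1.0;
  goal.collision_support_surface_name = "table";

  // Pre-grasp posture: only positions are used (see the message definition above).
  goal.pre_grasp_posture.name.push_back("r_gripper_joint");
  goal.pre_grasp_posture.position.push_back(0.08);

  // Grasp posture: positions and efforts are used.
  goal.grasp_posture.name.push_back("r_gripper_joint");
  goal.grasp_posture.position.push_back(0.0);
  goal.grasp_posture.effort.push_back(50.0);

  // Max contact force while grasping; a value <= 0 disables the limit.
  goal.max_contact_force = 10.0;

  // DataType, MD5Sum, and operator<< are all provided by this header.
  std::cout << "type: "
            << ros::message_traits::DataType<object_manipulation_msgs::ReactiveGraspGoal>::value()
            << "\nmd5:  "
            << ros::message_traits::MD5Sum<object_manipulation_msgs::ReactiveGraspGoal>::value()
            << "\n" << goal << std::endl;
  return 0;
}

In practice such a goal would be sent through an actionlib client for the ReactiveGrasp action rather than printed; the printing here only exercises the Printer and trait specializations defined in this header.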