/* Auto-generated by genmsg_cpp for file /home/rosbuild/hudson/workspace/doc-electric-pr2_object_manipulation/doc_stacks/2013-03-05_12-10-38.333207/pr2_object_manipulation/manipulation/pr2_object_manipulation_msgs/msg/IMGUIOptions.msg */
#ifndef PR2_OBJECT_MANIPULATION_MSGS_MESSAGE_IMGUIOPTIONS_H
#define PR2_OBJECT_MANIPULATION_MSGS_MESSAGE_IMGUIOPTIONS_H
#include <string>
#include <vector>
#include <map>
#include <ostream>
#include "ros/serialization.h"
#include "ros/builtin_message_traits.h"
#include "ros/message_operations.h"
#include "ros/time.h"

#include "ros/macros.h"

#include "ros/assert.h"

#include "object_manipulation_msgs/GraspableObject.h"
#include "object_manipulation_msgs/GraspableObject.h"
#include "pr2_object_manipulation_msgs/IMGUIAdvancedOptions.h"

namespace pr2_object_manipulation_msgs
{
template <class ContainerAllocator>
struct IMGUIOptions_ {
  typedef IMGUIOptions_<ContainerAllocator> Type;

  IMGUIOptions_()
  : collision_checked(false)
  , grasp_selection(0)
  , arm_selection(0)
  , reset_choice(0)
  , arm_action_choice(0)
  , arm_planner_choice(0)
  , gripper_slider_position(0)
  , selected_object()
  , movable_obstacles()
  , adv_options()
  {
  }

  IMGUIOptions_(const ContainerAllocator& _alloc)
  : collision_checked(false)
  , grasp_selection(0)
  , arm_selection(0)
  , reset_choice(0)
  , arm_action_choice(0)
  , arm_planner_choice(0)
  , gripper_slider_position(0)
  , selected_object(_alloc)
  , movable_obstacles(_alloc)
  , adv_options(_alloc)
  {
  }

  typedef uint8_t _collision_checked_type;
  uint8_t collision_checked;

  typedef int32_t _grasp_selection_type;
  int32_t grasp_selection;

  typedef int32_t _arm_selection_type;
  int32_t arm_selection;

  typedef int32_t _reset_choice_type;
  int32_t reset_choice;

  typedef int32_t _arm_action_choice_type;
  int32_t arm_action_choice;

  typedef int32_t _arm_planner_choice_type;
  int32_t arm_planner_choice;

  typedef int32_t _gripper_slider_position_type;
  int32_t gripper_slider_position;

  typedef ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> _selected_object_type;
  ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> selected_object;

  typedef std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > _movable_obstacles_type;
  std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > movable_obstacles;

  typedef ::pr2_object_manipulation_msgs::IMGUIAdvancedOptions_<ContainerAllocator> _adv_options_type;
  ::pr2_object_manipulation_msgs::IMGUIAdvancedOptions_<ContainerAllocator> adv_options;


  ROS_DEPRECATED uint32_t get_movable_obstacles_size() const { return (uint32_t)movable_obstacles.size(); }
  ROS_DEPRECATED void set_movable_obstacles_size(uint32_t size) { movable_obstacles.resize((size_t)size); }
  ROS_DEPRECATED void get_movable_obstacles_vec(std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > & vec) const { vec = this->movable_obstacles; }
  ROS_DEPRECATED void set_movable_obstacles_vec(const std::vector< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::other > & vec) { this->movable_obstacles = vec; }
private:
  static const char* __s_getDataType_() { return "pr2_object_manipulation_msgs/IMGUIOptions"; }
public:
  ROS_DEPRECATED static const std::string __s_getDataType() { return __s_getDataType_(); }

  ROS_DEPRECATED const std::string __getDataType() const { return __s_getDataType_(); }

private:
  static const char* __s_getMD5Sum_() { return "8aeba15821e5be12a564fea38cf7ad87"; }
public:
  ROS_DEPRECATED static const std::string __s_getMD5Sum() { return __s_getMD5Sum_(); }

  ROS_DEPRECATED const std::string __getMD5Sum() const { return __s_getMD5Sum_(); }

private:
  static const char* __s_getMessageDefinition_() { return "\n\
# collision checking enabled\n\
bool collision_checked\n\
\n\
# 0=call gripper click\n\
# 1=grasp the provided graspable object\n\
int32 grasp_selection\n\
\n\
# 0=right, 1=left arm\n\
int32 arm_selection\n\
\n\
# for RESET commands\n\
# 0=reset collision objects\n\
# 1=reset attached objects\n\
int32 reset_choice\n\
\n\
# for MOVE_ARM commands\n\
# 0=side\n\
# 1=front\n\
# 2=side handoff\n\
int32 arm_action_choice\n\
\n\
# for MOVE_ARM commands\n\
# 0=open-loop\n\
# 1=with planner\n\
int32 arm_planner_choice\n\
\n\
# for MOVE_GRIPPER commands\n\
# opening of gripper (0=closed..100=open)\n\
int32 gripper_slider_position\n\
\n\
# used if grasp_selection == 1\n\
object_manipulation_msgs/GraspableObject selected_object\n\
\n\
# indicates obstacles that can be moved during grasping\n\
# presumably, the operator has marked these in some fashion\n\
object_manipulation_msgs/GraspableObject[] movable_obstacles\n\
\n\
# more options..\n\
IMGUIAdvancedOptions adv_options\n\
\n\
================================================================================\n\
MSG: object_manipulation_msgs/GraspableObject\n\
# an object that the object_manipulator can work on\n\
\n\
# a graspable object can be represented in multiple ways. This message\n\
# can contain all of them. Which one is actually used is up to the receiver\n\
# of this message. When adding new representations, one must be careful that\n\
# they have reasonable lightweight defaults indicating that that particular\n\
# representation is not available.\n\
\n\
# the tf frame to be used as a reference frame when combining information from\n\
# the different representations below\n\
string reference_frame_id\n\
\n\
# potential recognition results from a database of models\n\
# all poses are relative to the object reference pose\n\
household_objects_database_msgs/DatabaseModelPose[] potential_models\n\
\n\
# the point cloud itself\n\
sensor_msgs/PointCloud cluster\n\
\n\
# a region of a PointCloud2 of interest\n\
object_manipulation_msgs/SceneRegion region\n\
\n\
# the name that this object has in the collision environment\n\
string collision_name\n\
================================================================================\n\
MSG: household_objects_database_msgs/DatabaseModelPose\n\
# Informs that a specific model from the Model Database has been \n\
# identified at a certain location\n\
\n\
# the database id of the model\n\
int32 model_id\n\
\n\
# the pose that it can be found in\n\
geometry_msgs/PoseStamped pose\n\
\n\
# a measure of the confidence level in this detection result\n\
float32 confidence\n\
\n\
# the name of the object detector that generated this detection result\n\
string detector_name\n\
\n\
================================================================================\n\
MSG: geometry_msgs/PoseStamped\n\
# A Pose with reference coordinate frame and timestamp\n\
Header header\n\
Pose pose\n\
\n\
================================================================================\n\
MSG: std_msgs/Header\n\
# Standard metadata for higher-level stamped data types.\n\
# This is generally used to communicate timestamped data \n\
# in a particular coordinate frame.\n\
# \n\
# sequence ID: consecutively increasing ID \n\
uint32 seq\n\
#Two-integer timestamp that is expressed as:\n\
# * stamp.secs: seconds (stamp_secs) since epoch\n\
# * stamp.nsecs: nanoseconds since stamp_secs\n\
# time-handling sugar is provided by the client library\n\
time stamp\n\
#Frame this data is associated with\n\
# 0: no frame\n\
# 1: global frame\n\
string frame_id\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Pose\n\
# A representation of pose in free space, composed of postion and orientation. \n\
Point position\n\
Quaternion orientation\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point\n\
# This contains the position of a point in free space\n\
float64 x\n\
float64 y\n\
float64 z\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Quaternion\n\
# This represents an orientation in free space in quaternion form.\n\
\n\
float64 x\n\
float64 y\n\
float64 z\n\
float64 w\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointCloud\n\
# This message holds a collection of 3d points, plus optional additional\n\
# information about each point.\n\
\n\
# Time of sensor data acquisition, coordinate frame ID.\n\
Header header\n\
\n\
# Array of 3d points. Each Point32 should be interpreted as a 3d point\n\
# in the frame given in the header.\n\
geometry_msgs/Point32[] points\n\
\n\
# Each channel should have the same number of elements as points array,\n\
# and the data in each channel should correspond 1:1 with each point.\n\
# Channel names in common practice are listed in ChannelFloat32.msg.\n\
ChannelFloat32[] channels\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point32\n\
# This contains the position of a point in free space(with 32 bits of precision).\n\
# It is recommeded to use Point wherever possible instead of Point32. \n\
# \n\
# This recommendation is to promote interoperability. \n\
#\n\
# This message is designed to take up less space when sending\n\
# lots of points at once, as in the case of a PointCloud. \n\
\n\
float32 x\n\
float32 y\n\
float32 z\n\
================================================================================\n\
MSG: sensor_msgs/ChannelFloat32\n\
# This message is used by the PointCloud message to hold optional data\n\
# associated with each point in the cloud. The length of the values\n\
# array should be the same as the length of the points array in the\n\
# PointCloud, and each value should be associated with the corresponding\n\
# point.\n\
\n\
# Channel names in existing practice include:\n\
# \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\
# This is opposite to usual conventions but remains for\n\
# historical reasons. The newer PointCloud2 message has no\n\
# such problem.\n\
# \"rgb\" - For point clouds produced by color stereo cameras. uint8\n\
# (R,G,B) values packed into the least significant 24 bits,\n\
# in order.\n\
# \"intensity\" - laser or pixel intensity.\n\
# \"distance\"\n\
\n\
# The channel name should give semantics of the channel (e.g.\n\
# \"intensity\" instead of \"value\").\n\
string name\n\
\n\
# The values array should be 1-1 with the elements of the associated\n\
# PointCloud.\n\
float32[] values\n\
\n\
================================================================================\n\
MSG: object_manipulation_msgs/SceneRegion\n\
# Point cloud\n\
sensor_msgs/PointCloud2 cloud\n\
\n\
# Indices for the region of interest\n\
int32[] mask\n\
\n\
# One of the corresponding 2D images, if applicable\n\
sensor_msgs/Image image\n\
\n\
# The disparity image, if applicable\n\
sensor_msgs/Image disparity_image\n\
\n\
# Camera info for the camera that took the image\n\
sensor_msgs/CameraInfo cam_info\n\
\n\
# a 3D region of interest for grasp planning\n\
geometry_msgs/PoseStamped roi_box_pose\n\
geometry_msgs/Vector3 roi_box_dims\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointCloud2\n\
# This message holds a collection of N-dimensional points, which may\n\
# contain additional information such as normals, intensity, etc. The\n\
# point data is stored as a binary blob, its layout described by the\n\
# contents of the \"fields\" array.\n\
\n\
# The point cloud data may be organized 2d (image-like) or 1d\n\
# (unordered). Point clouds organized as 2d images may be produced by\n\
# camera depth sensors such as stereo or time-of-flight.\n\
\n\
# Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\
# points).\n\
Header header\n\
\n\
# 2D structure of the point cloud. If the cloud is unordered, height is\n\
# 1 and width is the length of the point cloud.\n\
uint32 height\n\
uint32 width\n\
\n\
# Describes the channels and their layout in the binary data blob.\n\
PointField[] fields\n\
\n\
bool is_bigendian # Is this data bigendian?\n\
uint32 point_step # Length of a point in bytes\n\
uint32 row_step # Length of a row in bytes\n\
uint8[] data # Actual point data, size is (row_step*height)\n\
\n\
bool is_dense # True if there are no invalid points\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointField\n\
# This message holds the description of one point entry in the\n\
# PointCloud2 message format.\n\
uint8 INT8 = 1\n\
uint8 UINT8 = 2\n\
uint8 INT16 = 3\n\
uint8 UINT16 = 4\n\
uint8 INT32 = 5\n\
uint8 UINT32 = 6\n\
uint8 FLOAT32 = 7\n\
uint8 FLOAT64 = 8\n\
\n\
string name # Name of field\n\
uint32 offset # Offset from start of point struct\n\
uint8 datatype # Datatype enumeration, see above\n\
uint32 count # How many elements in the field\n\
\n\
================================================================================\n\
MSG: sensor_msgs/Image\n\
# This message contains an uncompressed image\n\
# (0, 0) is at top-left corner of image\n\
#\n\
\n\
Header header # Header timestamp should be acquisition time of image\n\
# Header frame_id should be optical frame of camera\n\
# origin of frame should be optical center of cameara\n\
# +x should point to the right in the image\n\
# +y should point down in the image\n\
# +z should point into to plane of the image\n\
# If the frame_id here and the frame_id of the CameraInfo\n\
# message associated with the image conflict\n\
# the behavior is undefined\n\
\n\
uint32 height # image height, that is, number of rows\n\
uint32 width # image width, that is, number of columns\n\
\n\
# The legal values for encoding are in file src/image_encodings.cpp\n\
# If you want to standardize a new string format, join\n\
# ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\
\n\
string encoding # Encoding of pixels -- channel meaning, ordering, size\n\
# taken from the list of strings in src/image_encodings.cpp\n\
\n\
uint8 is_bigendian # is this data bigendian?\n\
uint32 step # Full row length in bytes\n\
uint8[] data # actual matrix data, size is (step * rows)\n\
\n\
================================================================================\n\
MSG: sensor_msgs/CameraInfo\n\
# This message defines meta information for a camera. It should be in a\n\
# camera namespace on topic \"camera_info\" and accompanied by up to five\n\
# image topics named:\n\
#\n\
# image_raw - raw data from the camera driver, possibly Bayer encoded\n\
# image - monochrome, distorted\n\
# image_color - color, distorted\n\
# image_rect - monochrome, rectified\n\
# image_rect_color - color, rectified\n\
#\n\
# The image_pipeline contains packages (image_proc, stereo_image_proc)\n\
# for producing the four processed image topics from image_raw and\n\
# camera_info. The meaning of the camera parameters are described in\n\
# detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\
#\n\
# The image_geometry package provides a user-friendly interface to\n\
# common operations using this meta information. If you want to, e.g.,\n\
# project a 3d point into image coordinates, we strongly recommend\n\
# using image_geometry.\n\
#\n\
# If the camera is uncalibrated, the matrices D, K, R, P should be left\n\
# zeroed out. In particular, clients may assume that K[0] == 0.0\n\
# indicates an uncalibrated camera.\n\
\n\
#######################################################################\n\
# Image acquisition info #\n\
#######################################################################\n\
\n\
# Time of image acquisition, camera coordinate frame ID\n\
Header header # Header timestamp should be acquisition time of image\n\
# Header frame_id should be optical frame of camera\n\
# origin of frame should be optical center of camera\n\
# +x should point to the right in the image\n\
# +y should point down in the image\n\
# +z should point into the plane of the image\n\
\n\
\n\
#######################################################################\n\
# Calibration Parameters #\n\
#######################################################################\n\
# These are fixed during camera calibration. Their values will be the #\n\
# same in all messages until the camera is recalibrated. Note that #\n\
# self-calibrating systems may \"recalibrate\" frequently. #\n\
# #\n\
# The internal parameters can be used to warp a raw (distorted) image #\n\
# to: #\n\
# 1. An undistorted image (requires D and K) #\n\
# 2. A rectified image (requires D, K, R) #\n\
# The projection matrix P projects 3D points into the rectified image.#\n\
#######################################################################\n\
\n\
# The image dimensions with which the camera was calibrated. Normally\n\
# this will be the full camera resolution in pixels.\n\
uint32 height\n\
uint32 width\n\
\n\
# The distortion model used. Supported models are listed in\n\
# sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\
# simple model of radial and tangential distortion - is sufficent.\n\
string distortion_model\n\
\n\
# The distortion parameters, size depending on the distortion model.\n\
# For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\
float64[] D\n\
\n\
# Intrinsic camera matrix for the raw (distorted) images.\n\
# [fx 0 cx]\n\
# K = [ 0 fy cy]\n\
# [ 0 0 1]\n\
# Projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx, fy) and principal point\n\
# (cx, cy).\n\
float64[9] K # 3x3 row-major matrix\n\
\n\
# Rectification matrix (stereo cameras only)\n\
# A rotation matrix aligning the camera coordinate system to the ideal\n\
# stereo image plane so that epipolar lines in both stereo images are\n\
# parallel.\n\
float64[9] R # 3x3 row-major matrix\n\
\n\
# Projection/camera matrix\n\
# [fx' 0 cx' Tx]\n\
# P = [ 0 fy' cy' Ty]\n\
# [ 0 0 1 0]\n\
# By convention, this matrix specifies the intrinsic (camera) matrix\n\
# of the processed (rectified) image. That is, the left 3x3 portion\n\
# is the normal camera intrinsic matrix for the rectified image.\n\
# It projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx', fy') and principal point\n\
# (cx', cy') - these may differ from the values in K.\n\
# For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\
# also have R = the identity and P[1:3,1:3] = K.\n\
# For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\
# position of the optical center of the second camera in the first\n\
# camera's frame. We assume Tz = 0 so both cameras are in the same\n\
# stereo image plane. The first camera always has Tx = Ty = 0. For\n\
# the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\
# Tx = -fx' * B, where B is the baseline between the cameras.\n\
# Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\
# the rectified image is given by:\n\
# [u v w]' = P * [X Y Z 1]'\n\
# x = u / w\n\
# y = v / w\n\
# This holds for both images of a stereo pair.\n\
float64[12] P # 3x4 row-major matrix\n\
\n\
\n\
#######################################################################\n\
# Operational Parameters #\n\
#######################################################################\n\
# These define the image region actually captured by the camera #\n\
# driver. Although they affect the geometry of the output image, they #\n\
# may be changed freely without recalibrating the camera. #\n\
#######################################################################\n\
\n\
# Binning refers here to any camera setting which combines rectangular\n\
# neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\
# resolution of the output image to\n\
# (width / binning_x) x (height / binning_y).\n\
# The default values binning_x = binning_y = 0 is considered the same\n\
# as binning_x = binning_y = 1 (no subsampling).\n\
uint32 binning_x\n\
uint32 binning_y\n\
\n\
# Region of interest (subwindow of full camera resolution), given in\n\
# full resolution (unbinned) image coordinates. A particular ROI\n\
# always denotes the same window of pixels on the camera sensor,\n\
# regardless of binning settings.\n\
# The default setting of roi (all values 0) is considered the same as\n\
# full resolution (roi.width = width, roi.height = height).\n\
RegionOfInterest roi\n\
\n\
================================================================================\n\
MSG: sensor_msgs/RegionOfInterest\n\
# This message is used to specify a region of interest within an image.\n\
#\n\
# When used to specify the ROI setting of the camera when the image was\n\
# taken, the height and width fields should either match the height and\n\
# width fields for the associated image; or height = width = 0\n\
# indicates that the full resolution image was captured.\n\
\n\
uint32 x_offset # Leftmost pixel of the ROI\n\
# (0 if the ROI includes the left edge of the image)\n\
uint32 y_offset # Topmost pixel of the ROI\n\
# (0 if the ROI includes the top edge of the image)\n\
uint32 height # Height of ROI\n\
uint32 width # Width of ROI\n\
\n\
# True if a distinct rectified ROI should be calculated from the \"raw\"\n\
# ROI in this message. Typically this should be False if the full image\n\
# is captured (ROI not used), and True if a subwindow is captured (ROI\n\
# used).\n\
bool do_rectify\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Vector3\n\
# This represents a vector in free space. \n\
\n\
float64 x\n\
float64 y\n\
float64 z\n\
================================================================================\n\
MSG: pr2_object_manipulation_msgs/IMGUIAdvancedOptions\n\
\n\
bool reactive_grasping\n\
bool reactive_force \n\
bool reactive_place\n\
int32 lift_steps\n\
int32 retreat_steps\n\
int32 lift_direction_choice\n\
int32 desired_approach\n\
int32 min_approach\n\
float32 max_contact_force\n\
\n\
"; }
public:
  ROS_DEPRECATED static const std::string __s_getMessageDefinition() { return __s_getMessageDefinition_(); }

  ROS_DEPRECATED const std::string __getMessageDefinition() const { return __s_getMessageDefinition_(); }

  ROS_DEPRECATED virtual uint8_t *serialize(uint8_t *write_ptr, uint32_t seq) const
  {
    ros::serialization::OStream stream(write_ptr, 1000000000);
    ros::serialization::serialize(stream, collision_checked);
    ros::serialization::serialize(stream, grasp_selection);
    ros::serialization::serialize(stream, arm_selection);
    ros::serialization::serialize(stream, reset_choice);
    ros::serialization::serialize(stream, arm_action_choice);
    ros::serialization::serialize(stream, arm_planner_choice);
    ros::serialization::serialize(stream, gripper_slider_position);
    ros::serialization::serialize(stream, selected_object);
    ros::serialization::serialize(stream, movable_obstacles);
    ros::serialization::serialize(stream, adv_options);
    return stream.getData();
  }

  ROS_DEPRECATED virtual uint8_t *deserialize(uint8_t *read_ptr)
  {
    ros::serialization::IStream stream(read_ptr, 1000000000);
    ros::serialization::deserialize(stream, collision_checked);
    ros::serialization::deserialize(stream, grasp_selection);
    ros::serialization::deserialize(stream, arm_selection);
    ros::serialization::deserialize(stream, reset_choice);
    ros::serialization::deserialize(stream, arm_action_choice);
    ros::serialization::deserialize(stream, arm_planner_choice);
    ros::serialization::deserialize(stream, gripper_slider_position);
    ros::serialization::deserialize(stream, selected_object);
    ros::serialization::deserialize(stream, movable_obstacles);
    ros::serialization::deserialize(stream, adv_options);
    return stream.getData();
  }

  ROS_DEPRECATED virtual uint32_t serializationLength() const
  {
    uint32_t size = 0;
    size += ros::serialization::serializationLength(collision_checked);
    size += ros::serialization::serializationLength(grasp_selection);
    size += ros::serialization::serializationLength(arm_selection);
    size += ros::serialization::serializationLength(reset_choice);
    size += ros::serialization::serializationLength(arm_action_choice);
    size += ros::serialization::serializationLength(arm_planner_choice);
    size += ros::serialization::serializationLength(gripper_slider_position);
    size += ros::serialization::serializationLength(selected_object);
    size += ros::serialization::serializationLength(movable_obstacles);
    size += ros::serialization::serializationLength(adv_options);
    return size;
  }

  typedef boost::shared_ptr< ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> > Ptr;
  typedef boost::shared_ptr< ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> const> ConstPtr;
  boost::shared_ptr<std::map<std::string, std::string> > __connection_header;
}; // struct IMGUIOptions
typedef ::pr2_object_manipulation_msgs::IMGUIOptions_<std::allocator<void> > IMGUIOptions;

typedef boost::shared_ptr< ::pr2_object_manipulation_msgs::IMGUIOptions> IMGUIOptionsPtr;
typedef boost::shared_ptr< ::pr2_object_manipulation_msgs::IMGUIOptions const> IMGUIOptionsConstPtr;


template<typename ContainerAllocator>
std::ostream& operator<<(std::ostream& s, const ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> & v)
{
  ros::message_operations::Printer< ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> >::stream(s, "", v);
  return s;}

} // namespace pr2_object_manipulation_msgs

namespace ros
{
namespace message_traits
{
template<class ContainerAllocator> struct IsMessage< ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> > : public TrueType {};
template<class ContainerAllocator> struct IsMessage< ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> const> : public TrueType {};
template<class ContainerAllocator>
struct MD5Sum< ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> > {
  static const char* value()
  {
    return "8aeba15821e5be12a564fea38cf7ad87";
  }

  static const char* value(const ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> &) { return value(); }
  static const uint64_t static_value1 = 0x8aeba15821e5be12ULL;
  static const uint64_t static_value2 = 0xa564fea38cf7ad87ULL;
};

template<class ContainerAllocator>
struct DataType< ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> > {
  static const char* value()
  {
    return "pr2_object_manipulation_msgs/IMGUIOptions";
  }

  static const char* value(const ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> &) { return value(); }
};

template<class ContainerAllocator>
struct Definition< ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> > {
  static const char* value()
  {
    return "\n\
# collision checking enabled\n\
bool collision_checked\n\
\n\
# 0=call gripper click\n\
# 1=grasp the provided graspable object\n\
int32 grasp_selection\n\
\n\
# 0=right, 1=left arm\n\
int32 arm_selection\n\
\n\
# for RESET commands\n\
# 0=reset collision objects\n\
# 1=reset attached objects\n\
int32 reset_choice\n\
\n\
# for MOVE_ARM commands\n\
# 0=side\n\
# 1=front\n\
# 2=side handoff\n\
int32 arm_action_choice\n\
\n\
# for MOVE_ARM commands\n\
# 0=open-loop\n\
# 1=with planner\n\
int32 arm_planner_choice\n\
\n\
# for MOVE_GRIPPER commands\n\
# opening of gripper (0=closed..100=open)\n\
int32 gripper_slider_position\n\
\n\
# used if grasp_selection == 1\n\
object_manipulation_msgs/GraspableObject selected_object\n\
\n\
# indicates obstacles that can be moved during grasping\n\
# presumably, the operator has marked these in some fashion\n\
object_manipulation_msgs/GraspableObject[] movable_obstacles\n\
\n\
# more options..\n\
IMGUIAdvancedOptions adv_options\n\
\n\
================================================================================\n\
MSG: object_manipulation_msgs/GraspableObject\n\
# an object that the object_manipulator can work on\n\
\n\
# a graspable object can be represented in multiple ways. This message\n\
# can contain all of them. Which one is actually used is up to the receiver\n\
# of this message. When adding new representations, one must be careful that\n\
# they have reasonable lightweight defaults indicating that that particular\n\
# representation is not available.\n\
\n\
# the tf frame to be used as a reference frame when combining information from\n\
# the different representations below\n\
string reference_frame_id\n\
\n\
# potential recognition results from a database of models\n\
# all poses are relative to the object reference pose\n\
household_objects_database_msgs/DatabaseModelPose[] potential_models\n\
\n\
# the point cloud itself\n\
sensor_msgs/PointCloud cluster\n\
\n\
# a region of a PointCloud2 of interest\n\
object_manipulation_msgs/SceneRegion region\n\
\n\
# the name that this object has in the collision environment\n\
string collision_name\n\
================================================================================\n\
MSG: household_objects_database_msgs/DatabaseModelPose\n\
# Informs that a specific model from the Model Database has been \n\
# identified at a certain location\n\
\n\
# the database id of the model\n\
int32 model_id\n\
\n\
# the pose that it can be found in\n\
geometry_msgs/PoseStamped pose\n\
\n\
# a measure of the confidence level in this detection result\n\
float32 confidence\n\
\n\
# the name of the object detector that generated this detection result\n\
string detector_name\n\
\n\
================================================================================\n\
MSG: geometry_msgs/PoseStamped\n\
# A Pose with reference coordinate frame and timestamp\n\
Header header\n\
Pose pose\n\
\n\
================================================================================\n\
MSG: std_msgs/Header\n\
# Standard metadata for higher-level stamped data types.\n\
# This is generally used to communicate timestamped data \n\
# in a particular coordinate frame.\n\
# \n\
# sequence ID: consecutively increasing ID \n\
uint32 seq\n\
#Two-integer timestamp that is expressed as:\n\
# * stamp.secs: seconds (stamp_secs) since epoch\n\
# * stamp.nsecs: nanoseconds since stamp_secs\n\
# time-handling sugar is provided by the client library\n\
time stamp\n\
#Frame this data is associated with\n\
# 0: no frame\n\
# 1: global frame\n\
string frame_id\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Pose\n\
# A representation of pose in free space, composed of postion and orientation. \n\
Point position\n\
Quaternion orientation\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point\n\
# This contains the position of a point in free space\n\
float64 x\n\
float64 y\n\
float64 z\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Quaternion\n\
# This represents an orientation in free space in quaternion form.\n\
\n\
float64 x\n\
float64 y\n\
float64 z\n\
float64 w\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointCloud\n\
# This message holds a collection of 3d points, plus optional additional\n\
# information about each point.\n\
\n\
# Time of sensor data acquisition, coordinate frame ID.\n\
Header header\n\
\n\
# Array of 3d points. Each Point32 should be interpreted as a 3d point\n\
# in the frame given in the header.\n\
geometry_msgs/Point32[] points\n\
\n\
# Each channel should have the same number of elements as points array,\n\
# and the data in each channel should correspond 1:1 with each point.\n\
# Channel names in common practice are listed in ChannelFloat32.msg.\n\
ChannelFloat32[] channels\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point32\n\
# This contains the position of a point in free space(with 32 bits of precision).\n\
# It is recommeded to use Point wherever possible instead of Point32. \n\
# \n\
# This recommendation is to promote interoperability. \n\
#\n\
# This message is designed to take up less space when sending\n\
# lots of points at once, as in the case of a PointCloud. \n\
\n\
float32 x\n\
float32 y\n\
float32 z\n\
================================================================================\n\
MSG: sensor_msgs/ChannelFloat32\n\
# This message is used by the PointCloud message to hold optional data\n\
# associated with each point in the cloud. The length of the values\n\
# array should be the same as the length of the points array in the\n\
# PointCloud, and each value should be associated with the corresponding\n\
# point.\n\
\n\
# Channel names in existing practice include:\n\
# \"u\", \"v\" - row and column (respectively) in the left stereo image.\n\
# This is opposite to usual conventions but remains for\n\
# historical reasons. The newer PointCloud2 message has no\n\
# such problem.\n\
# \"rgb\" - For point clouds produced by color stereo cameras. uint8\n\
# (R,G,B) values packed into the least significant 24 bits,\n\
# in order.\n\
# \"intensity\" - laser or pixel intensity.\n\
# \"distance\"\n\
\n\
# The channel name should give semantics of the channel (e.g.\n\
# \"intensity\" instead of \"value\").\n\
string name\n\
\n\
# The values array should be 1-1 with the elements of the associated\n\
# PointCloud.\n\
float32[] values\n\
\n\
================================================================================\n\
MSG: object_manipulation_msgs/SceneRegion\n\
# Point cloud\n\
sensor_msgs/PointCloud2 cloud\n\
\n\
# Indices for the region of interest\n\
int32[] mask\n\
\n\
# One of the corresponding 2D images, if applicable\n\
sensor_msgs/Image image\n\
\n\
# The disparity image, if applicable\n\
sensor_msgs/Image disparity_image\n\
\n\
# Camera info for the camera that took the image\n\
sensor_msgs/CameraInfo cam_info\n\
\n\
# a 3D region of interest for grasp planning\n\
geometry_msgs/PoseStamped roi_box_pose\n\
geometry_msgs/Vector3 roi_box_dims\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointCloud2\n\
# This message holds a collection of N-dimensional points, which may\n\
# contain additional information such as normals, intensity, etc. The\n\
# point data is stored as a binary blob, its layout described by the\n\
# contents of the \"fields\" array.\n\
\n\
# The point cloud data may be organized 2d (image-like) or 1d\n\
# (unordered). Point clouds organized as 2d images may be produced by\n\
# camera depth sensors such as stereo or time-of-flight.\n\
\n\
# Time of sensor data acquisition, and the coordinate frame ID (for 3d\n\
# points).\n\
Header header\n\
\n\
# 2D structure of the point cloud. If the cloud is unordered, height is\n\
# 1 and width is the length of the point cloud.\n\
uint32 height\n\
uint32 width\n\
\n\
# Describes the channels and their layout in the binary data blob.\n\
PointField[] fields\n\
\n\
bool is_bigendian # Is this data bigendian?\n\
uint32 point_step # Length of a point in bytes\n\
uint32 row_step # Length of a row in bytes\n\
uint8[] data # Actual point data, size is (row_step*height)\n\
\n\
bool is_dense # True if there are no invalid points\n\
\n\
================================================================================\n\
MSG: sensor_msgs/PointField\n\
# This message holds the description of one point entry in the\n\
# PointCloud2 message format.\n\
uint8 INT8 = 1\n\
uint8 UINT8 = 2\n\
uint8 INT16 = 3\n\
uint8 UINT16 = 4\n\
uint8 INT32 = 5\n\
uint8 UINT32 = 6\n\
uint8 FLOAT32 = 7\n\
uint8 FLOAT64 = 8\n\
\n\
string name # Name of field\n\
uint32 offset # Offset from start of point struct\n\
uint8 datatype # Datatype enumeration, see above\n\
uint32 count # How many elements in the field\n\
\n\
================================================================================\n\
MSG: sensor_msgs/Image\n\
# This message contains an uncompressed image\n\
# (0, 0) is at top-left corner of image\n\
#\n\
\n\
Header header # Header timestamp should be acquisition time of image\n\
# Header frame_id should be optical frame of camera\n\
# origin of frame should be optical center of cameara\n\
# +x should point to the right in the image\n\
# +y should point down in the image\n\
# +z should point into to plane of the image\n\
# If the frame_id here and the frame_id of the CameraInfo\n\
# message associated with the image conflict\n\
# the behavior is undefined\n\
\n\
uint32 height # image height, that is, number of rows\n\
uint32 width # image width, that is, number of columns\n\
\n\
# The legal values for encoding are in file src/image_encodings.cpp\n\
# If you want to standardize a new string format, join\n\
# ros-users@lists.sourceforge.net and send an email proposing a new encoding.\n\
\n\
string encoding # Encoding of pixels -- channel meaning, ordering, size\n\
# taken from the list of strings in src/image_encodings.cpp\n\
\n\
uint8 is_bigendian # is this data bigendian?\n\
uint32 step # Full row length in bytes\n\
uint8[] data # actual matrix data, size is (step * rows)\n\
\n\
================================================================================\n\
MSG: sensor_msgs/CameraInfo\n\
# This message defines meta information for a camera. It should be in a\n\
# camera namespace on topic \"camera_info\" and accompanied by up to five\n\
# image topics named:\n\
#\n\
# image_raw - raw data from the camera driver, possibly Bayer encoded\n\
# image - monochrome, distorted\n\
# image_color - color, distorted\n\
# image_rect - monochrome, rectified\n\
# image_rect_color - color, rectified\n\
#\n\
# The image_pipeline contains packages (image_proc, stereo_image_proc)\n\
# for producing the four processed image topics from image_raw and\n\
# camera_info. The meaning of the camera parameters are described in\n\
# detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\
#\n\
# The image_geometry package provides a user-friendly interface to\n\
# common operations using this meta information. If you want to, e.g.,\n\
# project a 3d point into image coordinates, we strongly recommend\n\
# using image_geometry.\n\
#\n\
# If the camera is uncalibrated, the matrices D, K, R, P should be left\n\
# zeroed out. In particular, clients may assume that K[0] == 0.0\n\
# indicates an uncalibrated camera.\n\
\n\
#######################################################################\n\
# Image acquisition info #\n\
#######################################################################\n\
\n\
# Time of image acquisition, camera coordinate frame ID\n\
Header header # Header timestamp should be acquisition time of image\n\
# Header frame_id should be optical frame of camera\n\
# origin of frame should be optical center of camera\n\
# +x should point to the right in the image\n\
# +y should point down in the image\n\
# +z should point into the plane of the image\n\
\n\
\n\
#######################################################################\n\
# Calibration Parameters #\n\
#######################################################################\n\
# These are fixed during camera calibration. Their values will be the #\n\
# same in all messages until the camera is recalibrated. Note that #\n\
# self-calibrating systems may \"recalibrate\" frequently. #\n\
# #\n\
# The internal parameters can be used to warp a raw (distorted) image #\n\
# to: #\n\
# 1. An undistorted image (requires D and K) #\n\
# 2. A rectified image (requires D, K, R) #\n\
# The projection matrix P projects 3D points into the rectified image.#\n\
#######################################################################\n\
\n\
# The image dimensions with which the camera was calibrated. Normally\n\
# this will be the full camera resolution in pixels.\n\
uint32 height\n\
uint32 width\n\
\n\
# The distortion model used. Supported models are listed in\n\
# sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\
# simple model of radial and tangential distortion - is sufficent.\n\
string distortion_model\n\
\n\
# The distortion parameters, size depending on the distortion model.\n\
# For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\
float64[] D\n\
\n\
# Intrinsic camera matrix for the raw (distorted) images.\n\
# [fx 0 cx]\n\
# K = [ 0 fy cy]\n\
# [ 0 0 1]\n\
# Projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx, fy) and principal point\n\
# (cx, cy).\n\
float64[9] K # 3x3 row-major matrix\n\
\n\
# Rectification matrix (stereo cameras only)\n\
# A rotation matrix aligning the camera coordinate system to the ideal\n\
# stereo image plane so that epipolar lines in both stereo images are\n\
# parallel.\n\
float64[9] R # 3x3 row-major matrix\n\
\n\
# Projection/camera matrix\n\
# [fx' 0 cx' Tx]\n\
# P = [ 0 fy' cy' Ty]\n\
# [ 0 0 1 0]\n\
# By convention, this matrix specifies the intrinsic (camera) matrix\n\
# of the processed (rectified) image. That is, the left 3x3 portion\n\
# is the normal camera intrinsic matrix for the rectified image.\n\
# It projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx', fy') and principal point\n\
# (cx', cy') - these may differ from the values in K.\n\
# For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\
# also have R = the identity and P[1:3,1:3] = K.\n\
# For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\
# position of the optical center of the second camera in the first\n\
# camera's frame. We assume Tz = 0 so both cameras are in the same\n\
# stereo image plane. The first camera always has Tx = Ty = 0. For\n\
# the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\
# Tx = -fx' * B, where B is the baseline between the cameras.\n\
# Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\
# the rectified image is given by:\n\
# [u v w]' = P * [X Y Z 1]'\n\
# x = u / w\n\
# y = v / w\n\
# This holds for both images of a stereo pair.\n\
float64[12] P # 3x4 row-major matrix\n\
\n\
\n\
#######################################################################\n\
# Operational Parameters #\n\
#######################################################################\n\
# These define the image region actually captured by the camera #\n\
# driver. Although they affect the geometry of the output image, they #\n\
# may be changed freely without recalibrating the camera. #\n\
#######################################################################\n\
\n\
# Binning refers here to any camera setting which combines rectangular\n\
# neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\
# resolution of the output image to\n\
# (width / binning_x) x (height / binning_y).\n\
# The default values binning_x = binning_y = 0 is considered the same\n\
# as binning_x = binning_y = 1 (no subsampling).\n\
uint32 binning_x\n\
uint32 binning_y\n\
\n\
# Region of interest (subwindow of full camera resolution), given in\n\
# full resolution (unbinned) image coordinates. A particular ROI\n\
# always denotes the same window of pixels on the camera sensor,\n\
# regardless of binning settings.\n\
# The default setting of roi (all values 0) is considered the same as\n\
# full resolution (roi.width = width, roi.height = height).\n\
RegionOfInterest roi\n\
\n\
================================================================================\n\
MSG: sensor_msgs/RegionOfInterest\n\
# This message is used to specify a region of interest within an image.\n\
#\n\
# When used to specify the ROI setting of the camera when the image was\n\
# taken, the height and width fields should either match the height and\n\
# width fields for the associated image; or height = width = 0\n\
# indicates that the full resolution image was captured.\n\
\n\
uint32 x_offset # Leftmost pixel of the ROI\n\
# (0 if the ROI includes the left edge of the image)\n\
uint32 y_offset # Topmost pixel of the ROI\n\
# (0 if the ROI includes the top edge of the image)\n\
uint32 height # Height of ROI\n\
uint32 width # Width of ROI\n\
\n\
# True if a distinct rectified ROI should be calculated from the \"raw\"\n\
# ROI in this message. Typically this should be False if the full image\n\
# is captured (ROI not used), and True if a subwindow is captured (ROI\n\
# used).\n\
bool do_rectify\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Vector3\n\
# This represents a vector in free space. \n\
\n\
float64 x\n\
float64 y\n\
float64 z\n\
================================================================================\n\
MSG: pr2_object_manipulation_msgs/IMGUIAdvancedOptions\n\
\n\
bool reactive_grasping\n\
bool reactive_force \n\
bool reactive_place\n\
int32 lift_steps\n\
int32 retreat_steps\n\
int32 lift_direction_choice\n\
int32 desired_approach\n\
int32 min_approach\n\
float32 max_contact_force\n\
\n\
";
  }

  static const char* value(const ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> &) { return value(); }
};

} // namespace message_traits
} // namespace ros

namespace ros
{
namespace serialization
{

template<class ContainerAllocator> struct Serializer< ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> >
{
  template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
  {
    stream.next(m.collision_checked);
    stream.next(m.grasp_selection);
    stream.next(m.arm_selection);
    stream.next(m.reset_choice);
    stream.next(m.arm_action_choice);
    stream.next(m.arm_planner_choice);
    stream.next(m.gripper_slider_position);
    stream.next(m.selected_object);
    stream.next(m.movable_obstacles);
    stream.next(m.adv_options);
  }

  ROS_DECLARE_ALLINONE_SERIALIZER;
}; // struct IMGUIOptions_
} // namespace serialization
} // namespace ros

namespace ros
{
namespace message_operations
{

template<class ContainerAllocator>
struct Printer< ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> >
{
  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::pr2_object_manipulation_msgs::IMGUIOptions_<ContainerAllocator> & v)
  {
    s << indent << "collision_checked: ";
    Printer<uint8_t>::stream(s, indent + "  ", v.collision_checked);
    s << indent << "grasp_selection: ";
    Printer<int32_t>::stream(s, indent + "  ", v.grasp_selection);
    s << indent << "arm_selection: ";
    Printer<int32_t>::stream(s, indent + "  ", v.arm_selection);
    s << indent << "reset_choice: ";
    Printer<int32_t>::stream(s, indent + "  ", v.reset_choice);
    s << indent << "arm_action_choice: ";
    Printer<int32_t>::stream(s, indent + "  ", v.arm_action_choice);
    s << indent << "arm_planner_choice: ";
    Printer<int32_t>::stream(s, indent + "  ", v.arm_planner_choice);
    s << indent << "gripper_slider_position: ";
    Printer<int32_t>::stream(s, indent + "  ", v.gripper_slider_position);
    s << indent << "selected_object: ";
    s << std::endl;
    Printer< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::stream(s, indent + "  ", v.selected_object);
    s << indent << "movable_obstacles[]" << std::endl;
    for (size_t i = 0; i < v.movable_obstacles.size(); ++i)
    {
      s << indent << "  movable_obstacles[" << i << "]: ";
      s << std::endl;
      s << indent;
      Printer< ::object_manipulation_msgs::GraspableObject_<ContainerAllocator> >::stream(s, indent + "    ", v.movable_obstacles[i]);
    }
    s << indent << "adv_options: ";
    s << std::endl;
    Printer< ::pr2_object_manipulation_msgs::IMGUIAdvancedOptions_<ContainerAllocator> >::stream(s, indent + "  ", v.adv_options);
  }
};

} // namespace message_operations
} // namespace ros

#endif // PR2_OBJECT_MANIPULATION_MSGS_MESSAGE_IMGUIOPTIONS_H
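For reference, below is a minimal usage sketch (not part of the generated header) showing how client code might populate this message, following the field semantics documented in the embedded message definition, and print it with the operator<< declared above. The node name, topic name, and chosen field values are illustrative assumptions; in the PR2 interactive manipulation stack this message normally travels inside a larger command/goal message rather than being published on its own.

// Example only -- assumes a roscpp workspace where
// pr2_object_manipulation_msgs has been built.
#include <iostream>
#include <ros/ros.h>
#include <pr2_object_manipulation_msgs/IMGUIOptions.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "imgui_options_example");  // hypothetical node name
  ros::NodeHandle nh;

  pr2_object_manipulation_msgs::IMGUIOptions options;
  options.collision_checked = true;       // enable collision checking
  options.grasp_selection = 1;            // 1 = grasp the provided graspable object
  options.arm_selection = 0;              // 0 = right arm, 1 = left arm
  options.arm_planner_choice = 1;         // 1 = move the arm with the planner
  options.gripper_slider_position = 100;  // gripper opening, 0 = closed .. 100 = open
  // selected_object, movable_obstacles and adv_options keep their defaults here.

  // operator<< defined in this header pretty-prints every field via Printer<>.
  std::cout << options;

  // Publishing on a stand-alone, latched topic purely for illustration;
  // the topic name "imgui_options" is an assumption.
  ros::Publisher pub =
      nh.advertise<pr2_object_manipulation_msgs::IMGUIOptions>("imgui_options", 1, true);
  pub.publish(options);
  ros::spinOnce();
  return 0;
}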