/* Auto-generated by genmsg_cpp for file /home/rosbuild/hudson/workspace/doc-electric-camera_pose/doc_stacks/2013-03-01_14-27-38.801509/camera_pose/camera_pose_calibration/msg/RobotMeasurement.msg */
#ifndef CAMERA_POSE_CALIBRATION_MESSAGE_ROBOTMEASUREMENT_H
#define CAMERA_POSE_CALIBRATION_MESSAGE_ROBOTMEASUREMENT_H
#include <string>
#include <vector>
#include <map>
#include <ostream>
#include "ros/serialization.h"
#include "ros/builtin_message_traits.h"
#include "ros/message_operations.h"
#include "ros/time.h"

#include "ros/macros.h"

#include "ros/assert.h"

#include "std_msgs/Header.h"
#include "camera_pose_calibration/CameraMeasurement.h"

namespace camera_pose_calibration
{
template <class ContainerAllocator>
struct RobotMeasurement_ {
  typedef RobotMeasurement_<ContainerAllocator> Type;

  RobotMeasurement_()
  : header()
  , M_cam()
  {
  }

  RobotMeasurement_(const ContainerAllocator& _alloc)
  : header(_alloc)
  , M_cam(_alloc)
  {
  }

  typedef ::std_msgs::Header_<ContainerAllocator> _header_type;
  ::std_msgs::Header_<ContainerAllocator> header;

  typedef std::vector< ::camera_pose_calibration::CameraMeasurement_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::camera_pose_calibration::CameraMeasurement_<ContainerAllocator> >::other > _M_cam_type;
  std::vector< ::camera_pose_calibration::CameraMeasurement_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::camera_pose_calibration::CameraMeasurement_<ContainerAllocator> >::other > M_cam;


  ROS_DEPRECATED uint32_t get_M_cam_size() const { return (uint32_t)M_cam.size(); }
  ROS_DEPRECATED void set_M_cam_size(uint32_t size) { M_cam.resize((size_t)size); }
  ROS_DEPRECATED void get_M_cam_vec(std::vector< ::camera_pose_calibration::CameraMeasurement_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::camera_pose_calibration::CameraMeasurement_<ContainerAllocator> >::other > & vec) const { vec = this->M_cam; }
  ROS_DEPRECATED void set_M_cam_vec(const std::vector< ::camera_pose_calibration::CameraMeasurement_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::camera_pose_calibration::CameraMeasurement_<ContainerAllocator> >::other > & vec) { this->M_cam = vec; }
private:
  static const char* __s_getDataType_() { return "camera_pose_calibration/RobotMeasurement"; }
public:
  ROS_DEPRECATED static const std::string __s_getDataType() { return __s_getDataType_(); }

  ROS_DEPRECATED const std::string __getDataType() const { return __s_getDataType_(); }

private:
  static const char* __s_getMD5Sum_() { return "c1a18b7e641c3a5dbf96f32a5c580575"; }
public:
  ROS_DEPRECATED static const std::string __s_getMD5Sum() { return __s_getMD5Sum_(); }

  ROS_DEPRECATED const std::string __getMD5Sum() const { return __s_getMD5Sum_(); }

private:
  static const char* __s_getMessageDefinition_() { return "Header header\n\
CameraMeasurement[] M_cam\n\
\n\
================================================================================\n\
MSG: std_msgs/Header\n\
# Standard metadata for higher-level stamped data types.\n\
# This is generally used to communicate timestamped data \n\
# in a particular coordinate frame.\n\
# \n\
# sequence ID: consecutively increasing ID \n\
uint32 seq\n\
#Two-integer timestamp that is expressed as:\n\
# * stamp.secs: seconds (stamp_secs) since epoch\n\
# * stamp.nsecs: nanoseconds since stamp_secs\n\
# time-handling sugar is provided by the client library\n\
time stamp\n\
#Frame this data is associated with\n\
# 0: no frame\n\
# 1: global frame\n\
string frame_id\n\
\n\
================================================================================\n\
MSG: camera_pose_calibration/CameraMeasurement\n\
Header header\n\
string camera_id\n\
calibration_msgs/CalibrationPattern features\n\
sensor_msgs/CameraInfo cam_info\n\
\n\
================================================================================\n\
MSG: calibration_msgs/CalibrationPattern\n\
Header header\n\
geometry_msgs/Point32[] object_points\n\
ImagePoint[] image_points\n\
uint8 success\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point32\n\
# This contains the position of a point in free space(with 32 bits of precision).\n\
# It is recommeded to use Point wherever possible instead of Point32. \n\
# \n\
# This recommendation is to promote interoperability. \n\
#\n\
# This message is designed to take up less space when sending\n\
# lots of points at once, as in the case of a PointCloud. \n\
\n\
float32 x\n\
float32 y\n\
float32 z\n\
================================================================================\n\
MSG: calibration_msgs/ImagePoint\n\
float32 x\n\
float32 y\n\
\n\
================================================================================\n\
MSG: sensor_msgs/CameraInfo\n\
# This message defines meta information for a camera. It should be in a\n\
# camera namespace on topic \"camera_info\" and accompanied by up to five\n\
# image topics named:\n\
#\n\
# image_raw - raw data from the camera driver, possibly Bayer encoded\n\
# image - monochrome, distorted\n\
# image_color - color, distorted\n\
# image_rect - monochrome, rectified\n\
# image_rect_color - color, rectified\n\
#\n\
# The image_pipeline contains packages (image_proc, stereo_image_proc)\n\
# for producing the four processed image topics from image_raw and\n\
# camera_info. The meaning of the camera parameters are described in\n\
# detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\
#\n\
# The image_geometry package provides a user-friendly interface to\n\
# common operations using this meta information. If you want to, e.g.,\n\
# project a 3d point into image coordinates, we strongly recommend\n\
# using image_geometry.\n\
#\n\
# If the camera is uncalibrated, the matrices D, K, R, P should be left\n\
# zeroed out. In particular, clients may assume that K[0] == 0.0\n\
# indicates an uncalibrated camera.\n\
\n\
#######################################################################\n\
# Image acquisition info #\n\
#######################################################################\n\
\n\
# Time of image acquisition, camera coordinate frame ID\n\
Header header # Header timestamp should be acquisition time of image\n\
# Header frame_id should be optical frame of camera\n\
# origin of frame should be optical center of camera\n\
# +x should point to the right in the image\n\
# +y should point down in the image\n\
# +z should point into the plane of the image\n\
\n\
\n\
#######################################################################\n\
# Calibration Parameters #\n\
#######################################################################\n\
# These are fixed during camera calibration. Their values will be the #\n\
# same in all messages until the camera is recalibrated. Note that #\n\
# self-calibrating systems may \"recalibrate\" frequently. #\n\
# #\n\
# The internal parameters can be used to warp a raw (distorted) image #\n\
# to: #\n\
# 1. An undistorted image (requires D and K) #\n\
# 2. A rectified image (requires D, K, R) #\n\
# The projection matrix P projects 3D points into the rectified image.#\n\
#######################################################################\n\
\n\
# The image dimensions with which the camera was calibrated. Normally\n\
# this will be the full camera resolution in pixels.\n\
uint32 height\n\
uint32 width\n\
\n\
# The distortion model used. Supported models are listed in\n\
# sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\
# simple model of radial and tangential distortion - is sufficent.\n\
string distortion_model\n\
\n\
# The distortion parameters, size depending on the distortion model.\n\
# For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\
float64[] D\n\
\n\
# Intrinsic camera matrix for the raw (distorted) images.\n\
# [fx 0 cx]\n\
# K = [ 0 fy cy]\n\
# [ 0 0 1]\n\
# Projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx, fy) and principal point\n\
# (cx, cy).\n\
float64[9] K # 3x3 row-major matrix\n\
\n\
# Rectification matrix (stereo cameras only)\n\
# A rotation matrix aligning the camera coordinate system to the ideal\n\
# stereo image plane so that epipolar lines in both stereo images are\n\
# parallel.\n\
float64[9] R # 3x3 row-major matrix\n\
\n\
# Projection/camera matrix\n\
# [fx' 0 cx' Tx]\n\
# P = [ 0 fy' cy' Ty]\n\
# [ 0 0 1 0]\n\
# By convention, this matrix specifies the intrinsic (camera) matrix\n\
# of the processed (rectified) image. That is, the left 3x3 portion\n\
# is the normal camera intrinsic matrix for the rectified image.\n\
# It projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx', fy') and principal point\n\
# (cx', cy') - these may differ from the values in K.\n\
# For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\
# also have R = the identity and P[1:3,1:3] = K.\n\
# For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\
# position of the optical center of the second camera in the first\n\
# camera's frame. We assume Tz = 0 so both cameras are in the same\n\
# stereo image plane. The first camera always has Tx = Ty = 0. For\n\
# the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\
# Tx = -fx' * B, where B is the baseline between the cameras.\n\
# Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\
# the rectified image is given by:\n\
# [u v w]' = P * [X Y Z 1]'\n\
# x = u / w\n\
# y = v / w\n\
# This holds for both images of a stereo pair.\n\
float64[12] P # 3x4 row-major matrix\n\
\n\
\n\
#######################################################################\n\
# Operational Parameters #\n\
#######################################################################\n\
# These define the image region actually captured by the camera #\n\
# driver. Although they affect the geometry of the output image, they #\n\
# may be changed freely without recalibrating the camera. #\n\
#######################################################################\n\
\n\
# Binning refers here to any camera setting which combines rectangular\n\
# neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\
# resolution of the output image to\n\
# (width / binning_x) x (height / binning_y).\n\
# The default values binning_x = binning_y = 0 is considered the same\n\
# as binning_x = binning_y = 1 (no subsampling).\n\
uint32 binning_x\n\
uint32 binning_y\n\
\n\
# Region of interest (subwindow of full camera resolution), given in\n\
# full resolution (unbinned) image coordinates. A particular ROI\n\
# always denotes the same window of pixels on the camera sensor,\n\
# regardless of binning settings.\n\
# The default setting of roi (all values 0) is considered the same as\n\
# full resolution (roi.width = width, roi.height = height).\n\
RegionOfInterest roi\n\
\n\
================================================================================\n\
MSG: sensor_msgs/RegionOfInterest\n\
# This message is used to specify a region of interest within an image.\n\
#\n\
# When used to specify the ROI setting of the camera when the image was\n\
# taken, the height and width fields should either match the height and\n\
# width fields for the associated image; or height = width = 0\n\
# indicates that the full resolution image was captured.\n\
\n\
uint32 x_offset # Leftmost pixel of the ROI\n\
# (0 if the ROI includes the left edge of the image)\n\
uint32 y_offset # Topmost pixel of the ROI\n\
# (0 if the ROI includes the top edge of the image)\n\
uint32 height # Height of ROI\n\
uint32 width # Width of ROI\n\
\n\
# True if a distinct rectified ROI should be calculated from the \"raw\"\n\
# ROI in this message. Typically this should be False if the full image\n\
# is captured (ROI not used), and True if a subwindow is captured (ROI\n\
# used).\n\
bool do_rectify\n\
\n\
"; }
public:
  ROS_DEPRECATED static const std::string __s_getMessageDefinition() { return __s_getMessageDefinition_(); }

  ROS_DEPRECATED const std::string __getMessageDefinition() const { return __s_getMessageDefinition_(); }

  ROS_DEPRECATED virtual uint8_t *serialize(uint8_t *write_ptr, uint32_t seq) const
  {
    ros::serialization::OStream stream(write_ptr, 1000000000);
    ros::serialization::serialize(stream, header);
    ros::serialization::serialize(stream, M_cam);
    return stream.getData();
  }

  ROS_DEPRECATED virtual uint8_t *deserialize(uint8_t *read_ptr)
  {
    ros::serialization::IStream stream(read_ptr, 1000000000);
    ros::serialization::deserialize(stream, header);
    ros::serialization::deserialize(stream, M_cam);
    return stream.getData();
  }

  ROS_DEPRECATED virtual uint32_t serializationLength() const
  {
    uint32_t size = 0;
    size += ros::serialization::serializationLength(header);
    size += ros::serialization::serializationLength(M_cam);
    return size;
  }

  typedef boost::shared_ptr< ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> > Ptr;
  typedef boost::shared_ptr< ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> const> ConstPtr;
  boost::shared_ptr<std::map<std::string, std::string> > __connection_header;
}; // struct RobotMeasurement
typedef ::camera_pose_calibration::RobotMeasurement_<std::allocator<void> > RobotMeasurement;

typedef boost::shared_ptr< ::camera_pose_calibration::RobotMeasurement> RobotMeasurementPtr;
typedef boost::shared_ptr< ::camera_pose_calibration::RobotMeasurement const> RobotMeasurementConstPtr;


template<typename ContainerAllocator>
std::ostream& operator<<(std::ostream& s, const ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> & v)
{
  ros::message_operations::Printer< ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> >::stream(s, "", v);
  return s;
}

} // namespace camera_pose_calibration

namespace ros
{
namespace message_traits
{
template<class ContainerAllocator> struct IsMessage< ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> > : public TrueType {};
template<class ContainerAllocator> struct IsMessage< ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> const> : public TrueType {};
template<class ContainerAllocator>
struct MD5Sum< ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> > {
  static const char* value()
  {
    return "c1a18b7e641c3a5dbf96f32a5c580575";
  }

  static const char* value(const ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> &) { return value(); }
  static const uint64_t static_value1 = 0xc1a18b7e641c3a5dULL;
  static const uint64_t static_value2 = 0xbf96f32a5c580575ULL;
};

template<class ContainerAllocator>
struct DataType< ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> > {
  static const char* value()
  {
    return "camera_pose_calibration/RobotMeasurement";
  }

  static const char* value(const ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> &) { return value(); }
};

template<class ContainerAllocator>
struct Definition< ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> > {
  static const char* value()
  {
    return "Header header\n\
CameraMeasurement[] M_cam\n\
\n\
================================================================================\n\
MSG: std_msgs/Header\n\
# Standard metadata for higher-level stamped data types.\n\
# This is generally used to communicate timestamped data \n\
# in a particular coordinate frame.\n\
# \n\
# sequence ID: consecutively increasing ID \n\
uint32 seq\n\
#Two-integer timestamp that is expressed as:\n\
# * stamp.secs: seconds (stamp_secs) since epoch\n\
# * stamp.nsecs: nanoseconds since stamp_secs\n\
# time-handling sugar is provided by the client library\n\
time stamp\n\
#Frame this data is associated with\n\
# 0: no frame\n\
# 1: global frame\n\
string frame_id\n\
\n\
================================================================================\n\
MSG: camera_pose_calibration/CameraMeasurement\n\
Header header\n\
string camera_id\n\
calibration_msgs/CalibrationPattern features\n\
sensor_msgs/CameraInfo cam_info\n\
\n\
================================================================================\n\
MSG: calibration_msgs/CalibrationPattern\n\
Header header\n\
geometry_msgs/Point32[] object_points\n\
ImagePoint[] image_points\n\
uint8 success\n\
\n\
================================================================================\n\
MSG: geometry_msgs/Point32\n\
# This contains the position of a point in free space(with 32 bits of precision).\n\
# It is recommeded to use Point wherever possible instead of Point32. \n\
# \n\
# This recommendation is to promote interoperability. \n\
#\n\
# This message is designed to take up less space when sending\n\
# lots of points at once, as in the case of a PointCloud. \n\
\n\
float32 x\n\
float32 y\n\
float32 z\n\
================================================================================\n\
MSG: calibration_msgs/ImagePoint\n\
float32 x\n\
float32 y\n\
\n\
================================================================================\n\
MSG: sensor_msgs/CameraInfo\n\
# This message defines meta information for a camera. It should be in a\n\
# camera namespace on topic \"camera_info\" and accompanied by up to five\n\
# image topics named:\n\
#\n\
# image_raw - raw data from the camera driver, possibly Bayer encoded\n\
# image - monochrome, distorted\n\
# image_color - color, distorted\n\
# image_rect - monochrome, rectified\n\
# image_rect_color - color, rectified\n\
#\n\
# The image_pipeline contains packages (image_proc, stereo_image_proc)\n\
# for producing the four processed image topics from image_raw and\n\
# camera_info. The meaning of the camera parameters are described in\n\
# detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\
#\n\
# The image_geometry package provides a user-friendly interface to\n\
# common operations using this meta information. If you want to, e.g.,\n\
# project a 3d point into image coordinates, we strongly recommend\n\
# using image_geometry.\n\
#\n\
# If the camera is uncalibrated, the matrices D, K, R, P should be left\n\
# zeroed out. In particular, clients may assume that K[0] == 0.0\n\
# indicates an uncalibrated camera.\n\
\n\
#######################################################################\n\
# Image acquisition info #\n\
#######################################################################\n\
\n\
# Time of image acquisition, camera coordinate frame ID\n\
Header header # Header timestamp should be acquisition time of image\n\
# Header frame_id should be optical frame of camera\n\
# origin of frame should be optical center of camera\n\
# +x should point to the right in the image\n\
# +y should point down in the image\n\
# +z should point into the plane of the image\n\
\n\
\n\
#######################################################################\n\
# Calibration Parameters #\n\
#######################################################################\n\
# These are fixed during camera calibration. Their values will be the #\n\
# same in all messages until the camera is recalibrated. Note that #\n\
# self-calibrating systems may \"recalibrate\" frequently. #\n\
# #\n\
# The internal parameters can be used to warp a raw (distorted) image #\n\
# to: #\n\
# 1. An undistorted image (requires D and K) #\n\
# 2. A rectified image (requires D, K, R) #\n\
# The projection matrix P projects 3D points into the rectified image.#\n\
#######################################################################\n\
\n\
# The image dimensions with which the camera was calibrated. Normally\n\
# this will be the full camera resolution in pixels.\n\
uint32 height\n\
uint32 width\n\
\n\
# The distortion model used. Supported models are listed in\n\
# sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\
# simple model of radial and tangential distortion - is sufficent.\n\
string distortion_model\n\
\n\
# The distortion parameters, size depending on the distortion model.\n\
# For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\
float64[] D\n\
\n\
# Intrinsic camera matrix for the raw (distorted) images.\n\
# [fx 0 cx]\n\
# K = [ 0 fy cy]\n\
# [ 0 0 1]\n\
# Projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx, fy) and principal point\n\
# (cx, cy).\n\
float64[9] K # 3x3 row-major matrix\n\
\n\
# Rectification matrix (stereo cameras only)\n\
# A rotation matrix aligning the camera coordinate system to the ideal\n\
# stereo image plane so that epipolar lines in both stereo images are\n\
# parallel.\n\
float64[9] R # 3x3 row-major matrix\n\
\n\
# Projection/camera matrix\n\
# [fx' 0 cx' Tx]\n\
# P = [ 0 fy' cy' Ty]\n\
# [ 0 0 1 0]\n\
# By convention, this matrix specifies the intrinsic (camera) matrix\n\
# of the processed (rectified) image. That is, the left 3x3 portion\n\
# is the normal camera intrinsic matrix for the rectified image.\n\
# It projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx', fy') and principal point\n\
# (cx', cy') - these may differ from the values in K.\n\
# For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\
# also have R = the identity and P[1:3,1:3] = K.\n\
# For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\
# position of the optical center of the second camera in the first\n\
# camera's frame. We assume Tz = 0 so both cameras are in the same\n\
# stereo image plane. The first camera always has Tx = Ty = 0. For\n\
# the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\
# Tx = -fx' * B, where B is the baseline between the cameras.\n\
# Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\
# the rectified image is given by:\n\
# [u v w]' = P * [X Y Z 1]'\n\
# x = u / w\n\
# y = v / w\n\
# This holds for both images of a stereo pair.\n\
float64[12] P # 3x4 row-major matrix\n\
\n\
\n\
#######################################################################\n\
# Operational Parameters #\n\
#######################################################################\n\
# These define the image region actually captured by the camera #\n\
# driver. Although they affect the geometry of the output image, they #\n\
# may be changed freely without recalibrating the camera. #\n\
#######################################################################\n\
\n\
# Binning refers here to any camera setting which combines rectangular\n\
# neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\
# resolution of the output image to\n\
# (width / binning_x) x (height / binning_y).\n\
# The default values binning_x = binning_y = 0 is considered the same\n\
# as binning_x = binning_y = 1 (no subsampling).\n\
uint32 binning_x\n\
uint32 binning_y\n\
\n\
# Region of interest (subwindow of full camera resolution), given in\n\
# full resolution (unbinned) image coordinates. A particular ROI\n\
# always denotes the same window of pixels on the camera sensor,\n\
# regardless of binning settings.\n\
# The default setting of roi (all values 0) is considered the same as\n\
# full resolution (roi.width = width, roi.height = height).\n\
RegionOfInterest roi\n\
\n\
================================================================================\n\
MSG: sensor_msgs/RegionOfInterest\n\
# This message is used to specify a region of interest within an image.\n\
#\n\
# When used to specify the ROI setting of the camera when the image was\n\
# taken, the height and width fields should either match the height and\n\
# width fields for the associated image; or height = width = 0\n\
# indicates that the full resolution image was captured.\n\
\n\
uint32 x_offset # Leftmost pixel of the ROI\n\
# (0 if the ROI includes the left edge of the image)\n\
uint32 y_offset # Topmost pixel of the ROI\n\
# (0 if the ROI includes the top edge of the image)\n\
uint32 height # Height of ROI\n\
uint32 width # Width of ROI\n\
\n\
# True if a distinct rectified ROI should be calculated from the \"raw\"\n\
# ROI in this message. Typically this should be False if the full image\n\
# is captured (ROI not used), and True if a subwindow is captured (ROI\n\
# used).\n\
bool do_rectify\n\
\n\
";
  }

  static const char* value(const ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> &) { return value(); }
};

template<class ContainerAllocator> struct HasHeader< ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> > : public TrueType {};
template<class ContainerAllocator> struct HasHeader< const ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> > : public TrueType {};
} // namespace message_traits
} // namespace ros

namespace ros
{
namespace serialization
{

template<class ContainerAllocator> struct Serializer< ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> >
{
  template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
  {
    stream.next(m.header);
    stream.next(m.M_cam);
  }

  ROS_DECLARE_ALLINONE_SERIALIZER;
}; // struct RobotMeasurement_
} // namespace serialization
} // namespace ros

namespace ros
{
namespace message_operations
{

template<class ContainerAllocator>
struct Printer< ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> >
{
  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::camera_pose_calibration::RobotMeasurement_<ContainerAllocator> & v)
  {
    s << indent << "header: ";
    s << std::endl;
    Printer< ::std_msgs::Header_<ContainerAllocator> >::stream(s, indent + "  ", v.header);
    s << indent << "M_cam[]" << std::endl;
    for (size_t i = 0; i < v.M_cam.size(); ++i)
    {
      s << indent << "  M_cam[" << i << "]: ";
      s << std::endl;
      s << indent;
      Printer< ::camera_pose_calibration::CameraMeasurement_<ContainerAllocator> >::stream(s, indent + "    ", v.M_cam[i]);
    }
  }
};


} // namespace message_operations
} // namespace ros

#endif // CAMERA_POSE_CALIBRATION_MESSAGE_ROBOTMEASUREMENT_H
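
The generated header only declares the RobotMeasurement message type, its traits, and its serialization. As a usage illustration (a minimal sketch, not part of the generated file), the snippet below shows how a roscpp node might fill and publish a RobotMeasurement; the node name, topic name, and camera_id string are assumptions chosen for the example.

// Illustrative sketch only -- not part of the auto-generated header above.
// The node name, topic name, and camera_id value are assumed for this example.
#include <ros/ros.h>
#include <camera_pose_calibration/RobotMeasurement.h>
#include <camera_pose_calibration/CameraMeasurement.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "robot_measurement_example");  // assumed node name
  ros::NodeHandle nh;
  ros::Publisher pub =
      nh.advertise<camera_pose_calibration::RobotMeasurement>("robot_measurement", 1);  // assumed topic

  camera_pose_calibration::RobotMeasurement msg;
  msg.header.stamp = ros::Time::now();

  // One CameraMeasurement per camera that observed the calibration pattern;
  // features and cam_info would normally be filled from the camera pipeline.
  camera_pose_calibration::CameraMeasurement cam;
  cam.header.stamp = msg.header.stamp;
  cam.camera_id = "camera_a";  // assumed camera id
  msg.M_cam.push_back(cam);

  pub.publish(msg);
  ros::spinOnce();
  return 0;
}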