#ifndef SENSOR_MSGS_MESSAGE_CAMERAINFO_H
#define SENSOR_MSGS_MESSAGE_CAMERAINFO_H
template <class ContainerAllocator>
struct CameraInfo_
{
  typedef std::basic_string<char, std::char_traits<char>,
    typename std::allocator_traits< ContainerAllocator >::template rebind_alloc< char > > _distortion_model_type;

  typedef std::vector<double,
    typename std::allocator_traits< ContainerAllocator >::template rebind_alloc< double > > _D_type;

  typedef ::sensor_msgs::RegionOfInterest_<ContainerAllocator> _roi_type;

  typedef std::shared_ptr< ::sensor_msgs::CameraInfo_<ContainerAllocator> > Ptr;
  typedef std::shared_ptr< ::sensor_msgs::CameraInfo_<ContainerAllocator> const> ConstPtr;

  // (further field typedefs and the data members themselves follow in the full header)
}; // struct CameraInfo_

typedef ::sensor_msgs::CameraInfo_<std::allocator<void> > CameraInfo;
template<typename ContainerAllocator>
std::ostream& operator<<(std::ostream& s, const ::sensor_msgs::CameraInfo_<ContainerAllocator> & v)
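// ---- Usage sketch (not part of the generated header) -----------------------
// Minimal example of filling a CameraInfo and streaming it with the operator<<
// declared above. Assumes a ROS 1 build with sensor_msgs on the include path;
// all field values are purely illustrative.
#include <iostream>
#include <sensor_msgs/CameraInfo.h>

int main()
{
  sensor_msgs::CameraInfo info;          // i.e. CameraInfo_<std::allocator<void> >
  info.width  = 640;                     // calibration resolution (illustrative)
  info.height = 480;
  info.distortion_model = "plumb_bob";   // see sensor_msgs/distortion_models.h
  info.D.assign(5, 0.0);                 // k1, k2, t1, t2, k3 left zeroed

  std::cout << info << std::endl;        // pretty-prints every field
  return 0;
}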
namespace message_traits
{

// (Trait specializations for CameraInfo_ -- e.g. IsFixedSize, IsMessage, HasHeader --
//  are declared here in the full header, each of the form
//  "template <class ContainerAllocator> struct Trait< ::sensor_msgs::CameraInfo_<ContainerAllocator> > ...".)

template<class ContainerAllocator>
struct MD5Sum< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
{
  static const char* value()
  {
    return "c9a58c1b0b154e0e6da7578cb991d214";
  }

  static const char* value(const ::sensor_msgs::CameraInfo_<ContainerAllocator>&) { return value(); }
  static const uint64_t static_value1 = 0xc9a58c1b0b154e0eULL;
  static const uint64_t static_value2 = 0x6da7578cb991d214ULL;
};

template<class ContainerAllocator>
struct DataType< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
{
  static const char* value()
  {
    return "sensor_msgs/CameraInfo";
  }

  static const char* value(const ::sensor_msgs::CameraInfo_<ContainerAllocator>&) { return value(); }
};
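// ---- Usage sketch (not part of the generated header) -----------------------
// The MD5Sum and DataType specializations above are what the free helpers in
// ros/message_traits.h resolve to. A small query sketch, assuming a ROS 1
// (roscpp) environment:
#include <iostream>
#include <ros/message_traits.h>
#include <sensor_msgs/CameraInfo.h>

int main()
{
  std::cout << ros::message_traits::datatype<sensor_msgs::CameraInfo>() << "\n"; // "sensor_msgs/CameraInfo"
  std::cout << ros::message_traits::md5sum<sensor_msgs::CameraInfo>()   << "\n"; // "c9a58c1b0b154e0e..."
  return 0;
}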
template<class ContainerAllocator>
struct Definition< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
{
  static const char* value()
  {
    return "# This message defines meta information for a camera. It should be in a\n\
# camera namespace on topic \"camera_info\" and accompanied by up to five\n\
# image topics named:\n\
# image_raw - raw data from the camera driver, possibly Bayer encoded\n\
# image - monochrome, distorted\n\
# image_color - color, distorted\n\
# image_rect - monochrome, rectified\n\
# image_rect_color - color, rectified\n\
# The image_pipeline contains packages (image_proc, stereo_image_proc)\n\
# for producing the four processed image topics from image_raw and\n\
# camera_info. The meaning of the camera parameters are described in\n\
# detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\
# The image_geometry package provides a user-friendly interface to\n\
# common operations using this meta information. If you want to, e.g.,\n\
# project a 3d point into image coordinates, we strongly recommend\n\
# using image_geometry.\n\
# If the camera is uncalibrated, the matrices D, K, R, P should be left\n\
# zeroed out. In particular, clients may assume that K[0] == 0.0\n\
# indicates an uncalibrated camera.\n\
#######################################################################\n\
# Image acquisition info #\n\
#######################################################################\n\
# Time of image acquisition, camera coordinate frame ID\n\
Header header # Header timestamp should be acquisition time of image\n\
# Header frame_id should be optical frame of camera\n\
# origin of frame should be optical center of camera\n\
# +x should point to the right in the image\n\
# +y should point down in the image\n\
# +z should point into the plane of the image\n\
#######################################################################\n\
# Calibration Parameters #\n\
#######################################################################\n\
# These are fixed during camera calibration. Their values will be the #\n\
# same in all messages until the camera is recalibrated. Note that #\n\
# self-calibrating systems may \"recalibrate\" frequently. #\n\
# The internal parameters can be used to warp a raw (distorted) image #\n\
# to: #\n\
# 1. An undistorted image (requires D and K) #\n\
# 2. A rectified image (requires D, K, R) #\n\
# The projection matrix P projects 3D points into the rectified image.#\n\
#######################################################################\n\
# The image dimensions with which the camera was calibrated. Normally\n\
# this will be the full camera resolution in pixels.\n\
uint32 height\n\
uint32 width\n\
# The distortion model used. Supported models are listed in\n\
# sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\
# simple model of radial and tangential distortion - is sufficient.\n\
string distortion_model\n\
# The distortion parameters, size depending on the distortion model.\n\
# For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\
float64[] D\n\
# Intrinsic camera matrix for the raw (distorted) images.\n\
#     [fx  0 cx]\n\
# K = [ 0 fy cy]\n\
#     [ 0  0  1]\n\
# Projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx, fy) and principal point\n\
# (cx, cy).\n\
float64[9] K # 3x3 row-major matrix\n\
# Rectification matrix (stereo cameras only)\n\
# A rotation matrix aligning the camera coordinate system to the ideal\n\
# stereo image plane so that epipolar lines in both stereo images are\n\
# parallel.\n\
float64[9] R # 3x3 row-major matrix\n\
# Projection/camera matrix\n\
#     [fx'  0  cx' Tx]\n\
# P = [ 0  fy' cy' Ty]\n\
#     [ 0   0   1   0]\n\
# By convention, this matrix specifies the intrinsic (camera) matrix\n\
# of the processed (rectified) image. That is, the left 3x3 portion\n\
# is the normal camera intrinsic matrix for the rectified image.\n\
# It projects 3D points in the camera coordinate frame to 2D pixel\n\
# coordinates using the focal lengths (fx', fy') and principal point\n\
# (cx', cy') - these may differ from the values in K.\n\
# For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\
# also have R = the identity and P[1:3,1:3] = K.\n\
# For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\
# position of the optical center of the second camera in the first\n\
# camera's frame. We assume Tz = 0 so both cameras are in the same\n\
# stereo image plane. The first camera always has Tx = Ty = 0. For\n\
# the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\
# Tx = -fx' * B, where B is the baseline between the cameras.\n\
# Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\
# the rectified image is given by:\n\
# [u v w]' = P * [X Y Z 1]'\n\
#        x = u / w\n\
#        y = v / w\n\
# This holds for both images of a stereo pair.\n\
float64[12] P # 3x4 row-major matrix\n\
#######################################################################\n\
# Operational Parameters #\n\
#######################################################################\n\
# These define the image region actually captured by the camera #\n\
# driver. Although they affect the geometry of the output image, they #\n\
# may be changed freely without recalibrating the camera. #\n\
#######################################################################\n\
# Binning refers here to any camera setting which combines rectangular\n\
# neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\
# resolution of the output image to\n\
# (width / binning_x) x (height / binning_y).\n\
# The default values binning_x = binning_y = 0 is considered the same\n\
# as binning_x = binning_y = 1 (no subsampling).\n\
uint32 binning_x\n\
uint32 binning_y\n\
# Region of interest (subwindow of full camera resolution), given in\n\
# full resolution (unbinned) image coordinates. A particular ROI\n\
# always denotes the same window of pixels on the camera sensor,\n\
# regardless of binning settings.\n\
# The default setting of roi (all values 0) is considered the same as\n\
# full resolution (roi.width = width, roi.height = height).\n\
RegionOfInterest roi\n\
================================================================================\n\
MSG: std_msgs/Header\n\
# Standard metadata for higher-level stamped data types.\n\
# This is generally used to communicate timestamped data \n\
# in a particular coordinate frame.\n\
# sequence ID: consecutively increasing ID \n\
uint32 seq\n\
#Two-integer timestamp that is expressed as:\n\
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n\
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n\
# time-handling sugar is provided by the client library\n\
time stamp\n\
#Frame this data is associated with\n\
string frame_id\n\
================================================================================\n\
MSG: sensor_msgs/RegionOfInterest\n\
# This message is used to specify a region of interest within an image.\n\
# When used to specify the ROI setting of the camera when the image was\n\
# taken, the height and width fields should either match the height and\n\
# width fields for the associated image; or height = width = 0\n\
# indicates that the full resolution image was captured.\n\
uint32 x_offset # Leftmost pixel of the ROI\n\
# (0 if the ROI includes the left edge of the image)\n\
uint32 y_offset # Topmost pixel of the ROI\n\
# (0 if the ROI includes the top edge of the image)\n\
uint32 height # Height of ROI\n\
uint32 width # Width of ROI\n\
# True if a distinct rectified ROI should be calculated from the \"raw\"\n\
# ROI in this message. Typically this should be False if the full image\n\
# is captured (ROI not used), and True if a subwindow is captured (ROI\n\
# used).\n\
bool do_rectify\n\
";
  }

  static const char* value(const ::sensor_msgs::CameraInfo_<ContainerAllocator>&) { return value(); }
};

} // namespace message_traits
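// ---- Worked example (not part of the generated header) ---------------------
// The definition above describes projection through P as
// [u v w]' = P * [X Y Z 1]', with pixel coordinates x = u/w, y = v/w
// (standard homogeneous normalization). A plain-C++ sketch of that arithmetic;
// the fx', fy', cx', cy' values below are made up for illustration.
#include <array>
#include <cstdio>

int main()
{
  // Illustrative 3x4 projection matrix P (row-major), monocular case (Tx = Ty = 0).
  const std::array<double, 12> P = {
    525.0,   0.0, 319.5, 0.0,    // fx'   0   cx'  Tx
      0.0, 525.0, 239.5, 0.0,    //  0   fy'  cy'  Ty
      0.0,   0.0,   1.0, 0.0 };  //  0    0    1    0

  const double X = 0.2, Y = -0.1, Z = 1.5;   // a 3D point in the camera frame

  // [u v w]' = P * [X Y Z 1]'
  const double u = P[0]*X + P[1]*Y + P[2]*Z  + P[3];
  const double v = P[4]*X + P[5]*Y + P[6]*Z  + P[7];
  const double w = P[8]*X + P[9]*Y + P[10]*Z + P[11];

  std::printf("pixel: x = %.2f, y = %.2f\n", u / w, v / w);  // x = u/w, y = v/w
  return 0;
}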
namespace serialization
{
  // Serializer< ::sensor_msgs::CameraInfo_<ContainerAllocator> > uses the
  // allInOne pattern; each field is written and read via calls such as:
  //   stream.next(m.distortion_model);
} // namespace serialization
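// ---- Usage sketch (not part of the generated header) -----------------------
// The Serializer specialization sketched above is what ros::serialization
// drives field by field. A minimal serialize/deserialize round trip, assuming
// a ROS 1 (roscpp) environment:
#include <vector>
#include <ros/serialization.h>
#include <sensor_msgs/CameraInfo.h>

int main()
{
  namespace ser = ros::serialization;

  sensor_msgs::CameraInfo msg;
  msg.distortion_model = "plumb_bob";

  uint32_t length = ser::serializationLength(msg);   // bytes needed on the wire
  std::vector<uint8_t> buffer(length);

  ser::OStream out(buffer.data(), length);
  ser::serialize(out, msg);                          // write all fields

  sensor_msgs::CameraInfo decoded;
  ser::IStream in(buffer.data(), length);
  ser::deserialize(in, decoded);                     // read them back
  return 0;
}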
namespace message_operations
{

template<class ContainerAllocator>
struct Printer< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
{
  template<typename Stream>
  static void stream(Stream& s, const std::string& indent,
                     const ::sensor_msgs::CameraInfo_<ContainerAllocator>& v)
  {
    s << indent << "distortion_model: ";
    // (the string value itself is printed here in the full header)
    s << indent << "D[]" << std::endl;
    for (size_t i = 0; i < v.D.size(); ++i)
    {
      s << indent << "  D[" << i << "]: ";
      Printer<double>::stream(s, indent + "  ", v.D[i]);
    }
    s << indent << "K[]" << std::endl;
    for (size_t i = 0; i < v.K.size(); ++i)
    {
      s << indent << "  K[" << i << "]: ";
      Printer<double>::stream(s, indent + "  ", v.K[i]);
    }
    s << indent << "R[]" << std::endl;
    for (size_t i = 0; i < v.R.size(); ++i)
    {
      s << indent << "  R[" << i << "]: ";
      Printer<double>::stream(s, indent + "  ", v.R[i]);
    }
    s << indent << "P[]" << std::endl;
    for (size_t i = 0; i < v.P.size(); ++i)
    {
      s << indent << "  P[" << i << "]: ";
      Printer<double>::stream(s, indent + "  ", v.P[i]);
    }
  }
};

} // namespace message_operations
#endif // SENSOR_MSGS_MESSAGE_CAMERAINFO_H
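// ---- Usage sketch (not part of the generated header) -----------------------
// The Printer specialization above backs operator<< for CameraInfo; it can also
// be called directly to pretty-print with a custom indent. Assumes a ROS 1
// (roscpp) environment.
#include <iostream>
#include <ros/message_operations.h>
#include <sensor_msgs/CameraInfo.h>

int main()
{
  sensor_msgs::CameraInfo msg;
  msg.distortion_model = "plumb_bob";

  // Every printed line is prefixed with the given indent string.
  ros::message_operations::Printer<sensor_msgs::CameraInfo>::stream(std::cout, "  ", msg);
  return 0;
}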
_binning_x_type binning_x
librealsense2
Author(s): LibRealSense ROS Team
autogenerated on Fri Aug 2 2024 08:30:01