CameraInfo.h
1 // Generated by gencpp from file sensor_msgs/CameraInfo.msg
2 // DO NOT EDIT!
3 
4 
5 #ifndef SENSOR_MSGS_MESSAGE_CAMERAINFO_H
6 #define SENSOR_MSGS_MESSAGE_CAMERAINFO_H
7 
8 #include <memory>
9 #include <array>
10 #include <string>
11 #include <vector>
12 #include <map>
13 
14 #include <ros/types.h>
15 #include <ros/serialization.h>
16 #include <ros/builtin_message_traits.h>
17 #include <ros/message_operations.h>
18 
19 #include <std_msgs/Header.h>
20 #include <sensor_msgs/RegionOfInterest.h>
21 
22 namespace sensor_msgs
23 {
24 template <class ContainerAllocator>
25 struct CameraInfo_
26 {
27  typedef CameraInfo_<ContainerAllocator> Type;
28 
29  CameraInfo_()
30  : header()
31  , height(0)
32  , width(0)
33  , distortion_model()
34  , D()
35  , K()
36  , R()
37  , P()
38  , binning_x(0)
39  , binning_y(0)
40  , roi() {
41  K.fill(0.0);
42 
43  R.fill(0.0);
44 
45  P.fill(0.0);
46  }
47  CameraInfo_(const ContainerAllocator& _alloc)
48  : header(_alloc)
49  , height(0)
50  , width(0)
51  , distortion_model(_alloc)
52  , D(_alloc)
53  , K()
54  , R()
55  , P()
56  , binning_x(0)
57  , binning_y(0)
58  , roi(_alloc) {
59  (void)_alloc;
60  K.fill(0.0);
61 
62  R.fill(0.0);
63 
64  P.fill(0.0);
65  }
66 
67 
68 
69  typedef ::std_msgs::Header_<ContainerAllocator> _header_type;
70  _header_type header;
71 
72  typedef uint32_t _height_type;
73  _height_type height;
74 
75  typedef uint32_t _width_type;
76  _width_type width;
77 
78  typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _distortion_model_type;
79  _distortion_model_type distortion_model;
80 
81  typedef std::vector<double, typename ContainerAllocator::template rebind<double>::other > _D_type;
82  _D_type D;
83 
84  typedef std::array<double, 9> _K_type;
85  _K_type K;
86 
87  typedef std::array<double, 9> _R_type;
88  _R_type R;
89 
90  typedef std::array<double, 12> _P_type;
91  _P_type P;
92 
93  typedef uint32_t _binning_x_type;
94  _binning_x_type binning_x;
95 
96  typedef uint32_t _binning_y_type;
97  _binning_y_type binning_y;
98 
99  typedef ::sensor_msgs::RegionOfInterest_<ContainerAllocator> _roi_type;
100  _roi_type roi;
101 
102 
103 
104 
105  typedef std::shared_ptr< ::sensor_msgs::CameraInfo_<ContainerAllocator> > Ptr;
106  typedef std::shared_ptr< ::sensor_msgs::CameraInfo_<ContainerAllocator> const> ConstPtr;
107 
108 }; // struct CameraInfo_
109 
110 typedef ::sensor_msgs::CameraInfo_<std::allocator<void> > CameraInfo;
111 
112 typedef std::shared_ptr< ::sensor_msgs::CameraInfo > CameraInfoPtr;
113 typedef std::shared_ptr< ::sensor_msgs::CameraInfo const> CameraInfoConstPtr;
114 
115 // constants requiring out of line definition
116 
117 
118 
119 template<typename ContainerAllocator>
120 std::ostream& operator<<(std::ostream& s, const ::sensor_msgs::CameraInfo_<ContainerAllocator> & v)
121 {
122  rs2rosinternal::message_operations::Printer< ::sensor_msgs::CameraInfo_<ContainerAllocator> >::stream(s, "", v);
123 return s;
124 }
125 
126 } // namespace sensor_msgs
127 
128 namespace rs2rosinternal
129 {
130 namespace message_traits
131 {
132 
133 
134 
135 // BOOLTRAITS {'IsFixedSize': False, 'IsMessage': True, 'HasHeader': True}
136 // {'std_msgs': ['/opt/ros/kinetic/share/std_msgs/cmake/../msg'], 'geometry_msgs': ['/opt/ros/kinetic/share/geometry_msgs/cmake/../msg'], 'sensor_msgs': ['/tmp/binarydeb/ros-kinetic-sensor-msgs-1.12.5/msg']}
137 
138 // !!!!!!!!!!! ['__class__', '__delattr__', '__dict__', '__doc__', '__eq__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_parsed_fields', 'constants', 'fields', 'full_name', 'has_header', 'header_present', 'names', 'package', 'parsed_fields', 'short_name', 'text', 'types']
139 
140 
141 
142 
143 template <class ContainerAllocator>
144 struct IsFixedSize< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
145  : FalseType
146  { };
147 
148 template <class ContainerAllocator>
149 struct IsFixedSize< ::sensor_msgs::CameraInfo_<ContainerAllocator> const>
150  : FalseType
151  { };
152 
153 template <class ContainerAllocator>
154 struct IsMessage< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
155  : TrueType
156  { };
157 
158 template <class ContainerAllocator>
159 struct IsMessage< ::sensor_msgs::CameraInfo_<ContainerAllocator> const>
160  : TrueType
161  { };
162 
163 template <class ContainerAllocator>
164 struct HasHeader< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
165  : TrueType
166  { };
167 
168 template <class ContainerAllocator>
169 struct HasHeader< ::sensor_msgs::CameraInfo_<ContainerAllocator> const>
170  : TrueType
171  { };
172 
173 
174 template<class ContainerAllocator>
175 struct MD5Sum< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
176 {
177  static const char* value()
178  {
179  return "c9a58c1b0b154e0e6da7578cb991d214";
180  }
181 
182  static const char* value(const ::sensor_msgs::CameraInfo_<ContainerAllocator>&) { return value(); }
183  static const uint64_t static_value1 = 0xc9a58c1b0b154e0eULL;
184  static const uint64_t static_value2 = 0x6da7578cb991d214ULL;
185 };
186 
187 template<class ContainerAllocator>
188 struct DataType< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
189 {
190  static const char* value()
191  {
192  return "sensor_msgs/CameraInfo";
193  }
194 
195  static const char* value(const ::sensor_msgs::CameraInfo_<ContainerAllocator>&) { return value(); }
196 };
197 
198 template<class ContainerAllocator>
199 struct Definition< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
200 {
201  static const char* value()
202  {
203  return "# This message defines meta information for a camera. It should be in a\n\
204 # camera namespace on topic \"camera_info\" and accompanied by up to five\n\
205 # image topics named:\n\
206 #\n\
207 # image_raw - raw data from the camera driver, possibly Bayer encoded\n\
208 # image - monochrome, distorted\n\
209 # image_color - color, distorted\n\
210 # image_rect - monochrome, rectified\n\
211 # image_rect_color - color, rectified\n\
212 #\n\
213 # The image_pipeline contains packages (image_proc, stereo_image_proc)\n\
214 # for producing the four processed image topics from image_raw and\n\
215 # camera_info. The meaning of the camera parameters are described in\n\
216 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.\n\
217 #\n\
218 # The image_geometry package provides a user-friendly interface to\n\
219 # common operations using this meta information. If you want to, e.g.,\n\
220 # project a 3d point into image coordinates, we strongly recommend\n\
221 # using image_geometry.\n\
222 #\n\
223 # If the camera is uncalibrated, the matrices D, K, R, P should be left\n\
224 # zeroed out. In particular, clients may assume that K[0] == 0.0\n\
225 # indicates an uncalibrated camera.\n\
226 \n\
227 #######################################################################\n\
228 # Image acquisition info #\n\
229 #######################################################################\n\
230 \n\
231 # Time of image acquisition, camera coordinate frame ID\n\
232 Header header # Header timestamp should be acquisition time of image\n\
233  # Header frame_id should be optical frame of camera\n\
234  # origin of frame should be optical center of camera\n\
235  # +x should point to the right in the image\n\
236  # +y should point down in the image\n\
237  # +z should point into the plane of the image\n\
238 \n\
239 \n\
240 #######################################################################\n\
241 # Calibration Parameters #\n\
242 #######################################################################\n\
243 # These are fixed during camera calibration. Their values will be the #\n\
244 # same in all messages until the camera is recalibrated. Note that #\n\
245 # self-calibrating systems may \"recalibrate\" frequently. #\n\
246 # #\n\
247 # The internal parameters can be used to warp a raw (distorted) image #\n\
248 # to: #\n\
249 # 1. An undistorted image (requires D and K) #\n\
250 # 2. A rectified image (requires D, K, R) #\n\
251 # The projection matrix P projects 3D points into the rectified image.#\n\
252 #######################################################################\n\
253 \n\
254 # The image dimensions with which the camera was calibrated. Normally\n\
255 # this will be the full camera resolution in pixels.\n\
256 uint32 height\n\
257 uint32 width\n\
258 \n\
259 # The distortion model used. Supported models are listed in\n\
260 # sensor_msgs/distortion_models.h. For most cameras, \"plumb_bob\" - a\n\
261 # simple model of radial and tangential distortion - is sufficient.\n\
262 string distortion_model\n\
263 \n\
264 # The distortion parameters, size depending on the distortion model.\n\
265 # For \"plumb_bob\", the 5 parameters are: (k1, k2, t1, t2, k3).\n\
266 float64[] D\n\
267 \n\
268 # Intrinsic camera matrix for the raw (distorted) images.\n\
269 #     [fx  0 cx]\n\
270 # K = [ 0 fy cy]\n\
271 #     [ 0  0  1]\n\
272 # Projects 3D points in the camera coordinate frame to 2D pixel\n\
273 # coordinates using the focal lengths (fx, fy) and principal point\n\
274 # (cx, cy).\n\
275 float64[9] K # 3x3 row-major matrix\n\
276 \n\
277 # Rectification matrix (stereo cameras only)\n\
278 # A rotation matrix aligning the camera coordinate system to the ideal\n\
279 # stereo image plane so that epipolar lines in both stereo images are\n\
280 # parallel.\n\
281 float64[9] R # 3x3 row-major matrix\n\
282 \n\
283 # Projection/camera matrix\n\
284 #     [fx'  0  cx' Tx]\n\
285 # P = [ 0  fy' cy' Ty]\n\
286 #     [ 0   0   1   0]\n\
287 # By convention, this matrix specifies the intrinsic (camera) matrix\n\
288 # of the processed (rectified) image. That is, the left 3x3 portion\n\
289 # is the normal camera intrinsic matrix for the rectified image.\n\
290 # It projects 3D points in the camera coordinate frame to 2D pixel\n\
291 # coordinates using the focal lengths (fx', fy') and principal point\n\
292 # (cx', cy') - these may differ from the values in K.\n\
293 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will\n\
294 # also have R = the identity and P[1:3,1:3] = K.\n\
295 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the\n\
296 # position of the optical center of the second camera in the first\n\
297 # camera's frame. We assume Tz = 0 so both cameras are in the same\n\
298 # stereo image plane. The first camera always has Tx = Ty = 0. For\n\
299 # the right (second) camera of a horizontal stereo pair, Ty = 0 and\n\
300 # Tx = -fx' * B, where B is the baseline between the cameras.\n\
301 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto\n\
302 # the rectified image is given by:\n\
303 # [u v w]' = P * [X Y Z 1]'\n\
304 # x = u / w\n\
305 # y = v / w\n\
306 # This holds for both images of a stereo pair.\n\
307 float64[12] P # 3x4 row-major matrix\n\
308 \n\
309 \n\
310 #######################################################################\n\
311 # Operational Parameters #\n\
312 #######################################################################\n\
313 # These define the image region actually captured by the camera #\n\
314 # driver. Although they affect the geometry of the output image, they #\n\
315 # may be changed freely without recalibrating the camera. #\n\
316 #######################################################################\n\
317 \n\
318 # Binning refers here to any camera setting which combines rectangular\n\
319 # neighborhoods of pixels into larger \"super-pixels.\" It reduces the\n\
320 # resolution of the output image to\n\
321 # (width / binning_x) x (height / binning_y).\n\
322 # The default values binning_x = binning_y = 0 is considered the same\n\
323 # as binning_x = binning_y = 1 (no subsampling).\n\
324 uint32 binning_x\n\
325 uint32 binning_y\n\
326 \n\
327 # Region of interest (subwindow of full camera resolution), given in\n\
328 # full resolution (unbinned) image coordinates. A particular ROI\n\
329 # always denotes the same window of pixels on the camera sensor,\n\
330 # regardless of binning settings.\n\
331 # The default setting of roi (all values 0) is considered the same as\n\
332 # full resolution (roi.width = width, roi.height = height).\n\
333 RegionOfInterest roi\n\
334 \n\
335 ================================================================================\n\
336 MSG: std_msgs/Header\n\
337 # Standard metadata for higher-level stamped data types.\n\
338 # This is generally used to communicate timestamped data \n\
339 # in a particular coordinate frame.\n\
340 # \n\
341 # sequence ID: consecutively increasing ID \n\
342 uint32 seq\n\
343 #Two-integer timestamp that is expressed as:\n\
344 # * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n\
345 # * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n\
346 # time-handling sugar is provided by the client library\n\
347 time stamp\n\
348 #Frame this data is associated with\n\
349 # 0: no frame\n\
350 # 1: global frame\n\
351 string frame_id\n\
352 \n\
353 ================================================================================\n\
354 MSG: sensor_msgs/RegionOfInterest\n\
355 # This message is used to specify a region of interest within an image.\n\
356 #\n\
357 # When used to specify the ROI setting of the camera when the image was\n\
358 # taken, the height and width fields should either match the height and\n\
359 # width fields for the associated image; or height = width = 0\n\
360 # indicates that the full resolution image was captured.\n\
361 \n\
362 uint32 x_offset # Leftmost pixel of the ROI\n\
363  # (0 if the ROI includes the left edge of the image)\n\
364 uint32 y_offset # Topmost pixel of the ROI\n\
365  # (0 if the ROI includes the top edge of the image)\n\
366 uint32 height # Height of ROI\n\
367 uint32 width # Width of ROI\n\
368 \n\
369 # True if a distinct rectified ROI should be calculated from the \"raw\"\n\
370 # ROI in this message. Typically this should be False if the full image\n\
371 # is captured (ROI not used), and True if a subwindow is captured (ROI\n\
372 # used).\n\
373 bool do_rectify\n\
374 ";
375  }
376 
377  static const char* value(const ::sensor_msgs::CameraInfo_<ContainerAllocator>&) { return value(); }
378 };
379 
380 } // namespace message_traits
381 } // namespace rs2rosinternal
382 
383 namespace rs2rosinternal
384 {
385 namespace serialization
386 {
387 
388  template<class ContainerAllocator> struct Serializer< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
389  {
390  template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
391  {
392  stream.next(m.header);
393  stream.next(m.height);
394  stream.next(m.width);
395  stream.next(m.distortion_model);
396  stream.next(m.D);
397  stream.next(m.K);
398  stream.next(m.R);
399  stream.next(m.P);
400  stream.next(m.binning_x);
401  stream.next(m.binning_y);
402  stream.next(m.roi);
403  }
404 
405  ROS_DECLARE_ALLINONE_SERIALIZER
406  }; // struct CameraInfo_
407 
408 } // namespace serialization
409 } // namespace rs2rosinternal
410 
411 namespace rs2rosinternal
412 {
413 namespace message_operations
414 {
415 
416 template<class ContainerAllocator>
417 struct Printer< ::sensor_msgs::CameraInfo_<ContainerAllocator> >
418 {
419  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::sensor_msgs::CameraInfo_<ContainerAllocator>& v)
420  {
421  s << indent << "header: ";
422  s << std::endl;
423  Printer< ::std_msgs::Header_<ContainerAllocator> >::stream(s, indent + "  ", v.header);
424  s << indent << "height: ";
425  Printer<uint32_t>::stream(s, indent + " ", v.height);
426  s << indent << "width: ";
427  Printer<uint32_t>::stream(s, indent + " ", v.width);
428  s << indent << "distortion_model: ";
429  Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + " ", v.distortion_model);
430  s << indent << "D[]" << std::endl;
431  for (size_t i = 0; i < v.D.size(); ++i)
432  {
433  s << indent << " D[" << i << "]: ";
434  Printer<double>::stream(s, indent + " ", v.D[i]);
435  }
436  s << indent << "K[]" << std::endl;
437  for (size_t i = 0; i < v.K.size(); ++i)
438  {
439  s << indent << " K[" << i << "]: ";
440  Printer<double>::stream(s, indent + " ", v.K[i]);
441  }
442  s << indent << "R[]" << std::endl;
443  for (size_t i = 0; i < v.R.size(); ++i)
444  {
445  s << indent << " R[" << i << "]: ";
446  Printer<double>::stream(s, indent + " ", v.R[i]);
447  }
448  s << indent << "P[]" << std::endl;
449  for (size_t i = 0; i < v.P.size(); ++i)
450  {
451  s << indent << " P[" << i << "]: ";
452  Printer<double>::stream(s, indent + " ", v.P[i]);
453  }
454  s << indent << "binning_x: ";
455  Printer<uint32_t>::stream(s, indent + " ", v.binning_x);
456  s << indent << "binning_y: ";
457  Printer<uint32_t>::stream(s, indent + " ", v.binning_y);
458  s << indent << "roi: ";
459  s << std::endl;
460  Printer< ::sensor_msgs::RegionOfInterest_<ContainerAllocator> >::stream(s, indent + "  ", v.roi);
461  }
462 };
463 
464 } // namespace message_operations
465 } // namespace rs2rosinternal
466 
467 #endif // SENSOR_MSGS_MESSAGE_CAMERAINFO_H
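
The listing above defines the generated CameraInfo_ struct and its plain CameraInfo typedef. As a usage reference, here is a minimal sketch of filling such a message for a hypothetical 640x480 camera using the "plumb_bob" model described in the embedded message definition. The include path, function name, and all numeric values are illustrative assumptions, not part of the generated file.

    #include <sensor_msgs/CameraInfo.h>   // this generated header (path assumed)

    sensor_msgs::CameraInfo make_example_camera_info()
    {
        sensor_msgs::CameraInfo info;

        info.header.frame_id = "camera_optical_frame";   // optical frame of the camera
        info.height = 480;                               // calibration resolution
        info.width  = 640;

        info.distortion_model = "plumb_bob";
        info.D = { -0.1, 0.01, 0.0, 0.0, 0.0 };          // k1, k2, t1, t2, k3 (illustrative)

        const double fx = 610.0, fy = 610.0, cx = 320.0, cy = 240.0;
        info.K = { fx, 0.0, cx,
                   0.0, fy, cy,
                   0.0, 0.0, 1.0 };                      // 3x3 row-major intrinsics

        info.R = { 1.0, 0.0, 0.0,
                   0.0, 1.0, 0.0,
                   0.0, 0.0, 1.0 };                      // identity for a monocular camera

        info.P = { fx, 0.0, cx, 0.0,
                   0.0, fy, cy, 0.0,
                   0.0, 0.0, 1.0, 0.0 };                 // 3x4 projection, Tx = Ty = 0

        info.binning_x = 0;                              // 0 is treated the same as 1 (no binning)
        info.binning_y = 0;
        // roi left at all zeros => full resolution, per the message definition above

        return info;
    }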
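The embedded definition of P states that a 3D point [X Y Z]' in the camera coordinate frame projects to rectified pixel coordinates via [u v w]' = P * [X Y Z 1]', with x = u / w and y = v / w. Below is a minimal sketch of that computation, assuming only that P is stored row-major as declared by _P_type; the helper name is illustrative.

    #include <array>
    #include <utility>

    // Project a 3D point (camera coordinate frame) into rectified pixel coordinates
    // using a 3x4 row-major projection matrix P, per the CameraInfo message comments.
    std::pair<double, double> project_point(const std::array<double, 12>& P,
                                            double X, double Y, double Z)
    {
        const double u = P[0] * X + P[1] * Y + P[2]  * Z + P[3];
        const double v = P[4] * X + P[5] * Y + P[6]  * Z + P[7];
        const double w = P[8] * X + P[9] * Y + P[10] * Z + P[11];
        return { u / w, v / w };   // (x, y) in the rectified image
    }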
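The message definition also notes that binning reduces the output resolution to (width / binning_x) x (height / binning_y), and that binning_x = binning_y = 0 is treated the same as 1 (no subsampling). A small sketch of that rule, with an assumed helper name:

    #include <cstdint>
    #include <utility>

    // Effective output resolution after binning, per the CameraInfo message comments:
    // a binning value of 0 is interpreted as 1.
    std::pair<uint32_t, uint32_t> binned_resolution(uint32_t width, uint32_t height,
                                                    uint32_t binning_x, uint32_t binning_y)
    {
        const uint32_t bx = (binning_x == 0) ? 1 : binning_x;
        const uint32_t by = (binning_y == 0) ? 1 : binning_y;
        return { width / bx, height / by };
    }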