stereo_camera_model.cpp
#include "image_geometry/stereo_camera_model.h"

namespace image_geometry {

StereoCameraModel::StereoCameraModel()
  : Q_(0.0)
{
  Q_(0,0) = Q_(1,1) = 1.0;
}

StereoCameraModel::StereoCameraModel(const StereoCameraModel& other)
  : left_(other.left_), right_(other.right_),
    Q_(0.0)
{
  Q_(0,0) = Q_(1,1) = 1.0;
  if (other.initialized())
    updateQ();
}

StereoCameraModel& StereoCameraModel::operator=(const StereoCameraModel& other)
{
  if (other.initialized())
    this->fromCameraInfo(other.left_.cameraInfo(), other.right_.cameraInfo());
  return *this;
}

bool StereoCameraModel::fromCameraInfo(const sensor_msgs::CameraInfo& left,
                                       const sensor_msgs::CameraInfo& right)
{
  bool changed_left  = left_.fromCameraInfo(left);
  bool changed_right = right_.fromCameraInfo(right);
  bool changed = changed_left || changed_right;

  // Note: don't require identical time stamps to allow imperfectly synced stereo.
  assert( left_.tfFrame() == right_.tfFrame() );
  assert( left_.fx() == right_.fx() );
  assert( left_.fy() == right_.fy() );
  assert( left_.cy() == right_.cy() );
  // cx may differ for verged cameras

  if (changed)
    updateQ();

  return changed;
}

bool StereoCameraModel::fromCameraInfo(const sensor_msgs::CameraInfoConstPtr& left,
                                       const sensor_msgs::CameraInfoConstPtr& right)
{
  return fromCameraInfo(*left, *right);
}
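
// Typical callers pass the two CameraInfo messages of one (approximately)
// synchronized stereo pair, e.g. from a message_filters synchronizer; Q is
// only recomputed when the calibration actually changes.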

void StereoCameraModel::updateQ()
{
  // Update variable fields of reprojection matrix
  /*
    From Springer Handbook of Robotics, p. 524:

         [ Fx  0   Cx   0  ]
    P  = [ 0   Fy  Cy   0  ]
         [ 0   0   1    0  ]

         [ Fx  0   Cx'  FxTx ]
    P' = [ 0   Fy  Cy   0    ]
         [ 0   0   1    0    ]
    where unprimed parameters are from the left projection matrix, primed from the right.

    [u   v 1]^T = P  * [x y z 1]^T
    [u-d v 1]^T = P' * [x y z 1]^T

    Combining the two equations above results in the following equation

    [u v u-d 1]^T = [ Fx  0   Cx   0    ] * [x y z 1]^T
                    [ 0   Fy  Cy   0    ]
                    [ Fx  0   Cx'  FxTx ]
                    [ 0   0   1    0    ]

    Subtracting the 3rd row from the first and inverting the expression
    results in the following equation.

    [x y z 1]^T = Q * [u v d 1]^T

    Where Q is defined as

    Q = [ FyTx  0     0    -FyCxTx     ]
        [ 0     FxTx  0    -FxCyTx     ]
        [ 0     0     0     FxFyTx     ]
        [ 0     0     -Fy   Fy(Cx-Cx') ]

    Using the assumption Fx = Fy, Q can be simplified to the following, but for
    compatibility with stereo cameras with different focal lengths we use the
    full Q matrix.

        [ 1  0   0      -Cx         ]
    Q = [ 0  1   0      -Cy         ]
        [ 0  0   0       Fx         ]
        [ 0  0  -1/Tx   (Cx-Cx')/Tx ]

    Disparity = x_left - x_right
  */
  double Tx = -baseline(); // The baseline member negates our Tx. Undo this negation
  Q_(0,0) =  left_.fy() * Tx;
  Q_(0,3) = -left_.fy() * left_.cx() * Tx;
  Q_(1,1) =  left_.fx() * Tx;
  Q_(1,3) = -left_.fx() * left_.cy() * Tx;
  Q_(2,3) =  left_.fx() * left_.fy() * Tx;
  Q_(3,2) = -left_.fy();
  Q_(3,3) =  left_.fy() * (left_.cx() - right_.cx()); // zero when disparities are pre-adjusted
}
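
// For a rectified pair, the entries above reduce to the familiar stereo
// back-projection (with homogeneous scale W = Fy*((Cx - Cx') - d)):
//   Z = Fx * baseline / (d - (Cx - Cx'))
//   X = (u - Cx) * Z / Fx
//   Y = (v - Cy) * Z / Fy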

void StereoCameraModel::projectDisparityTo3d(const cv::Point2d& left_uv_rect, float disparity,
                                             cv::Point3d& xyz) const
{
  assert( initialized() );

  // Do the math inline:
  // [X Y Z W]^T = Q * [u v d 1]^T
  // Point = (X/W, Y/W, Z/W)
  // cv::perspectiveTransform could be used but with more overhead.
  double u = left_uv_rect.x, v = left_uv_rect.y;
  cv::Point3d XYZ( (Q_(0,0) * u) + Q_(0,3), (Q_(1,1) * v) + Q_(1,3), Q_(2,3));
  double W = Q_(3,2)*disparity + Q_(3,3);
  xyz = XYZ * (1.0/W);
}
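
// Example with hypothetical numbers: for Fx = Fy = 500 px, a 0.1 m baseline,
// and Cx == Cx', a rectified left-image pixel with disparity d = 10 px
// reprojects to Z = Fx * baseline / d = 500 * 0.1 / 10 = 5 m.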

const double StereoCameraModel::MISSING_Z = 10000.;

void StereoCameraModel::projectDisparityImageTo3d(const cv::Mat& disparity, cv::Mat& point_cloud,
                                                  bool handleMissingValues) const
{
  assert( initialized() );

  cv::reprojectImageTo3D(disparity, point_cloud, Q_, handleMissingValues);
}

} //namespace image_geometry
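
A minimal usage sketch of the class above, assuming the caller already has the
CameraInfo messages of a calibrated, rectified stereo pair and a CV_32FC1
disparity image (the names left_info, right_info, disparity and the pixel
coordinates are placeholders):

#include <image_geometry/stereo_camera_model.h>
#include <sensor_msgs/CameraInfo.h>
#include <opencv2/core/core.hpp>

// Reproject a disparity image into the left camera's rectified frame.
cv::Mat reprojectStereo(const sensor_msgs::CameraInfo& left_info,
                        const sensor_msgs::CameraInfo& right_info,
                        const cv::Mat& disparity)
{
  image_geometry::StereoCameraModel model;
  model.fromCameraInfo(left_info, right_info);  // fills Q via updateQ()

  // Dense reprojection: every pixel becomes an (X, Y, Z) point;
  // invalid (minimum) disparities are pushed to a large Z when requested.
  cv::Mat points;
  model.projectDisparityImageTo3d(disparity, points, true);

  // Single-pixel reprojection, for illustration:
  cv::Point3d xyz;
  model.projectDisparityTo3d(cv::Point2d(320.0, 240.0),        // hypothetical pixel (u, v)
                             disparity.at<float>(240, 320),    // its disparity value
                             xyz);
  return points;
}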