processor.cpp
/*********************************************************************
 * Software License Agreement (BSD License)
 *
 *  Copyright (c) 2008, Willow Garage, Inc.
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of the Willow Garage nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************/
#include <ros/assert.h>
#include <stereo_image_proc/processor.h>
#include <sensor_msgs/image_encodings.h>
#include <cmath>
#include <limits>

namespace stereo_image_proc {

bool StereoProcessor::process(const sensor_msgs::ImageConstPtr& left_raw,
                              const sensor_msgs::ImageConstPtr& right_raw,
                              const image_geometry::StereoCameraModel& model,
                              StereoImageSet& output, int flags) const
{
  // Do monocular processing on left and right images
  int left_flags = flags & LEFT_ALL;
  int right_flags = flags & RIGHT_ALL;
  if (flags & STEREO_ALL) {
    // Need the rectified images for stereo processing
    left_flags |= LEFT_RECT;
    right_flags |= RIGHT_RECT;
  }
  if (flags & (POINT_CLOUD | POINT_CLOUD2)) {
    flags |= DISPARITY;
    // Need the color channels for the point cloud
    left_flags |= LEFT_RECT_COLOR;
  }
  if (!mono_processor_.process(left_raw, model.left(), output.left, left_flags))
    return false;
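  // The RIGHT_* flags are the LEFT_* flags shifted up by 4 bits, so shifting
  // right_flags back down maps them onto the mono processor's flag values.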
  if (!mono_processor_.process(right_raw, model.right(), output.right, right_flags >> 4))
    return false;

  // Do block matching to produce the disparity image
  if (flags & DISPARITY) {
    processDisparity(output.left.rect, output.right.rect, model, output.disparity);
  }

  // Project the disparity image to a 3D point cloud (sensor_msgs/PointCloud)
  if (flags & POINT_CLOUD) {
    processPoints(output.disparity, output.left.rect_color, output.left.color_encoding, model, output.points);
  }

  // Project the disparity image to a 3D point cloud (sensor_msgs/PointCloud2)
  if (flags & POINT_CLOUD2) {
    processPoints2(output.disparity, output.left.rect_color, output.left.color_encoding, model, output.points2);
  }

  return true;
}

void StereoProcessor::processDisparity(const cv::Mat& left_rect, const cv::Mat& right_rect,
                                       const image_geometry::StereoCameraModel& model,
                                       stereo_msgs::DisparityImage& disparity) const
{
  // Fixed-point disparity is 16 times the true value: d = d_fp / 16.0 = x_l - x_r.
  static const int DPP = 16; // disparities per pixel
  static const double inv_dpp = 1.0 / DPP;

  // Block matcher produces 16-bit signed (fixed point) disparity image
  if (current_stereo_algorithm_ == BM)
#if CV_MAJOR_VERSION >= 3
    block_matcher_->compute(left_rect, right_rect, disparity16_);
  else
    sg_block_matcher_->compute(left_rect, right_rect, disparity16_);
#else
    block_matcher_(left_rect, right_rect, disparity16_);
  else
    sg_block_matcher_(left_rect, right_rect, disparity16_);
#endif

  // Fill in DisparityImage image data, converting to 32-bit float
  sensor_msgs::Image& dimage = disparity.image;
  dimage.height = disparity16_.rows;
  dimage.width = disparity16_.cols;
  dimage.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
  dimage.step = dimage.width * sizeof(float);
  dimage.data.resize(dimage.step * dimage.height);
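  // Wrap the message's data buffer in a cv::Mat header so convertTo() below
  // writes the float disparities directly into the message without a copy.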
  cv::Mat_<float> dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step);
  // We convert from fixed-point to float disparity and also adjust for any x-offset between
  // the principal points: d = d_fp*inv_dpp - (cx_l - cx_r)
  disparity16_.convertTo(dmat, dmat.type(), inv_dpp, -(model.left().cx() - model.right().cx()));
  ROS_ASSERT(dmat.data == &dimage.data[0]);

  // Stereo parameters
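  // Consumers recover depth from disparity as z = f * T / d, so the focal
  // length and baseline are carried along with the image.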
  disparity.f = model.right().fx();
  disparity.T = model.baseline();

  // Disparity search range
  disparity.min_disparity = getMinDisparity();
  disparity.max_disparity = getMinDisparity() + getDisparityRange() - 1;
  disparity.delta_d = inv_dpp;
}

inline bool isValidPoint(const cv::Vec3f& pt)
{
  // Check both for disparities explicitly marked as invalid (where OpenCV maps pt.z to MISSING_Z)
  // and zero disparities (point mapped to infinity).
  return pt[2] != image_geometry::StereoCameraModel::MISSING_Z && !std::isinf(pt[2]);
}

void StereoProcessor::processPoints(const stereo_msgs::DisparityImage& disparity,
                                    const cv::Mat& color, const std::string& encoding,
                                    const image_geometry::StereoCameraModel& model,
                                    sensor_msgs::PointCloud& points) const
{
  // Calculate dense point cloud
  const sensor_msgs::Image& dimage = disparity.image;
  const cv::Mat_<float> dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step);
  model.projectDisparityImageTo3d(dmat, dense_points_, true);

  // Fill in sparse point cloud message
  points.points.resize(0);
  points.channels.resize(3);
  points.channels[0].name = "rgb";
  points.channels[0].values.resize(0);
  points.channels[1].name = "u";
  points.channels[1].values.resize(0);
  points.channels[2].name = "v";
  points.channels[2].values.resize(0);

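  // Note: in these loops u indexes rows and v indexes columns of the dense
  // point image; those row/column indices are what the "u"/"v" channels store.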
  for (int32_t u = 0; u < dense_points_.rows; ++u) {
    for (int32_t v = 0; v < dense_points_.cols; ++v) {
      if (isValidPoint(dense_points_(u,v))) {
        // x,y,z
        geometry_msgs::Point32 pt;
        pt.x = dense_points_(u,v)[0];
        pt.y = dense_points_(u,v)[1];
        pt.z = dense_points_(u,v)[2];
        points.points.push_back(pt);
        // u,v
        points.channels[1].values.push_back(u);
        points.channels[2].values.push_back(v);
      }
    }
  }

  // Fill in color
  namespace enc = sensor_msgs::image_encodings;
  points.channels[0].values.reserve(points.points.size());
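  // PointCloud channels hold floats, so the 24-bit packed 0x00RRGGBB color is
  // type-punned into a float (the packed-RGB convention used by PCL).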
  if (encoding == enc::MONO8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v) {
        if (isValidPoint(dense_points_(u,v))) {
          uint8_t g = color.at<uint8_t>(u,v);
          int32_t rgb = (g << 16) | (g << 8) | g;
          points.channels[0].values.push_back(*(float*)(&rgb));
        }
      }
    }
  }
  else if (encoding == enc::RGB8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec3b& rgb = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];
          points.channels[0].values.push_back(*(float*)(&rgb_packed));
        }
      }
    }
  }
  else if (encoding == enc::RGBA8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec4b& rgba = color.at<cv::Vec4b>(u,v);
          int32_t rgb_packed = (rgba[0] << 16) | (rgba[1] << 8) | rgba[2];
          points.channels[0].values.push_back(*(float*)(&rgb_packed));
        }
      }
    }
  }
  else if (encoding == enc::BGR8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec3b& bgr = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (bgr[2] << 16) | (bgr[1] << 8) | bgr[0];
          points.channels[0].values.push_back(*(float*)(&rgb_packed));
        }
      }
    }
  }
  else if (encoding == enc::BGRA8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec4b& bgra = color.at<cv::Vec4b>(u,v);
          int32_t rgb_packed = (bgra[2] << 16) | (bgra[1] << 8) | bgra[0];
          points.channels[0].values.push_back(*(float*)(&rgb_packed));
        }
      }
    }
  }
  else {
    ROS_WARN("Could not fill color channel of the point cloud, unrecognized encoding '%s'", encoding.c_str());
  }
}

void StereoProcessor::processPoints2(const stereo_msgs::DisparityImage& disparity,
                                     const cv::Mat& color, const std::string& encoding,
                                     const image_geometry::StereoCameraModel& model,
                                     sensor_msgs::PointCloud2& points) const
{
  // Calculate dense point cloud
  const sensor_msgs::Image& dimage = disparity.image;
  const cv::Mat_<float> dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step);
  model.projectDisparityImageTo3d(dmat, dense_points_, true);

  // Fill in the PointCloud2 message (organized, 2D image-like layout)
  points.height = dense_points_.rows;
  points.width = dense_points_.cols;
  points.fields.resize (4);
  points.fields[0].name = "x";
  points.fields[0].offset = 0;
  points.fields[0].count = 1;
  points.fields[0].datatype = sensor_msgs::PointField::FLOAT32;
  points.fields[1].name = "y";
  points.fields[1].offset = 4;
  points.fields[1].count = 1;
  points.fields[1].datatype = sensor_msgs::PointField::FLOAT32;
  points.fields[2].name = "z";
  points.fields[2].offset = 8;
  points.fields[2].count = 1;
  points.fields[2].datatype = sensor_msgs::PointField::FLOAT32;
  points.fields[3].name = "rgb";
  points.fields[3].offset = 12;
  points.fields[3].count = 1;
  points.fields[3].datatype = sensor_msgs::PointField::FLOAT32;
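  // Each point occupies 16 bytes: the x, y, z floats at offsets 0/4/8 and the
  // packed RGB color at offset 12.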
  //points.is_bigendian = false; ???
  points.point_step = 16;
  points.row_step = points.point_step * points.width;
  points.data.resize (points.row_step * points.height);
  points.is_dense = false; // there may be invalid points

  float bad_point = std::numeric_limits<float>::quiet_NaN ();
  int i = 0;
  for (int32_t u = 0; u < dense_points_.rows; ++u) {
    for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
      if (isValidPoint(dense_points_(u,v))) {
        // x,y,z,rgba
        memcpy (&points.data[i * points.point_step + 0], &dense_points_(u,v)[0], sizeof (float));
        memcpy (&points.data[i * points.point_step + 4], &dense_points_(u,v)[1], sizeof (float));
        memcpy (&points.data[i * points.point_step + 8], &dense_points_(u,v)[2], sizeof (float));
      }
      else {
        memcpy (&points.data[i * points.point_step + 0], &bad_point, sizeof (float));
        memcpy (&points.data[i * points.point_step + 4], &bad_point, sizeof (float));
        memcpy (&points.data[i * points.point_step + 8], &bad_point, sizeof (float));
      }
    }
  }

  // Fill in color
  namespace enc = sensor_msgs::image_encodings;
  i = 0;
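  // The packed 0x00RRGGBB value is memcpy'd into the 4 bytes of the FLOAT32
  // "rgb" field; invalid points get a NaN there as well.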
  if (encoding == enc::MONO8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
        if (isValidPoint(dense_points_(u,v))) {
          uint8_t g = color.at<uint8_t>(u,v);
          int32_t rgb = (g << 16) | (g << 8) | g;
          memcpy (&points.data[i * points.point_step + 12], &rgb, sizeof (int32_t));
        }
        else {
          memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float));
        }
      }
    }
  }
  else if (encoding == enc::RGB8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec3b& rgb = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];
          memcpy (&points.data[i * points.point_step + 12], &rgb_packed, sizeof (int32_t));
        }
        else {
          memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float));
        }
      }
    }
  }
  else if (encoding == enc::RGBA8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec4b& rgba = color.at<cv::Vec4b>(u,v);
          int32_t rgb_packed = (rgba[0] << 16) | (rgba[1] << 8) | rgba[2];
          memcpy (&points.data[i * points.point_step + 12], &rgb_packed, sizeof (int32_t));
        }
        else {
          memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float));
        }
      }
    }
  }
  else if (encoding == enc::BGR8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec3b& bgr = color.at<cv::Vec3b>(u,v);
          int32_t rgb_packed = (bgr[2] << 16) | (bgr[1] << 8) | bgr[0];
          memcpy (&points.data[i * points.point_step + 12], &rgb_packed, sizeof (int32_t));
        }
        else {
          memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float));
        }
      }
    }
  }
  else if (encoding == enc::BGRA8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec4b& bgra = color.at<cv::Vec4b>(u,v);
          int32_t rgb_packed = (bgra[2] << 16) | (bgra[1] << 8) | bgra[0];
          memcpy (&points.data[i * points.point_step + 12], &rgb_packed, sizeof (int32_t));
        }
        else {
          memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float));
        }
      }
    }
  }
  else {
    ROS_WARN("Could not fill color channel of the point cloud, unrecognized encoding '%s'", encoding.c_str());
  }
}

} //namespace stereo_image_proc