CameraTango.cpp
/*
Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the Universite de Sherbrooke nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "CameraTango.h"
#include "util.h"
#include "rtabmap/utilite/ULogger.h"
#include "rtabmap/utilite/UTimer.h"
#include "rtabmap/core/util3d_transforms.h"
#include "rtabmap/core/util2d.h"
#include <tango_client_api.h>
#include <tango_support_api.h>
#include "tango-gl/camera.h"

namespace rtabmap {

#define nullptr 0
const int kVersionStringLength = 128;
const int holeSize = 5;
const float maxDepthError = 0.10;
const int scanDownsampling = 1;

// android phone
// 11 10 01 00 // portrait
// 01 11 00 10 // left
// 10 00 11 01 // right
// 00 01 10 11 // down

const float kTextureCoords0[] = {1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0};
const float kTextureCoords90[] = {0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0};
const float kTextureCoords180[] = {0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0};
const float kTextureCoords270[] = {1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0};
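
// Each table above holds the (u,v) texture coordinates of the four corners of
// the full-screen quad for one display rotation; captureImage() below copies
// the matching table into transformed_uvs_ (8 floats) once the rotation is known.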

// Callbacks
void onPointCloudAvailableRouter(void* context, const TangoPointCloud* point_cloud)
{
	CameraTango* app = static_cast<CameraTango*>(context);
	if(point_cloud->num_points>0)
	{
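		// Wrap Tango's XYZC point buffer in a cv::Mat header (CV_32FC4, no copy);
		// cloudReceived() consumes it synchronously, so the buffer is not kept
		// past this callback.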
		app->cloudReceived(cv::Mat(1, point_cloud->num_points, CV_32FC4, point_cloud->points[0]), point_cloud->timestamp);
	}
}

void onFrameAvailableRouter(void* context, TangoCameraId id, const TangoImageBuffer* color)
{
	CameraTango* app = static_cast<CameraTango*>(context);

	cv::Mat tangoImage;
	if(color->format == TANGO_HAL_PIXEL_FORMAT_RGBA_8888)
	{
		tangoImage = cv::Mat(color->height, color->width, CV_8UC4, color->data);
	}
	else if(color->format == TANGO_HAL_PIXEL_FORMAT_YV12)
	{
		tangoImage = cv::Mat(color->height+color->height/2, color->width, CV_8UC1, color->data);
	}
	else if(color->format == TANGO_HAL_PIXEL_FORMAT_YCrCb_420_SP)
	{
		tangoImage = cv::Mat(color->height+color->height/2, color->width, CV_8UC1, color->data);
	}
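	// 35 is Android's HAL_PIXEL_FORMAT_YCbCr_420_888, which apparently has no
	// TANGO_HAL_PIXEL_FORMAT enum value; it is treated as a YUV 4:2:0 buffer.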
	else if(color->format == 35)
	{
		tangoImage = cv::Mat(color->height+color->height/2, color->width, CV_8UC1, color->data);
	}
	else
	{
		LOGE("Unsupported color format: %d.", color->format);
	}

	if(!tangoImage.empty())
	{
		app->rgbReceived(tangoImage, (unsigned int)color->format, color->timestamp);
	}
}

void onPoseAvailableRouter(void* context, const TangoPoseData* pose)
{
	if(pose->status_code == TANGO_POSE_VALID)
	{
		CameraTango* app = static_cast<CameraTango*>(context);
		app->poseReceived(rtabmap_world_T_tango_world * app->tangoPoseToTransform(pose) * tango_device_T_rtabmap_world);
	}
}

void onTangoEventAvailableRouter(void* context, const TangoEvent* event)
{
	CameraTango* app = static_cast<CameraTango*>(context);
	app->tangoEventReceived(event->type, event->event_key, event->event_value);
}

//////////////////////////////
// CameraTango
//////////////////////////////
CameraTango::CameraTango(bool colorCamera, int decimation, bool publishRawScan, bool smoothing) :
	CameraMobile(smoothing),
	tango_config_(0),
	colorCamera_(colorCamera),
	decimation_(decimation),
	rawScanPublished_(publishRawScan),
	tangoColorType_(0),
	tangoColorStamp_(0)
{
	UASSERT(decimation >= 1);
}

CameraTango::~CameraTango()
{
	// Disconnect Tango service
	close();
}

// Compute fisheye distorted coordinates from undistorted coordinates.
// The distortion model used by the Tango fisheye camera is called FOV and is
// described in 'Straight lines have to be straight' by Frederic Devernay and
// Olivier Faugeras. See https://hal.inria.fr/inria-00267247/document.
// Tango ROS Streamer: https://github.com/Intermodalics/tango_ros/blob/master/tango_ros_common/tango_ros_native/src/tango_ros_node.cpp
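// In this model a point at undistorted radius r_u from the principal point maps
// to distorted radius r_d = atan(2 * r_u * tan(w/2)) / w, where w is the single
// FOV coefficient; the function below computes the ratio r_d/r_u once and
// scales both coordinates by it.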
void applyFovModel(
		double xu, double yu, double w, double w_inverse, double two_tan_w_div_two,
		double* xd, double* yd) {
	double ru = sqrt(xu * xu + yu * yu);
	constexpr double epsilon = 1e-7;
	if (w < epsilon || ru < epsilon) {
		*xd = xu;
		*yd = yu;
	} else {
		double rd_div_ru = std::atan(ru * two_tan_w_div_two) * w_inverse / ru;
		*xd = xu * rd_div_ru;
		*yd = yu * rd_div_ru;
	}
}

// Compute the warp maps to undistort the Tango fisheye image using the FOV
// model. See OpenCV documentation for more information on warp maps:
// http://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html
// Tango ROS Streamer: https://github.com/Intermodalics/tango_ros/blob/master/tango_ros_common/tango_ros_native/src/tango_ros_node.cpp
// @param fisheyeModel the fisheye camera intrinsics.
// @param mapX the output map for the x direction.
// @param mapY the output map for the y direction.
void initFisheyeRectificationMap(
		const CameraModel & fisheyeModel,
		cv::Mat & mapX, cv::Mat & mapY) {
	const double & fx = fisheyeModel.K().at<double>(0,0);
	const double & fy = fisheyeModel.K().at<double>(1,1);
	const double & cx = fisheyeModel.K().at<double>(0,2);
	const double & cy = fisheyeModel.K().at<double>(1,2);
	const double & w = fisheyeModel.D().at<double>(0,0);
	mapX.create(fisheyeModel.imageSize(), CV_32FC1);
	mapY.create(fisheyeModel.imageSize(), CV_32FC1);
	LOGD("initFisheyeRectificationMap: fx=%f fy=%f, cx=%f, cy=%f, w=%f", fx, fy, cx, cy, w);
	// Pre-computed variables for more efficiency.
	const double fy_inverse = 1.0 / fy;
	const double fx_inverse = 1.0 / fx;
	const double w_inverse = 1.0 / w;
	const double two_tan_w_div_two = 2.0 * std::tan(w * 0.5);
	// Compute warp maps in x and y directions.
	// OpenCV expects maps from dest to src, i.e. from undistorted to distorted
	// pixel coordinates.
	for(int iu = 0; iu < fisheyeModel.imageHeight(); ++iu) {
		for (int ju = 0; ju < fisheyeModel.imageWidth(); ++ju) {
			double xu = (ju - cx) * fx_inverse;
			double yu = (iu - cy) * fy_inverse;
			double xd, yd;
			applyFovModel(xu, yu, w, w_inverse, two_tan_w_div_two, &xd, &yd);
			double jd = cx + xd * fx;
			double id = cy + yd * fy;
			mapX.at<float>(iu, ju) = jd;
			mapY.at<float>(iu, ju) = id;
		}
	}
}
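
// Minimal usage sketch: once computed, the maps undistort a fisheye frame with
// a single remap call, exactly as done in cloudReceived() below:
//   cv::Mat rectified;
//   cv::remap(fisheyeImage, rectified, mapX, mapY, cv::INTER_LINEAR);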

bool CameraTango::init(const std::string & calibrationFolder, const std::string & cameraName)
{
	close();

	TangoSupport_initialize(TangoService_getPoseAtTime, TangoService_getCameraIntrinsics);

	// Connect to Tango
	LOGI("NativeRTABMap: Setup tango config");
	tango_config_ = TangoService_getConfig(TANGO_CONFIG_DEFAULT);
	if (tango_config_ == nullptr)
	{
		LOGE("NativeRTABMap: Failed to get default config");
		return false;
	}

	// Set auto-recovery for motion tracking as requested by the user.
	bool is_auto_recovery = true;
	int ret = TangoConfig_setBool(tango_config_, "config_enable_auto_recovery", is_auto_recovery);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("NativeRTABMap: config_enable_auto_recovery() failed with error code: %d", ret);
		return false;
	}

	if(colorCamera_)
	{
		// Enable color.
		ret = TangoConfig_setBool(tango_config_, "config_enable_color_camera", true);
		if (ret != TANGO_SUCCESS)
		{
			LOGE("NativeRTABMap: config_enable_color_camera() failed with error code: %d", ret);
			return false;
		}
	}

	// Enable depth.
	ret = TangoConfig_setBool(tango_config_, "config_enable_depth", true);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("NativeRTABMap: config_enable_depth() failed with error code: %d", ret);
		return false;
	}

	// Need to specify the depth_mode as XYZC.
	ret = TangoConfig_setInt32(tango_config_, "config_depth_mode", TANGO_POINTCLOUD_XYZC);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("Failed to set 'depth_mode' configuration flag with error code: %d", ret);
		return false;
	}

	// Note that it's super important for AR applications that we enable low
	// latency imu integration so that we have pose information available as
	// quickly as possible. Without setting this flag, you'll often receive
	// invalid poses when calling GetPoseAtTime for an image.
	ret = TangoConfig_setBool(tango_config_, "config_enable_low_latency_imu_integration", true);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("NativeRTABMap: Failed to enable low latency imu integration.");
		return false;
	}

	// Drift correction allows motion tracking to recover after it loses tracking.
	//
	// The drift corrected pose is available through the frame pair with
	// base frame AREA_DESCRIPTION and target frame DEVICE.
	/*ret = TangoConfig_setBool(tango_config_, "config_enable_drift_correction", true);
	if (ret != TANGO_SUCCESS) {
		LOGE(
			"NativeRTABMap: enabling config_enable_drift_correction "
			"failed with error code: %d",
			ret);
		return false;
	}*/

	// Get TangoCore version string from service.
	char tango_core_version[kVersionStringLength];
	ret = TangoConfig_getString(tango_config_, "tango_service_library_version", tango_core_version, kVersionStringLength);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("NativeRTABMap: get tango core version failed with error code: %d", ret);
		return false;
	}
	LOGI("NativeRTABMap: Tango version : %s", tango_core_version);

	// Callbacks
	LOGI("NativeRTABMap: Setup callbacks");
	// Attach the onPointCloudAvailable callback.
	// The callback will be called after the service is connected.
	ret = TangoService_connectOnPointCloudAvailable(onPointCloudAvailableRouter);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("NativeRTABMap: Failed to connect to point cloud callback with error code: %d", ret);
		return false;
	}

	ret = TangoService_connectOnFrameAvailable(colorCamera_?TANGO_CAMERA_COLOR:TANGO_CAMERA_FISHEYE, this, onFrameAvailableRouter);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("NativeRTABMap: Failed to connect to color callback with error code: %d", ret);
		return false;
	}

	// Attach the onPoseAvailable callback.
	// The callback will be called after the service is connected.
	TangoCoordinateFramePair pair;
	//pair.base = TANGO_COORDINATE_FRAME_AREA_DESCRIPTION; // drift correction is enabled
	pair.base = TANGO_COORDINATE_FRAME_START_OF_SERVICE;
	pair.target = TANGO_COORDINATE_FRAME_DEVICE;
	ret = TangoService_connectOnPoseAvailable(1, &pair, onPoseAvailableRouter);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("NativeRTABMap: Failed to connect to pose callback with error code: %d", ret);
		return false;
	}

	// Attach the onEventAvailable callback.
	// The callback will be called after the service is connected.
	ret = TangoService_connectOnTangoEvent(onTangoEventAvailableRouter);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("NativeRTABMap: Failed to connect to event callback with error code: %d", ret);
		return false;
	}

	// Now connect service so the callbacks above will be called
	LOGI("NativeRTABMap: Connect to tango service");
	ret = TangoService_connect(this, tango_config_);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("NativeRTABMap: Failed to connect to the Tango service with error code: %d", ret);
		return false;
	}

	// update extrinsics
	LOGI("NativeRTABMap: Update extrinsics");
	TangoPoseData pose_data;
	TangoCoordinateFramePair frame_pair;

	// The TangoService_getPoseAtTime function is used to query device extrinsics
	// as well. We use timestamp 0.0 and the target frame pair to get the
	// extrinsics from the sensors.
	//
	// Get color camera with respect to device transformation matrix.
	frame_pair.base = TANGO_COORDINATE_FRAME_DEVICE;
	frame_pair.target = colorCamera_?TANGO_COORDINATE_FRAME_CAMERA_COLOR:TANGO_COORDINATE_FRAME_CAMERA_FISHEYE;
	ret = TangoService_getPoseAtTime(0.0, frame_pair, &pose_data);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("NativeRTABMap: Failed to get transform between the color camera frame and device frames");
		return false;
	}
	deviceTColorCamera_ = rtabmap::Transform(
			pose_data.translation[0],
			pose_data.translation[1],
			pose_data.translation[2],
			pose_data.orientation[0],
			pose_data.orientation[1],
			pose_data.orientation[2],
			pose_data.orientation[3]);

	// camera intrinsics
	TangoCameraIntrinsics color_camera_intrinsics;
	ret = TangoService_getCameraIntrinsics(colorCamera_?TANGO_CAMERA_COLOR:TANGO_CAMERA_FISHEYE, &color_camera_intrinsics);
	if (ret != TANGO_SUCCESS)
	{
		LOGE("NativeRTABMap: Failed to get the intrinsics for the color camera with error code: %d.", ret);
		return false;
	}

	LOGD("Calibration: fx=%f fy=%f cx=%f cy=%f width=%d height=%d",
			color_camera_intrinsics.fx,
			color_camera_intrinsics.fy,
			color_camera_intrinsics.cx,
			color_camera_intrinsics.cy,
			color_camera_intrinsics.width,
			color_camera_intrinsics.height);

	cv::Mat K = cv::Mat::eye(3, 3, CV_64FC1);
	K.at<double>(0,0) = color_camera_intrinsics.fx;
	K.at<double>(1,1) = color_camera_intrinsics.fy;
	K.at<double>(0,2) = color_camera_intrinsics.cx;
	K.at<double>(1,2) = color_camera_intrinsics.cy;
	cv::Mat D = cv::Mat::zeros(1, 5, CV_64FC1);
	LOGD("Calibration type = %d", color_camera_intrinsics.calibration_type);
	if(color_camera_intrinsics.calibration_type == TANGO_CALIBRATION_POLYNOMIAL_5_PARAMETERS ||
	   color_camera_intrinsics.calibration_type == TANGO_CALIBRATION_EQUIDISTANT)
	{
		D.at<double>(0,0) = color_camera_intrinsics.distortion[0];
		D.at<double>(0,1) = color_camera_intrinsics.distortion[1];
		D.at<double>(0,2) = color_camera_intrinsics.distortion[2];
		D.at<double>(0,3) = color_camera_intrinsics.distortion[3];
		D.at<double>(0,4) = color_camera_intrinsics.distortion[4];
	}
	else if(color_camera_intrinsics.calibration_type == TANGO_CALIBRATION_POLYNOMIAL_3_PARAMETERS)
	{
		D.at<double>(0,0) = color_camera_intrinsics.distortion[0];
		D.at<double>(0,1) = color_camera_intrinsics.distortion[1];
		D.at<double>(0,2) = 0.;
		D.at<double>(0,3) = 0.;
		D.at<double>(0,4) = color_camera_intrinsics.distortion[2];
	}
	else if(color_camera_intrinsics.calibration_type == TANGO_CALIBRATION_POLYNOMIAL_2_PARAMETERS)
	{
		D.at<double>(0,0) = color_camera_intrinsics.distortion[0];
		D.at<double>(0,1) = color_camera_intrinsics.distortion[1];
		D.at<double>(0,2) = 0.;
		D.at<double>(0,3) = 0.;
		D.at<double>(0,4) = 0.;
	}

	cv::Mat R = cv::Mat::eye(3, 3, CV_64FC1);
	cv::Mat P;

	LOGD("Distortion params: %f, %f, %f, %f, %f", D.at<double>(0,0), D.at<double>(0,1), D.at<double>(0,2), D.at<double>(0,3), D.at<double>(0,4));
	model_ = CameraModel(colorCamera_?"color":"fisheye",
			cv::Size(color_camera_intrinsics.width, color_camera_intrinsics.height),
			K, D, R, P,
			deviceTColorCamera_);

	if(!colorCamera_)
	{
		initFisheyeRectificationMap(model_, fisheyeRectifyMapX_, fisheyeRectifyMapY_);
	}

	LOGI("deviceTColorCameraRtabmap =%s", deviceTColorCamera_.prettyPrint().c_str());
	return true;
}

void CameraTango::close()
{
	if(tango_config_)
	{
		TangoConfig_free(tango_config_);
		tango_config_ = nullptr;
		LOGI("TangoService_disconnect()");
		TangoService_disconnect();
		LOGI("TangoService_disconnect() done.");
	}
	fisheyeRectifyMapX_ = cv::Mat();
	fisheyeRectifyMapY_ = cv::Mat();

	CameraMobile::close();
}

void CameraTango::cloudReceived(const cv::Mat & cloud, double timestamp)
{
	if(!cloud.empty())
	{
		//LOGD("Depth received! %fs (%d points)", timestamp, cloud.cols);

		UASSERT(cloud.type() == CV_32FC4);
		boost::mutex::scoped_lock lock(dataMutex_);

		// From post: http://stackoverflow.com/questions/29236110/timing-issues-with-tango-image-frames
		// "In the current version of Project Tango Tablet RGB IR camera
		// is used for both depth and color images and it can only do one
		// or the other for each frame. So in the stream we get 4 RGB frames
		// followed by 1 Depth frame resulting in the pattern you observed. This
		// is more of a hardware limitation."
		//
		// So, synchronize with the last RGB frame before the Depth is acquired
		if(!tangoColor_.empty())
		{
			UTimer timer;
			double dt = fabs(timestamp - tangoColorStamp_);

			//LOGD("Depth: %f vs %f = %f", tangoColorStamp_, timestamp, dt);

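			// Accept the RGB-depth pair only if the stamps are within 500 ms of
			// each other; otherwise this cloud is skipped and a closer pair awaited.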
			if(dt >= 0.0 && dt < 0.5)
			{
				bool notify = !data_.isValid();

				cv::Mat tangoImage = tangoColor_;
				cv::Mat rgb;
				double cloudStamp = timestamp;
				double rgbStamp = tangoColorStamp_;
				int tangoColorType = tangoColorType_;

				tangoColor_ = cv::Mat();
				tangoColorStamp_ = 0.0;
				tangoColorType_ = 0;

				LOGD("tangoColorType=%d", tangoColorType);
				if(tangoColorType == TANGO_HAL_PIXEL_FORMAT_RGBA_8888)
				{
					cv::cvtColor(tangoImage, rgb, cv::COLOR_RGBA2BGR);
				}
				else if(tangoColorType == TANGO_HAL_PIXEL_FORMAT_YV12)
				{
					cv::cvtColor(tangoImage, rgb, cv::COLOR_YUV2BGR_YV12);
				}
				else if(tangoColorType == TANGO_HAL_PIXEL_FORMAT_YCrCb_420_SP)
				{
					cv::cvtColor(tangoImage, rgb, cv::COLOR_YUV2BGR_NV21);
				}
				else if(tangoColorType == 35)
				{
					cv::cvtColor(tangoImage, rgb, cv::COLOR_YUV420sp2GRAY);
				}
				else
				{
					LOGE("Unsupported color format: %d.", tangoColorType);
					data_ = SensorData();
					return;
				}

				//for(int i=0; i<rgb.cols; ++i)
				//{
				//	UERROR("%d,%d,%d", (int)rgb.at<cv::Vec3b>(i)[0], (int)rgb.at<cv::Vec3b>(i)[1], (int)rgb.at<cv::Vec3b>(i)[2]);
				//}

				CameraModel model = model_;

				if(colorCamera_)
				{
					if(decimation_ > 1)
					{
						rgb = util2d::decimate(rgb, decimation_);
						model = model.scaled(1.0/double(decimation_));
					}
				}
				else
				{
					//UTimer t;
					cv::Mat rgbRect;
					cv::remap(rgb, rgbRect, fisheyeRectifyMapX_, fisheyeRectifyMapY_, cv::INTER_LINEAR, cv::BORDER_CONSTANT, 0);
					rgb = rgbRect;
					//LOGD("Rectification time=%fs", t.ticks());
				}

				// Query the depth image's frame transformation based on the depth
				// image's timestamp.
				cv::Mat depth;

				// Calculate the relative pose from the color camera frame at
				// color_timestamp t1 to the depth camera frame at depth_timestamp t0.
				Transform colorToDepth;
				TangoPoseData pose_color_image_t1_T_depth_image_t0;
				if (TangoSupport_calculateRelativePose(
						rgbStamp, colorCamera_?TANGO_COORDINATE_FRAME_CAMERA_COLOR:TANGO_COORDINATE_FRAME_CAMERA_FISHEYE,
						cloudStamp, TANGO_COORDINATE_FRAME_CAMERA_DEPTH,
						&pose_color_image_t1_T_depth_image_t0) == TANGO_SUCCESS)
				{
					colorToDepth = tangoPoseToTransform(&pose_color_image_t1_T_depth_image_t0);
				}
				else
				{
					LOGE("SynchronizationApplication: Could not find a valid relative pose at "
						 "time for color and depth cameras.");
				}

				if(colorToDepth.getNormSquared() > 100000)
				{
					LOGE("Very large color to depth error detected (%s)! Ignoring this frame!", colorToDepth.prettyPrint().c_str());
					colorToDepth.setNull();
				}
				cv::Mat scan;
				if(!colorToDepth.isNull())
				{
					// The Color Camera frame at timestamp t0 with respect to Depth
					// Camera frame at timestamp t1.
					//LOGD("colorToDepth=%s", colorToDepth.prettyPrint().c_str());
					LOGD("rgb=%dx%d cloud size=%d", rgb.cols, rgb.rows, (int)cloud.total());

					int pixelsSet = 0;
					int depthSizeDec = colorCamera_?8:1;
					depth = cv::Mat::zeros(model_.imageHeight()/depthSizeDec, model_.imageWidth()/depthSizeDec, CV_16UC1); // mm
					CameraModel depthModel = model_.scaled(1.0f/float(depthSizeDec));
					std::vector<cv::Point3f> scanData(rawScanPublished_?cloud.total():0);
					int oi=0;
					int closePoints = 0;
					float closeROI[4];
					closeROI[0] = depth.cols/4;
					closeROI[1] = 3*(depth.cols/4);
					closeROI[2] = depth.rows/4;
					closeROI[3] = 3*(depth.rows/4);
					unsigned short minDepthValue=10000;
					for(unsigned int i=0; i<cloud.total(); ++i)
					{
						const float * p = cloud.ptr<float>(0,i);
						cv::Point3f pt = util3d::transformPoint(cv::Point3f(p[0], p[1], p[2]), colorToDepth);

						if(pt.z > 0.0f && i%scanDownsampling == 0 && rawScanPublished_)
						{
							scanData.at(oi++) = pt;
						}

						int pixel_x_l, pixel_y_l, pixel_x_h, pixel_y_h;
						// Project the point on the image plane with the pinhole model
						// (u = fx*x/z + cx, v = fy*y/z + cy); both truncated (_l) and
						// rounded (_h) pixel coordinates are computed so that each
						// point can fill up to two pixels, leaving fewer holes.
						pixel_x_l = static_cast<int>((depthModel.fx()) * (pt.x / pt.z) + depthModel.cx());
						pixel_y_l = static_cast<int>((depthModel.fy()) * (pt.y / pt.z) + depthModel.cy());
						pixel_x_h = static_cast<int>((depthModel.fx()) * (pt.x / pt.z) + depthModel.cx() + 0.5f);
						pixel_y_h = static_cast<int>((depthModel.fy()) * (pt.y / pt.z) + depthModel.cy() + 0.5f);
						unsigned short depth_value(pt.z * 1000.0f);

						if(pixel_x_l>=closeROI[0] && pixel_x_l<closeROI[1] &&
						   pixel_y_l>closeROI[2] && pixel_y_l<closeROI[3] &&
						   depth_value < 600)
						{
							++closePoints;
							if(depth_value < minDepthValue)
							{
								minDepthValue = depth_value;
							}
						}

						bool pixelSet = false;
						if(pixel_x_l>=0 && pixel_x_l<depth.cols &&
						   pixel_y_l>0 && pixel_y_l<depth.rows && // ignore first line
						   depth_value)
						{
							unsigned short & depthPixel = depth.at<unsigned short>(pixel_y_l, pixel_x_l);
							if(depthPixel == 0 || depthPixel > depth_value)
							{
								depthPixel = depth_value;
								pixelSet = true;
							}
						}
						if(pixel_x_h>=0 && pixel_x_h<depth.cols &&
						   pixel_y_h>0 && pixel_y_h<depth.rows && // ignore first line
						   depth_value)
						{
							unsigned short & depthPixel = depth.at<unsigned short>(pixel_y_h, pixel_x_h);
							if(depthPixel == 0 || depthPixel > depth_value)
							{
								depthPixel = depth_value;
								pixelSet = true;
							}
						}
						if(pixelSet)
						{
							pixelsSet += 1;
						}
					}

					if(closePoints > 100)
					{
						this->post(new CameraInfoEvent(0, "TooClose", ""));
					}

					if(oi)
					{
						scan = cv::Mat(1, oi, CV_32FC3, scanData.data()).clone();
					}
					//LOGD("pixels depth set= %d", pixelsSet);
				}
				else
				{
					LOGE("color to depth pose is null?!? (rgb stamp=%f) (depth stamp=%f)", rgbStamp, cloudStamp);
				}

				if(!rgb.empty() && !depth.empty())
				{
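					// Fill small holes (up to holeSize pixels) left by the sparse cloud
					// projection, rejecting fills whose depth error ratio would exceed
					// maxDepthError.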
					depth = rtabmap::util2d::fillDepthHoles(depth, holeSize, maxDepthError);

					Transform odom = getPoseAtTimestamp(rgbStamp);

					//LOGD("Local = %s", model.localTransform().prettyPrint().c_str());
					//LOGD("tango = %s", poseDevice.prettyPrint().c_str());
					//LOGD("opengl(t)= %s", (opengl_world_T_tango_world * poseDevice).prettyPrint().c_str());

					// adjust origin
					if(!getOriginOffset().isNull())
					{
						odom = getOriginOffset() * odom;
					}

					// occlusion depth
					if(!depth.empty())
					{
						rtabmap::CameraModel depthModel = model.scaled(float(depth.cols) / float(model.imageWidth()));
						depthModel.setLocalTransform(odom*model.localTransform());
						this->setOcclusionImage(depth, depthModel);
					}

					//LOGD("rtabmap = %s", odom.prettyPrint().c_str());
					//LOGD("opengl(r)= %s", (opengl_world_T_rtabmap_world * odom * rtabmap_device_T_opengl_device).prettyPrint().c_str());

					Transform scanLocalTransform = model.localTransform();

					if(!scan.empty())
					{
						data_ = SensorData(LaserScan::backwardCompatibility(scan, cloud.total()/scanDownsampling, 0, scanLocalTransform), rgb, depth, model, this->getNextSeqID(), rgbStamp);
					}
					else
					{
						data_ = SensorData(rgb, depth, model, this->getNextSeqID(), rgbStamp);
					}
					data_.setGroundTruth(odom);
				}
				else
				{
					LOGE("Could not get depth and rgb images!?!");
					data_ = SensorData();
					return;
				}

				if(notify)
				{
					dataReady_.release();
				}
				LOGD("process cloud received %fs", timer.ticks());
			}
		}
	}
}

void CameraTango::rgbReceived(const cv::Mat & tangoImage, int type, double timestamp)
{
	if(!tangoImage.empty())
	{
		//LOGD("RGB received! %fs", timestamp);

		boost::mutex::scoped_lock lock(dataMutex_);

		tangoColor_ = tangoImage.clone();
		tangoColorStamp_ = timestamp;
		tangoColorType_ = type;
	}
}

void CameraTango::tangoEventReceived(int type, const char * key, const char * value)
{
	this->post(new CameraInfoEvent(type, key, value));
}

std::string CameraTango::getSerial() const
{
	return "Tango";
}

rtabmap::Transform CameraTango::tangoPoseToTransform(const TangoPoseData * tangoPose) const
{
	UASSERT(tangoPose);
	rtabmap::Transform pose;

	pose = rtabmap::Transform(
			tangoPose->translation[0],
			tangoPose->translation[1],
			tangoPose->translation[2],
			tangoPose->orientation[0],
			tangoPose->orientation[1],
			tangoPose->orientation[2],
			tangoPose->orientation[3]);

	return pose;
}

rtabmap::Transform CameraTango::getPoseAtTimestamp(double timestamp)
{
	rtabmap::Transform pose;

	TangoPoseData pose_start_service_T_device;
	TangoCoordinateFramePair frame_pair;
	frame_pair.base = TANGO_COORDINATE_FRAME_START_OF_SERVICE;
	frame_pair.target = TANGO_COORDINATE_FRAME_DEVICE;
	TangoErrorType status = TangoService_getPoseAtTime(timestamp, frame_pair, &pose_start_service_T_device);
	if (status != TANGO_SUCCESS)
	{
		LOGE(
			"PoseData: Failed to get transform between the Start of service and "
			"device frames at timestamp %lf",
			timestamp);
	}
	if (pose_start_service_T_device.status_code != TANGO_POSE_VALID)
	{
		LOGW(
			"PoseData: Failed to get transform between the Start of service and "
			"device frames at timestamp %lf",
			timestamp);
	}
	else
	{
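		// Chain the static frame transforms to remap the pose from Tango's
		// start-of-service/device axes into rtabmap's world/device conventions.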
		pose = rtabmap_world_T_tango_world * tangoPoseToTransform(&pose_start_service_T_device) * tango_device_T_rtabmap_world;
	}

	return pose;
}

SensorData CameraTango::captureImage(CameraInfo * info)
{
	//LOGI("Capturing image...");

	if(textureId_ == 0)
	{
		glGenTextures(1, &textureId_);
		glBindTexture(GL_TEXTURE_EXTERNAL_OES, textureId_);
		glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
		glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
		glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
	}

	// Update Texture (optional, just for first-view rendering)
	if(colorCamera_ && textureId_)
	{
		double video_overlay_timestamp;
		TangoErrorType status = TangoService_updateTextureExternalOes(TANGO_CAMERA_COLOR, textureId_, &video_overlay_timestamp);

		if (status == TANGO_SUCCESS)
		{
			if(info)
			{
				info->odomPose = getPoseAtTimestamp(video_overlay_timestamp);
			}

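			// The color camera is mounted 90 degrees off the device's natural
			// orientation, so shift the display rotation by one step (wrapping
			// modulo 4) before asking Tango Support for rotated data.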
			int rotation = static_cast<int>(getScreenRotation()) + 1; // remove 90deg camera rotation
			if (rotation > 3) {
				rotation -= 4;
			}

			TangoDoubleMatrixTransformData matrix_transform;
			status = TangoSupport_getDoubleMatrixTransformAtTime(
					video_overlay_timestamp,
					TANGO_COORDINATE_FRAME_CAMERA_COLOR,
					TANGO_COORDINATE_FRAME_START_OF_SERVICE,
					TANGO_SUPPORT_ENGINE_OPENGL,
					TANGO_SUPPORT_ENGINE_OPENGL,
					static_cast<TangoSupportRotation>(rotation),
					&matrix_transform);
			if (matrix_transform.status_code == TANGO_POSE_VALID)
			{
				// Get projection matrix
				TangoCameraIntrinsics color_camera_intrinsics;
				int ret = TangoSupport_getCameraIntrinsicsBasedOnDisplayRotation(
						TANGO_CAMERA_COLOR,
						static_cast<TangoSupportRotation>(rotation),
						&color_camera_intrinsics);

				if (ret == TANGO_SUCCESS) {
					float image_width = static_cast<float>(color_camera_intrinsics.width);
					float image_height = static_cast<float>(color_camera_intrinsics.height);
					float fx = static_cast<float>(color_camera_intrinsics.fx);
					float fy = static_cast<float>(color_camera_intrinsics.fy);
					float cx = static_cast<float>(color_camera_intrinsics.cx);
					float cy = static_cast<float>(color_camera_intrinsics.cy);

					viewMatrix_ = glm::make_mat4(matrix_transform.matrix);
					if(!getOriginOffset().isNull())
					{
						// adjust viewMatrix_ by the origin offset, converted to the
						// OpenGL world frame
					}

					projectionMatrix_ = tango_gl::Camera::ProjectionMatrixForCameraIntrinsics(
							image_width, image_height, fx, fy, cx, cy, 0.3, 50);

					switch(rotation)
					{
						case ROTATION_90:
							memcpy(transformed_uvs_, kTextureCoords90, 8*sizeof(float));
							break;
						case ROTATION_180:
							memcpy(transformed_uvs_, kTextureCoords180, 8*sizeof(float));
							break;
						case ROTATION_270:
							memcpy(transformed_uvs_, kTextureCoords270, 8*sizeof(float));
							break;
						case ROTATION_0:
						default:
							memcpy(transformed_uvs_, kTextureCoords0, 8*sizeof(float));
					}
					uvs_initialized_ = true;
				}
				else
				{
					UERROR("TangoSupport_getCameraIntrinsicsBasedOnDisplayRotation failed!");
				}
			}
			else
			{
				UERROR("TangoSupport_getDoubleMatrixTransformAtTime failed!");
			}
		}
		else
		{
			UERROR("TangoService_updateTextureExternalOes failed!");
		}
	}
877 
879  if(dataReady_.acquireTry(1))
880  {
881  boost::mutex::scoped_lock lock(dataMutex_);
882  data = data_;
883  data_ = SensorData();
884  if(info)
885  {
886  info->odomPose = data.groundTruth();
887  data.setGroundTruth(Transform());
888  }
889  }
890  return data;
891 
892 }

} /* namespace rtabmap */
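
// Usage sketch (not part of the original file): a minimal capture loop, assuming
// an Android build where the Tango service can be bound. takeImage() is the
// rtabmap::Camera entry point that ends up calling captureImage() above.
//
//   rtabmap::CameraTango camera(true /*color*/, 1 /*decimation*/,
//                               false /*publish raw scan*/, true /*smoothing*/);
//   if(camera.init())
//   {
//       rtabmap::CameraInfo info;
//       rtabmap::SensorData data = camera.takeImage(&info);
//       // data.imageRaw(), data.depthRaw() and info.odomPose are set when a
//       // synchronized RGB-depth pair was available.
//   }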