CameraTango.cpp
/*
Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the Universite de Sherbrooke nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "CameraTango.h"
#include "util.h"
#include "rtabmap/core/util3d_transforms.h"
#include "rtabmap/core/OdometryEvent.h"
#include "rtabmap/utilite/ULogger.h"
#include "rtabmap/core/util2d.h"
#include <tango_client_api.h>
#include <tango_support_api.h>

namespace rtabmap {

#define nullptr 0 // allow nullptr with pre-C++11 toolchains
const int kVersionStringLength = 128;
const int holeSize = 5;           // maximum hole size for depth hole filling (pixels)
const float maxDepthError = 0.10; // maximum depth error ratio for depth hole filling
const int scanDownsampling = 1;

// Callbacks
void onPointCloudAvailableRouter(void* context, const TangoPointCloud* point_cloud)
{
    CameraTango* app = static_cast<CameraTango*>(context);
    if(app->isRunning() && point_cloud->num_points > 0)
    {
        app->cloudReceived(cv::Mat(1, point_cloud->num_points, CV_32FC4, point_cloud->points[0]), point_cloud->timestamp);
    }
}

void onFrameAvailableRouter(void* context, TangoCameraId id, const TangoImageBuffer* color)
{
    CameraTango* app = static_cast<CameraTango*>(context);
    if(app->isRunning())
    {
        cv::Mat tangoImage;
        if(color->format == TANGO_HAL_PIXEL_FORMAT_RGBA_8888)
        {
            tangoImage = cv::Mat(color->height, color->width, CV_8UC4, color->data);
        }
        else if(color->format == TANGO_HAL_PIXEL_FORMAT_YV12)
        {
            tangoImage = cv::Mat(color->height+color->height/2, color->width, CV_8UC1, color->data);
        }
        else if(color->format == TANGO_HAL_PIXEL_FORMAT_YCrCb_420_SP)
        {
            tangoImage = cv::Mat(color->height+color->height/2, color->width, CV_8UC1, color->data);
        }
        else if(color->format == 35) // HAL_PIXEL_FORMAT_YCbCr_420_888
        {
            tangoImage = cv::Mat(color->height+color->height/2, color->width, CV_8UC1, color->data);
        }
        else
        {
            LOGE("Unsupported color format: %d.", color->format);
        }

        if(!tangoImage.empty())
        {
            app->rgbReceived(tangoImage, (unsigned int)color->format, color->timestamp);
        }
    }
}

void onPoseAvailableRouter(void* context, const TangoPoseData* pose)
{
    if(pose->status_code == TANGO_POSE_VALID)
    {
        CameraTango* app = static_cast<CameraTango*>(context);
        app->poseReceived(app->tangoPoseToTransform(pose));
    }
}

void onTangoEventAvailableRouter(void* context, const TangoEvent* event)
{
    CameraTango* app = static_cast<CameraTango*>(context);
    app->tangoEventReceived(event->type, event->event_key, event->event_value);
}

//////////////////////////////
// CameraTango
//////////////////////////////
const float CameraTango::bilateralFilteringSigmaS = 2.0f;
const float CameraTango::bilateralFilteringSigmaR = 0.075f;

CameraTango::CameraTango(bool colorCamera, int decimation, bool publishRawScan, bool smoothing) :
    Camera(0),
    tango_config_(0),
    previousStamp_(0.0),
    stampEpochOffset_(0.0),
    colorCamera_(colorCamera),
    decimation_(decimation),
    rawScanPublished_(publishRawScan),
    smoothing_(smoothing),
    cloudStamp_(0),
    tangoColorType_(0),
    tangoColorStamp_(0),
    colorCameraToDisplayRotation_(ROTATION_0),
    originUpdate_(false)
{
    UASSERT(decimation >= 1);
}

CameraTango::~CameraTango()
{
    // Disconnect Tango service
    close();
}

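// Example (sketch): how a host application might drive this camera. The
// constructor and init() are defined in this file; start() comes from
// rtabmap's UThread base class, as with the other camera drivers.
//
//   rtabmap::CameraTango camera(true /*colorCamera*/, 1 /*decimation*/, false /*publishRawScan*/, true /*smoothing*/);
//   if(camera.init())
//   {
//       camera.start(); // runs mainLoop(), which posts OdometryEvent messages
//   }
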
// Compute fisheye distorted coordinates from undistorted coordinates.
// The distortion model used by the Tango fisheye camera is called FOV and is
// described in 'Straight lines have to be straight' by Frederic Devernay and
// Olivier Faugeras. See https://hal.inria.fr/inria-00267247/document.
// Tango ROS Streamer: https://github.com/Intermodalics/tango_ros/blob/master/tango_ros_common/tango_ros_native/src/tango_ros_node.cpp
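// For an undistorted radius ru = sqrt(xu^2 + yu^2), the FOV model below maps
// it to the distorted radius
//   rd = atan(2 * ru * tan(w / 2)) / w
// so the rd_div_ru factor computed in applyFovModel() is rd / ru.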
void applyFovModel(
        double xu, double yu, double w, double w_inverse, double two_tan_w_div_two,
        double* xd, double* yd) {
    double ru = sqrt(xu * xu + yu * yu);
    constexpr double epsilon = 1e-7;
    if (w < epsilon || ru < epsilon) {
        *xd = xu;
        *yd = yu;
    } else {
        double rd_div_ru = std::atan(ru * two_tan_w_div_two) * w_inverse / ru;
        *xd = xu * rd_div_ru;
        *yd = yu * rd_div_ru;
    }
}
// Compute the warp maps to undistort the Tango fisheye image using the FOV
// model. See OpenCV documentation for more information on warp maps:
// http://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html
// Tango ROS Streamer: https://github.com/Intermodalics/tango_ros/blob/master/tango_ros_common/tango_ros_native/src/tango_ros_node.cpp
// @param fisheyeModel the fisheye camera intrinsics.
// @param mapX the output map for the x direction.
// @param mapY the output map for the y direction.
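// The maps computed here are consumed later in captureImage(), where the raw
// fisheye image is undistorted with
//   cv::remap(rgb, rgbRect, fisheyeRectifyMapX_, fisheyeRectifyMapY_, cv::INTER_LINEAR, cv::BORDER_CONSTANT, 0);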
void initFisheyeRectificationMap(
        const CameraModel& fisheyeModel,
        cv::Mat & mapX, cv::Mat & mapY) {
    const double & fx = fisheyeModel.K().at<double>(0,0);
    const double & fy = fisheyeModel.K().at<double>(1,1);
    const double & cx = fisheyeModel.K().at<double>(0,2);
    const double & cy = fisheyeModel.K().at<double>(1,2);
    const double & w = fisheyeModel.D().at<double>(0,0);
    mapX.create(fisheyeModel.imageSize(), CV_32FC1);
    mapY.create(fisheyeModel.imageSize(), CV_32FC1);
    LOGD("initFisheyeRectificationMap: fx=%f fy=%f, cx=%f, cy=%f, w=%f", fx, fy, cx, cy, w);
    // Pre-computed variables for more efficiency.
    const double fy_inverse = 1.0 / fy;
    const double fx_inverse = 1.0 / fx;
    const double w_inverse = 1.0 / w;
    const double two_tan_w_div_two = 2.0 * std::tan(w * 0.5);
    // Compute warp maps in x and y directions.
    // OpenCV expects maps from dest to src, i.e. from undistorted to distorted
    // pixel coordinates.
    for(int iu = 0; iu < fisheyeModel.imageHeight(); ++iu) {
        for (int ju = 0; ju < fisheyeModel.imageWidth(); ++ju) {
            double xu = (ju - cx) * fx_inverse;
            double yu = (iu - cy) * fy_inverse;
            double xd, yd;
            applyFovModel(xu, yu, w, w_inverse, two_tan_w_div_two, &xd, &yd);
            double jd = cx + xd * fx;
            double id = cy + yd * fy;
            mapX.at<float>(iu, ju) = jd;
            mapY.at<float>(iu, ju) = id;
        }
    }
}

bool CameraTango::init(const std::string & calibrationFolder, const std::string & cameraName)
{
    close();

    TangoSupport_initialize(TangoService_getPoseAtTime, TangoService_getCameraIntrinsics);

    // Connect to Tango
    LOGI("NativeRTABMap: Setup tango config");
    tango_config_ = TangoService_getConfig(TANGO_CONFIG_DEFAULT);
    if (tango_config_ == nullptr)
    {
        LOGE("NativeRTABMap: Failed to get default config");
        return false;
    }

    // Set auto-recovery for motion tracking as requested by the user.
    bool is_auto_recovery = true;
    int ret = TangoConfig_setBool(tango_config_, "config_enable_auto_recovery", is_auto_recovery);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("NativeRTABMap: config_enable_auto_recovery() failed with error code: %d", ret);
        return false;
    }

    if(colorCamera_)
    {
        // Enable color.
        ret = TangoConfig_setBool(tango_config_, "config_enable_color_camera", true);
        if (ret != TANGO_SUCCESS)
        {
            LOGE("NativeRTABMap: config_enable_color_camera() failed with error code: %d", ret);
            return false;
        }
    }

    // Enable depth.
    ret = TangoConfig_setBool(tango_config_, "config_enable_depth", true);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("NativeRTABMap: config_enable_depth() failed with error code: %d", ret);
        return false;
    }

    // Need to specify the depth_mode as XYZC.
    ret = TangoConfig_setInt32(tango_config_, "config_depth_mode", TANGO_POINTCLOUD_XYZC);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("Failed to set 'depth_mode' configuration flag with error code: %d", ret);
        return false;
    }

    // Note that it's super important for AR applications that we enable low
    // latency imu integration so that we have pose information available as
    // quickly as possible. Without setting this flag, you'll often receive
    // invalid poses when calling GetPoseAtTime for an image.
    ret = TangoConfig_setBool(tango_config_, "config_enable_low_latency_imu_integration", true);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("NativeRTABMap: Failed to enable low latency imu integration.");
        return false;
    }

    // Drift correction allows motion tracking to recover after it loses tracking.
    //
    // The drift corrected pose is available through the frame pair with
    // base frame AREA_DESCRIPTION and target frame DEVICE.
    /*ret = TangoConfig_setBool(tango_config_, "config_enable_drift_correction", true);
    if (ret != TANGO_SUCCESS) {
        LOGE(
            "NativeRTABMap: enabling config_enable_drift_correction "
            "failed with error code: %d",
            ret);
        return false;
    }*/

    // Get TangoCore version string from service.
    char tango_core_version[kVersionStringLength];
    ret = TangoConfig_getString(tango_config_, "tango_service_library_version", tango_core_version, kVersionStringLength);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("NativeRTABMap: get tango core version failed with error code: %d", ret);
        return false;
    }
    LOGI("NativeRTABMap: Tango version : %s", tango_core_version);

    // Callbacks
    LOGI("NativeRTABMap: Setup callbacks");
    // Attach the onPointCloudAvailable callback.
    // The callback will be called after the service is connected.
    ret = TangoService_connectOnPointCloudAvailable(onPointCloudAvailableRouter);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("NativeRTABMap: Failed to connect to point cloud callback with error code: %d", ret);
        return false;
    }

    ret = TangoService_connectOnFrameAvailable(colorCamera_?TANGO_CAMERA_COLOR:TANGO_CAMERA_FISHEYE, this, onFrameAvailableRouter);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("NativeRTABMap: Failed to connect to color callback with error code: %d", ret);
        return false;
    }

    // Attach the onPoseAvailable callback.
    // The callback will be called after the service is connected.
    TangoCoordinateFramePair pair;
    //pair.base = TANGO_COORDINATE_FRAME_AREA_DESCRIPTION; // when drift correction is enabled
    pair.base = TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    pair.target = TANGO_COORDINATE_FRAME_DEVICE;
    ret = TangoService_connectOnPoseAvailable(1, &pair, onPoseAvailableRouter);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("NativeRTABMap: Failed to connect to pose callback with error code: %d", ret);
        return false;
    }

    // Attach the onEventAvailable callback.
    // The callback will be called after the service is connected.
    ret = TangoService_connectOnTangoEvent(onTangoEventAvailableRouter);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("NativeRTABMap: Failed to connect to event callback with error code: %d", ret);
        return false;
    }

    // Now connect the service so the callbacks above will be called
    LOGI("NativeRTABMap: Connect to tango service");
    ret = TangoService_connect(this, tango_config_);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("NativeRTABMap: Failed to connect to the Tango service with error code: %d", ret);
        return false;
    }

    // Update extrinsics
    LOGI("NativeRTABMap: Update extrinsics");
    TangoPoseData pose_data;
    TangoCoordinateFramePair frame_pair;

    // The TangoService_getPoseAtTime function is used to query device
    // extrinsics as well. We use timestamp 0.0 and the target frame pair to
    // get the extrinsics from the sensors.
    //
    // Get color camera with respect to device transformation matrix.
    frame_pair.base = TANGO_COORDINATE_FRAME_DEVICE;
    frame_pair.target = colorCamera_?TANGO_COORDINATE_FRAME_CAMERA_COLOR:TANGO_COORDINATE_FRAME_CAMERA_FISHEYE;
    ret = TangoService_getPoseAtTime(0.0, frame_pair, &pose_data);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("NativeRTABMap: Failed to get transform between the color camera frame and device frames");
        return false;
    }
    deviceTColorCamera_ = rtabmap::Transform(
            pose_data.translation[0],
            pose_data.translation[1],
            pose_data.translation[2],
            pose_data.orientation[0],
            pose_data.orientation[1],
            pose_data.orientation[2],
            pose_data.orientation[3]);

    // Camera intrinsics
    TangoCameraIntrinsics color_camera_intrinsics;
    ret = TangoService_getCameraIntrinsics(colorCamera_?TANGO_CAMERA_COLOR:TANGO_CAMERA_FISHEYE, &color_camera_intrinsics);
    if (ret != TANGO_SUCCESS)
    {
        LOGE("NativeRTABMap: Failed to get the intrinsics for the color camera with error code: %d.", ret);
        return false;
    }

    LOGD("Calibration: fx=%f fy=%f cx=%f cy=%f width=%d height=%d",
            color_camera_intrinsics.fx,
            color_camera_intrinsics.fy,
            color_camera_intrinsics.cx,
            color_camera_intrinsics.cy,
            color_camera_intrinsics.width,
            color_camera_intrinsics.height);

    cv::Mat K = cv::Mat::eye(3, 3, CV_64FC1);
    K.at<double>(0,0) = color_camera_intrinsics.fx;
    K.at<double>(1,1) = color_camera_intrinsics.fy;
    K.at<double>(0,2) = color_camera_intrinsics.cx;
    K.at<double>(1,2) = color_camera_intrinsics.cy;
    cv::Mat D = cv::Mat::zeros(1, 5, CV_64FC1);
    LOGD("Calibration type = %d", color_camera_intrinsics.calibration_type);
    if(color_camera_intrinsics.calibration_type == TANGO_CALIBRATION_POLYNOMIAL_5_PARAMETERS ||
       color_camera_intrinsics.calibration_type == TANGO_CALIBRATION_EQUIDISTANT)
    {
        D.at<double>(0,0) = color_camera_intrinsics.distortion[0];
        D.at<double>(0,1) = color_camera_intrinsics.distortion[1];
        D.at<double>(0,2) = color_camera_intrinsics.distortion[2];
        D.at<double>(0,3) = color_camera_intrinsics.distortion[3];
        D.at<double>(0,4) = color_camera_intrinsics.distortion[4];
    }
    else if(color_camera_intrinsics.calibration_type == TANGO_CALIBRATION_POLYNOMIAL_3_PARAMETERS)
    {
        // k1, k2, 0, 0, k3 (OpenCV ordering)
        D.at<double>(0,0) = color_camera_intrinsics.distortion[0];
        D.at<double>(0,1) = color_camera_intrinsics.distortion[1];
        D.at<double>(0,2) = 0.;
        D.at<double>(0,3) = 0.;
        D.at<double>(0,4) = color_camera_intrinsics.distortion[2];
    }
    else if(color_camera_intrinsics.calibration_type == TANGO_CALIBRATION_POLYNOMIAL_2_PARAMETERS)
    {
        D.at<double>(0,0) = color_camera_intrinsics.distortion[0];
        D.at<double>(0,1) = color_camera_intrinsics.distortion[1];
        D.at<double>(0,2) = 0.;
        D.at<double>(0,3) = 0.;
        D.at<double>(0,4) = 0.;
    }

    cv::Mat R = cv::Mat::eye(3, 3, CV_64FC1);
    cv::Mat P;

    LOGD("Distortion params: %f, %f, %f, %f, %f", D.at<double>(0,0), D.at<double>(0,1), D.at<double>(0,2), D.at<double>(0,3), D.at<double>(0,4));
    model_ = CameraModel(colorCamera_?"color":"fisheye",
            cv::Size(color_camera_intrinsics.width, color_camera_intrinsics.height),
            K, D, R, P,
            tango_device_T_rtabmap_device.inverse()*deviceTColorCamera_); // device to camera optical rotation in rtabmap frame

    if(!colorCamera_)
    {
        initFisheyeRectificationMap(model_, fisheyeRectifyMapX_, fisheyeRectifyMapY_);
    }

    LOGI("deviceTColorCameraTango =%s", deviceTColorCamera_.prettyPrint().c_str());
    LOGI("deviceTColorCameraRtabmap=%s", (tango_device_T_rtabmap_device.inverse()*deviceTColorCamera_).prettyPrint().c_str());

    cameraStartedTime_.restart();

    return true;
}

void CameraTango::close()
{
    if(tango_config_)
    {
        TangoConfig_free(tango_config_);
        tango_config_ = nullptr;
        LOGI("TangoService_disconnect()");
        TangoService_disconnect();
        LOGI("TangoService_disconnect() done.");
    }
    previousPose_.setNull();
    previousStamp_ = 0.0;
    fisheyeRectifyMapX_ = cv::Mat();
    fisheyeRectifyMapY_ = cv::Mat();
    lastKnownGPS_ = GPS();
    originOffset_ = Transform();
    originUpdate_ = false;
}

void CameraTango::resetOrigin()
{
    originUpdate_ = true;
}

void CameraTango::cloudReceived(const cv::Mat & cloud, double timestamp)
{
    if(this->isRunning() && !cloud.empty())
    {
        //LOGD("Depth received! %fs (%d points)", timestamp, cloud.cols);

        UASSERT(cloud.type() == CV_32FC4);
        boost::mutex::scoped_lock lock(dataMutex_);

        // From post: http://stackoverflow.com/questions/29236110/timing-issues-with-tango-image-frames
        // "In the current version of Project Tango Tablet RGB IR camera
        // is used for both depth and color images and it can only do one
        // or the other for each frame. So in the stream we get 4 RGB frames
        // followed by 1 Depth frame resulting in the pattern you observed. This
        // is more of a hardware limitation."
        //
        // So, synchronize with the last RGB frame received before this depth frame.
        if(!tangoColor_.empty())
        {
            double dt = fabs(timestamp - tangoColorStamp_);

            //LOGD("Depth: %f vs %f = %f", tangoColorStamp_, timestamp, dt);

            if(dt >= 0.0 && dt < 0.5)
            {
                bool notify = cloud_.empty();
                cloud_ = cloud.clone();
                cloudStamp_ = timestamp;
                if(notify)
                {
                    //LOGD("Cloud: Release semaphore");
                    dataReady_.release();
                }
            }
        }
    }
}

void CameraTango::rgbReceived(const cv::Mat & tangoImage, int type, double timestamp)
{
    if(this->isRunning() && !tangoImage.empty())
    {
        //LOGD("RGB received! %fs", timestamp);

        boost::mutex::scoped_lock lock(dataMutex_);

        tangoColor_ = tangoImage.clone();
        tangoColorStamp_ = timestamp;
        tangoColorType_ = type;
    }
}

static rtabmap::Transform opticalRotation(
        1.0f, 0.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, -1.0f, 0.0f);
void CameraTango::poseReceived(const Transform & pose)
{
    if(!pose.isNull())
    {
        // send pose of the camera (without optical rotation), not the device
        Transform p = pose*deviceTColorCamera_*opticalRotation;
        if(originUpdate_)
        {
            originOffset_ = p.translation().inverse();
            originUpdate_ = false;
        }
        if(!originOffset_.isNull())
        {
            this->post(new PoseEvent(originOffset_*p));
        }
        else
        {
            this->post(new PoseEvent(p));
        }
    }
}

void CameraTango::tangoEventReceived(int type, const char * key, const char * value)
{
    this->post(new CameraTangoEvent(type, key, value));
}

bool CameraTango::isCalibrated() const
{
    return model_.isValidForProjection();
}

std::string CameraTango::getSerial() const
{
    return "Tango";
}

void CameraTango::setGPS(const GPS & gps)
{
    lastKnownGPS_ = gps;
}

rtabmap::Transform CameraTango::tangoPoseToTransform(const TangoPoseData * tangoPose) const
{
    UASSERT(tangoPose);
    rtabmap::Transform pose;

    pose = rtabmap::Transform(
            tangoPose->translation[0],
            tangoPose->translation[1],
            tangoPose->translation[2],
            tangoPose->orientation[0],
            tangoPose->orientation[1],
            tangoPose->orientation[2],
            tangoPose->orientation[3]);

    return pose;
}

rtabmap::Transform CameraTango::getPoseAtTimestamp(double timestamp)
{
    rtabmap::Transform pose;

    TangoPoseData pose_start_service_T_device;
    TangoCoordinateFramePair frame_pair;
    frame_pair.base = TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    frame_pair.target = TANGO_COORDINATE_FRAME_DEVICE;
    TangoErrorType status = TangoService_getPoseAtTime(timestamp, frame_pair, &pose_start_service_T_device);
    if (status != TANGO_SUCCESS)
    {
        LOGE(
            "PoseData: Failed to get transform between the Start of service and "
            "device frames at timestamp %lf",
            timestamp);
    }
    if (pose_start_service_T_device.status_code != TANGO_POSE_VALID)
    {
        LOGW(
            "PoseData: Invalid transform between the Start of service and "
            "device frames at timestamp %lf",
            timestamp);
    }
    else
    {
        pose = tangoPoseToTransform(&pose_start_service_T_device);
    }

    return pose;
}

SensorData CameraTango::captureImage(CameraInfo * info)
{
    //LOGI("Capturing image...");

    SensorData data;
    if(!dataReady_.acquire(1, 2000))
    {
        if(this->isRunning())
        {
            LOGE("No frames received in the last 2 seconds, trying to restart the camera.");
            this->post(new CameraTangoEvent(0, "CameraTango", "No frames received in the last 2 seconds."));

            boost::mutex::scoped_lock lock(dataMutex_);
            if(!cloud_.empty() && !tangoColor_.empty())
            {
                UERROR("cloud and image were set!?");
            }
        }
        cloud_ = cv::Mat();
        cloudStamp_ = 0.0;
        tangoColor_ = cv::Mat();
        tangoColorStamp_ = 0.0;
        tangoColorType_ = 0;
    }
    else
    {
        cv::Mat cloud;
        cv::Mat tangoImage;
        cv::Mat rgb;
        double cloudStamp = 0.0;
        double rgbStamp = 0.0;
        int tangoColorType = 0;

        {
            boost::mutex::scoped_lock lock(dataMutex_);
            cloud = cloud_;
            cloudStamp = cloudStamp_;
            cloud_ = cv::Mat();
            cloudStamp_ = 0.0;
            tangoImage = tangoColor_;
            rgbStamp = tangoColorStamp_;
            tangoColorType = tangoColorType_;
            tangoColor_ = cv::Mat();
            tangoColorStamp_ = 0.0;
            tangoColorType_ = 0;
        }

        LOGD("tangoColorType=%d", tangoColorType);
        if(tangoColorType == TANGO_HAL_PIXEL_FORMAT_RGBA_8888)
        {
            cv::cvtColor(tangoImage, rgb, CV_RGBA2BGR);
        }
        else if(tangoColorType == TANGO_HAL_PIXEL_FORMAT_YV12)
        {
            cv::cvtColor(tangoImage, rgb, CV_YUV2BGR_YV12);
        }
        else if(tangoColorType == TANGO_HAL_PIXEL_FORMAT_YCrCb_420_SP)
        {
            cv::cvtColor(tangoImage, rgb, CV_YUV2BGR_NV21);
        }
        else if(tangoColorType == 35) // HAL_PIXEL_FORMAT_YCbCr_420_888: keep luminance only
        {
            cv::cvtColor(tangoImage, rgb, cv::COLOR_YUV420sp2GRAY);
        }
        else
        {
            LOGE("Unsupported color format: %d.", tangoColorType);
            return data;
        }

        //for(int i=0; i<rgb.cols; ++i)
        //{
        //    UERROR("%d,%d,%d", (int)rgb.at<cv::Vec3b>(i)[0], (int)rgb.at<cv::Vec3b>(i)[1], (int)rgb.at<cv::Vec3b>(i)[2]);
        //}

        CameraModel model = model_;

        if(colorCamera_)
        {
            if(decimation_ > 1)
            {
                rgb = util2d::decimate(rgb, decimation_);
                model = model.scaled(1.0/double(decimation_));
            }
        }
        else
        {
            //UTimer t;
            cv::Mat rgbRect;
            cv::remap(rgb, rgbRect, fisheyeRectifyMapX_, fisheyeRectifyMapY_, cv::INTER_LINEAR, cv::BORDER_CONSTANT, 0);
            rgb = rgbRect;
            //LOGD("Rectification time=%fs", t.ticks());
        }

        // Query the depth image's frame transformation based on the depth
        // image's timestamp.
        cv::Mat depth;

        // Calculate the relative pose from the color camera frame at the color
        // timestamp (t1) to the depth camera frame at the depth timestamp (t0).
        Transform colorToDepth;
        TangoPoseData pose_color_image_t1_T_depth_image_t0;
        if (TangoSupport_calculateRelativePose(
                rgbStamp, colorCamera_?TANGO_COORDINATE_FRAME_CAMERA_COLOR:TANGO_COORDINATE_FRAME_CAMERA_FISHEYE,
                cloudStamp, TANGO_COORDINATE_FRAME_CAMERA_DEPTH,
                &pose_color_image_t1_T_depth_image_t0) == TANGO_SUCCESS)
        {
            colorToDepth = tangoPoseToTransform(&pose_color_image_t1_T_depth_image_t0);
        }
        else
        {
            LOGE("SynchronizationApplication: Could not find a valid relative pose "
                 "between the color and depth cameras at the requested time.");
        }

        if(colorToDepth.getNormSquared() > 100000)
        {
            LOGE("Very large color to depth error detected (%s)! Ignoring this frame!", colorToDepth.prettyPrint().c_str());
            colorToDepth.setNull();
        }
        cv::Mat scan;
        if(!colorToDepth.isNull())
        {
            // The Color Camera frame at timestamp t0 with respect to Depth
            // Camera frame at timestamp t1.
            //LOGD("colorToDepth=%s", colorToDepth.prettyPrint().c_str());
            LOGD("rgb=%dx%d cloud size=%d", rgb.cols, rgb.rows, (int)cloud.total());

            int pixelsSet = 0;
            int depthSizeDec = colorCamera_?8:1; // registered depth is 1/8 the color image size
            depth = cv::Mat::zeros(model_.imageHeight()/depthSizeDec, model_.imageWidth()/depthSizeDec, CV_16UC1); // mm
            CameraModel depthModel = model_.scaled(1.0f/float(depthSizeDec));
            std::vector<cv::Point3f> scanData(rawScanPublished_?cloud.total():0);
            int oi = 0;
            int closePoints = 0;
            float closeROI[4];
            closeROI[0] = depth.cols/4;
            closeROI[1] = 3*(depth.cols/4);
            closeROI[2] = depth.rows/4;
            closeROI[3] = 3*(depth.rows/4);
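            // Points projecting inside this central ROI with a depth under
            // 60 cm are counted; more than 100 of them posts a "TooClose"
            // event after the loop.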
            unsigned short minDepthValue = 10000;
            for(unsigned int i=0; i<cloud.total(); ++i)
            {
                float * p = cloud.ptr<float>(0,i);
                cv::Point3f pt = util3d::transformPoint(cv::Point3f(p[0], p[1], p[2]), colorToDepth);

                if(pt.z > 0.0f && i%scanDownsampling == 0 && rawScanPublished_)
                {
                    scanData.at(oi++) = pt;
                }

                int pixel_x_l, pixel_y_l, pixel_x_h, pixel_y_h;
                // Get the coordinate on the image plane; both the floor (_l)
                // and the rounded (_h) pixel are set to limit holes in the
                // projected depth image.
                pixel_x_l = static_cast<int>((depthModel.fx()) * (pt.x / pt.z) + depthModel.cx());
                pixel_y_l = static_cast<int>((depthModel.fy()) * (pt.y / pt.z) + depthModel.cy());
                pixel_x_h = static_cast<int>((depthModel.fx()) * (pt.x / pt.z) + depthModel.cx() + 0.5f);
                pixel_y_h = static_cast<int>((depthModel.fy()) * (pt.y / pt.z) + depthModel.cy() + 0.5f);
                unsigned short depth_value(pt.z * 1000.0f); // mm

                if(pixel_x_l>=closeROI[0] && pixel_x_l<closeROI[1] &&
                   pixel_y_l>closeROI[2] && pixel_y_l<closeROI[3] &&
                   depth_value < 600)
                {
                    ++closePoints;
                    if(depth_value < minDepthValue)
                    {
                        minDepthValue = depth_value;
                    }
                }

                bool pixelSet = false;
                if(pixel_x_l>=0 && pixel_x_l<depth.cols &&
                   pixel_y_l>0 && pixel_y_l<depth.rows && // ignore first line
                   depth_value)
                {
                    unsigned short & depthPixel = depth.at<unsigned short>(pixel_y_l, pixel_x_l);
                    if(depthPixel == 0 || depthPixel > depth_value)
                    {
                        depthPixel = depth_value;
                        pixelSet = true;
                    }
                }
                if(pixel_x_h>=0 && pixel_x_h<depth.cols &&
                   pixel_y_h>0 && pixel_y_h<depth.rows && // ignore first line
                   depth_value)
                {
                    unsigned short & depthPixel = depth.at<unsigned short>(pixel_y_h, pixel_x_h);
                    if(depthPixel == 0 || depthPixel > depth_value)
                    {
                        depthPixel = depth_value;
                        pixelSet = true;
                    }
                }
                if(pixelSet)
                {
                    pixelsSet += 1;
                }
            }

            if(closePoints > 100)
            {
                this->post(new CameraTangoEvent(0, "TooClose", ""));
            }

            if(oi)
            {
                scan = cv::Mat(1, oi, CV_32FC3, scanData.data()).clone();
            }
            //LOGD("pixels depth set= %d", pixelsSet);
        }
        else
        {
            LOGE("color to depth pose is null?!? (rgb stamp=%f) (depth stamp=%f)", rgbStamp, cloudStamp);
        }

        if(!rgb.empty() && !depth.empty())
        {
            depth = rtabmap::util2d::fillDepthHoles(depth, holeSize, maxDepthError);

            Transform poseDevice = getPoseAtTimestamp(rgbStamp);

            // Adjust origin
            if(!originOffset_.isNull())
            {
                poseDevice = originOffset_ * poseDevice;
            }

            //LOGD("Local = %s", model.localTransform().prettyPrint().c_str());
            //LOGD("tango = %s", poseDevice.prettyPrint().c_str());
            //LOGD("opengl(t)= %s", (opengl_world_T_tango_world * poseDevice).prettyPrint().c_str());

            // Rotate into RTAB-Map's coordinate frame
            Transform odom = rtabmap_world_T_tango_world * poseDevice * tango_device_T_rtabmap_device;

            //LOGD("rtabmap = %s", odom.prettyPrint().c_str());
            //LOGD("opengl(r)= %s", (opengl_world_T_rtabmap_world * odom * rtabmap_device_T_opengl_device).prettyPrint().c_str());

            Transform scanLocalTransform = model.localTransform();

            // Rotate image depending on the camera orientation
            if(colorCameraToDisplayRotation_ == ROTATION_90)
            {
                cv::Mat rgbt(rgb.cols, rgb.rows, rgb.type());
                cv::flip(rgb,rgb,1);
                cv::transpose(rgb,rgbt);
                rgb = rgbt;
                cv::Mat deptht(depth.cols, depth.rows, depth.type());
                cv::flip(depth,depth,1);
                cv::transpose(depth,deptht);
                depth = deptht;
                cv::Size sizet(model.imageHeight(), model.imageWidth());
                model = CameraModel(
                        model.fy(),
                        model.fx(),
                        model.cy(),
                        model.cx()>0?model.imageWidth()-model.cx():0,
                        model.localTransform()*rtabmap::Transform(0,0,0,0,0,1.57079632679489661923132169163975144)); // +90 deg around z
                model.setImageSize(sizet);
            }
            else if(colorCameraToDisplayRotation_ == ROTATION_180)
            {
                cv::flip(rgb,rgb,1);
                cv::flip(rgb,rgb,0);
                cv::flip(depth,depth,1);
                cv::flip(depth,depth,0);
                cv::Size sizet(model.imageWidth(), model.imageHeight());
                model = CameraModel(
                        model.fx(),
                        model.fy(),
                        model.cx()>0?model.imageWidth()-model.cx():0,
                        model.cy()>0?model.imageHeight()-model.cy():0,
                        model.localTransform()*rtabmap::Transform(0,0,0,0,0,1.57079632679489661923132169163975144*2.0)); // 180 deg around z
                model.setImageSize(sizet);
            }
            else if(colorCameraToDisplayRotation_ == ROTATION_270)
            {
                cv::Mat rgbt(rgb.cols, rgb.rows, rgb.type());
                cv::transpose(rgb,rgbt);
                cv::flip(rgbt,rgbt,1);
                rgb = rgbt;
                cv::Mat deptht(depth.cols, depth.rows, depth.type());
                cv::transpose(depth,deptht);
                cv::flip(deptht,deptht,1);
                depth = deptht;
                cv::Size sizet(model.imageHeight(), model.imageWidth());
                model = CameraModel(
                        model.fy(),
                        model.fx(),
                        model.cy()>0?model.imageHeight()-model.cy():0,
                        model.cx(),
                        model.localTransform()*rtabmap::Transform(0,0,0,0,0,-1.57079632679489661923132169163975144)); // -90 deg around z
                model.setImageSize(sizet);
            }
884  if(smoothing_)
885  {
886  //UTimer t;
888  data.setDepthOrRightRaw(depth);
889  //LOGD("Bilateral filtering, time=%fs", t.ticks());
890  }
891 
893  {
894  data = SensorData(LaserScan::backwardCompatibility(scan, cloud.total()/scanDownsampling, 0, scanLocalTransform), rgb, depth, model, this->getNextSeqID(), rgbStamp);
895  }
896  else
897  {
898  data = SensorData(rgb, depth, model, this->getNextSeqID(), rgbStamp);
899  }
900  data.setGroundTruth(odom);
901 
902  if(lastKnownGPS_.stamp() > 0.0 && rgbStamp-lastKnownGPS_.stamp()<1.0)
903  {
904  data.setGPS(lastKnownGPS_);
905  }
906  else if(lastKnownGPS_.stamp()>0.0)
907  {
908  LOGD("GPS too old (current time=%f, gps time = %f)", rgbStamp, lastKnownGPS_.stamp());
909  }
910  }
911  else
912  {
913  LOGE("Could not get depth and rgb images!?!");
914  }
915  }
916  return data;
917 
918 }

void CameraTango::mainLoopBegin()
{
    double t = cameraStartedTime_.elapsed();
    if(t < 5.0)
    {
        uSleep((5.0-t)*1000); // make sure that the camera is started before looping
    }
}

void CameraTango::mainLoop()
{
    if(tango_config_)
    {
        SensorData data = this->captureImage();

        if(!data.groundTruth().isNull())
        {
            rtabmap::Transform pose = data.groundTruth();
            data.setGroundTruth(Transform());

            // Convert stamp to epoch
            bool firstFrame = previousPose_.isNull();
            if(firstFrame)
            {
                stampEpochOffset_ = UTimer::now() - data.stamp();
            }
            data.setStamp(stampEpochOffset_ + data.stamp());
            OdometryInfo info;
            if(!firstFrame)
            {
                info.interval = data.stamp()-previousStamp_;
                info.transform = previousPose_.inverse() * pose;
            }
            info.reg.covariance = cv::Mat::eye(6,6,CV_64FC1) * (firstFrame?9999.0:0.0001);
            LOGI("Publish odometry message (variance=%f)", firstFrame?9999.0:0.0001);
            this->post(new OdometryEvent(data, pose, info));
            previousPose_ = pose;
            previousStamp_ = data.stamp();
        }
        else if(!this->isKilled())
        {
            LOGW("Odometry lost");
            this->post(new OdometryEvent());
        }
    }
    else
    {
        UERROR("Camera not initialized, cannot start thread.");
        this->kill();
    }
}

} /* namespace rtabmap */