MsgConversion.cpp — conversion utilities between rtabmap core types and ROS messages (transforms, images, camera models, statistics, keypoints).
1 /*
2 Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7  * Redistributions of source code must retain the above copyright
8  notice, this list of conditions and the following disclaimer.
9  * Redistributions in binary form must reproduce the above copyright
10  notice, this list of conditions and the following disclaimer in the
11  documentation and/or other materials provided with the distribution.
12  * Neither the name of the Universite de Sherbrooke nor the
13  names of its contributors may be used to endorse or promote products
14  derived from this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
20 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27 
29 
30 #include <opencv2/highgui/highgui.hpp>
31 #include <zlib.h>
32 #include <ros/ros.h>
33 #include <rtabmap/core/util3d.h>
37 #include <rtabmap/utilite/UStl.h>
39 #include <rtabmap/utilite/UTimer.h>
49 
50 namespace rtabmap_ros {
51 
52 void transformToTF(const rtabmap::Transform & transform, tf::Transform & tfTransform)
53 {
54  if(!transform.isNull())
55  {
56  tf::transformEigenToTF(transform.toEigen3d(), tfTransform);
57  }
58  else
59  {
60  tfTransform = tf::Transform();
61  }
62 }
63 
65 {
66  Eigen::Affine3d eigenTf;
67  tf::transformTFToEigen(transform, eigenTf);
68  return rtabmap::Transform::fromEigen3d(eigenTf);
69 }
70 
71 void transformToGeometryMsg(const rtabmap::Transform & transform, geometry_msgs::Transform & msg)
72 {
73  if(!transform.isNull())
74  {
75  tf::transformEigenToMsg(transform.toEigen3d(), msg);
76 
77  // make sure the quaternion is normalized
78  long double recipNorm = 1.0 / sqrt(msg.rotation.x * msg.rotation.x + msg.rotation.y * msg.rotation.y + msg.rotation.z * msg.rotation.z + msg.rotation.w * msg.rotation.w);
79  msg.rotation.x *= recipNorm;
80  msg.rotation.y *= recipNorm;
81  msg.rotation.z *= recipNorm;
82  msg.rotation.w *= recipNorm;
83  }
84  else
85  {
86  msg = geometry_msgs::Transform();
87  }
88 }
89 
90 
91 rtabmap::Transform transformFromGeometryMsg(const geometry_msgs::Transform & msg)
92 {
93  if(msg.rotation.w == 0 &&
94  msg.rotation.x == 0 &&
95  msg.rotation.y == 0 &&
96  msg.rotation.z ==0)
97  {
98  return rtabmap::Transform();
99  }
100 
101  Eigen::Affine3d tfTransform;
102  tf::transformMsgToEigen(msg, tfTransform);
103  return rtabmap::Transform::fromEigen3d(tfTransform);
104 }
105 
106 void transformToPoseMsg(const rtabmap::Transform & transform, geometry_msgs::Pose & msg)
107 {
108  if(!transform.isNull())
109  {
110  tf::poseEigenToMsg(transform.toEigen3d(), msg);
111  }
112  else
113  {
114  msg = geometry_msgs::Pose();
115  }
116 }
117 
118 rtabmap::Transform transformFromPoseMsg(const geometry_msgs::Pose & msg, bool ignoreRotationIfNotSet)
119 {
120  if(msg.orientation.w == 0 &&
121  msg.orientation.x == 0 &&
122  msg.orientation.y == 0 &&
123  msg.orientation.z == 0)
124  {
125  if(ignoreRotationIfNotSet)
126  {
127  return rtabmap::Transform(msg.position.x, msg.position.y, msg.position.z, 0, 0, 0);
128  }
129  return rtabmap::Transform();
130  }
131  Eigen::Affine3d tfPose;
132  tf::poseMsgToEigen(msg, tfPose);
133  return rtabmap::Transform::fromEigen3d(tfPose);
134 }
135 
136 void toCvCopy(const rtabmap_ros::RGBDImage & image, cv_bridge::CvImagePtr & rgb, cv_bridge::CvImagePtr & depth)
137 {
138  if(!image.rgb.data.empty())
139  {
140  rgb = cv_bridge::toCvCopy(image.rgb);
141  }
142  else if(!image.rgb_compressed.data.empty())
143  {
144 #ifdef CV_BRIDGE_HYDRO
145  ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");
146 #else
147  rgb = cv_bridge::toCvCopy(image.rgb_compressed);
148 #endif
149  }
150 
151  if(!image.depth.data.empty())
152  {
153  depth = cv_bridge::toCvCopy(image.depth);
154  }
155  else if(!image.depth_compressed.data.empty())
156  {
157  cv_bridge::CvImagePtr ptr = boost::make_shared<cv_bridge::CvImage>();
158  ptr->header = image.depth_compressed.header;
159  ptr->image = rtabmap::uncompressImage(image.depth_compressed.data);
160  ROS_ASSERT(ptr->image.empty() || ptr->image.type() == CV_32FC1 || ptr->image.type() == CV_16UC1);
161  ptr->encoding = ptr->image.empty()?"":ptr->image.type() == CV_32FC1?sensor_msgs::image_encodings::TYPE_32FC1:sensor_msgs::image_encodings::TYPE_16UC1;
162  depth = ptr;
163  }
164 }
165 
// Convenience overload: share buffers with the message pointed to by "image",
// using the shared pointer itself as the tracked object that keeps the
// underlying data alive while the CvImage views are in use.
void toCvShare(const rtabmap_ros::RGBDImageConstPtr & image, cv_bridge::CvImageConstPtr & rgb, cv_bridge::CvImageConstPtr & depth)
{
	toCvShare(*image, image, rgb, depth);
}
170 
171 void toCvShare(const rtabmap_ros::RGBDImage & image, const boost::shared_ptr<void const>& trackedObject, cv_bridge::CvImageConstPtr & rgb, cv_bridge::CvImageConstPtr & depth)
172 {
173  if(!image.rgb.data.empty())
174  {
175  rgb = cv_bridge::toCvShare(image.rgb, trackedObject);
176  }
177  else if(!image.rgb_compressed.data.empty())
178  {
179 #ifdef CV_BRIDGE_HYDRO
180  ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");
181 #else
182  rgb = cv_bridge::toCvCopy(image.rgb_compressed);
183 #endif
184  }
185 
186  if(!image.depth.data.empty())
187  {
188  depth = cv_bridge::toCvShare(image.depth, trackedObject);
189  }
190  else if(!image.depth_compressed.data.empty())
191  {
192  if(image.depth_compressed.format.compare("jpg")==0)
193  {
194 #ifdef CV_BRIDGE_HYDRO
195  ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");
196 #else
197  depth = cv_bridge::toCvCopy(image.depth_compressed);
198 #endif
199  }
200  else
201  {
202  cv_bridge::CvImagePtr ptr = boost::make_shared<cv_bridge::CvImage>();
203  ptr->header = image.depth_compressed.header;
204  ptr->image = rtabmap::uncompressImage(image.depth_compressed.data);
205  ROS_ASSERT(ptr->image.empty() || ptr->image.type() == CV_32FC1 || ptr->image.type() == CV_16UC1);
206  ptr->encoding = ptr->image.empty()?"":ptr->image.type() == CV_32FC1?sensor_msgs::image_encodings::TYPE_32FC1:sensor_msgs::image_encodings::TYPE_16UC1;
207  depth = ptr;
208  }
209  }
210 }
211 
212 void rgbdImageToROS(const rtabmap::SensorData & data, rtabmap_ros::RGBDImage & msg, const std::string & sensorFrameId)
213 {
214  std_msgs::Header header;
215  header.frame_id = sensorFrameId;
216  header.stamp = ros::Time(data.stamp());
217  rtabmap::Transform localTransform;
218  if(data.cameraModels().size()>1)
219  {
220  UERROR("Cannot convert multi-camera data to rgbd image");
221  return;
222  }
223  if(data.cameraModels().size() == 1)
224  {
225  //rgb+depth
226  rtabmap_ros::cameraModelToROS(data.cameraModels().front(), msg.rgb_camera_info);
227  msg.rgb_camera_info.header = header;
228  localTransform = data.cameraModels().front().localTransform();
229  }
230  else if(data.stereoCameraModels().size() == 1)
231  {
232  //stereo
233  rtabmap_ros::cameraModelToROS(data.stereoCameraModels()[0].left(), msg.rgb_camera_info);
234  rtabmap_ros::cameraModelToROS(data.stereoCameraModels()[0].right(), msg.depth_camera_info);
235  msg.rgb_camera_info.header = header;
236  msg.depth_camera_info.header = header;
237  localTransform = data.stereoCameraModels()[0].localTransform();
238  }
239 
240  if(!data.imageRaw().empty())
241  {
242  cv_bridge::CvImage cvImg;
243  cvImg.header = header;
244  cvImg.image = data.imageRaw();
245  UASSERT(data.imageRaw().type()==CV_8UC1 || data.imageRaw().type()==CV_8UC3);
247  cvImg.toImageMsg(msg.rgb);
248  }
249  else if(!data.imageCompressed().empty())
250  {
251  ROS_ERROR("Conversion of compressed SensorData to RGBDImage is not implemented...");
252  }
253 
254  if(!data.depthOrRightRaw().empty())
255  {
256  cv_bridge::CvImage cvDepth;
257  cvDepth.header = header;
258  cvDepth.image = data.depthOrRightRaw();
259  UASSERT(data.depthOrRightRaw().type()==CV_8UC1 || data.depthOrRightRaw().type()==CV_16UC1 || data.depthOrRightRaw().type()==CV_32FC1);
261  cvDepth.toImageMsg(msg.depth);
262  }
263  else if(!data.depthOrRightCompressed().empty())
264  {
265  ROS_ERROR("Conversion of compressed SensorData to RGBDImage is not implemented...");
266  }
267 
268  //convert features
269  if(!data.keypoints().empty())
270  {
271  rtabmap_ros::keypointsToROS(data.keypoints(), msg.key_points);
272  }
273  if(!data.keypoints3D().empty())
274  {
275  rtabmap_ros::points3fToROS(data.keypoints3D(), msg.points, localTransform.inverse());
276  }
277  if(!data.descriptors().empty())
278  {
279  msg.descriptors = rtabmap::compressData(data.descriptors());
280  }
281  if(!data.globalDescriptors().empty())
282  {
283  rtabmap_ros::globalDescriptorToROS(data.globalDescriptors().front(), msg.global_descriptor);
284  msg.global_descriptor.header = header;
285  }
286 }
287 
288 rtabmap::SensorData rgbdImageFromROS(const rtabmap_ros::RGBDImageConstPtr & image)
289 {
293  toCvShare(image, imageMsg, depthMsg);
294 
295  rtabmap::StereoCameraModel stereoModel = stereoCameraModelFromROS(image->rgb_camera_info, image->depth_camera_info, rtabmap::Transform::getIdentity());
296 
297  if(stereoModel.isValidForProjection())
298  {
299  cv_bridge::CvImageConstPtr imageRectLeft = imageMsg;
300  cv_bridge::CvImageConstPtr imageRectRight = depthMsg;
301  if(!(imageRectLeft->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) ==0 ||
302  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
303  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
304  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
305  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0) ||
306  !(imageRectRight->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) ==0 ||
307  imageRectRight->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
308  imageRectRight->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
309  imageRectRight->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
310  imageRectRight->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0))
311  {
312  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8 (mono8 recommended), received types are %s (left) and %s (right)",
313  imageRectLeft->encoding.c_str(), imageRectRight->encoding.c_str());
314  return data;
315  }
316 
317  if(!imageRectLeft->image.empty() && !imageRectRight->image.empty())
318  {
319  if(stereoModel.baseline() > 10.0)
320  {
321  static bool shown = false;
322  if(!shown)
323  {
324  ROS_WARN("Detected baseline (%f m) is quite large! Is your "
325  "right camera_info P(0,3) correctly set? Note that "
326  "baseline=-P(0,3)/P(0,0). This warning is printed only once.",
327  stereoModel.baseline());
328  shown = true;
329  }
330  }
331 
332  cv::Mat left, right;
333  if(imageRectLeft->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
334  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0)
335  {
336  left = imageRectLeft->image;
337  }
338  else if(imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
339  {
340  left = cv_bridge::cvtColor(imageRectLeft, "mono8")->image;
341  }
342  else
343  {
344  left = cv_bridge::cvtColor(imageRectLeft, "bgr8")->image;
345  }
346  if(imageRectRight->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
347  imageRectRight->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0)
348  {
349  right = imageRectRight->image;
350  }
351  else
352  {
353  right = cv_bridge::cvtColor(imageRectRight, "mono8")->image;
354  }
355 
356  //
357 
358  data = rtabmap::SensorData(
359  left,
360  right,
361  stereoModel,
362  0,
363  rtabmap_ros::timestampFromROS(image->header.stamp));
364  }
365  else
366  {
367  ROS_WARN("Odom: input images empty?!?");
368  }
369  }
370  else //depth
371  {
372  ros::Time higherStamp;
373  int imageWidth = imageMsg->image.cols;
374  int imageHeight = imageMsg->image.rows;
375  int depthWidth = depthMsg->image.cols;
376  int depthHeight = depthMsg->image.rows;
377 
378  UASSERT_MSG(
379  imageWidth/depthWidth == imageHeight/depthHeight,
380  uFormat("rgb=%dx%d depth=%dx%d", imageWidth, imageHeight, depthWidth, depthHeight).c_str());
381 
382  cv::Mat rgb;
383  cv::Mat depth;
384  rtabmap::CameraModel cameraModels;
385 
386  if(!(imageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) ==0 ||
387  imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
388  imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
389  imageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
390  imageMsg->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
391  imageMsg->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
392  imageMsg->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0 ||
393  imageMsg->encoding.compare(sensor_msgs::image_encodings::BAYER_GRBG8) == 0) ||
394  !(depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1) == 0 ||
395  depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1) == 0 ||
396  depthMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0))
397  {
398  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8 and "
399  "image_depth=32FC1,16UC1,mono16. Current rgb=%s and depth=%s",
400  imageMsg->encoding.c_str(),
401  depthMsg->encoding.c_str());
402  return data;
403  }
404 
405  cv_bridge::CvImageConstPtr ptrImage = imageMsg;
406  if(imageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1)==0 ||
407  imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
408  imageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0)
409  {
410  // do nothing
411  }
412  else if(imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
413  {
414  ptrImage = cv_bridge::cvtColor(imageMsg, "mono8");
415  }
416  else
417  {
418  ptrImage = cv_bridge::cvtColor(imageMsg, "bgr8");
419  }
420 
421  cv_bridge::CvImageConstPtr ptrDepth = depthMsg;
422  data = rtabmap::SensorData(
423  ptrImage->image,
424  ptrDepth->image,
425  rtabmap_ros::cameraModelFromROS(image->rgb_camera_info),
426  0,
427  rtabmap_ros::timestampFromROS(image->header.stamp));
428  }
429 
430  return data;
431 }
432 
433 void compressedMatToBytes(const cv::Mat & compressed, std::vector<unsigned char> & bytes)
434 {
435  UASSERT(compressed.empty() || compressed.type() == CV_8UC1);
436  bytes.clear();
437  if(!compressed.empty())
438  {
439  bytes.resize(compressed.cols * compressed.rows);
440  memcpy(bytes.data(), compressed.data, bytes.size());
441  }
442 }
443 
444 cv::Mat compressedMatFromBytes(const std::vector<unsigned char> & bytes, bool copy)
445 {
446  cv::Mat out;
447  if(bytes.size())
448  {
449  out = cv::Mat(1, bytes.size(), CV_8UC1, (void*)bytes.data());
450  if(copy)
451  {
452  out = out.clone();
453  }
454  }
455  return out;
456 }
457 
458 void infoFromROS(const rtabmap_ros::Info & info, rtabmap::Statistics & stat)
459 {
460  stat.setExtended(true); // Extended
461 
462  // rtabmap_ros::Info
463  stat.setRefImageId(info.refId);
464  stat.setLoopClosureId(info.loopClosureId);
465  stat.setProximityDetectionId(info.proximityDetectionId);
466  stat.setStamp(info.header.stamp.toSec());
467 
468  stat.setLoopClosureTransform(rtabmap_ros::transformFromGeometryMsg(info.loopClosureTransform));
469 
470  //wmState
471  stat.setWmState(info.wmState);
472 
473  //Posterior, likelihood, childCount
474  std::map<int, float> mapIntFloat;
475  for(unsigned int i=0; i<info.posteriorKeys.size() && i<info.posteriorValues.size(); ++i)
476  {
477  mapIntFloat.insert(std::pair<int, float>(info.posteriorKeys.at(i), info.posteriorValues.at(i)));
478  }
479  stat.setPosterior(mapIntFloat);
480 
481  mapIntFloat.clear();
482  for(unsigned int i=0; i<info.likelihoodKeys.size() && i<info.likelihoodValues.size(); ++i)
483  {
484  mapIntFloat.insert(std::pair<int, float>(info.likelihoodKeys.at(i), info.likelihoodValues.at(i)));
485  }
486  stat.setLikelihood(mapIntFloat);
487 
488  mapIntFloat.clear();
489  for(unsigned int i=0; i<info.rawLikelihoodKeys.size() && i<info.rawLikelihoodValues.size(); ++i)
490  {
491  mapIntFloat.insert(std::pair<int, float>(info.rawLikelihoodKeys.at(i), info.rawLikelihoodValues.at(i)));
492  }
493  stat.setRawLikelihood(mapIntFloat);
494 
495  std::map<int, int> mapIntInt;
496  for(unsigned int i=0; i<info.weightsKeys.size() && i<info.weightsValues.size(); ++i)
497  {
498  mapIntInt.insert(std::pair<int, int>(info.weightsKeys.at(i), info.weightsValues.at(i)));
499  }
500  stat.setWeights(mapIntInt);
501 
502  std::map<int, std::string> mapIntStr;
503  for(unsigned int i=0; i<info.labelsKeys.size() && i<info.labelsValues.size(); ++i)
504  {
505  mapIntStr.insert(std::pair<int, std::string>(info.labelsKeys.at(i), info.labelsValues.at(i)));
506  }
507  stat.setLabels(mapIntStr);
508 
509  stat.setLocalPath(info.localPath);
510  stat.setCurrentGoalId(info.currentGoalId);
511 
512  std::map<int, rtabmap::Transform> poses;
513  std::multimap<int, rtabmap::Link> constraints;
515  mapGraphFromROS(info.odom_cache, poses, constraints, t);
516  stat.setOdomCachePoses(poses);
517  stat.setOdomCacheConstraints(constraints);
518 
519  // Statistics data
520  for(unsigned int i=0; i<info.statsKeys.size() && i<info.statsValues.size(); i++)
521  {
522  stat.addStatistic(info.statsKeys.at(i), info.statsValues.at(i));
523  }
524 }
525 
526 void infoToROS(const rtabmap::Statistics & stats, rtabmap_ros::Info & info)
527 {
528  info.refId = stats.refImageId();
529  info.loopClosureId = stats.loopClosureId();
530  info.proximityDetectionId = stats.proximityDetectionId();
531  info.landmarkId = static_cast<int>(uValue(stats.data(), rtabmap::Statistics::kLoopLandmark_detected(), 0.0f));
532 
533  rtabmap_ros::transformToGeometryMsg(stats.loopClosureTransform(), info.loopClosureTransform);
534 
535  // Detailed info
536  if(stats.extended())
537  {
538  //wmState
539  info.wmState = stats.wmState();
540 
541  //Posterior, likelihood, childCount
542  info.posteriorKeys = uKeys(stats.posterior());
543  info.posteriorValues = uValues(stats.posterior());
544  info.likelihoodKeys = uKeys(stats.likelihood());
545  info.likelihoodValues = uValues(stats.likelihood());
546  info.rawLikelihoodKeys = uKeys(stats.rawLikelihood());
547  info.rawLikelihoodValues = uValues(stats.rawLikelihood());
548  info.weightsKeys = uKeys(stats.weights());
549  info.weightsValues = uValues(stats.weights());
550  info.labelsKeys = uKeys(stats.labels());
551  info.labelsValues = uValues(stats.labels());
552  info.localPath = stats.localPath();
553  info.currentGoalId = stats.currentGoalId();
554  mapGraphToROS(stats.odomCachePoses(), stats.odomCacheConstraints(), stats.mapCorrection(), info.odom_cache);
555 
556  // Statistics data
557  info.statsKeys = uKeys(stats.data());
558  info.statsValues = uValues(stats.data());
559  }
560 }
561 
562 rtabmap::Link linkFromROS(const rtabmap_ros::Link & msg)
563 {
564  cv::Mat information = cv::Mat(6,6,CV_64FC1, (void*)msg.information.data()).clone();
565  return rtabmap::Link(msg.fromId, msg.toId, (rtabmap::Link::Type)msg.type, transformFromGeometryMsg(msg.transform), information);
566 }
567 
568 void linkToROS(const rtabmap::Link & link, rtabmap_ros::Link & msg)
569 {
570  msg.fromId = link.from();
571  msg.toId = link.to();
572  msg.type = link.type();
573  if(link.infMatrix().type() == CV_64FC1 && link.infMatrix().cols == 6 && link.infMatrix().rows == 6)
574  {
575  memcpy(msg.information.data(), link.infMatrix().data, 36*sizeof(double));
576  }
577  transformToGeometryMsg(link.transform(), msg.transform);
578 }
579 
580 cv::KeyPoint keypointFromROS(const rtabmap_ros::KeyPoint & msg)
581 {
582  return cv::KeyPoint(msg.pt.x, msg.pt.y, msg.size, msg.angle, msg.response, msg.octave, msg.class_id);
583 }
584 
585 void keypointToROS(const cv::KeyPoint & kpt, rtabmap_ros::KeyPoint & msg)
586 {
587  msg.angle = kpt.angle;
588  msg.class_id = kpt.class_id;
589  msg.octave = kpt.octave;
590  msg.pt.x = kpt.pt.x;
591  msg.pt.y = kpt.pt.y;
592  msg.response = kpt.response;
593  msg.size = kpt.size;
594 }
595 
596 std::vector<cv::KeyPoint> keypointsFromROS(const std::vector<rtabmap_ros::KeyPoint> & msg)
597 {
598  std::vector<cv::KeyPoint> v(msg.size());
599  for(unsigned int i=0; i<msg.size(); ++i)
600  {
601  v[i] = keypointFromROS(msg[i]);
602  }
603  return v;
604 }
605 
606 void keypointsFromROS(const std::vector<rtabmap_ros::KeyPoint> & msg, std::vector<cv::KeyPoint> & kpts, int xShift)
607 {
608  size_t outCurrentIndex = kpts.size();
609  kpts.resize(kpts.size()+msg.size());
610  for(unsigned int i=0; i<msg.size(); ++i)
611  {
612  kpts[outCurrentIndex+i] = keypointFromROS(msg[i]);
613  kpts[outCurrentIndex+i].pt.x += xShift;
614  }
615 }
616 
617 void keypointsToROS(const std::vector<cv::KeyPoint> & kpts, std::vector<rtabmap_ros::KeyPoint> & msg)
618 {
619  msg.resize(kpts.size());
620  for(unsigned int i=0; i<msg.size(); ++i)
621  {
622  keypointToROS(kpts[i], msg[i]);
623  }
624 }
625 
// Deserialize a GlobalDescriptor message; the data and info payloads are
// stored compressed in the message and are uncompressed here.
rtabmap::GlobalDescriptor globalDescriptorFromROS(const rtabmap_ros::GlobalDescriptor & msg)
{
	return rtabmap::GlobalDescriptor(msg.type, rtabmap::uncompressData(msg.data), rtabmap::uncompressData(msg.info));
}
630 
631 void globalDescriptorToROS(const rtabmap::GlobalDescriptor & desc, rtabmap_ros::GlobalDescriptor & msg)
632 {
633  msg.type = desc.type();
634  msg.info = rtabmap::compressData(desc.info());
635  msg.data = rtabmap::compressData(desc.data());
636 }
637 
638 std::vector<rtabmap::GlobalDescriptor> globalDescriptorsFromROS(const std::vector<rtabmap_ros::GlobalDescriptor> & msg)
639 {
640  if(!msg.empty())
641  {
642  std::vector<rtabmap::GlobalDescriptor> v(msg.size());
643  for(unsigned int i=0; i<msg.size(); ++i)
644  {
645  v[i] = globalDescriptorFromROS(msg[i]);
646  }
647  return v;
648  }
649  return std::vector<rtabmap::GlobalDescriptor>();
650 }
651 
652 void globalDescriptorsToROS(const std::vector<rtabmap::GlobalDescriptor> & desc, std::vector<rtabmap_ros::GlobalDescriptor> & msg)
653 {
654  msg.clear();
655  if(!desc.empty())
656  {
657  msg.resize(desc.size());
658  for(unsigned int i=0; i<msg.size(); ++i)
659  {
660  globalDescriptorToROS(desc[i], msg[i]);
661  }
662  }
663 }
664 
// Deserialize an EnvSensor message (environment sensor reading: type,
// scalar value, and timestamp).
rtabmap::EnvSensor envSensorFromROS(const rtabmap_ros::EnvSensor & msg)
{
	return rtabmap::EnvSensor((rtabmap::EnvSensor::Type)msg.type, msg.value, timestampFromROS(msg.header.stamp));
}
669 
670 void envSensorToROS(const rtabmap::EnvSensor & sensor, rtabmap_ros::EnvSensor & msg)
671 {
672  msg.type = sensor.type();
673  msg.value = sensor.value();
674  msg.header.stamp = ros::Time(sensor.stamp());
675 }
676 
677 rtabmap::EnvSensors envSensorsFromROS(const std::vector<rtabmap_ros::EnvSensor> & msg)
678 {
680  if(!msg.empty())
681  {
682  for(unsigned int i=0; i<msg.size(); ++i)
683  {
685  v.insert(std::make_pair(s.type(), envSensorFromROS(msg[i])));
686  }
687  }
688  return v;
689 }
690 
691 void envSensorsToROS(const rtabmap::EnvSensors & sensors, std::vector<rtabmap_ros::EnvSensor> & msg)
692 {
693  msg.clear();
694  if(!sensors.empty())
695  {
696  msg.resize(sensors.size());
697  int i=0;
698  for(rtabmap::EnvSensors::const_iterator iter=sensors.begin(); iter!=sensors.end(); ++iter)
699  {
700  envSensorToROS(iter->second, msg[i++]);
701  }
702  }
703 }
704 
// Deserialize a Point2f message into a cv::Point2f.
cv::Point2f point2fFromROS(const rtabmap_ros::Point2f & msg)
{
	return cv::Point2f(msg.x, msg.y);
}
709 
710 void point2fToROS(const cv::Point2f & kpt, rtabmap_ros::Point2f & msg)
711 {
712  msg.x = kpt.x;
713  msg.y = kpt.y;
714 }
715 
716 std::vector<cv::Point2f> points2fFromROS(const std::vector<rtabmap_ros::Point2f> & msg)
717 {
718  std::vector<cv::Point2f> v(msg.size());
719  for(unsigned int i=0; i<msg.size(); ++i)
720  {
721  v[i] = point2fFromROS(msg[i]);
722  }
723  return v;
724 }
725 
726 void points2fToROS(const std::vector<cv::Point2f> & kpts, std::vector<rtabmap_ros::Point2f> & msg)
727 {
728  msg.resize(kpts.size());
729  for(unsigned int i=0; i<msg.size(); ++i)
730  {
731  point2fToROS(kpts[i], msg[i]);
732  }
733 }
734 
// Deserialize a Point3f message into a cv::Point3f.
cv::Point3f point3fFromROS(const rtabmap_ros::Point3f & msg)
{
	return cv::Point3f(msg.x, msg.y, msg.z);
}
739 
740 void point3fToROS(const cv::Point3f & pt, rtabmap_ros::Point3f & msg)
741 {
742  msg.x = pt.x;
743  msg.y = pt.y;
744  msg.z = pt.z;
745 }
746 
747 std::vector<cv::Point3f> points3fFromROS(const std::vector<rtabmap_ros::Point3f> & msg, const rtabmap::Transform & transform)
748 {
749  bool transformPoints = !transform.isNull() && !transform.isIdentity();
750  std::vector<cv::Point3f> v(msg.size());
751  for(unsigned int i=0; i<msg.size(); ++i)
752  {
753  v[i] = point3fFromROS(msg[i]);
754  if(transformPoints)
755  {
756  v[i] = rtabmap::util3d::transformPoint(v[i], transform);
757  }
758  }
759  return v;
760 }
761 
762 void points3fFromROS(const std::vector<rtabmap_ros::Point3f> & msg, std::vector<cv::Point3f> & points3, const rtabmap::Transform & transform)
763 {
764  size_t currentIndex = points3.size();
765  points3.resize(points3.size()+msg.size());
766  bool transformPoint = !transform.isNull() && !transform.isIdentity();
767  for(unsigned int i=0; i<msg.size(); ++i)
768  {
769  points3[currentIndex+i] = point3fFromROS(msg[i]);
770  if(transformPoint)
771  {
772  points3[currentIndex+i] = rtabmap::util3d::transformPoint(points3[currentIndex+i], transform);
773  }
774  }
775 }
776 
777 void points3fToROS(const std::vector<cv::Point3f> & pts, std::vector<rtabmap_ros::Point3f> & msg, const rtabmap::Transform & transform)
778 {
779  msg.resize(pts.size());
780  bool transformPoints = !transform.isNull() && !transform.isIdentity();
781  for(unsigned int i=0; i<msg.size(); ++i)
782  {
783  if(transformPoints)
784  {
785  cv::Point3f pt = rtabmap::util3d::transformPoint(pts[i], transform);
786  point3fToROS(pt, msg[i]);
787  }
788  else
789  {
790  point3fToROS(pts[i], msg[i]);
791  }
792  }
793 }
794 
796  const sensor_msgs::CameraInfo & camInfo,
797  const rtabmap::Transform & localTransform)
798 {
799  cv:: Mat K;
800  UASSERT(camInfo.K.empty() || camInfo.K.size() == 9);
801  if(!camInfo.K.empty())
802  {
803  K = cv::Mat(3, 3, CV_64FC1);
804  memcpy(K.data, camInfo.K.elems, 9*sizeof(double));
805  }
806 
807  cv::Mat D;
808  if(camInfo.D.size())
809  {
810  if(camInfo.D.size()>=4 &&
811  (uStrContains(camInfo.distortion_model, "fisheye") ||
812  uStrContains(camInfo.distortion_model, "equidistant") ||
813  uStrContains(camInfo.distortion_model, "Kannala Brandt4")))
814  {
815  D = cv::Mat::zeros(1, 6, CV_64FC1);
816  D.at<double>(0,0) = camInfo.D[0];
817  D.at<double>(0,1) = camInfo.D[1];
818  D.at<double>(0,4) = camInfo.D[2];
819  D.at<double>(0,5) = camInfo.D[3];
820  }
821  else if(camInfo.D.size()>8)
822  {
823  bool zerosAfter8 = true;
824  for(size_t i=8; i<camInfo.D.size() && zerosAfter8; ++i)
825  {
826  if(camInfo.D[i] != 0.0)
827  {
828  zerosAfter8 = false;
829  }
830  }
831  static bool warned = false;
832  if(!zerosAfter8 && !warned)
833  {
834  ROS_WARN("Camera info conversion: Distortion model is larger than 8, coefficients after 8 are ignored. This message is only shown once.");
835  warned = true;
836  }
837  D = cv::Mat(1, 8, CV_64FC1);
838  memcpy(D.data, camInfo.D.data(), D.cols*sizeof(double));
839  }
840  else
841  {
842  D = cv::Mat(1, camInfo.D.size(), CV_64FC1);
843  memcpy(D.data, camInfo.D.data(), D.cols*sizeof(double));
844  }
845  }
846 
847  cv:: Mat R;
848  UASSERT(camInfo.R.empty() || camInfo.R.size() == 9);
849  if(!camInfo.R.empty())
850  {
851  R = cv::Mat(3, 3, CV_64FC1);
852  memcpy(R.data, camInfo.R.elems, 9*sizeof(double));
853  }
854 
855  cv:: Mat P;
856  UASSERT(camInfo.P.empty() || camInfo.P.size() == 12);
857  if(!camInfo.P.empty())
858  {
859  P = cv::Mat(3, 4, CV_64FC1);
860  memcpy(P.data, camInfo.P.elems, 12*sizeof(double));
861  }
862 
863  return rtabmap::CameraModel(
864  "ros",
865  cv::Size(camInfo.width, camInfo.height),
866  K, D, R, P,
867  localTransform);
868 }
870  const rtabmap::CameraModel & model,
871  sensor_msgs::CameraInfo & camInfo)
872 {
873  UASSERT(model.K_raw().empty() || model.K_raw().total() == 9);
874  if(model.K_raw().empty())
875  {
876  memset(camInfo.K.elems, 0.0, 9*sizeof(double));
877  }
878  else
879  {
880  memcpy(camInfo.K.elems, model.K_raw().data, 9*sizeof(double));
881  }
882 
883  if(model.D_raw().total() == 6)
884  {
885  camInfo.D = std::vector<double>(4);
886  camInfo.D[0] = model.D_raw().at<double>(0,0);
887  camInfo.D[1] = model.D_raw().at<double>(0,1);
888  camInfo.D[2] = model.D_raw().at<double>(0,4);
889  camInfo.D[3] = model.D_raw().at<double>(0,5);
890  camInfo.distortion_model = "equidistant"; // fisheye
891  }
892  else
893  {
894  camInfo.D = std::vector<double>(model.D_raw().cols);
895  memcpy(camInfo.D.data(), model.D_raw().data, model.D_raw().cols*sizeof(double));
896  if(camInfo.D.size() > 5)
897  {
898  camInfo.distortion_model = "rational_polynomial";
899  }
900  else
901  {
902  camInfo.distortion_model = "plumb_bob";
903  }
904  }
905 
906  UASSERT(model.R().empty() || model.R().total() == 9);
907  if(model.R().empty())
908  {
909  memset(camInfo.R.elems, 0.0, 9*sizeof(double));
910  }
911  else
912  {
913  memcpy(camInfo.R.elems, model.R().data, 9*sizeof(double));
914  }
915 
916  UASSERT(model.P().empty() || model.P().total() == 12);
917  if(model.P().empty())
918  {
919  memset(camInfo.P.elems, 0.0, 12*sizeof(double));
920  }
921  else
922  {
923  memcpy(camInfo.P.elems, model.P().data, 12*sizeof(double));
924  }
925 
926  camInfo.binning_x = 1;
927  camInfo.binning_y = 1;
928  camInfo.roi.width = model.imageWidth();
929  camInfo.roi.height = model.imageHeight();
930 
931  camInfo.width = model.imageWidth();
932  camInfo.height = model.imageHeight();
933 }
935  const sensor_msgs::CameraInfo & leftCamInfo,
936  const sensor_msgs::CameraInfo & rightCamInfo,
937  const rtabmap::Transform & localTransform,
938  const rtabmap::Transform & stereoTransform)
939 {
941  "ros",
942  cameraModelFromROS(leftCamInfo, localTransform),
943  cameraModelFromROS(rightCamInfo, localTransform),
944  stereoTransform);
945 }
947  const sensor_msgs::CameraInfo & leftCamInfo,
948  const sensor_msgs::CameraInfo & rightCamInfo,
949  const std::string & frameId,
951  double waitForTransform)
952 {
953  rtabmap::Transform localTransform = getTransform(
954  frameId,
955  leftCamInfo.header.frame_id,
956  leftCamInfo.header.stamp,
957  listener,
958  waitForTransform);
959  if(localTransform.isNull())
960  {
962  }
963 
964  rtabmap::Transform stereoTransform = getTransform(
965  leftCamInfo.header.frame_id,
966  rightCamInfo.header.frame_id,
967  leftCamInfo.header.stamp,
968  listener,
969  waitForTransform);
970  if(stereoTransform.isNull())
971  {
973  }
974  return stereoCameraModelFromROS(leftCamInfo, rightCamInfo, localTransform, stereoTransform);
975 }
976 
978  const rtabmap_ros::MapData & msg,
979  std::map<int, rtabmap::Transform> & poses,
980  std::multimap<int, rtabmap::Link> & links,
981  std::map<int, rtabmap::Signature> & signatures,
982  rtabmap::Transform & mapToOdom)
983 {
984  //optimized graph
985  mapGraphFromROS(msg.graph, poses, links, mapToOdom);
986 
987  //Data
988  for(unsigned int i=0; i<msg.nodes.size(); ++i)
989  {
990  signatures.insert(std::make_pair(msg.nodes[i].id, nodeDataFromROS(msg.nodes[i])));
991  }
992 }
994  const std::map<int, rtabmap::Transform> & poses,
995  const std::multimap<int, rtabmap::Link> & links,
996  const std::map<int, rtabmap::Signature> & signatures,
997  const rtabmap::Transform & mapToOdom,
998  rtabmap_ros::MapData & msg)
999 {
1000  //Optimized graph
1001  mapGraphToROS(poses, links, mapToOdom, msg.graph);
1002 
1003  //Data
1004  msg.nodes.resize(signatures.size());
1005  int index=0;
1006  for(std::multimap<int, rtabmap::Signature>::const_iterator iter = signatures.begin();
1007  iter!=signatures.end();
1008  ++iter)
1009  {
1010  nodeDataToROS(iter->second, msg.nodes[index++]);
1011  }
1012 }
1013 
1015  const rtabmap_ros::MapGraph & msg,
1016  std::map<int, rtabmap::Transform> & poses,
1017  std::multimap<int, rtabmap::Link> & links,
1018  rtabmap::Transform & mapToOdom)
1019 {
1020  //optimized graph
1021  UASSERT(msg.posesId.size() == msg.poses.size());
1022  for(unsigned int i=0; i<msg.posesId.size(); ++i)
1023  {
1024  poses.insert(std::make_pair(msg.posesId[i], rtabmap_ros::transformFromPoseMsg(msg.poses[i])));
1025  }
1026  for(unsigned int i=0; i<msg.links.size(); ++i)
1027  {
1029  links.insert(std::make_pair(msg.links[i].fromId, linkFromROS(msg.links[i])));
1030  }
1031  mapToOdom = transformFromGeometryMsg(msg.mapToOdom);
1032 }
1034  const std::map<int, rtabmap::Transform> & poses,
1035  const std::multimap<int, rtabmap::Link> & links,
1036  const rtabmap::Transform & mapToOdom,
1037  rtabmap_ros::MapGraph & msg)
1038 {
1039  //Optimized graph
1040  msg.posesId.resize(poses.size());
1041  msg.poses.resize(poses.size());
1042  int index = 0;
1043  for(std::map<int, rtabmap::Transform>::const_iterator iter = poses.begin();
1044  iter != poses.end();
1045  ++iter)
1046  {
1047  msg.posesId[index] = iter->first;
1048  transformToPoseMsg(iter->second, msg.poses[index]);
1049  ++index;
1050  }
1051 
1052  msg.links.resize(links.size());
1053  index=0;
1054  for(std::multimap<int, rtabmap::Link>::const_iterator iter = links.begin();
1055  iter!=links.end();
1056  ++iter)
1057  {
1058  linkToROS(iter->second, msg.links[index++]);
1059  }
1060 
1061  transformToGeometryMsg(mapToOdom, msg.mapToOdom);
1062 }
1063 
1064 rtabmap::Signature nodeDataFromROS(const rtabmap_ros::NodeData & msg)
1065 {
1066  //Features stuff...
1067  std::multimap<int, int> words;
1068  std::vector<cv::KeyPoint> wordsKpts;
1069  std::vector<cv::Point3f> words3D;
1070  cv::Mat wordsDescriptors = rtabmap::uncompressData(msg.wordDescriptors);
1071 
1072  if(msg.wordIdKeys.size() != msg.wordIdValues.size())
1073  {
1074  ROS_ERROR("Word ID keys and values should be the same size (%d, %d)!", (int)msg.wordIdKeys.size(), (int)msg.wordIdValues.size());
1075  }
1076  if(!msg.wordKpts.empty() && msg.wordKpts.size() != msg.wordIdKeys.size())
1077  {
1078  ROS_ERROR("Word IDs and 2D keypoints should be the same size (%d, %d)!", (int)msg.wordIdKeys.size(), (int)msg.wordKpts.size());
1079  }
1080  if(!msg.wordPts.empty() && msg.wordPts.size() != msg.wordIdKeys.size())
1081  {
1082  ROS_ERROR("Word IDs and 3D points should be the same size (%d, %d)!", (int)msg.wordIdKeys.size(), (int)msg.wordPts.size());
1083  }
1084  if(!wordsDescriptors.empty() && wordsDescriptors.rows != (int)msg.wordIdKeys.size())
1085  {
1086  ROS_ERROR("Word IDs and descriptors should be the same size (%d, %d)!", (int)msg.wordIdKeys.size(), wordsDescriptors.rows);
1087  wordsDescriptors = cv::Mat();
1088  }
1089 
1090  if(msg.wordIdKeys.size() == msg.wordIdValues.size())
1091  {
1092  for(unsigned int i=0; i<msg.wordIdKeys.size(); ++i)
1093  {
1094  words.insert(std::make_pair(msg.wordIdKeys.at(i), msg.wordIdValues.at(i))); // ID to index
1095  if(msg.wordIdKeys.size() == msg.wordKpts.size())
1096  {
1097  if(wordsKpts.empty())
1098  {
1099  wordsKpts.reserve(msg.wordKpts.size());
1100  }
1101  wordsKpts.push_back(keypointFromROS(msg.wordKpts.at(i)));
1102  }
1103  if(msg.wordIdKeys.size() == msg.wordPts.size())
1104  {
1105  if(words3D.empty())
1106  {
1107  words3D.reserve(msg.wordPts.size());
1108  }
1109  words3D.push_back(point3fFromROS(msg.wordPts[i]));
1110  }
1111  }
1112  }
1113 
1114  std::vector<rtabmap::StereoCameraModel> stereoModels;
1115  std::vector<rtabmap::CameraModel> models;
1116  if(msg.baseline.size())
1117  {
1118  // stereo model
1119  if(msg.fx.size() == msg.baseline.size() &&
1120  msg.fy.size() == msg.baseline.size() &&
1121  msg.cx.size() == msg.baseline.size() &&
1122  msg.cy.size() == msg.baseline.size() &&
1123  msg.width.size() == msg.baseline.size() &&
1124  msg.height.size() == msg.baseline.size() &&
1125  msg.localTransform.size() == msg.baseline.size())
1126  {
1127  for(unsigned int i=0; i<msg.fx.size(); ++i)
1128  {
1129  stereoModels.push_back(rtabmap::StereoCameraModel(
1130  msg.fx[i],
1131  msg.fy[i],
1132  msg.cx[i],
1133  msg.cy[i],
1134  msg.baseline[i],
1135  transformFromGeometryMsg(msg.localTransform[i]),
1136  cv::Size(msg.width[i], msg.height[i])));
1137  }
1138  }
1139  }
1140  else
1141  {
1142  // multi-cameras model
1143  if(msg.fx.size() &&
1144  msg.fx.size() == msg.fy.size() &&
1145  msg.fx.size() == msg.cx.size() &&
1146  msg.fx.size() == msg.cy.size() &&
1147  msg.fx.size() == msg.localTransform.size())
1148  {
1149  for(unsigned int i=0; i<msg.fx.size(); ++i)
1150  {
1151  if(msg.fx[i] == 0)
1152  {
1153  models.push_back(rtabmap::CameraModel());
1154  }
1155  else
1156  {
1157  models.push_back(rtabmap::CameraModel(
1158  msg.fx[i],
1159  msg.fy[i],
1160  msg.cx[i],
1161  msg.cy[i],
1162  transformFromGeometryMsg(msg.localTransform[i]),
1163  0.0,
1164  cv::Size(msg.width[i], msg.height[i])));
1165  }
1166  }
1167  }
1168  }
1169 
1171  msg.id,
1172  msg.mapId,
1173  msg.weight,
1174  msg.stamp,
1175  msg.label,
1176  transformFromPoseMsg(msg.pose),
1177  transformFromPoseMsg(msg.groundTruthPose),
1178  stereoModels.size()?
1181  msg.laserScanMaxPts,
1182  msg.laserScanMaxRange,
1183  (rtabmap::LaserScan::Format)msg.laserScanFormat,
1184  transformFromGeometryMsg(msg.laserScanLocalTransform)),
1185  compressedMatFromBytes(msg.image),
1186  compressedMatFromBytes(msg.depth),
1187  stereoModels,
1188  msg.id,
1189  msg.stamp,
1190  compressedMatFromBytes(msg.userData)):
1193  msg.laserScanMaxPts,
1194  msg.laserScanMaxRange,
1195  (rtabmap::LaserScan::Format)msg.laserScanFormat,
1196  transformFromGeometryMsg(msg.laserScanLocalTransform)),
1197  compressedMatFromBytes(msg.image),
1198  compressedMatFromBytes(msg.depth),
1199  models,
1200  msg.id,
1201  msg.stamp,
1202  compressedMatFromBytes(msg.userData)));
1203  s.setWords(words, wordsKpts, words3D, wordsDescriptors);
1204  s.sensorData().setGlobalDescriptors(rtabmap_ros::globalDescriptorsFromROS(msg.globalDescriptors));
1205  s.sensorData().setEnvSensors(rtabmap_ros::envSensorsFromROS(msg.env_sensors));
1206  s.sensorData().setOccupancyGrid(
1207  compressedMatFromBytes(msg.grid_ground),
1208  compressedMatFromBytes(msg.grid_obstacles),
1209  compressedMatFromBytes(msg.grid_empty_cells),
1210  msg.grid_cell_size,
1211  point3fFromROS(msg.grid_view_point));
1212  s.sensorData().setGPS(rtabmap::GPS(msg.gps.stamp, msg.gps.longitude, msg.gps.latitude, msg.gps.altitude, msg.gps.error, msg.gps.bearing));
1213  return s;
1214 }
1215 void nodeDataToROS(const rtabmap::Signature & signature, rtabmap_ros::NodeData & msg)
1216 {
1217  // add data
1218  msg.id = signature.id();
1219  msg.mapId = signature.mapId();
1220  msg.weight = signature.getWeight();
1221  msg.stamp = signature.getStamp();
1222  msg.label = signature.getLabel();
1223  transformToPoseMsg(signature.getPose(), msg.pose);
1224  transformToPoseMsg(signature.getGroundTruthPose(), msg.groundTruthPose);
1225  msg.gps.stamp = signature.sensorData().gps().stamp();
1226  msg.gps.longitude = signature.sensorData().gps().longitude();
1227  msg.gps.latitude = signature.sensorData().gps().latitude();
1228  msg.gps.altitude = signature.sensorData().gps().altitude();
1229  msg.gps.error = signature.sensorData().gps().error();
1230  msg.gps.bearing = signature.sensorData().gps().bearing();
1231  compressedMatToBytes(signature.sensorData().imageCompressed(), msg.image);
1232  compressedMatToBytes(signature.sensorData().depthOrRightCompressed(), msg.depth);
1233  compressedMatToBytes(signature.sensorData().laserScanCompressed().data(), msg.laserScan);
1234  compressedMatToBytes(signature.sensorData().userDataCompressed(), msg.userData);
1235  compressedMatToBytes(signature.sensorData().gridGroundCellsCompressed(), msg.grid_ground);
1236  compressedMatToBytes(signature.sensorData().gridObstacleCellsCompressed(), msg.grid_obstacles);
1237  compressedMatToBytes(signature.sensorData().gridEmptyCellsCompressed(), msg.grid_empty_cells);
1238  point3fToROS(signature.sensorData().gridViewPoint(), msg.grid_view_point);
1239  msg.grid_cell_size = signature.sensorData().gridCellSize();
1240  msg.laserScanMaxPts = signature.sensorData().laserScanCompressed().maxPoints();
1241  msg.laserScanMaxRange = signature.sensorData().laserScanCompressed().rangeMax();
1242  msg.laserScanFormat = signature.sensorData().laserScanCompressed().format();
1243  transformToGeometryMsg(signature.sensorData().laserScanCompressed().localTransform(), msg.laserScanLocalTransform);
1244  if(signature.sensorData().cameraModels().size())
1245  {
1246  msg.fx.resize(signature.sensorData().cameraModels().size());
1247  msg.fy.resize(signature.sensorData().cameraModels().size());
1248  msg.cx.resize(signature.sensorData().cameraModels().size());
1249  msg.cy.resize(signature.sensorData().cameraModels().size());
1250  msg.width.resize(signature.sensorData().cameraModels().size());
1251  msg.height.resize(signature.sensorData().cameraModels().size());
1252  msg.localTransform.resize(signature.sensorData().cameraModels().size());
1253  for(unsigned int i=0; i<signature.sensorData().cameraModels().size(); ++i)
1254  {
1255  msg.fx[i] = signature.sensorData().cameraModels()[i].fx();
1256  msg.fy[i] = signature.sensorData().cameraModels()[i].fy();
1257  msg.cx[i] = signature.sensorData().cameraModels()[i].cx();
1258  msg.cy[i] = signature.sensorData().cameraModels()[i].cy();
1259  msg.width[i] = signature.sensorData().cameraModels()[i].imageWidth();
1260  msg.height[i] = signature.sensorData().cameraModels()[i].imageHeight();
1261  transformToGeometryMsg(signature.sensorData().cameraModels()[i].localTransform(), msg.localTransform[i]);
1262  }
1263  }
1264  else if(signature.sensorData().stereoCameraModels().size())
1265  {
1266  msg.fx.resize(signature.sensorData().stereoCameraModels().size());
1267  msg.fy.resize(signature.sensorData().stereoCameraModels().size());
1268  msg.cx.resize(signature.sensorData().stereoCameraModels().size());
1269  msg.cy.resize(signature.sensorData().stereoCameraModels().size());
1270  msg.width.resize(signature.sensorData().stereoCameraModels().size());
1271  msg.height.resize(signature.sensorData().stereoCameraModels().size());
1272  msg.baseline.resize(signature.sensorData().stereoCameraModels().size());
1273  msg.localTransform.resize(signature.sensorData().stereoCameraModels().size());
1274  for(unsigned int i=0; i<signature.sensorData().stereoCameraModels().size(); ++i)
1275  {
1276  msg.fx[i] = signature.sensorData().stereoCameraModels()[i].left().fx();
1277  msg.fy[i] = signature.sensorData().stereoCameraModels()[i].left().fy();
1278  msg.cx[i] = signature.sensorData().stereoCameraModels()[i].left().cx();
1279  msg.cy[i] = signature.sensorData().stereoCameraModels()[i].left().cy();
1280  msg.width[i] = signature.sensorData().stereoCameraModels()[i].left().imageWidth();
1281  msg.height[i] = signature.sensorData().stereoCameraModels()[i].left().imageHeight();
1282  msg.baseline[i] = signature.sensorData().stereoCameraModels()[i].baseline();
1283  transformToGeometryMsg(signature.sensorData().stereoCameraModels()[i].left().localTransform(), msg.localTransform[i]);
1284  }
1285  }
1286 
1287  //Features stuff...
1288  if(!signature.getWordsKpts().empty() &&
1289  signature.getWords().size() != signature.getWordsKpts().size())
1290  {
1291  ROS_ERROR("Word IDs and 2D keypoints must have the same size (%d vs %d)!",
1292  (int)signature.getWords().size(),
1293  (int)signature.getWordsKpts().size());
1294  }
1295 
1296  if(!signature.getWords3().empty() &&
1297  signature.getWords().size() != signature.getWords3().size())
1298  {
1299  ROS_ERROR("Word IDs and 3D points must have the same size (%d vs %d)!",
1300  (int)signature.getWords().size(),
1301  (int)signature.getWords3().size());
1302  }
1303  int i=0;
1304  msg.wordIdKeys.resize(signature.getWords().size());
1305  msg.wordIdValues.resize(signature.getWords().size());
1306  for(std::multimap<int, int>::const_iterator iter=signature.getWords().begin();
1307  iter!=signature.getWords().end();
1308  ++iter)
1309  {
1310  msg.wordIdKeys.at(i) = iter->first;
1311  msg.wordIdValues.at(i) = iter->second;
1312  if(signature.getWordsKpts().size() == signature.getWords().size())
1313  {
1314  if(msg.wordKpts.empty())
1315  {
1316  msg.wordKpts.resize(signature.getWords().size());
1317  }
1318  keypointToROS(signature.getWordsKpts().at(i), msg.wordKpts.at(i));
1319  }
1320  if(signature.getWords3().size() == signature.getWords().size())
1321  {
1322  if(msg.wordPts.empty())
1323  {
1324  msg.wordPts.resize(signature.getWords().size());
1325  }
1326  point3fToROS(signature.getWords3().at(i), msg.wordPts.at(i));
1327  }
1328  ++i;
1329  }
1330 
1331  if(!signature.getWordsDescriptors().empty())
1332  {
1333  if(signature.getWordsDescriptors().rows == (int)signature.getWords().size())
1334  {
1335  msg.wordDescriptors = rtabmap::compressData(signature.getWordsDescriptors());
1336  }
1337  else
1338  {
1339  ROS_ERROR("Word IDs and descriptors must have the same size (%d vs %d)!",
1340  (int)signature.getWords().size(),
1341  signature.getWordsDescriptors().rows);
1342  }
1343  }
1344 
1345  rtabmap_ros::globalDescriptorsToROS(signature.sensorData().globalDescriptors(), msg.globalDescriptors);
1346  rtabmap_ros::envSensorsToROS(signature.sensorData().envSensors(), msg.env_sensors);
1347 }
1348 
1349 rtabmap::Signature nodeInfoFromROS(const rtabmap_ros::NodeData & msg)
1350 {
1352  msg.id,
1353  msg.mapId,
1354  msg.weight,
1355  msg.stamp,
1356  msg.label,
1357  transformFromPoseMsg(msg.pose),
1358  transformFromPoseMsg(msg.groundTruthPose));
1359  return s;
1360 }
1361 void nodeInfoToROS(const rtabmap::Signature & signature, rtabmap_ros::NodeData & msg)
1362 {
1363  // add data
1364  msg.id = signature.id();
1365  msg.mapId = signature.mapId();
1366  msg.weight = signature.getWeight();
1367  msg.stamp = signature.getStamp();
1368  msg.label = signature.getLabel();
1369  transformToPoseMsg(signature.getPose(), msg.pose);
1370  transformToPoseMsg(signature.getGroundTruthPose(), msg.groundTruthPose);
1371 }
1372 
1373 std::map<std::string, float> odomInfoToStatistics(const rtabmap::OdometryInfo & info)
1374 {
1375  std::map<std::string, float> stats;
1376 
1377  stats.insert(std::make_pair("Odometry/TimeRegistration/ms", info.reg.totalTime*1000.0f));
1378  stats.insert(std::make_pair("Odometry/RAM_usage/MB", info.memoryUsage));
1379 
1380  // Based on rtabmap/MainWindow.cpp
1381  stats.insert(std::make_pair("Odometry/Features/", info.features));
1382  stats.insert(std::make_pair("Odometry/Matches/", info.reg.matches));
1383  stats.insert(std::make_pair("Odometry/MatchesRatio/", info.features<=0?0.0f:float(info.reg.inliers)/float(info.features)));
1384  stats.insert(std::make_pair("Odometry/Inliers/", info.reg.inliers));
1385  stats.insert(std::make_pair("Odometry/InliersMeanDistance/m", info.reg.inliersMeanDistance));
1386  stats.insert(std::make_pair("Odometry/InliersDistribution/", info.reg.inliersDistribution));
1387  stats.insert(std::make_pair("Odometry/InliersRatio/", info.reg.inliers));
1388  stats.insert(std::make_pair("Odometry/ICPInliersRatio/", info.reg.icpInliersRatio));
1389  stats.insert(std::make_pair("Odometry/ICPRotation/rad", info.reg.icpRotation));
1390  stats.insert(std::make_pair("Odometry/ICPTranslation/m", info.reg.icpTranslation));
1391  stats.insert(std::make_pair("Odometry/ICPStructuralComplexity/", info.reg.icpStructuralComplexity));
1392  stats.insert(std::make_pair("Odometry/ICPStructuralDistribution/", info.reg.icpStructuralDistribution));
1393  stats.insert(std::make_pair("Odometry/ICPCorrespondences/", info.reg.icpCorrespondences));
1394  stats.insert(std::make_pair("Odometry/StdDevLin/", sqrt((float)info.reg.covariance.at<double>(0,0))));
1395  stats.insert(std::make_pair("Odometry/StdDevAng/", sqrt((float)info.reg.covariance.at<double>(5,5))));
1396  stats.insert(std::make_pair("Odometry/VarianceLin/", (float)info.reg.covariance.at<double>(0,0)));
1397  stats.insert(std::make_pair("Odometry/VarianceAng/", (float)info.reg.covariance.at<double>(5,5)));
1398  stats.insert(std::make_pair("Odometry/TimeEstimation/ms", info.timeEstimation*1000.0f));
1399  stats.insert(std::make_pair("Odometry/TimeFiltering/ms", info.timeParticleFiltering*1000.0f));
1400  stats.insert(std::make_pair("Odometry/LocalMapSize/", info.localMapSize));
1401  stats.insert(std::make_pair("Odometry/LocalScanMapSize/", info.localScanMapSize));
1402  stats.insert(std::make_pair("Odometry/LocalKeyFrames/", info.localKeyFrames));
1403  stats.insert(std::make_pair("Odometry/LocalBundleOutliers/", info.localBundleOutliers));
1404  stats.insert(std::make_pair("Odometry/LocalBundleConstraints/", info.localBundleConstraints));
1405  stats.insert(std::make_pair("Odometry/LocalBundleTime/ms", info.localBundleTime*1000.0f));
1406  stats.insert(std::make_pair("Odometry/KeyFrameAdded/", info.keyFrameAdded?1.0f:0.0f));
1407  stats.insert(std::make_pair("Odometry/Interval/ms", (float)info.interval));
1408  stats.insert(std::make_pair("Odometry/Distance/m", info.distanceTravelled));
1409 
1410  float x,y,z,roll,pitch,yaw;
1411  float dist = 0.0f, speed=0.0f;
1412  if(!info.transform.isNull())
1413  {
1414  info.transform.getTranslationAndEulerAngles(x,y,z,roll,pitch,yaw);
1415  dist = info.transform.getNorm();
1416  stats.insert(std::make_pair("Odometry/T/m", dist));
1417  stats.insert(std::make_pair("Odometry/Tx/m", x));
1418  stats.insert(std::make_pair("Odometry/Ty/m", y));
1419  stats.insert(std::make_pair("Odometry/Tz/m", z));
1420  stats.insert(std::make_pair("Odometry/Troll/deg", roll*180.0/CV_PI));
1421  stats.insert(std::make_pair("Odometry/Tpitch/deg", pitch*180.0/CV_PI));
1422  stats.insert(std::make_pair("Odometry/Tyaw/deg", yaw*180.0/CV_PI));
1423 
1424  if(info.interval>0.0)
1425  {
1426  speed = dist/info.interval;
1427  stats.insert(std::make_pair("Odometry/Speed/kph", speed*3.6));
1428  stats.insert(std::make_pair("Odometry/Speed/mph", speed*2.237));
1429  stats.insert(std::make_pair("Odometry/Speed/mps", speed));
1430  }
1431  }
1432  if(!info.transformGroundTruth.isNull())
1433  {
1434  if(!info.transform.isNull())
1435  {
1437  stats.insert(std::make_pair("Odometry/TG_error_lin/m", diff.getNorm()));
1438  stats.insert(std::make_pair("Odometry/TG_error_ang/deg", diff.getAngle()*180.0/CV_PI));
1439  }
1440 
1441  info.transformGroundTruth.getTranslationAndEulerAngles(x,y,z,roll,pitch,yaw);
1442  dist = info.transformGroundTruth.getNorm();
1443  stats.insert(std::make_pair("Odometry/TG/m", dist));
1444  stats.insert(std::make_pair("Odometry/TGx/m", x));
1445  stats.insert(std::make_pair("Odometry/TGy/m", y));
1446  stats.insert(std::make_pair("Odometry/TGz/m", z));
1447  stats.insert(std::make_pair("Odometry/TGroll/deg", roll*180.0/CV_PI));
1448  stats.insert(std::make_pair("Odometry/TGpitch/deg", pitch*180.0/CV_PI));
1449  stats.insert(std::make_pair("Odometry/TGyaw/deg", yaw*180.0/CV_PI));
1450 
1451  if(info.interval>0.0)
1452  {
1453  speed = dist/info.interval;
1454  stats.insert(std::make_pair("Odometry/SpeedG/kph", speed*3.6));
1455  stats.insert(std::make_pair("Odometry/SpeedG/mph", speed*2.237));
1456  stats.insert(std::make_pair("Odometry/SpeedG/mps", speed));
1457  }
1458  }
1459  return stats;
1460 }
1461 
1462 rtabmap::OdometryInfo odomInfoFromROS(const rtabmap_ros::OdomInfo & msg, bool ignoreData)
1463 {
1464  rtabmap::OdometryInfo info;
1465  info.lost = msg.lost;
1466  info.reg.matches = msg.matches;
1467  info.reg.inliers = msg.inliers;
1468  info.reg.icpInliersRatio = msg.icpInliersRatio;
1469  info.reg.icpRotation = msg.icpRotation;
1470  info.reg.icpTranslation = msg.icpTranslation;
1471  info.reg.icpStructuralComplexity = msg.icpStructuralComplexity;
1472  info.reg.icpStructuralDistribution = msg.icpStructuralDistribution;
1473  info.reg.icpCorrespondences = msg.icpCorrespondences;
1474  info.reg.covariance = cv::Mat(6,6,CV_64FC1, (void*)msg.covariance.data()).clone();
1475  info.features = msg.features;
1476  info.localMapSize = msg.localMapSize;
1477  info.localScanMapSize = msg.localScanMapSize;
1478  info.localKeyFrames = msg.localKeyFrames;
1479  info.localBundleOutliers = msg.localBundleOutliers;
1480  info.localBundleConstraints = msg.localBundleConstraints;
1481  info.localBundleTime = msg.localBundleTime;
1482  UASSERT(msg.localBundleModels.size() == msg.localBundleIds.size());
1483  UASSERT(msg.localBundleModels.size() == msg.localBundlePoses.size());
1484  for(size_t i=0; i<msg.localBundleIds.size(); ++i)
1485  {
1486  std::vector<rtabmap::CameraModel> models;
1487  for(size_t j=0; j<msg.localBundleModels[i].models.size(); ++j)
1488  {
1489  models.push_back(cameraModelFromROS(msg.localBundleModels[i].models[j].camera_info, transformFromGeometryMsg(msg.localBundleModels[i].models[j].local_transform)));
1490  }
1491  info.localBundleModels.insert(std::make_pair(msg.localBundleIds[i], models));
1492  info.localBundlePoses.insert(std::make_pair(msg.localBundleIds[i], transformFromPoseMsg(msg.localBundlePoses[i])));
1493  }
1494  info.keyFrameAdded = msg.keyFrameAdded;
1495  info.timeEstimation = msg.timeEstimation;
1496  info.timeParticleFiltering = msg.timeParticleFiltering;
1497  info.stamp = msg.stamp;
1498  info.interval = msg.interval;
1499  info.distanceTravelled = msg.distanceTravelled;
1500  info.memoryUsage = msg.memoryUsage;
1501  info.gravityRollError = msg.gravityRollError;
1502  info.gravityPitchError = msg.gravityPitchError;
1503 
1504  info.type = msg.type;
1505 
1506  info.reg.matchesIDs = msg.wordMatches;
1507  info.reg.inliersIDs = msg.wordInliers;
1508 
1509  if(!ignoreData)
1510  {
1511  UASSERT(msg.wordsKeys.size() == msg.wordsValues.size());
1512  for(unsigned int i=0; i<msg.wordsKeys.size(); ++i)
1513  {
1514  info.words.insert(std::make_pair(msg.wordsKeys[i], keypointFromROS(msg.wordsValues[i])));
1515  }
1516 
1517  info.refCorners = points2fFromROS(msg.refCorners);
1518  info.newCorners = points2fFromROS(msg.newCorners);
1519  info.cornerInliers = msg.cornerInliers;
1520 
1521  info.transform = transformFromGeometryMsg(msg.transform);
1522  info.transformFiltered = transformFromGeometryMsg(msg.transformFiltered);
1523  info.transformGroundTruth = transformFromGeometryMsg(msg.transformGroundTruth);
1524  info.guess = transformFromGeometryMsg(msg.guess);
1525 
1526  UASSERT(msg.localMapKeys.size() == msg.localMapValues.size());
1527  for(unsigned int i=0; i<msg.localMapKeys.size(); ++i)
1528  {
1529  info.localMap.insert(std::make_pair(msg.localMapKeys[i], point3fFromROS(msg.localMapValues[i])));
1530  }
1531 
1532  pcl::PCLPointCloud2 cloud;
1533  pcl_conversions::toPCL(msg.localScanMap, cloud);
1535  }
1536  return info;
1537 }
1538 
// Serializes an OdometryInfo into an OdomInfo message (inverse of
// odomInfoFromROS()). When ignoreData is true, only the statistics and
// transforms are exported (words, corners and local maps are skipped).
void odomInfoToROS(const rtabmap::OdometryInfo & info, rtabmap_ros::OdomInfo & msg, bool ignoreData)
{
	// Registration statistics
	msg.lost = info.lost;
	msg.matches = info.reg.matches;
	msg.inliers = info.reg.inliers;
	msg.icpInliersRatio = info.reg.icpInliersRatio;
	msg.icpRotation = info.reg.icpRotation;
	msg.icpTranslation = info.reg.icpTranslation;
	msg.icpStructuralComplexity = info.reg.icpStructuralComplexity;
	msg.icpStructuralDistribution = info.reg.icpStructuralDistribution;
	msg.icpCorrespondences = info.reg.icpCorrespondences;
	// Copy the 6x6 covariance only when it has the expected type/shape;
	// otherwise the message keeps its default (zero) covariance.
	if(info.reg.covariance.type() == CV_64FC1 && info.reg.covariance.cols == 6 && info.reg.covariance.rows == 6)
	{
		memcpy(msg.covariance.data(), info.reg.covariance.data, 36*sizeof(double));
	}
	msg.features = info.features;
	msg.localMapSize = info.localMapSize;
	msg.localScanMapSize = info.localScanMapSize;
	msg.localKeyFrames = info.localKeyFrames;
	msg.localBundleOutliers = info.localBundleOutliers;
	msg.localBundleConstraints = info.localBundleConstraints;
	msg.localBundleTime = info.localBundleTime;
	UASSERT(info.localBundleModels.size() == info.localBundlePoses.size());
	// Local bundle adjustment: exported as parallel arrays (ids, poses,
	// camera models), ordered by node id (map iteration order).
	for(std::map<int, std::vector<rtabmap::CameraModel> >::const_iterator iter=info.localBundleModels.begin();
		iter!=info.localBundleModels.end();
		++iter)
	{
		msg.localBundleIds.push_back(iter->first);

		UASSERT(info.localBundlePoses.find(iter->first)!=info.localBundlePoses.end());
		geometry_msgs::Pose pose;
		transformToPoseMsg(info.localBundlePoses.at(iter->first), pose);
		msg.localBundlePoses.push_back(pose);

		rtabmap_ros::CameraModels models;
		for(size_t i=0; i<iter->second.size(); ++i)
		{
			rtabmap_ros::CameraModel modelMsg;
			cameraModelToROS(iter->second[i], modelMsg.camera_info);
			transformToGeometryMsg(iter->second[i].localTransform(), modelMsg.local_transform);
			models.models.push_back(modelMsg);
		}
		msg.localBundleModels.push_back(models);
	}
	msg.keyFrameAdded = info.keyFrameAdded;
	msg.timeEstimation = info.timeEstimation;
	msg.timeParticleFiltering = info.timeParticleFiltering;
	msg.stamp = info.stamp;
	msg.interval = info.interval;
	msg.distanceTravelled = info.distanceTravelled;
	msg.memoryUsage = info.memoryUsage;
	msg.gravityRollError = info.gravityRollError;
	msg.gravityPitchError = info.gravityPitchError;

	msg.type = info.type;

	transformToGeometryMsg(info.transform, msg.transform);
	transformToGeometryMsg(info.transformFiltered, msg.transformFiltered);
	transformToGeometryMsg(info.transformGroundTruth, msg.transformGroundTruth);
	transformToGeometryMsg(info.guess, msg.guess);

	if(!ignoreData)
	{
		// Visual words (id -> keypoint) as parallel key/value arrays
		msg.wordsKeys = uKeys(info.words);
		keypointsToROS(uValues(info.words), msg.wordsValues);

		msg.wordMatches = info.reg.matchesIDs;
		msg.wordInliers = info.reg.inliersIDs;

		points2fToROS(info.refCorners, msg.refCorners);
		points2fToROS(info.newCorners, msg.newCorners);
		msg.cornerInliers = info.cornerInliers;

		msg.localMapKeys = uKeys(info.localMap);
		points3fToROS(uValues(info.localMap), msg.localMapValues);

		// NOTE(review): a line is missing here in this copy of the source;
		// upstream also exports info.localScanMap into msg.localScanMap —
		// restore from upstream rtabmap_ros before relying on this field.
	}
}
1618 
1619 cv::Mat userDataFromROS(const rtabmap_ros::UserData & dataMsg)
1620 {
1621  cv::Mat data;
1622  if(!dataMsg.data.empty())
1623  {
1624  if(dataMsg.cols > 0 && dataMsg.rows > 0 && dataMsg.type >= 0)
1625  {
1626  data = cv::Mat(dataMsg.rows, dataMsg.cols, dataMsg.type, (void*)dataMsg.data.data()).clone();
1627  }
1628  else
1629  {
1630  if(dataMsg.cols != (int)dataMsg.data.size() || dataMsg.rows != 1 || dataMsg.type != CV_8UC1)
1631  {
1632  ROS_ERROR("cols, rows and type fields of the UserData msg "
1633  "are not correctly set (cols=%d, rows=%d, type=%d)! We assume that the data "
1634  "is compressed (cols=%d, rows=1, type=%d(CV_8UC1)).",
1635  dataMsg.cols, dataMsg.rows, dataMsg.type, (int)dataMsg.data.size(), CV_8UC1);
1636 
1637  }
1638  data = cv::Mat(1, dataMsg.data.size(), CV_8UC1, (void*)dataMsg.data.data()).clone();
1639  }
1640  }
1641  return data;
1642 }
1643 void userDataToROS(const cv::Mat & data, rtabmap_ros::UserData & dataMsg, bool compress)
1644 {
1645  if(!data.empty())
1646  {
1647  if(compress)
1648  {
1649  dataMsg.data = rtabmap::compressData(data);
1650  dataMsg.rows = 1;
1651  dataMsg.cols = dataMsg.data.size();
1652  dataMsg.type = CV_8UC1;
1653  }
1654  else
1655  {
1656  dataMsg.data.resize(data.step[0] * data.rows); // use step for non-contiguous matrices
1657  memcpy(dataMsg.data.data(), data.data, dataMsg.data.size());
1658  dataMsg.rows = data.rows;
1659  dataMsg.cols = data.cols;
1660  dataMsg.type = data.type();
1661  }
1662  }
1663 }
1664 
1666  const std::map<int, std::pair<geometry_msgs::PoseWithCovarianceStamped, float> > & tags,
1667  const std::string & frameId,
1668  const std::string & odomFrameId,
1669  const ros::Time & odomStamp,
1671  double waitForTransform,
1672  double defaultLinVariance,
1673  double defaultAngVariance)
1674 {
1675  //tag detections
1676  rtabmap::Landmarks landmarks;
1677  for(std::map<int, std::pair<geometry_msgs::PoseWithCovarianceStamped, float> >::const_iterator iter=tags.begin(); iter!=tags.end(); ++iter)
1678  {
1679  if(iter->first <=0)
1680  {
1681  ROS_ERROR("Invalid landmark received! IDs should be > 0 (it is %d). Ignoring this landmark.", iter->first);
1682  continue;
1683  }
1685  frameId,
1686  iter->second.first.header.frame_id,
1687  iter->second.first.header.stamp,
1688  listener,
1689  waitForTransform);
1690 
1691  if(baseToCamera.isNull())
1692  {
1693  ROS_ERROR("Cannot transform tag pose from \"%s\" frame to \"%s\" frame!",
1694  iter->second.first.header.frame_id.c_str(), frameId.c_str());
1695  continue;
1696  }
1697 
1698  rtabmap::Transform baseToTag = baseToCamera * transformFromPoseMsg(iter->second.first.pose.pose);
1699 
1700  if(!baseToTag.isNull())
1701  {
1702  // Correction of the global pose accounting the odometry movement since we received it
1704  frameId,
1705  odomFrameId,
1706  iter->second.first.header.stamp,
1707  odomStamp,
1708  listener,
1709  waitForTransform);
1710  if(!correction.isNull())
1711  {
1712  baseToTag = correction * baseToTag;
1713  }
1714  else
1715  {
1716  ROS_WARN("Could not adjust tag pose accordingly to latest odometry pose. "
1717  "If odometry is small since it received the tag pose and "
1718  "covariance is large, this should not be a problem.");
1719  }
1720  cv::Mat covariance = cv::Mat(6,6, CV_64FC1, (void*)iter->second.first.pose.covariance.data()).clone();
1721  if(covariance.empty() || !uIsFinite(covariance.at<double>(0,0)) || covariance.at<double>(0,0)<=0.0f)
1722  {
1723  covariance = cv::Mat::eye(6,6,CV_64FC1);
1724  covariance(cv::Range(0,3), cv::Range(0,3)) *= defaultLinVariance;
1725  covariance(cv::Range(3,6), cv::Range(3,6)) *= defaultAngVariance;
1726  }
1727  landmarks.insert(std::make_pair(iter->first, rtabmap::Landmark(iter->first, iter->second.second, baseToTag, covariance)));
1728  }
1729  }
1730  return landmarks;
1731 }
1732 
// Looks up the TF transform fromFrameId -> toFrameId at the given stamp,
// optionally blocking up to waitForTransform seconds for TF data to become
// available. Returns a null rtabmap::Transform on failure (callers test with
// isNull()); failures are logged as warnings, never thrown.
// NOTE(review): the opening signature line and the "tf::TransformListener &
// listener" parameter line were doxygen links and are missing from this
// extract -- confirm against MsgConversion.h.
1734  const std::string & fromFrameId,
1735  const std::string & toFrameId,
1736  const ros::Time & stamp,
1738  double waitForTransform)
1739 {
1740  // TF ready?
1741  rtabmap::Transform transform;
1742  try
1743  {
// Only wait when a timeout was requested and the stamp is valid; a zero
// stamp means "latest available", which never requires waiting.
1744  if(waitForTransform > 0.0 && !stamp.isZero())
1745  {
1746  //if(!tfBuffer_.canTransform(fromFrameId, toFrameId, stamp, ros::Duration(1)))
1747  std::string errorMsg;
1748  if(!listener.waitForTransform(fromFrameId, toFrameId, stamp, ros::Duration(waitForTransform), ros::Duration(0.01), &errorMsg))
1749  {
1750  ROS_WARN("Could not get transform from %s to %s after %f seconds (for stamp=%f)! Error=\"%s\".",
1751  fromFrameId.c_str(), toFrameId.c_str(), waitForTransform, stamp.toSec(), errorMsg.c_str());
// Return the still-null transform so the caller can detect failure.
1752  return transform;
1753  }
1754  }
1755 
// NOTE(review): the declaration of "tmp" (presumably a tf::StampedTransform)
// was on the missing original line 1756.
1757  listener.lookupTransform(fromFrameId, toFrameId, stamp, tmp);
1758  transform = rtabmap_ros::transformFromTF(tmp);
1759  }
1760  catch(tf::TransformException & ex)
1761  {
// lookupTransform can still throw (e.g., extrapolation into the future);
// log and fall through to return a null transform instead of propagating.
1762  ROS_WARN("(getting transform %s -> %s) %s", fromFrameId.c_str(), toFrameId.c_str(), ex.what());
1763  }
1764  return transform;
1765 }
1766 
1767 // Gets the motion of a moving frame between two stamps, expressed relative to a
1768 // fixed frame. For example: how a moving /base_link moved between two stamps according to /odom.
// Returns a null rtabmap::Transform on failure (logged as a warning).
// NOTE(review): the opening signature line and the "tf::TransformListener &
// listener" parameter line were doxygen links and are missing from this
// extract -- confirm against MsgConversion.h.
1770  const std::string & sourceTargetFrame,
1771  const std::string & fixedFrame,
1772  const ros::Time & stampSource,
1773  const ros::Time & stampTarget,
1775  double waitForTransform)
1776 {
1777  // TF ready?
1778  rtabmap::Transform transform;
1779  try
1780  {
// Wait until TF has data up to the most recent of the two stamps.
1781  ros::Time stamp = stampSource>stampTarget?stampSource:stampTarget;
1782  if(waitForTransform > 0.0 && !stamp.isZero())
1783  {
1784  std::string errorMsg;
1785  if(!listener.waitForTransform(sourceTargetFrame, fixedFrame, stamp, ros::Duration(waitForTransform), ros::Duration(0.01), &errorMsg))
1786  {
1787  ROS_WARN("Could not get transform from %s to %s accordingly to %s after %f seconds (for stamps=%f -> %f)! Error=\"%s\".",
1788  sourceTargetFrame.c_str(), sourceTargetFrame.c_str(), fixedFrame.c_str(), waitForTransform, stampSource.toSec(), stampTarget.toSec(), errorMsg.c_str());
1789  return transform;
1790  }
1791  }
1792 
// Time-travel TF lookup: the pose of sourceTargetFrame at stampSource expressed
// in sourceTargetFrame at stampTarget, using fixedFrame as the reference frame.
// NOTE(review): the declaration of "tmp" (presumably a tf::StampedTransform)
// was on the missing original line 1793.
1794  listener.lookupTransform(sourceTargetFrame, stampTarget, sourceTargetFrame, stampSource, fixedFrame, tmp);
1795  transform = rtabmap_ros::transformFromTF(tmp);
1796  }
1797  catch(tf::TransformException & ex)
1798  {
1799  ROS_WARN("(getting transform movement of %s according to fixed %s) %s", sourceTargetFrame.c_str(), fixedFrame.c_str(), ex.what());
1800  }
1801  return transform;
1802 }
1803 
// Converts synchronized multi-camera RGB-D (or stereo) image/camera_info
// message arrays into side-by-side concatenated OpenCV images (one horizontal
// strip per modality) plus per-camera rtabmap models, with each camera's local
// transform expressed in frameId and optionally corrected to odomStamp using
// the odometry fixed frame. Also converts optional per-camera local features
// (keypoints / 3D points / descriptors). Returns false on unsupported
// encodings, size/type mismatches, or missing TF.
// NOTE(review): the opening signature line and the "tf::TransformListener &
// listener" parameter line were doxygen links and are missing from this
// extract -- confirm against MsgConversion.h.
1805  const std::vector<cv_bridge::CvImageConstPtr> & imageMsgs,
1806  const std::vector<cv_bridge::CvImageConstPtr> & depthMsgs,
1807  const std::vector<sensor_msgs::CameraInfo> & cameraInfoMsgs,
1808  const std::vector<sensor_msgs::CameraInfo> & depthCameraInfoMsgs,
1809  const std::string & frameId,
1810  const std::string & odomFrameId,
1811  const ros::Time & odomStamp,
1812  cv::Mat & rgb,
1813  cv::Mat & depth,
1814  std::vector<rtabmap::CameraModel> & cameraModels,
1815  std::vector<rtabmap::StereoCameraModel> & stereoCameraModels,
1817  double waitForTransform,
1818  bool alreadRectifiedImages,
1819  const std::vector<std::vector<rtabmap_ros::KeyPoint> > & localKeyPointsMsgs,
1820  const std::vector<std::vector<rtabmap_ros::Point3f> > & localPoints3dMsgs,
1821  const std::vector<cv::Mat> & localDescriptorsMsgs,
1822  std::vector<cv::KeyPoint> * localKeyPoints,
1823  std::vector<cv::Point3f> * localPoints3d,
1824  cv::Mat * localDescriptors)
1825 {
// camera_info is mandatory; every non-empty parallel vector must match its size.
1826  UASSERT(!cameraInfoMsgs.empty() &&
1827  (cameraInfoMsgs.size() == imageMsgs.size() || imageMsgs.empty()) &&
1828  (cameraInfoMsgs.size() == depthMsgs.size() || depthMsgs.empty()) &&
1829  (cameraInfoMsgs.size() == depthCameraInfoMsgs.size() || depthCameraInfoMsgs.empty()));
1830 
// All cameras are assumed to share the dimensions of camera 0; verified per
// camera below for the RGB images.
1831  int imageWidth = imageMsgs.size()?imageMsgs[0]->image.cols:cameraInfoMsgs[0].width;
1832  int imageHeight = imageMsgs.size()?imageMsgs[0]->image.rows:cameraInfoMsgs[0].height;
1833  int depthWidth = depthMsgs.size()?depthMsgs[0]->image.cols:0;
1834  int depthHeight = depthMsgs.size()?depthMsgs[0]->image.rows:0;
1835 
// Heuristic: "depthMsgs" can carry either real depth images (RGB-D mode) or
// right stereo images. Depth encodings are 16UC1/32FC1/mono16.
1836  bool isDepth = depthMsgs.empty() || (depthMsgs[0].get() != 0 && (
1837  depthMsgs[0]->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1) == 0 ||
1838  depthMsgs[0]->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1) == 0 ||
1839  depthMsgs[0]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0));
1840 
1841  // Note that right image can be also MONO16, check the camera info if Tx is set, if so assume it is stereo instead
1842  if(isDepth &&
1843  !depthMsgs.empty() &&
1844  depthMsgs[0]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0 &&
1845  cameraInfoMsgs.size() == depthCameraInfoMsgs.size())
1846  {
// P[3] is Tx*fx in a ROS camera_info projection matrix: non-zero means a stereo
// baseline is set, so the mono16 image is a right image, not depth.
1847  isDepth = cameraInfoMsgs[0].P.elems[3] == 0.0 && depthCameraInfoMsgs[0].P.elems[3] == 0.0;
1848  static bool warned = false;
1849  if(!warned && isDepth)
1850  {
1851  ROS_WARN("Input depth/left image has encoding \"mono16\" and "
1852  "camera info P[3] is null for both cameras, thus image is "
1853  "considered a depth image. If the depth image is in "
1854  "fact the right image, please convert the right image to "
1855  "\"mono8\". This warning is shown only once.");
1856  warned = true;
1857  }
1858  }
1859 
1860  if(isDepth && !depthMsgs.empty())
1861  {
// RGB and depth must have the same decimation ratio.
// NOTE(review): this compares integer divisions, which is a loose check
// (e.g. truncation can hide mismatched aspect ratios) -- presumably
// intentional to allow decimated depth; confirm upstream.
1862  UASSERT_MSG(
1863  imageWidth/depthWidth == imageHeight/depthHeight,
1864  uFormat("rgb=%dx%d depth=%dx%d", imageWidth, imageHeight, depthWidth, depthHeight).c_str());
1865  }
1866 
1867  int cameraCount = cameraInfoMsgs.size();
1868  for(unsigned int i=0; i<cameraInfoMsgs.size(); ++i)
1869  {
// --- Per-camera encoding validation -------------------------------------
1870  if(!imageMsgs.empty())
1871  {
1872  if(!(imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
1873  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
1874  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
1875  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
1876  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
1877  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
1878  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0 ||
1879  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BAYER_GRBG8) == 0 ||
1880  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BAYER_RGGB8) == 0))
1881  {
1882  ROS_ERROR("Input rgb/left type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8. Current rgb/left=%s",
1883  imageMsgs[i]->encoding.c_str());
1884  return false;
1885  }
1886 
1887  UASSERT_MSG(imageMsgs[i]->image.cols == imageWidth && imageMsgs[i]->image.rows == imageHeight,
1888  uFormat("imageWidth=%d vs %d imageHeight=%d vs %d",
1889  imageWidth,
1890  imageMsgs[i]->image.cols,
1891  imageHeight,
1892  imageMsgs[i]->image.rows).c_str());
1893  }
1894  if(!depthMsgs.empty())
1895  {
1896  if(isDepth &&
1897  !(depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1) == 0 ||
1898  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1) == 0 ||
1899  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0))
1900  {
1901  ROS_ERROR("Input depth type must be image_depth=32FC1,16UC1,mono16. Current depth=%s",
1902  depthMsgs[i]->encoding.c_str());
1903  return false;
1904  }
1905  else if(!isDepth &&
1906  !(depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
1907  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
1908  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0 ||
1909  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
1910  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
1911  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
1912  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0))
1913  {
1914  ROS_ERROR("Input right type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8. Current right=%s",
1915  depthMsgs[i]->encoding.c_str());
1916  return false;
1917  }
1918  }
1919 
1920 
// --- Choose the stamp used to resolve TF for this camera ----------------
1921  ros::Time stamp;
1922  if(isDepth && !depthMsgs.empty())
1923  {
// NOTE(review): "imageHeight" in this assert message looks copy-pasted; it
// actually reports depthHeight values.
1924  UASSERT_MSG(depthMsgs[i]->image.cols == depthWidth && depthMsgs[i]->image.rows == depthHeight,
1925  uFormat("depthWidth=%d vs %d imageHeight=%d vs %d",
1926  depthWidth,
1927  depthMsgs[i]->image.cols,
1928  depthHeight,
1929  depthMsgs[i]->image.rows).c_str());
1930  stamp = depthMsgs[i]->header.stamp;
1931  }
1932  else if(!imageMsgs.empty())
1933  {
1934  stamp = imageMsgs[i]->header.stamp;
1935  }
1936  else
1937  {
1938  stamp = cameraInfoMsgs[i].header.stamp;
1939  }
1940 
1941  // use depth's stamp so that geometry is sync to odom, use rgb frame as we assume depth is registered (normally depth msg should have same frame than rgb)
1942  rtabmap::Transform localTransform = rtabmap_ros::getTransform(frameId, !imageMsgs.empty()?imageMsgs[i]->header.frame_id:cameraInfoMsgs[i].header.frame_id, stamp, listener, waitForTransform);
1943  if(localTransform.isNull())
1944  {
1945  ROS_ERROR("TF of received image %d at time %fs is not set!", i, stamp.toSec());
1946  return false;
1947  }
1948  // sync with odometry stamp
1949  if(!odomFrameId.empty() && odomStamp != stamp)
1950  {
// Compensate the robot motion between the image stamp and the odometry stamp
// so the camera pose matches the odometry pose timestamp.
1951  rtabmap::Transform sensorT = getTransform(
1952  frameId,
1953  odomFrameId,
1954  odomStamp,
1955  stamp,
1956  listener,
1957  waitForTransform);
1958  if(sensorT.isNull())
1959  {
1960  ROS_WARN("Could not get odometry value for image stamp (%fs). Latest odometry "
1961  "stamp is %fs. The image pose will not be synchronized with odometry.", stamp.toSec(), odomStamp.toSec());
1962  }
1963  else
1964  {
1965  //ROS_WARN("RGBD correction = %s (time diff=%fs)", sensorT.prettyPrint().c_str(), fabs(stamp.toSec()-odomStamp.toSec()));
1966  localTransform = sensorT * localTransform;
1967  }
1968  }
1969 
// --- Normalize RGB/left encoding and copy into the concatenated strip ---
1970  if(!imageMsgs.empty())
1971  {
1972  cv_bridge::CvImageConstPtr ptrImage = imageMsgs[i];
1973  if(imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1)==0 ||
1974  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
1975  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0)
1976  {
1977  // do nothing
1978  }
1979  else if(imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
1980  {
1981  ptrImage = cv_bridge::cvtColor(imageMsgs[i], "mono8");
1982  }
1983  else
1984  {
1985  ptrImage = cv_bridge::cvtColor(imageMsgs[i], "bgr8");
1986  }
1987 
1988  // initialize
1989  if(rgb.empty())
1990  {
// One wide image holding all cameras side-by-side.
1991  rgb = cv::Mat(imageHeight, imageWidth*cameraCount, ptrImage->image.type());
1992  }
1993  if(ptrImage->image.type() == rgb.type())
1994  {
// Copy into the i-th horizontal slot (a view into rgb, no reallocation).
1995  ptrImage->image.copyTo(cv::Mat(rgb, cv::Rect(i*imageWidth, 0, imageWidth, imageHeight)));
1996  }
1997  else
1998  {
1999  ROS_ERROR("Some RGB/left images are not the same type!");
2000  return false;
2001  }
2002  }
2003 
// --- Same for depth (RGB-D) or right image (stereo) ---------------------
2004  if(!depthMsgs.empty())
2005  {
2006  if(isDepth)
2007  {
2008  cv_bridge::CvImageConstPtr ptrDepth = depthMsgs[i];
2009  cv::Mat subDepth = ptrDepth->image;
2010 
2011  if(depth.empty())
2012  {
2013  depth = cv::Mat(depthHeight, depthWidth*cameraCount, subDepth.type());
2014  }
2015 
2016  if(subDepth.type() == depth.type())
2017  {
2018  subDepth.copyTo(cv::Mat(depth, cv::Rect(i*depthWidth, 0, depthWidth, depthHeight)));
2019  }
2020  else
2021  {
2022  ROS_ERROR("Some Depth images are not the same type!");
2023  return false;
2024  }
2025  }
2026  else
2027  {
// Stereo right images are forced to mono8.
2028  cv_bridge::CvImageConstPtr ptrImage = depthMsgs[i];
2029  if( depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1)==0 ||
2030  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0)
2031  {
2032  // do nothing
2033  }
2034  else
2035  {
2036  ptrImage = cv_bridge::cvtColor(depthMsgs[i], "mono8");
2037  }
2038 
2039  // initialize
2040  if(depth.empty())
2041  {
2042  depth = cv::Mat(depthHeight, depthWidth*cameraCount, ptrImage->image.type());
2043  }
2044  if(ptrImage->image.type() == depth.type())
2045  {
2046  ptrImage->image.copyTo(cv::Mat(depth, cv::Rect(i*depthWidth, 0, depthWidth, depthHeight)));
2047  }
2048  else
2049  {
2050  ROS_ERROR("Some right images are not the same type!");
2051  return false;
2052  }
2053  }
2054  }
2055 
// --- Build the camera model for this camera -----------------------------
2056  if(isDepth)
2057  {
2058  cameraModels.push_back(rtabmap_ros::cameraModelFromROS(cameraInfoMsgs[i], localTransform));
2059  }
2060  else //stereo
2061  {
2062  UASSERT(cameraInfoMsgs.size() == depthCameraInfoMsgs.size());
2063  rtabmap::Transform stereoTransform;
2064  if(!alreadRectifiedImages)
2065  {
// Unrectified input: the left->right extrinsic must come from TF, unless the
// calibration already carries the baseline (P[3] != 0).
2066  if(depthCameraInfoMsgs[i].header.frame_id.empty() || cameraInfoMsgs[i].header.frame_id.empty())
2067  {
2068  if(depthCameraInfoMsgs[i].P[3] == 0.0 && cameraInfoMsgs[i].P[3] == 0)
2069  {
2070  ROS_ERROR("Parameter %s is false but the frame_id in one of the camera_info "
2071  "topic is empty, so TF between the cameras cannot be computed!",
2072  rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str());
2073  return false;
2074  }
2075  else
2076  {
2077  static bool warned = false;
2078  if(!warned)
2079  {
2080  ROS_WARN("Parameter %s is false but the frame_id in one of the "
2081  "camera_info topic is empty, so TF between the cameras cannot be "
2082  "computed! However, the baseline can be computed from the calibration, "
2083  "we will use this one instead of TF. This message is only printed once...",
2084  rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str());
2085  warned = true;
2086  }
2087  }
2088  }
2089  else
2090  {
2091  stereoTransform = getTransform(
2092  depthCameraInfoMsgs[i].header.frame_id,
2093  cameraInfoMsgs[i].header.frame_id,
2094  cameraInfoMsgs[i].header.stamp,
2095  listener,
2096  waitForTransform);
2097  if(stereoTransform.isNull())
2098  {
2099  ROS_ERROR("Parameter %s is false but we cannot get TF between the two cameras!", rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str());
2100  return false;
2101  }
2102  }
2103  }
2104 
2105  rtabmap::StereoCameraModel stereoModel = rtabmap_ros::stereoCameraModelFromROS(cameraInfoMsgs[i], depthCameraInfoMsgs[i], localTransform, stereoTransform);
2106 
// Sanity checks on the resulting baseline.
2107  if(stereoModel.baseline() > 10.0)
2108  {
2109  static bool shown = false;
2110  if(!shown)
2111  {
2112  ROS_WARN("Detected baseline (%f m) is quite large! Is your "
2113  "right camera_info P(0,3) correctly set? Note that "
2114  "baseline=-P(0,3)/P(0,0). You may need to calibrate your camera. "
2115  "This warning is printed only once.",
2116  stereoModel.baseline());
2117  shown = true;
2118  }
2119  }
2120  else if(stereoModel.baseline() == 0 && alreadRectifiedImages)
2121  {
// Rectified input but Tx missing in the right camera_info: fall back to TF
// to recover the baseline (e.g., RealSense D400 IR stereo publishes Tx=0).
2122  rtabmap::Transform stereoTransform;
2123  if( !cameraInfoMsgs[i].header.frame_id.empty() &&
2124  !depthCameraInfoMsgs[i].header.frame_id.empty())
2125  {
2126  stereoTransform = getTransform(
2127  cameraInfoMsgs[i].header.frame_id,
2128  depthCameraInfoMsgs[i].header.frame_id,
2129  cameraInfoMsgs[i].header.stamp,
2130  listener,
2131  waitForTransform);
2132  }
2133  if(stereoTransform.isNull() || stereoTransform.x()<=0)
2134  {
2135  if(cameraInfoMsgs[i].header.frame_id.empty() || depthCameraInfoMsgs[i].header.frame_id.empty())
2136  {
2137  ROS_WARN("We cannot estimated the baseline of the rectified images with tf! (camera_info topics have empty frame_id)");
2138  }
2139  else
2140  {
2141  ROS_WARN("We cannot estimated the baseline of the rectified images with tf! (%s->%s = %s)",
2142  depthCameraInfoMsgs[i].header.frame_id.c_str(), cameraInfoMsgs[i].header.frame_id.c_str(), stereoTransform.prettyPrint().c_str());
2143  }
2144  }
2145  else
2146  {
2147  static bool warned = false;
2148  if(!warned)
2149  {
2150  ROS_WARN("Right camera info doesn't have Tx set but we are assuming that stereo images are already rectified (see %s parameter). While not "
2151  "recommended, we used TF to get the baseline (%s->%s = %fm) for convenience (e.g., D400 ir stereo issue). It is preferred to feed "
2152  "a valid right camera info if stereo images are already rectified. This message is only printed once...",
2153  rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str(),
2154  depthCameraInfoMsgs[i].header.frame_id.c_str(), cameraInfoMsgs[i].header.frame_id.c_str(), stereoTransform.x());
2155  warned = true;
2156  }
// Rebuild the model with the TF-derived baseline, keeping left intrinsics.
2157  stereoModel = rtabmap::StereoCameraModel(
2158  stereoModel.left().fx(),
2159  stereoModel.left().fy(),
2160  stereoModel.left().cx(),
2161  stereoModel.left().cy(),
2162  stereoTransform.x(),
2163  stereoModel.localTransform(),
2164  stereoModel.left().imageSize());
2165  }
2166  }
2167  stereoCameraModels.push_back(stereoModel);
2168  }
2169 
// --- Optional local features, appended with per-camera x offset ---------
2170  if(localKeyPoints && localKeyPointsMsgs.size() == cameraInfoMsgs.size())
2171  {
2172  rtabmap_ros::keypointsFromROS(localKeyPointsMsgs[i], *localKeyPoints, imageWidth*i);
2173  }
2174  if(localPoints3d && localPoints3dMsgs.size() == cameraInfoMsgs.size())
2175  {
2176  // Points should be in base frame
2177  rtabmap_ros::points3fFromROS(localPoints3dMsgs[i], *localPoints3d, localTransform);
2178  }
2179  if(localDescriptors && localDescriptorsMsgs.size() == cameraInfoMsgs.size())
2180  {
2181  localDescriptors->push_back(localDescriptorsMsgs[i]);
2182  }
2183  }
2184  return true;
2185 }
2186 
// Converts a left/right stereo image pair plus camera_infos into OpenCV
// images and an rtabmap::StereoCameraModel whose local transform is expressed
// in frameId (optionally motion-corrected to odomStamp). The left image is
// normalized to mono8/bgr8, the right image to mono8. Returns false on
// unsupported encodings or missing TF.
// NOTE(review): the opening signature line and the "tf::TransformListener &
// listener" parameter line were doxygen links and are missing from this
// extract -- confirm against MsgConversion.h.
2188  const cv_bridge::CvImageConstPtr& leftImageMsg,
2189  const cv_bridge::CvImageConstPtr& rightImageMsg,
2190  const sensor_msgs::CameraInfo& leftCamInfoMsg,
2191  const sensor_msgs::CameraInfo& rightCamInfoMsg,
2192  const std::string & frameId,
2193  const std::string & odomFrameId,
2194  const ros::Time & odomStamp,
2195  cv::Mat & left,
2196  cv::Mat & right,
2197  rtabmap::StereoCameraModel & stereoModel,
2199  double waitForTransform,
2200  bool alreadyRectified)
2201 {
2202  UASSERT(leftImageMsg.get() && rightImageMsg.get());
2203 
2204  if(!(leftImageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
2205  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
2206  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0 ||
2207  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
2208  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
2209  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
2210  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0) ||
2211  !(rightImageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
2212  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
2213  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0 ||
2214  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
2215  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
2216  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
2217  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0))
2218  {
// NOTE(review): the two ROS_ERROR calls below look redundant (the first is a
// subset of the second) -- probably a leftover from a refactor; confirm
// upstream before removing.
2219  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8");
2220  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8 Current left=%s and right=%s",
2221  leftImageMsg->encoding.c_str(),
2222  rightImageMsg->encoding.c_str());
2223  return false;
2224  }
2225 
// Normalize the left image: keep 8-bit gray/bgr as-is, otherwise convert.
2226  if(leftImageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
2227  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0)
2228  {
2229  left = leftImageMsg->image.clone();
2230  }
2231  else if(leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
2232  {
2233  left = cv_bridge::cvtColor(leftImageMsg, "mono8")->image;
2234  }
2235  else
2236  {
2237  left = cv_bridge::cvtColor(leftImageMsg, "bgr8")->image;
2238  }
// The right image is always reduced to mono8 for stereo correspondence.
2239  if(rightImageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
2240  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0)
2241  {
2242  right = rightImageMsg->image.clone();
2243  }
2244  else
2245  {
2246  right = cv_bridge::cvtColor(rightImageMsg, "mono8")->image;
2247  }
2248 
2249  rtabmap::Transform localTransform = getTransform(frameId, leftImageMsg->header.frame_id, leftImageMsg->header.stamp, listener, waitForTransform);
2250  if(localTransform.isNull())
2251  {
2252  return false;
2253  }
2254  // sync with odometry stamp
2255  if(!odomFrameId.empty() && odomStamp != leftImageMsg->header.stamp)
2256  {
// Compensate the motion between the image stamp and the odometry stamp.
2257  rtabmap::Transform sensorT = getTransform(
2258  frameId,
2259  odomFrameId,
2260  odomStamp,
2261  leftImageMsg->header.stamp,
2262  listener,
2263  waitForTransform);
2264  if(sensorT.isNull())
2265  {
2266  ROS_WARN("Could not get odometry value for stereo msg stamp (%fs). Latest odometry "
2267  "stamp is %fs. The stereo image pose will not be synchronized with odometry.", leftImageMsg->header.stamp.toSec(), odomStamp.toSec());
2268  }
2269  else
2270  {
2271  localTransform = sensorT * localTransform;
2272  }
2273  }
2274 
// For unrectified input the left->right extrinsic must come from TF.
2275  rtabmap::Transform stereoTransform;
2276  if(!alreadyRectified)
2277  {
2278  stereoTransform = getTransform(
2279  rightCamInfoMsg.header.frame_id,
2280  leftCamInfoMsg.header.frame_id,
2281  leftCamInfoMsg.header.stamp,
2282  listener,
2283  waitForTransform);
2284  if(stereoTransform.isNull())
2285  {
2286  ROS_ERROR("Parameter %s is false but we cannot get TF between the two cameras!", rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str());
2287  return false;
2288  }
2289  }
2290 
2291  stereoModel = rtabmap_ros::stereoCameraModelFromROS(leftCamInfoMsg, rightCamInfoMsg, localTransform, stereoTransform);
2292 
// Sanity checks on the resulting baseline (mirrors convertRGBDMsgs).
2293  if(stereoModel.baseline() > 10.0)
2294  {
2295  static bool shown = false;
2296  if(!shown)
2297  {
2298  ROS_WARN("Detected baseline (%f m) is quite large! Is your "
2299  "right camera_info P(0,3) correctly set? Note that "
2300  "baseline=-P(0,3)/P(0,0). You may need to calibrate your camera. "
2301  "This warning is printed only once.",
2302  stereoModel.baseline());
2303  shown = true;
2304  }
2305  }
2306  else if(stereoModel.baseline() == 0 && alreadyRectified)
2307  {
// Rectified input but Tx missing in the right camera_info: recover the
// baseline from TF (e.g., RealSense D400 IR stereo publishes Tx=0).
2308  rtabmap::Transform stereoTransform = getTransform(
2309  leftCamInfoMsg.header.frame_id,
2310  rightCamInfoMsg.header.frame_id,
2311  leftCamInfoMsg.header.stamp,
2312  listener,
2313  waitForTransform);
2314  if(stereoTransform.isNull() || stereoTransform.x()<=0)
2315  {
2316  ROS_WARN("We cannot estimated the baseline of the rectified images with tf! (%s->%s = %s)",
2317  rightCamInfoMsg.header.frame_id.c_str(), leftCamInfoMsg.header.frame_id.c_str(), stereoTransform.prettyPrint().c_str());
2318  }
2319  else
2320  {
2321  static bool warned = false;
2322  if(!warned)
2323  {
2324  ROS_WARN("Right camera info doesn't have Tx set but we are assuming that stereo images are already rectified (see %s parameter). While not "
2325  "recommended, we used TF to get the baseline (%s->%s = %fm) for convenience (e.g., D400 ir stereo issue). It is preferred to feed "
2326  "a valid right camera info if stereo images are already rectified. This message is only printed once...",
2327  rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str(),
2328  rightCamInfoMsg.header.frame_id.c_str(), leftCamInfoMsg.header.frame_id.c_str(), stereoTransform.x());
2329  warned = true;
2330  }
// Rebuild the model with the TF-derived baseline, keeping left intrinsics.
2331  stereoModel = rtabmap::StereoCameraModel(
2332  stereoModel.left().fx(),
2333  stereoModel.left().fy(),
2334  stereoModel.left().cx(),
2335  stereoModel.left().cy(),
2336  stereoTransform.x(),
2337  stereoModel.localTransform(),
2338  stereoModel.left().imageSize());
2339  }
2340  }
2341  return true;
2342 }
2343 
// Converts a 2D sensor_msgs::LaserScan into an rtabmap::LaserScan: the scan is
// deskewed by projecting it through the odom (or base) frame over the sweep
// duration, transformed back into the laser frame, optionally re-expressed in
// frameId, and motion-corrected to odomStamp. Returns false if any required
// TF is unavailable.
// NOTE(review): several lines that were doxygen links are missing from this
// extract: the opening signature line, the "tf::TransformListener & listener"
// parameter (line 2350), the "tmpT = getTransform(" call opener (line 2355),
// the laser_geometry::LaserProjection declaration (line 2380) and the
// LaserScan format declaration (line 2445) -- confirm against upstream.
2345  const sensor_msgs::LaserScan & scan2dMsg,
2346  const std::string & frameId,
2347  const std::string & odomFrameId,
2348  const ros::Time & odomStamp,
2349  rtabmap::LaserScan & scan,
2351  double waitForTransform,
2352  bool outputInFrameId)
2353 {
2354  // make sure the frame of the laser is updated during the whole scan time
2356  scan2dMsg.header.frame_id,
2357  odomFrameId.empty()?frameId:odomFrameId,
2358  scan2dMsg.header.stamp,
// End-of-sweep stamp: first ray stamp + rays * time_increment.
2359  scan2dMsg.header.stamp + ros::Duration().fromSec(scan2dMsg.ranges.size()*scan2dMsg.time_increment),
2360  listener,
2361  waitForTransform);
2362  if(tmpT.isNull())
2363  {
2364  return false;
2365  }
2366 
2367  rtabmap::Transform scanLocalTransform = getTransform(
2368  frameId,
2369  scan2dMsg.header.frame_id,
2370  scan2dMsg.header.stamp,
2371  listener,
2372  waitForTransform);
2373  if(scanLocalTransform.isNull())
2374  {
2375  return false;
2376  }
2377 
2378  //transform in frameId_ frame
2379  sensor_msgs::PointCloud2 scanOut;
// transformLaserScanToPointCloud interpolates each ray's pose over the sweep,
// which deskews the scan while converting it to a point cloud.
2381  projection.transformLaserScanToPointCloud(odomFrameId.empty()?frameId:odomFrameId, scan2dMsg, scanOut, listener);
2382 
2383  //transform back in laser frame
2384  rtabmap::Transform laserToOdom = getTransform(
2385  scan2dMsg.header.frame_id,
2386  odomFrameId.empty()?frameId:odomFrameId,
2387  scan2dMsg.header.stamp,
2388  listener,
2389  waitForTransform);
2390  if(laserToOdom.isNull())
2391  {
2392  return false;
2393  }
2394 
2395  // sync with odometry stamp
2396  if(!odomFrameId.empty() && odomStamp != scan2dMsg.header.stamp)
2397  {
2398  rtabmap::Transform sensorT = getTransform(
2399  frameId,
2400  odomFrameId,
2401  odomStamp,
2402  scan2dMsg.header.stamp,
2403  listener,
2404  waitForTransform);
2405  if(sensorT.isNull())
2406  {
2407  ROS_WARN("Could not get odometry value for laser scan stamp (%fs). Latest odometry "
2408  "stamp is %fs. The laser scan pose will not be synchronized with odometry.", scan2dMsg.header.stamp.toSec(), odomStamp.toSec());
2409  }
2410  else
2411  {
2412  //ROS_WARN("scan correction = %s (time diff=%fs)", sensorT.prettyPrint().c_str(), fabs(scan2dMsg->header.stamp.toSec()-odomStamp.toSec()));
2413  scanLocalTransform = sensorT * scanLocalTransform;
2414  }
2415  }
2416 
// When output is requested in frameId, fold the laser->base transform into
// laserToOdom so the points end up in the base frame directly.
2417  if(outputInFrameId)
2418  {
2419  laserToOdom *= scanLocalTransform;
2420  }
2421 
// Preserve the intensity channel only if it is FLOAT32; other datatypes are
// ignored with a one-time warning.
2422  bool hasIntensity = false;
2423  for(unsigned int i=0; i<scanOut.fields.size(); ++i)
2424  {
2425  if(scanOut.fields[i].name.compare("intensity") == 0)
2426  {
2427  if(scanOut.fields[i].datatype == sensor_msgs::PointField::FLOAT32)
2428  {
2429  hasIntensity = true;
2430  }
2431  else
2432  {
2433  static bool warningShown = false;
2434  if(!warningShown)
2435  {
2436  ROS_WARN("The input scan cloud has an \"intensity\" field "
2437  "but the datatype (%d) is not supported. Intensity will be ignored. "
2438  "This message is only shown once.", scanOut.fields[i].datatype);
2439  warningShown = true;
2440  }
2441  }
2442  }
2443  }
2444 
2446  cv::Mat data;
2447  if(hasIntensity)
2448  {
2449  pcl::PointCloud<pcl::PointXYZI>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZI>);
2450  pcl::fromROSMsg(scanOut, *pclScan);
2451  pclScan->is_dense = true;
2452  data = rtabmap::util3d::laserScan2dFromPointCloud(*pclScan, laserToOdom).data(); // put back in laser frame
2453  format = rtabmap::LaserScan::kXYI;
2454  }
2455  else
2456  {
2457  pcl::PointCloud<pcl::PointXYZ>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZ>);
2458  pcl::fromROSMsg(scanOut, *pclScan);
2459  pclScan->is_dense = true;
2460  data = rtabmap::util3d::laserScan2dFromPointCloud(*pclScan, laserToOdom).data(); // put back in laser frame
2461  format = rtabmap::LaserScan::kXY;
2462  }
2463 
// If the laser is mounted upside down (its z axis points below the base
// frame's horizontal plane), mirror the scan so ray ordering stays consistent.
2464  rtabmap::Transform zAxis(0,0,1,0,0,0);
2465  if((scanLocalTransform.rotation()*zAxis).z() < 0)
2466  {
2467  cv::Mat flipScan;
2468  cv::flip(data, flipScan, 1);
2469  data = flipScan;
2470  }
2471 
2472  scan = rtabmap::LaserScan(
2473  data,
2474  format,
2475  scan2dMsg.range_min,
2476  scan2dMsg.range_max,
2477  scan2dMsg.angle_min,
2478  scan2dMsg.angle_max,
2479  scan2dMsg.angle_increment,
// Identity local transform when the points are already in frameId.
2480  outputInFrameId?rtabmap::Transform::getIdentity():scanLocalTransform);
2481 
2482  return true;
2483 }
2484 
// Converts a 3D sensor_msgs::PointCloud2 into an rtabmap::LaserScan with its
// local transform expressed in frameId and optionally motion-corrected to
// odomStamp; the result is then downsampled/limited by maxPoints and maxRange.
// Returns false when the sensor TF is unavailable.
// NOTE(review): the opening signature line and the "tf::TransformListener &
// listener" parameter line (2491) were doxygen links and are missing from
// this extract -- confirm against MsgConversion.h.
2486  const sensor_msgs::PointCloud2 & scan3dMsg,
2487  const std::string & frameId,
2488  const std::string & odomFrameId,
2489  const ros::Time & odomStamp,
2490  rtabmap::LaserScan & scan,
2492  double waitForTransform,
2493  int maxPoints,
2494  float maxRange,
2495  bool is2D)
2496 {
// Guard against truncated/corrupted clouds before touching the data buffer.
2497  UASSERT_MSG(scan3dMsg.data.size() == scan3dMsg.row_step*scan3dMsg.height,
2498  uFormat("data=%d row_step=%d height=%d", scan3dMsg.data.size(), scan3dMsg.row_step, scan3dMsg.height).c_str());
2499 
2500  rtabmap::Transform scanLocalTransform = getTransform(frameId, scan3dMsg.header.frame_id, scan3dMsg.header.stamp, listener, waitForTransform);
2501  if(scanLocalTransform.isNull())
2502  {
2503  ROS_ERROR("TF of received scan cloud at time %fs is not set, aborting rtabmap update.", scan3dMsg.header.stamp.toSec());
2504  return false;
2505  }
2506 
2507  // sync with odometry stamp
2508  if(!odomFrameId.empty() && odomStamp != scan3dMsg.header.stamp)
2509  {
// Compensate the robot motion between the cloud stamp and the odometry stamp.
2510  rtabmap::Transform sensorT = getTransform(
2511  frameId,
2512  odomFrameId,
2513  odomStamp,
2514  scan3dMsg.header.stamp,
2515  listener,
2516  waitForTransform);
2517  if(sensorT.isNull())
2518  {
2519  ROS_WARN("Could not get odometry value for laser scan stamp (%fs). Latest odometry "
2520  "stamp is %fs. The 3d laser scan pose will not be synchronized with odometry.", scan3dMsg.header.stamp.toSec(), odomStamp.toSec());
2521  }
2522  else
2523  {
2524  scanLocalTransform = sensorT * scanLocalTransform;
2525  }
2526  }
// Convert then clamp: the second constructor applies maxPoints/maxRange and
// attaches the (possibly corrected) local transform.
2527  scan = rtabmap::util3d::laserScanFromPointCloud(scan3dMsg, true, is2D);
2528  scan = rtabmap::LaserScan(scan, maxPoints, maxRange, scanLocalTransform);
2529  return true;
2530 }
2531 
2533  const sensor_msgs::PointCloud2 & input,
2534  sensor_msgs::PointCloud2 & output,
2535  const std::string & fixedFrameId,
2537  double waitForTransform,
2538  bool slerp,
2539  const rtabmap::Transform & velocity,
2540  double previousStamp)
2541 {
2542  if(listener != 0)
2543  {
2544  if(input.header.frame_id.empty())
2545  {
2546  ROS_ERROR("Input cloud has empty frame_id!");
2547  return false;
2548  }
2549 
2550  if(fixedFrameId.empty())
2551  {
2552  ROS_ERROR("fixedFrameId parameter should be set!");
2553  return false;
2554  }
2555  }
2556  else
2557  {
2558  if(!slerp)
2559  {
2560  ROS_ERROR("slerp should be true when constant velocity model is used!");
2561  return false;
2562  }
2563 
2564  if(previousStamp <= 0.0)
2565  {
2566  ROS_ERROR("previousStamp should be >0 when constant velocity model is used!");
2567  return false;
2568  }
2569 
2570  if(velocity.isNull())
2571  {
2572  ROS_ERROR("velocity should be valid when constant velocity model is used!");
2573  return false;
2574  }
2575  }
2576 
2577  int offsetTime = -1;
2578  int offsetX = -1;
2579  int offsetY = -1;
2580  int offsetZ = -1;
2581  int timeDatatype = 6;
2582  for(size_t i=0; i<input.fields.size(); ++i)
2583  {
2584  if(input.fields[i].name.compare("t") == 0)
2585  {
2586  if(offsetTime != -1)
2587  {
2588  ROS_WARN("The input cloud should have only one of these fields: t, time or stamps. Overriding with %s.", input.fields[i].name.c_str());
2589  }
2590  offsetTime = input.fields[i].offset;
2591  timeDatatype = input.fields[i].datatype;
2592  }
2593  else if(input.fields[i].name.compare("time") == 0)
2594  {
2595  if(offsetTime != -1)
2596  {
2597  ROS_WARN("The input cloud should have only one of these fields: t, time or stamps. Overriding with %s.", input.fields[i].name.c_str());
2598  }
2599  offsetTime = input.fields[i].offset;
2600  timeDatatype = input.fields[i].datatype;
2601  }
2602  else if(input.fields[i].name.compare("stamps") == 0)
2603  {
2604  if(offsetTime != -1)
2605  {
2606  ROS_WARN("The input cloud should have only one of these fields: t, time or stamps. Overriding with %s.", input.fields[i].name.c_str());
2607  }
2608  offsetTime = input.fields[i].offset;
2609  timeDatatype = input.fields[i].datatype;
2610  }
2611  else if(input.fields[i].name.compare("x") == 0)
2612  {
2613  ROS_ASSERT(input.fields[i].datatype==7);
2614  offsetX = input.fields[i].offset;
2615  }
2616  else if(input.fields[i].name.compare("y") == 0)
2617  {
2618  ROS_ASSERT(input.fields[i].datatype==7);
2619  offsetY = input.fields[i].offset;
2620  }
2621  else if(input.fields[i].name.compare("z") == 0)
2622  {
2623  ROS_ASSERT(input.fields[i].datatype==7);
2624  offsetZ = input.fields[i].offset;
2625  }
2626  }
2627 
2628  if(offsetTime < 0)
2629  {
2630  ROS_ERROR("Input cloud doesn't have \"t\" or \"time\" field!");
2631  return false;
2632  }
2633  if(offsetX < 0)
2634  {
2635  ROS_ERROR("Input cloud doesn't have \"x\" field!");
2636  return false;
2637  }
2638  if(offsetY < 0)
2639  {
2640  ROS_ERROR("Input cloud doesn't have \"y\" field!");
2641  return false;
2642  }
2643  if(offsetZ < 0)
2644  {
2645  ROS_ERROR("Input cloud doesn't have \"z\" field!");
2646  return false;
2647  }
2648  if(input.height == 0)
2649  {
2650  ROS_ERROR("Input cloud height is zero!");
2651  return false;
2652  }
2653  if(input.width == 0)
2654  {
2655  ROS_ERROR("Input cloud width is zero!");
2656  return false;
2657  }
2658 
2659  bool timeOnColumns = input.width > input.height;
2660 
2661  // Get latest timestamp
2662  ros::Time firstStamp;
2663  ros::Time lastStamp;
2664  if(timeDatatype == 6) // UINT32
2665  {
2666  unsigned int nsec = *((const unsigned int*)(&input.data[0]+offsetTime));
2667  firstStamp = input.header.stamp+ros::Duration(0, nsec);
2668  nsec = *((const unsigned int*)(&input.data[timeOnColumns?(input.width-1)*input.point_step:(input.height-1)*input.row_step]+offsetTime));
2669  lastStamp = input.header.stamp+ros::Duration(0, nsec);
2670  }
2671  else if(timeDatatype == 7) // FLOAT32
2672  {
2673  float sec = *((const float*)(&input.data[0]+offsetTime));
2674  firstStamp = input.header.stamp+ros::Duration().fromSec(sec);
2675  sec = *((const float*)(&input.data[timeOnColumns?(input.width-1)*input.point_step:(input.height-1)*input.row_step]+offsetTime));
2676  lastStamp = input.header.stamp+ros::Duration().fromSec(sec);
2677  }
2678  else
2679  {
2680  ROS_ERROR("Not supported time datatype %d!", timeDatatype);
2681  return false;
2682  }
2683 
2684  if(lastStamp <= firstStamp)
2685  {
2686  ROS_ERROR("First and last stamps in the scan are the same!");
2687  return false;
2688  }
2689 
2690  std::string errorMsg;
2691  if(listener != 0 &&
2692  waitForTransform>0.0 &&
2693  !listener->waitForTransform(
2694  input.header.frame_id,
2695  firstStamp,
2696  input.header.frame_id,
2697  lastStamp,
2698  fixedFrameId,
2699  ros::Duration(waitForTransform),
2700  ros::Duration(0.01),
2701  &errorMsg))
2702  {
2703  ROS_ERROR("Could not estimate motion of %s accordingly to fixed frame %s between stamps %f and %f! (%s)",
2704  input.header.frame_id.c_str(),
2705  fixedFrameId.c_str(),
2706  firstStamp.toSec(),
2707  lastStamp.toSec(),
2708  errorMsg.c_str());
2709  return false;
2710  }
2711 
2712  rtabmap::Transform firstPose;
2713  rtabmap::Transform lastPose;
2714  double scanTime = 0;
2715  if(slerp)
2716  {
2717  if(listener != 0)
2718  {
2719  firstPose = rtabmap_ros::getTransform(
2720  input.header.frame_id,
2721  fixedFrameId,
2722  firstStamp,
2723  input.header.stamp,
2724  *listener,
2725  0);
2726  lastPose = rtabmap_ros::getTransform(
2727  input.header.frame_id,
2728  fixedFrameId,
2729  lastStamp,
2730  input.header.stamp,
2731  *listener,
2732  0);
2733  }
2734  else
2735  {
2736  float vx,vy,vz, vroll,vpitch,vyaw;
2737  velocity.getTranslationAndEulerAngles(vx,vy,vz, vroll,vpitch,vyaw);
2738 
2739  // We need three poses:
2740  // 1- The pose of base frame in odom frame at first stamp
2741  // 2- The pose of base frame in odom frame at msg stamp
2742  // 3- The pose of base frame in odom frame at last stamp
2743  UASSERT(firstStamp.toSec() >= previousStamp);
2744  UASSERT(lastStamp.toSec() > previousStamp);
2745  double dt1 = firstStamp.toSec() - previousStamp;
2746  double dt2 = input.header.stamp.toSec() - previousStamp;
2747  double dt3 = lastStamp.toSec() - previousStamp;
2748 
2749  rtabmap::Transform p1(vx*dt1, vy*dt1, vz*dt1, vroll*dt1, vpitch*dt1, vyaw*dt1);
2750  rtabmap::Transform p2(vx*dt2, vy*dt2, vz*dt2, vroll*dt2, vpitch*dt2, vyaw*dt2);
2751  rtabmap::Transform p3(vx*dt3, vy*dt3, vz*dt3, vroll*dt3, vpitch*dt3, vyaw*dt3);
2752 
2753  // First and last poses are relative to stamp of the msg
2754  firstPose = p2.inverse() * p1;
2755  lastPose = p2.inverse() * p3;
2756  }
2757 
2758  if(firstPose.isNull())
2759  {
2760  ROS_ERROR("Could not get transform of %s accordingly to %s between stamps %f and %f!",
2761  input.header.frame_id.c_str(),
2762  fixedFrameId.empty()?"velocity":fixedFrameId.c_str(),
2763  firstStamp.toSec(),
2764  input.header.stamp.toSec());
2765  return false;
2766  }
2767  if(lastPose.isNull())
2768  {
2769  ROS_ERROR("Could not get transform of %s accordingly to %s between stamps %f and %f!",
2770  input.header.frame_id.c_str(),
2771  fixedFrameId.empty()?"velocity":fixedFrameId.c_str(),
2772  lastStamp.toSec(),
2773  input.header.stamp.toSec());
2774  return false;
2775  }
2776  scanTime = lastStamp.toSec() - firstStamp.toSec();
2777  }
2778  //else tf will be used to get more accurate transforms
2779 
2780  output = input;
2781  ros::Time stamp;
2782  UTimer processingTime;
2783  if(timeOnColumns)
2784  {
2785  // ouster point cloud:
2786  // t1 t2 ...
2787  // ring1 ring1 ...
2788  // ring2 ring2 ...
2789  // ring3 ring4 ...
2790  // ring4 ring3 ...
2791  for(size_t u=0; u<output.width; ++u)
2792  {
2793  if(timeDatatype == 6) // UINT32
2794  {
2795  unsigned int nsec = *((const unsigned int*)(&output.data[u*output.point_step]+offsetTime));
2796  stamp = input.header.stamp+ros::Duration(0, nsec);
2797  }
2798  else
2799  {
2800  float sec = *((const float*)(&output.data[u*output.point_step]+offsetTime));
2801  stamp = input.header.stamp+ros::Duration().fromSec(sec);
2802  }
2803 
2804  rtabmap::Transform transform;
2805  if(slerp)
2806  {
2807  transform = firstPose.interpolate((stamp-firstStamp).toSec() / scanTime, lastPose);
2808  }
2809  else
2810  {
2811  transform = rtabmap_ros::getTransform(
2812  output.header.frame_id,
2813  fixedFrameId,
2814  stamp,
2815  output.header.stamp,
2816  *listener,
2817  0);
2818  if(transform.isNull())
2819  {
2820  ROS_ERROR("Could not get transform of %s accordingly to %s between stamps %f and %f!",
2821  output.header.frame_id.c_str(),
2822  fixedFrameId.c_str(),
2823  stamp.toSec(),
2824  output.header.stamp.toSec());
2825  return false;
2826  }
2827  }
2828 
2829  for(size_t v=0; v<input.height; ++v)
2830  {
2831  unsigned char * dataPtr = &output.data[v*output.row_step + u*output.point_step];
2832  float & x = *((float*)(dataPtr+offsetX));
2833  float & y = *((float*)(dataPtr+offsetY));
2834  float & z = *((float*)(dataPtr+offsetZ));
2835  pcl::PointXYZ pt(x,y,z);
2836  pt = rtabmap::util3d::transformPoint(pt, transform);
2837  x = pt.x;
2838  y = pt.y;
2839  z = pt.z;
2840 
2841  // set delta stamp to zero so that on downstream they know the cloud is deskewed
2842  if(timeDatatype == 6) // UINT32
2843  {
2844  *((unsigned int*)(dataPtr+offsetTime)) = 0;
2845  }
2846  else
2847  {
2848  *((float*)(dataPtr+offsetTime)) = 0;
2849  }
2850  }
2851  }
2852  }
2853  else // time on rows
2854  {
2855  // velodyne point cloud:
2856  // t1 ring1 ring2 ring3 ring4
2857  // t2 ring1 ring2 ring3 ring4
2858  // t3 ring1 ring2 ring3 ring4
2859  // t4 ring1 ring2 ring3 ring4
2860  // ... ... ... ... ...
2861  for(size_t v=0; v<output.height; ++v)
2862  {
2863  if(timeDatatype == 6) // UINT32
2864  {
2865  unsigned int nsec = *((const unsigned int*)(&output.data[v*output.row_step]+offsetTime));
2866  stamp = input.header.stamp+ros::Duration(0, nsec);
2867  }
2868  else
2869  {
2870  float sec = *((const float*)(&output.data[v*output.row_step]+offsetTime));
2871  stamp = input.header.stamp+ros::Duration().fromSec(sec);
2872  }
2873 
2874  rtabmap::Transform transform;
2875  if(slerp)
2876  {
2877  transform = firstPose.interpolate((stamp-firstStamp).toSec() / scanTime, lastPose);
2878  }
2879  else
2880  {
2881  transform = rtabmap_ros::getTransform(
2882  output.header.frame_id,
2883  fixedFrameId,
2884  stamp,
2885  output.header.stamp,
2886  *listener,
2887  0);
2888  if(transform.isNull())
2889  {
2890  ROS_ERROR("Could not get transform of %s accordingly to %s between stamps %f and %f!",
2891  output.header.frame_id.c_str(),
2892  fixedFrameId.c_str(),
2893  stamp.toSec(),
2894  output.header.stamp.toSec());
2895  return false;
2896  }
2897  }
2898 
2899  for(size_t u=0; u<input.width; ++u)
2900  {
2901  unsigned char * dataPtr = &output.data[v*output.row_step + u*output.point_step];
2902  float & x = *((float*)(dataPtr+offsetX));
2903  float & y = *((float*)(dataPtr+offsetY));
2904  float & z = *((float*)(dataPtr+offsetZ));
2905  pcl::PointXYZ pt(x,y,z);
2906  pt = rtabmap::util3d::transformPoint(pt, transform);
2907  x = pt.x;
2908  y = pt.y;
2909  z = pt.z;
2910 
2911  // set delta stamp to zero so that on downstream they know the cloud is deskewed
2912  if(timeDatatype == 6) // UINT32
2913  {
2914  *((unsigned int*)(dataPtr+offsetTime)) = 0;
2915  }
2916  else
2917  {
2918  *((float*)(dataPtr+offsetTime)) = 0;
2919  }
2920  }
2921  }
2922  }
2923  ROS_DEBUG("Lidar deskewing time=%fs", processingTime.elapsed());
2924  return true;
2925 }
2926 
2927 bool deskew(
2928  const sensor_msgs::PointCloud2 & input,
2929  sensor_msgs::PointCloud2 & output,
2930  const std::string & fixedFrameId,
2932  double waitForTransform,
2933  bool slerp)
2934 {
2935  return deskew_impl(input, output, fixedFrameId, &listener, waitForTransform, slerp, rtabmap::Transform(), 0);
2936 }
2937 
2938 bool deskew(
2939  const sensor_msgs::PointCloud2 & input,
2940  sensor_msgs::PointCloud2 & output,
2941  double previousStamp,
2942  const rtabmap::Transform & velocity)
2943 {
2944  return deskew_impl(input, output, "", 0, 0, true, velocity, previousStamp);
2945 }
2946 
2947 }
CvImageConstPtr toCvShare(const sensor_msgs::ImageConstPtr &source, const std::string &encoding=std::string())
void globalDescriptorsToROS(const std::vector< rtabmap::GlobalDescriptor > &desc, std::vector< rtabmap_ros::GlobalDescriptor > &msg)
const Type & type() const
const std::string BAYER_GRBG8
GLM_FUNC_DECL T roll(detail::tquat< T, P > const &x)
cv::Mat R() const
void keypointsToROS(const std::vector< cv::KeyPoint > &kpts, std::vector< rtabmap_ros::KeyPoint > &msg)
const std::vector< int > & wmState() const
void transformEigenToTF(const Eigen::Affine3d &e, tf::Transform &t)
bool isValidForProjection() const
const cv::Size & imageSize() const
std::vector< cv::Point2f > refCorners
const std::map< int, float > & posterior() const
const double & error() const
void transformToTF(const rtabmap::Transform &transform, tf::Transform &tfTransform)
cv::Mat RTABMAP_EXP uncompressData(const cv::Mat &bytes)
const cv::Mat & gridObstacleCellsCompressed() const
std::string uFormat(const char *fmt,...)
float getAngle(float x=1.0f, float y=0.0f, float z=0.0f) const
int imageHeight() const
Transform transformGroundTruth
const double & stamp() const
cv::Point3f RTABMAP_EXP transformPoint(const cv::Point3f &pt, const Transform &transform)
const Transform & getGroundTruthPose() const
int currentGoalId() const
const std::vector< cv::Point3f > & keypoints3D() const
bool convertRGBDMsgs(const std::vector< cv_bridge::CvImageConstPtr > &imageMsgs, const std::vector< cv_bridge::CvImageConstPtr > &depthMsgs, const std::vector< sensor_msgs::CameraInfo > &cameraInfoMsgs, const std::vector< sensor_msgs::CameraInfo > &depthCameraInfoMsgs, const std::string &frameId, const std::string &odomFrameId, const ros::Time &odomStamp, cv::Mat &rgb, cv::Mat &depth, std::vector< rtabmap::CameraModel > &cameraModels, std::vector< rtabmap::StereoCameraModel > &stereoCameraModels, tf::TransformListener &listener, double waitForTransform, bool alreadRectifiedImages, const std::vector< std::vector< rtabmap_ros::KeyPoint > > &localKeyPointsMsgs=std::vector< std::vector< rtabmap_ros::KeyPoint > >(), const std::vector< std::vector< rtabmap_ros::Point3f > > &localPoints3dMsgs=std::vector< std::vector< rtabmap_ros::Point3f > >(), const std::vector< cv::Mat > &localDescriptorsMsgs=std::vector< cv::Mat >(), std::vector< cv::KeyPoint > *localKeyPoints=0, std::vector< cv::Point3f > *localPoints3d=0, cv::Mat *localDescriptors=0)
rtabmap::Signature nodeDataFromROS(const rtabmap_ros::NodeData &msg)
int maxPoints() const
void transformTFToEigen(const tf::Transform &t, Eigen::Affine3d &e)
double elapsed()
GLM_FUNC_DECL genType sec(genType const &angle)
rtabmap::EnvSensor envSensorFromROS(const rtabmap_ros::EnvSensor &msg)
f
const LaserScan & laserScanCompressed() const
void points2fToROS(const std::vector< cv::Point2f > &kpts, std::vector< rtabmap_ros::Point2f > &msg)
x
rtabmap::SensorData rgbdImageFromROS(const rtabmap_ros::RGBDImageConstPtr &image)
bool deskew_impl(const sensor_msgs::PointCloud2 &input, sensor_msgs::PointCloud2 &output, const std::string &fixedFrameId, tf::TransformListener *listener, double waitForTransform, bool slerp, const rtabmap::Transform &velocity, double previousStamp)
Transform transformFiltered
void mapGraphToROS(const std::map< int, rtabmap::Transform > &poses, const std::multimap< int, rtabmap::Link > &links, const rtabmap::Transform &mapToOdom, rtabmap_ros::MapGraph &msg)
void poseEigenToMsg(const Eigen::Affine3d &e, geometry_msgs::Pose &m)
GLM_FUNC_DECL vecType< T, P > sqrt(vecType< T, P > const &x)
std::vector< rtabmap::GlobalDescriptor > globalDescriptorsFromROS(const std::vector< rtabmap_ros::GlobalDescriptor > &msg)
void setLoopClosureId(int id)
const cv::Mat & gridGroundCellsCompressed() const
std::map< int, cv::Point3f > localMap
void setLabels(const std::map< int, std::string > &labels)
void setPosterior(const std::map< int, float > &posterior)
void poseMsgToEigen(const geometry_msgs::Pose &m, Eigen::Affine3d &e)
void setRawLikelihood(const std::map< int, float > &rawLikelihood)
static Transform getIdentity()
const cv::Mat & getWordsDescriptors() const
void setRefImageId(int id)
const cv::Mat & depthOrRightRaw() const
std::map< int, std::vector< CameraModel > > localBundleModels
XmlRpcServer s
data
void odomInfoToROS(const rtabmap::OdometryInfo &info, rtabmap_ros::OdomInfo &msg, bool ignoreData=false)
void setProximityDetectionId(int id)
void transformEigenToMsg(const Eigen::Affine3d &e, geometry_msgs::Transform &m)
void transformToGeometryMsg(const rtabmap::Transform &transform, geometry_msgs::Transform &msg)
pcl::PCLPointCloud2::Ptr RTABMAP_EXP laserScanToPointCloud2(const LaserScan &laserScan, const Transform &transform=Transform())
rtabmap::Landmarks landmarksFromROS(const std::map< int, std::pair< geometry_msgs::PoseWithCovarianceStamped, float > > &tags, const std::string &frameId, const std::string &odomFrameId, const ros::Time &odomStamp, tf::TransformListener &listener, double waitForTransform, double defaultLinVariance, double defaultAngVariance)
bool waitForTransform(const std::string &target_frame, const std::string &source_frame, const ros::Time &time, const ros::Duration &timeout, const ros::Duration &polling_sleep_duration=ros::Duration(0.01), std::string *error_msg=NULL) const
std::map< int, Landmark > Landmarks
std::string encoding
int mapId() const
const std::vector< StereoCameraModel > & stereoCameraModels() const
Transform interpolate(float t, const Transform &other) const
rtabmap::Signature nodeInfoFromROS(const rtabmap_ros::NodeData &msg)
const cv::Mat & data() const
rtabmap::Transform transformFromPoseMsg(const geometry_msgs::Pose &msg, bool ignoreRotationIfNotSet=false)
Format format() const
const std::vector< cv::KeyPoint > & keypoints() const
const std::string TYPE_8UC1
const std::vector< cv::Point3f > & getWords3() const
void fromROSMsg(const sensor_msgs::PointCloud2 &cloud, pcl::PointCloud< T > &pcl_cloud)
const cv::Mat info() const
double fx() const
void setCurrentGoalId(int goal)
std::vector< int > inliersIDs
float getNorm() const
const std::map< int, int > & weights() const
int getWeight() const
void nodeInfoToROS(const rtabmap::Signature &signature, rtabmap_ros::NodeData &msg)
void setOdomCacheConstraints(const std::multimap< int, Link > &constraints)
const std::string & getLabel() const
GLM_FUNC_DECL T pitch(detail::tquat< T, P > const &x)
bool deskew(const sensor_msgs::PointCloud2 &input, sensor_msgs::PointCloud2 &output, const std::string &fixedFrameId, tf::TransformListener &listener, double waitForTransform, bool slerp=false)
void getTranslationAndEulerAngles(float &x, float &y, float &z, float &roll, float &pitch, float &yaw) const
const std::vector< GlobalDescriptor > & globalDescriptors() const
void points3fToROS(const std::vector< cv::Point3f > &pts, std::vector< rtabmap_ros::Point3f > &msg, const rtabmap::Transform &transform=rtabmap::Transform())
#define ROS_WARN(...)
const Transform & loopClosureTransform() const
geometry_msgs::TransformStamped t
bool uStrContains(const std::string &string, const std::string &substring)
rtabmap::Transform getTransform(const std::string &fromFrameId, const std::string &toFrameId, const ros::Time &stamp, tf::TransformListener &listener, double waitForTransform)
float rangeMax() const
const std::map< int, float > & likelihood() const
float gridCellSize() const
const std::map< std::string, float > & data() const
int loopClosureId() const
void point2fToROS(const cv::Point2f &kpt, rtabmap_ros::Point2f &msg)
bool uIsFinite(const T &value)
const std::vector< cv::KeyPoint > & getWordsKpts() const
#define UASSERT(condition)
cv::KeyPoint keypointFromROS(const rtabmap_ros::KeyPoint &msg)
void setLocalPath(const std::vector< int > &localPath)
const double & altitude() const
const double & bearing() const
sensor_msgs::ImagePtr toImageMsg() const
std::map< std::string, float > odomInfoToStatistics(const rtabmap::OdometryInfo &info)
const std::vector< CameraModel > & cameraModels() const
void mapDataFromROS(const rtabmap_ros::MapData &msg, std::map< int, rtabmap::Transform > &poses, std::multimap< int, rtabmap::Link > &links, std::map< int, rtabmap::Signature > &signatures, rtabmap::Transform &mapToOdom)
rtabmap::OdometryInfo odomInfoFromROS(const rtabmap_ros::OdomInfo &msg, bool ignoreData=false)
const GPS & gps() const
std::vector< int > cornerInliers
void setExtended(bool extended)
const std::map< int, float > & rawLikelihood() const
void linkToROS(const rtabmap::Link &link, rtabmap_ros::Link &msg)
rtabmap::Link linkFromROS(const rtabmap_ros::Link &msg)
#define UASSERT_MSG(condition, msg_str)
CvImagePtr cvtColor(const CvImageConstPtr &source, const std::string &encoding)
void rgbdImageToROS(const rtabmap::SensorData &data, rtabmap_ros::RGBDImage &msg, const std::string &sensorFrameId)
Duration & fromSec(double t)
const cv::Mat & imageRaw() const
GLM_FUNC_DECL detail::tquat< T, P > slerp(detail::tquat< T, P > const &x, detail::tquat< T, P > const &y, T const &a)
void mapDataToROS(const std::map< int, rtabmap::Transform > &poses, const std::multimap< int, rtabmap::Link > &links, const std::map< int, rtabmap::Signature > &signatures, const rtabmap::Transform &mapToOdom, rtabmap_ros::MapData &msg)
std::vector< unsigned char > RTABMAP_EXP compressData(const cv::Mat &data)
void envSensorsToROS(const rtabmap::EnvSensors &sensors, std::vector< rtabmap_ros::EnvSensor > &msg)
std::vector< cv::Point3f > points3fFromROS(const std::vector< rtabmap_ros::Point3f > &msg, const rtabmap::Transform &transform=rtabmap::Transform())
cv::Mat D_raw() const
const EnvSensors & envSensors() const
cv::Point3f point3fFromROS(const rtabmap_ros::Point3f &msg)
const Transform & mapCorrection() const
void infoFromROS(const rtabmap_ros::Info &info, rtabmap::Statistics &stat)
const std::string TYPE_32FC1
bool extended() const
std::string prettyPrint() const
cv::Mat K_raw() const
void mapGraphFromROS(const rtabmap_ros::MapGraph &msg, std::map< int, rtabmap::Transform > &poses, std::multimap< int, rtabmap::Link > &links, rtabmap::Transform &mapToOdom)
int currentIndex
Definition: patrol.py:9
const cv::Mat & descriptors() const
std::map< int, Transform > localBundlePoses
cv::Mat compressedMatFromBytes(const std::vector< unsigned char > &bytes, bool copy=true)
rtabmap::CameraModel cameraModelFromROS(const sensor_msgs::CameraInfo &camInfo, const rtabmap::Transform &localTransform=rtabmap::Transform::getIdentity())
std::vector< cv::KeyPoint > keypointsFromROS(const std::vector< rtabmap_ros::KeyPoint > &msg)
CvImagePtr toCvCopy(const sensor_msgs::ImageConstPtr &source, const std::string &encoding=std::string())
const std::string TYPE_16UC1
rtabmap::EnvSensors envSensorsFromROS(const std::vector< rtabmap_ros::EnvSensor > &msg)
const std::multimap< int, Link > & odomCacheConstraints() const
LaserScan laserScanFromPointCloud(const PointCloud2T &cloud, bool filterNaNs, bool is2D, const Transform &transform)
void toPCL(const ros::Time &stamp, std::uint64_t &pcl_stamp)
void cameraModelToROS(const rtabmap::CameraModel &model, sensor_msgs::CameraInfo &camInfo)
const std::string MONO16
std::list< V > uValues(const std::multimap< K, V > &mm)
const cv::Mat & depthOrRightCompressed() const
cv::Mat userDataFromROS(const rtabmap_ros::UserData &dataMsg)
bool convertStereoMsg(const cv_bridge::CvImageConstPtr &leftImageMsg, const cv_bridge::CvImageConstPtr &rightImageMsg, const sensor_msgs::CameraInfo &leftCamInfoMsg, const sensor_msgs::CameraInfo &rightCamInfoMsg, const std::string &frameId, const std::string &odomFrameId, const ros::Time &odomStamp, cv::Mat &left, cv::Mat &right, rtabmap::StereoCameraModel &stereoModel, tf::TransformListener &listener, double waitForTransform, bool alreadyRectified)
void setLikelihood(const std::map< int, float > &likelihood)
Transform rotation() const
bool isNull() const
void transformToPoseMsg(const rtabmap::Transform &transform, geometry_msgs::Pose &msg)
string frameId
Definition: patrol.py:11
double cx() const
void keypointToROS(const cv::KeyPoint &kpt, rtabmap_ros::KeyPoint &msg)
static Transform fromEigen3d(const Eigen::Affine3d &matrix)
const cv::Mat data() const
const std::vector< int > & localPath() const
const cv::Point3f & gridViewPoint() const
bool convertScanMsg(const sensor_msgs::LaserScan &scan2dMsg, const std::string &frameId, const std::string &odomFrameId, const ros::Time &odomStamp, rtabmap::LaserScan &scan, tf::TransformListener &listener, double waitForTransform, bool outputInFrameId=false)
void setWmState(const std::vector< int > &state)
void transformMsgToEigen(const geometry_msgs::Transform &m, Eigen::Affine3d &e)
double cy() const
const Transform & localTransform() const
rtabmap::GlobalDescriptor globalDescriptorFromROS(const rtabmap_ros::GlobalDescriptor &msg)
void setStamp(double stamp)
std::multimap< int, cv::KeyPoint > words
SensorData & sensorData()
void lookupTransform(const std::string &target_frame, const std::string &source_frame, const ros::Time &time, StampedTransform &transform) const
const double & longitude() const
std::list< K > uKeys(const std::multimap< K, V > &mm)
void toCvShare(const rtabmap_ros::RGBDImageConstPtr &image, cv_bridge::CvImageConstPtr &rgb, cv_bridge::CvImageConstPtr &depth)
void setWeights(const std::map< int, int > &weights)
dist
#define UERROR(...)
std::vector< cv::Point2f > points2fFromROS(const std::vector< rtabmap_ros::Point2f > &msg)
const cv::Mat & userDataCompressed() const
int refImageId() const
const Transform & getPose() const
void globalDescriptorToROS(const rtabmap::GlobalDescriptor &desc, rtabmap_ros::GlobalDescriptor &msg)
rtabmap::StereoCameraModel stereoCameraModelFromROS(const sensor_msgs::CameraInfo &leftCamInfo, const sensor_msgs::CameraInfo &rightCamInfo, const rtabmap::Transform &localTransform=rtabmap::Transform::getIdentity(), const rtabmap::Transform &stereoTransform=rtabmap::Transform())
void envSensorToROS(const rtabmap::EnvSensor &sensor, rtabmap_ros::EnvSensor &msg)
double stamp() const
const double & stamp() const
cv::Mat P() const
diff
double getStamp() const
double timestampFromROS(const ros::Time &stamp)
GLM_FUNC_DECL T yaw(detail::tquat< T, P > const &x)
double fy() const
std::vector< int > matchesIDs
std::vector< cv::Point2f > newCorners
const std::string BAYER_RGGB8
void setLoopClosureTransform(const Transform &loopClosureTransform)
const std::string header
void toCvCopy(const rtabmap_ros::RGBDImage &image, cv_bridge::CvImagePtr &rgb, cv_bridge::CvImagePtr &depth)
RegistrationInfo reg
void userDataToROS(const cv::Mat &data, rtabmap_ros::UserData &dataMsg, bool compress)
void compressedMatToBytes(const cv::Mat &compressed, std::vector< unsigned char > &bytes)
const double & latitude() const
void transformLaserScanToPointCloud(const std::string &target_frame, const sensor_msgs::LaserScan &scan_in, sensor_msgs::PointCloud &cloud_out, tf::Transformer &tf, double range_cutoff, int channel_options=channel_option::Default)
int size() const
std::map< EnvSensor::Type, EnvSensor > EnvSensors
const std::map< int, std::string > & labels() const
void moveFromPCL(pcl::PCLImage &pcl_image, sensor_msgs::Image &image)
int imageWidth() const
#define ROS_ASSERT(cond)
int proximityDetectionId() const
rtabmap::Transform transformFromGeometryMsg(const geometry_msgs::Transform &msg)
const cv::Mat & gridEmptyCellsCompressed() const
#define ROS_ERROR(...)
const double & value() const
void nodeDataToROS(const rtabmap::Signature &signature, rtabmap_ros::NodeData &msg)
rtabmap::Transform transformFromTF(const tf::Transform &transform)
const cv::Mat & imageCompressed() const
Transform localTransform() const
bool convertScan3dMsg(const sensor_msgs::PointCloud2 &scan3dMsg, const std::string &frameId, const std::string &odomFrameId, const ros::Time &odomStamp, rtabmap::LaserScan &scan, tf::TransformListener &listener, double waitForTransform, int maxPoints=0, float maxRange=0.0f, bool is2D=false)
LaserScan RTABMAP_EXP laserScan2dFromPointCloud(const pcl::PointCloud< pcl::PointXYZ > &cloud, const Transform &transform=Transform(), bool filterNaNs=true)
void point3fToROS(const cv::Point3f &pt, rtabmap_ros::Point3f &msg)
V uValue(const std::map< K, V > &m, const K &key, const V &defaultValue=V())
std_msgs::Header header
cv::Point2f point2fFromROS(const rtabmap_ros::Point2f &msg)
const std::map< int, Transform > & odomCachePoses() const
void infoToROS(const rtabmap::Statistics &stats, rtabmap_ros::Info &info)
bool isIdentity() const
Transform inverse() const
cv::Mat RTABMAP_EXP uncompressImage(const cv::Mat &bytes)
void setOdomCachePoses(const std::map< int, Transform > &poses)
void addStatistic(const std::string &name, float value)
const std::multimap< int, int > & getWords() const
#define ROS_DEBUG(...)
Eigen::Affine3d toEigen3d() const
const CameraModel & left() const


rtabmap_ros
Author(s): Mathieu Labbe
autogenerated on Tue Jan 24 2023 04:04:40