MsgConversion.cpp
Go to the documentation of this file.
1 /*
2 Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7  * Redistributions of source code must retain the above copyright
8  notice, this list of conditions and the following disclaimer.
9  * Redistributions in binary form must reproduce the above copyright
10  notice, this list of conditions and the following disclaimer in the
11  documentation and/or other materials provided with the distribution.
12  * Neither the name of the Universite de Sherbrooke nor the
13  names of its contributors may be used to endorse or promote products
14  derived from this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
20 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27 
29 
30 #include <opencv2/highgui/highgui.hpp>
31 #include <zlib.h>
32 #include <ros/ros.h>
33 #include <rtabmap/core/util3d.h>
37 #include <rtabmap/utilite/UStl.h>
39 #include <rtabmap/utilite/UTimer.h>
49 
50 namespace rtabmap_conversions {
51 
52 void transformToTF(const rtabmap::Transform & transform, tf::Transform & tfTransform)
53 {
54  if(!transform.isNull())
55  {
56  tf::transformEigenToTF(transform.toEigen3d(), tfTransform);
57  }
58  else
59  {
60  tfTransform = tf::Transform();
61  }
62 }
63 
65 {
66  Eigen::Affine3d eigenTf;
68  return rtabmap::Transform::fromEigen3d(eigenTf);
69 }
70 
71 void transformToGeometryMsg(const rtabmap::Transform & transform, geometry_msgs::Transform & msg)
72 {
73  if(!transform.isNull())
74  {
76 
77  // make sure the quaternion is normalized
78  long double recipNorm = 1.0 / sqrt(msg.rotation.x * msg.rotation.x + msg.rotation.y * msg.rotation.y + msg.rotation.z * msg.rotation.z + msg.rotation.w * msg.rotation.w);
79  msg.rotation.x *= recipNorm;
80  msg.rotation.y *= recipNorm;
81  msg.rotation.z *= recipNorm;
82  msg.rotation.w *= recipNorm;
83  }
84  else
85  {
86  msg = geometry_msgs::Transform();
87  }
88 }
89 
90 
91 rtabmap::Transform transformFromGeometryMsg(const geometry_msgs::Transform & msg)
92 {
93  if(msg.rotation.w == 0 &&
94  msg.rotation.x == 0 &&
95  msg.rotation.y == 0 &&
96  msg.rotation.z ==0)
97  {
98  return rtabmap::Transform();
99  }
100 
101  Eigen::Affine3d tfTransform;
102  tf::transformMsgToEigen(msg, tfTransform);
103  return rtabmap::Transform::fromEigen3d(tfTransform);
104 }
105 
106 void transformToPoseMsg(const rtabmap::Transform & transform, geometry_msgs::Pose & msg)
107 {
108  if(!transform.isNull())
109  {
110  tf::poseEigenToMsg(transform.toEigen3d(), msg);
111  }
112  else
113  {
114  msg = geometry_msgs::Pose();
115  }
116 }
117 
118 rtabmap::Transform transformFromPoseMsg(const geometry_msgs::Pose & msg, bool ignoreRotationIfNotSet)
119 {
120  if(msg.orientation.w == 0 &&
121  msg.orientation.x == 0 &&
122  msg.orientation.y == 0 &&
123  msg.orientation.z == 0)
124  {
125  if(ignoreRotationIfNotSet)
126  {
127  return rtabmap::Transform(msg.position.x, msg.position.y, msg.position.z, 0, 0, 0);
128  }
129  return rtabmap::Transform();
130  }
131  Eigen::Affine3d tfPose;
132  tf::poseMsgToEigen(msg, tfPose);
133  return rtabmap::Transform::fromEigen3d(tfPose);
134 }
135 
136 void toCvCopy(const rtabmap_msgs::RGBDImage & image, cv_bridge::CvImagePtr & rgb, cv_bridge::CvImagePtr & depth)
137 {
138  if(!image.rgb.data.empty())
139  {
140  rgb = cv_bridge::toCvCopy(image.rgb);
141  }
142  else if(!image.rgb_compressed.data.empty())
143  {
144 #ifdef CV_BRIDGE_HYDRO
145  ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");
146 #else
147  rgb = cv_bridge::toCvCopy(image.rgb_compressed);
148 #endif
149  }
150 
151  if(!image.depth.data.empty())
152  {
153  depth = cv_bridge::toCvCopy(image.depth);
154  }
155  else if(!image.depth_compressed.data.empty())
156  {
157  cv_bridge::CvImagePtr ptr = boost::make_shared<cv_bridge::CvImage>();
158  ptr->header = image.depth_compressed.header;
159  ptr->image = rtabmap::uncompressImage(image.depth_compressed.data);
160  ROS_ASSERT(ptr->image.empty() || ptr->image.type() == CV_32FC1 || ptr->image.type() == CV_16UC1);
161  ptr->encoding = ptr->image.empty()?"":ptr->image.type() == CV_32FC1?sensor_msgs::image_encodings::TYPE_32FC1:sensor_msgs::image_encodings::TYPE_16UC1;
162  depth = ptr;
163  }
164 }
165 
166 void toCvShare(const rtabmap_msgs::RGBDImageConstPtr & image, cv_bridge::CvImageConstPtr & rgb, cv_bridge::CvImageConstPtr & depth)
167 {
168  toCvShare(*image, image, rgb, depth);
169 }
170 
171 void toCvShare(const rtabmap_msgs::RGBDImage & image, const boost::shared_ptr<void const>& trackedObject, cv_bridge::CvImageConstPtr & rgb, cv_bridge::CvImageConstPtr & depth)
172 {
173  if(!image.rgb.data.empty())
174  {
175  rgb = cv_bridge::toCvShare(image.rgb, trackedObject);
176  }
177  else if(!image.rgb_compressed.data.empty())
178  {
179 #ifdef CV_BRIDGE_HYDRO
180  ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");
181 #else
182  rgb = cv_bridge::toCvCopy(image.rgb_compressed);
183 #endif
184  }
185 
186  if(!image.depth.data.empty())
187  {
188  depth = cv_bridge::toCvShare(image.depth, trackedObject);
189  }
190  else if(!image.depth_compressed.data.empty())
191  {
192  if(image.depth_compressed.format.compare("jpg")==0)
193  {
194 #ifdef CV_BRIDGE_HYDRO
195  ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");
196 #else
197  depth = cv_bridge::toCvCopy(image.depth_compressed);
198 #endif
199  }
200  else
201  {
202  cv_bridge::CvImagePtr ptr = boost::make_shared<cv_bridge::CvImage>();
203  ptr->header = image.depth_compressed.header;
204  ptr->image = rtabmap::uncompressImage(image.depth_compressed.data);
205  ROS_ASSERT(ptr->image.empty() || ptr->image.type() == CV_32FC1 || ptr->image.type() == CV_16UC1);
206  ptr->encoding = ptr->image.empty()?"":ptr->image.type() == CV_32FC1?sensor_msgs::image_encodings::TYPE_32FC1:sensor_msgs::image_encodings::TYPE_16UC1;
207  depth = ptr;
208  }
209  }
210 }
211 
212 void rgbdImageToROS(const rtabmap::SensorData & data, rtabmap_msgs::RGBDImage & msg, const std::string & sensorFrameId)
213 {
214  std_msgs::Header header;
215  header.frame_id = sensorFrameId;
216  header.stamp = ros::Time(data.stamp());
217  rtabmap::Transform localTransform;
218  if(data.cameraModels().size()>1)
219  {
220  UERROR("Cannot convert multi-camera data to rgbd image");
221  return;
222  }
223  if(data.cameraModels().size() == 1)
224  {
225  //rgb+depth
226  rtabmap_conversions::cameraModelToROS(data.cameraModels().front(), msg.rgb_camera_info);
227  msg.rgb_camera_info.header = header;
228  localTransform = data.cameraModels().front().localTransform();
229  }
230  else if(data.stereoCameraModels().size() == 1)
231  {
232  //stereo
233  rtabmap_conversions::cameraModelToROS(data.stereoCameraModels()[0].left(), msg.rgb_camera_info);
234  rtabmap_conversions::cameraModelToROS(data.stereoCameraModels()[0].right(), msg.depth_camera_info);
235  msg.rgb_camera_info.header = header;
236  msg.depth_camera_info.header = header;
237  localTransform = data.stereoCameraModels()[0].localTransform();
238  }
239 
240  if(!data.imageRaw().empty())
241  {
242  cv_bridge::CvImage cvImg;
243  cvImg.header = header;
244  cvImg.image = data.imageRaw();
245  UASSERT(data.imageRaw().type()==CV_8UC1 || data.imageRaw().type()==CV_8UC3);
247  cvImg.toImageMsg(msg.rgb);
248  }
249  else if(!data.imageCompressed().empty())
250  {
251  ROS_ERROR("Conversion of compressed SensorData to RGBDImage is not implemented...");
252  }
253 
254  if(!data.depthOrRightRaw().empty())
255  {
256  cv_bridge::CvImage cvDepth;
257  cvDepth.header = header;
258  cvDepth.image = data.depthOrRightRaw();
259  UASSERT(data.depthOrRightRaw().type()==CV_8UC1 || data.depthOrRightRaw().type()==CV_16UC1 || data.depthOrRightRaw().type()==CV_32FC1);
260  cvDepth.encoding = data.depthOrRightRaw().type()==CV_8UC1?sensor_msgs::image_encodings::MONO8:data.depthOrRightRaw().type()==CV_16UC1?sensor_msgs::image_encodings::TYPE_16UC1:sensor_msgs::image_encodings::TYPE_32FC1;
261  cvDepth.toImageMsg(msg.depth);
262  }
263  else if(!data.depthOrRightCompressed().empty())
264  {
265  ROS_ERROR("Conversion of compressed SensorData to RGBDImage is not implemented...");
266  }
267 
268  //convert features
269  if(!data.keypoints().empty())
270  {
271  rtabmap_conversions::keypointsToROS(data.keypoints(), msg.key_points);
272  }
273  if(!data.keypoints3D().empty())
274  {
275  rtabmap_conversions::points3fToROS(data.keypoints3D(), msg.points, localTransform.inverse());
276  }
277  if(!data.descriptors().empty())
278  {
279  msg.descriptors = rtabmap::compressData(data.descriptors());
280  }
281  if(!data.globalDescriptors().empty())
282  {
283  rtabmap_conversions::globalDescriptorToROS(data.globalDescriptors().front(), msg.global_descriptor);
284  msg.global_descriptor.header = header;
285  }
286 }
287 
288 rtabmap::SensorData rgbdImageFromROS(const rtabmap_msgs::RGBDImageConstPtr & image)
289 {
293  toCvShare(image, imageMsg, depthMsg);
294 
295  rtabmap::StereoCameraModel stereoModel = stereoCameraModelFromROS(image->rgb_camera_info, image->depth_camera_info, rtabmap::Transform::getIdentity());
296 
297  if(stereoModel.isValidForProjection())
298  {
299  cv_bridge::CvImageConstPtr imageRectLeft = imageMsg;
300  cv_bridge::CvImageConstPtr imageRectRight = depthMsg;
301  if(!(imageRectLeft->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) ==0 ||
302  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
303  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
304  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
305  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0) ||
306  !(imageRectRight->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) ==0 ||
307  imageRectRight->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
308  imageRectRight->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
309  imageRectRight->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
310  imageRectRight->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0))
311  {
312  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8 (mono8 recommended), received types are %s (left) and %s (right)",
313  imageRectLeft->encoding.c_str(), imageRectRight->encoding.c_str());
314  return data;
315  }
316 
317  if(!imageRectLeft->image.empty() && !imageRectRight->image.empty())
318  {
319  if(stereoModel.baseline() > 10.0)
320  {
321  static bool shown = false;
322  if(!shown)
323  {
324  ROS_WARN("Detected baseline (%f m) is quite large! Is your "
325  "right camera_info P(0,3) correctly set? Note that "
326  "baseline=-P(0,3)/P(0,0). This warning is printed only once.",
327  stereoModel.baseline());
328  shown = true;
329  }
330  }
331 
332  cv::Mat left, right;
333  if(imageRectLeft->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
334  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0)
335  {
336  left = imageRectLeft->image;
337  }
338  else if(imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
339  {
340  left = cv_bridge::cvtColor(imageRectLeft, "mono8")->image;
341  }
342  else
343  {
344  left = cv_bridge::cvtColor(imageRectLeft, "bgr8")->image;
345  }
346  if(imageRectRight->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
347  imageRectRight->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0)
348  {
349  right = imageRectRight->image;
350  }
351  else
352  {
353  right = cv_bridge::cvtColor(imageRectRight, "mono8")->image;
354  }
355 
356  //
357 
359  left,
360  right,
361  stereoModel,
362  0,
363  rtabmap_conversions::timestampFromROS(image->header.stamp));
364  }
365  else
366  {
367  ROS_WARN("Odom: input images empty?!?");
368  }
369  }
370  else //depth
371  {
372  ros::Time higherStamp;
373  int imageWidth = imageMsg->image.cols;
374  int imageHeight = imageMsg->image.rows;
375  int depthWidth = depthMsg->image.cols;
376  int depthHeight = depthMsg->image.rows;
377 
378  UASSERT_MSG(
379  imageWidth/depthWidth == imageHeight/depthHeight,
380  uFormat("rgb=%dx%d depth=%dx%d", imageWidth, imageHeight, depthWidth, depthHeight).c_str());
381 
382  cv::Mat rgb;
383  cv::Mat depth;
384  rtabmap::CameraModel cameraModels;
385 
386  if(!(imageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) ==0 ||
387  imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
388  imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
389  imageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
390  imageMsg->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
391  imageMsg->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
392  imageMsg->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0 ||
393  imageMsg->encoding.compare(sensor_msgs::image_encodings::BAYER_GRBG8) == 0) ||
394  !(depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1) == 0 ||
395  depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1) == 0 ||
396  depthMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0))
397  {
398  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8 and "
399  "image_depth=32FC1,16UC1,mono16. Current rgb=%s and depth=%s",
400  imageMsg->encoding.c_str(),
401  depthMsg->encoding.c_str());
402  return data;
403  }
404 
405  cv_bridge::CvImageConstPtr ptrImage = imageMsg;
406  if(imageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1)==0 ||
407  imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
408  imageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0)
409  {
410  // do nothing
411  }
412  else if(imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
413  {
414  ptrImage = cv_bridge::cvtColor(imageMsg, "mono8");
415  }
416  else
417  {
418  ptrImage = cv_bridge::cvtColor(imageMsg, "bgr8");
419  }
420 
421  cv_bridge::CvImageConstPtr ptrDepth = depthMsg;
423  ptrImage->image,
424  ptrDepth->image,
425  rtabmap_conversions::cameraModelFromROS(image->rgb_camera_info),
426  0,
427  rtabmap_conversions::timestampFromROS(image->header.stamp));
428  }
429 
430  return data;
431 }
432 
433 void compressedMatToBytes(const cv::Mat & compressed, std::vector<unsigned char> & bytes)
434 {
435  UASSERT(compressed.empty() || compressed.type() == CV_8UC1);
436  bytes.clear();
437  if(!compressed.empty())
438  {
439  bytes.resize(compressed.cols * compressed.rows);
440  memcpy(bytes.data(), compressed.data, bytes.size());
441  }
442 }
443 
444 cv::Mat compressedMatFromBytes(const std::vector<unsigned char> & bytes, bool copy)
445 {
446  cv::Mat out;
447  if(bytes.size())
448  {
449  out = cv::Mat(1, bytes.size(), CV_8UC1, (void*)bytes.data());
450  if(copy)
451  {
452  out = out.clone();
453  }
454  }
455  return out;
456 }
457 
458 void infoFromROS(const rtabmap_msgs::Info & info, rtabmap::Statistics & stat)
459 {
460  stat.setExtended(true); // Extended
461 
462  // rtabmap_msgs::Info
463  stat.setRefImageId(info.refId);
464  stat.setLoopClosureId(info.loopClosureId);
465  stat.setProximityDetectionId(info.proximityDetectionId);
466  stat.setStamp(info.header.stamp.toSec());
467 
469 
470  //wmState
471  stat.setWmState(info.wmState);
472 
473  //Posterior, likelihood, childCount
474  std::map<int, float> mapIntFloat;
475  for(unsigned int i=0; i<info.posteriorKeys.size() && i<info.posteriorValues.size(); ++i)
476  {
477  mapIntFloat.insert(std::pair<int, float>(info.posteriorKeys.at(i), info.posteriorValues.at(i)));
478  }
479  stat.setPosterior(mapIntFloat);
480 
481  mapIntFloat.clear();
482  for(unsigned int i=0; i<info.likelihoodKeys.size() && i<info.likelihoodValues.size(); ++i)
483  {
484  mapIntFloat.insert(std::pair<int, float>(info.likelihoodKeys.at(i), info.likelihoodValues.at(i)));
485  }
486  stat.setLikelihood(mapIntFloat);
487 
488  mapIntFloat.clear();
489  for(unsigned int i=0; i<info.rawLikelihoodKeys.size() && i<info.rawLikelihoodValues.size(); ++i)
490  {
491  mapIntFloat.insert(std::pair<int, float>(info.rawLikelihoodKeys.at(i), info.rawLikelihoodValues.at(i)));
492  }
493  stat.setRawLikelihood(mapIntFloat);
494 
495  std::map<int, int> mapIntInt;
496  for(unsigned int i=0; i<info.weightsKeys.size() && i<info.weightsValues.size(); ++i)
497  {
498  mapIntInt.insert(std::pair<int, int>(info.weightsKeys.at(i), info.weightsValues.at(i)));
499  }
500  stat.setWeights(mapIntInt);
501 
502  std::map<int, std::string> mapIntStr;
503  for(unsigned int i=0; i<info.labelsKeys.size() && i<info.labelsValues.size(); ++i)
504  {
505  mapIntStr.insert(std::pair<int, std::string>(info.labelsKeys.at(i), info.labelsValues.at(i)));
506  }
507  stat.setLabels(mapIntStr);
508 
509  stat.setLocalPath(info.localPath);
510  stat.setCurrentGoalId(info.currentGoalId);
511 
512  std::map<int, rtabmap::Transform> poses;
513  std::multimap<int, rtabmap::Link> constraints;
515  mapGraphFromROS(info.odom_cache, poses, constraints, t);
516  stat.setOdomCachePoses(poses);
517  stat.setOdomCacheConstraints(constraints);
518 
519  // Statistics data
520  for(unsigned int i=0; i<info.statsKeys.size() && i<info.statsValues.size(); i++)
521  {
522  stat.addStatistic(info.statsKeys.at(i), info.statsValues.at(i));
523  }
524 }
525 
526 void infoToROS(const rtabmap::Statistics & stats, rtabmap_msgs::Info & info)
527 {
528  info.refId = stats.refImageId();
529  info.loopClosureId = stats.loopClosureId();
530  info.proximityDetectionId = stats.proximityDetectionId();
531  info.landmarkId = static_cast<int>(uValue(stats.data(), rtabmap::Statistics::kLoopLandmark_detected(), 0.0f));
532 
533  rtabmap_conversions::transformToGeometryMsg(stats.loopClosureTransform(), info.loopClosureTransform);
534 
535  // Detailed info
536  if(stats.extended())
537  {
538  //wmState
539  info.wmState = stats.wmState();
540 
541  //Posterior, likelihood, childCount
542  info.posteriorKeys = uKeys(stats.posterior());
543  info.posteriorValues = uValues(stats.posterior());
544  info.likelihoodKeys = uKeys(stats.likelihood());
545  info.likelihoodValues = uValues(stats.likelihood());
546  info.rawLikelihoodKeys = uKeys(stats.rawLikelihood());
547  info.rawLikelihoodValues = uValues(stats.rawLikelihood());
548  info.weightsKeys = uKeys(stats.weights());
549  info.weightsValues = uValues(stats.weights());
550  info.labelsKeys = uKeys(stats.labels());
551  info.labelsValues = uValues(stats.labels());
552  info.localPath = stats.localPath();
553  info.currentGoalId = stats.currentGoalId();
554  mapGraphToROS(stats.odomCachePoses(), stats.odomCacheConstraints(), stats.mapCorrection(), info.odom_cache);
555 
556  // Statistics data
557  info.statsKeys = uKeys(stats.data());
558  info.statsValues = uValues(stats.data());
559  }
560 }
561 
562 rtabmap::Link linkFromROS(const rtabmap_msgs::Link & msg)
563 {
564  cv::Mat information = cv::Mat(6,6,CV_64FC1, (void*)msg.information.data()).clone();
565  return rtabmap::Link(msg.fromId, msg.toId, (rtabmap::Link::Type)msg.type, transformFromGeometryMsg(msg.transform), information);
566 }
567 
568 void linkToROS(const rtabmap::Link & link, rtabmap_msgs::Link & msg)
569 {
570  msg.fromId = link.from();
571  msg.toId = link.to();
572  msg.type = link.type();
573  if(link.infMatrix().type() == CV_64FC1 && link.infMatrix().cols == 6 && link.infMatrix().rows == 6)
574  {
575  memcpy(msg.information.data(), link.infMatrix().data, 36*sizeof(double));
576  }
577  transformToGeometryMsg(link.transform(), msg.transform);
578 }
579 
580 cv::KeyPoint keypointFromROS(const rtabmap_msgs::KeyPoint & msg)
581 {
582  return cv::KeyPoint(msg.pt.x, msg.pt.y, msg.size, msg.angle, msg.response, msg.octave, msg.class_id);
583 }
584 
585 void keypointToROS(const cv::KeyPoint & kpt, rtabmap_msgs::KeyPoint & msg)
586 {
587  msg.angle = kpt.angle;
588  msg.class_id = kpt.class_id;
589  msg.octave = kpt.octave;
590  msg.pt.x = kpt.pt.x;
591  msg.pt.y = kpt.pt.y;
592  msg.response = kpt.response;
593  msg.size = kpt.size;
594 }
595 
596 std::vector<cv::KeyPoint> keypointsFromROS(const std::vector<rtabmap_msgs::KeyPoint> & msg)
597 {
598  std::vector<cv::KeyPoint> v(msg.size());
599  for(unsigned int i=0; i<msg.size(); ++i)
600  {
601  v[i] = keypointFromROS(msg[i]);
602  }
603  return v;
604 }
605 
606 void keypointsFromROS(const std::vector<rtabmap_msgs::KeyPoint> & msg, std::vector<cv::KeyPoint> & kpts, int xShift)
607 {
608  size_t outCurrentIndex = kpts.size();
609  kpts.resize(kpts.size()+msg.size());
610  for(unsigned int i=0; i<msg.size(); ++i)
611  {
612  kpts[outCurrentIndex+i] = keypointFromROS(msg[i]);
613  kpts[outCurrentIndex+i].pt.x += xShift;
614  }
615 }
616 
617 void keypointsToROS(const std::vector<cv::KeyPoint> & kpts, std::vector<rtabmap_msgs::KeyPoint> & msg)
618 {
619  msg.resize(kpts.size());
620  for(unsigned int i=0; i<msg.size(); ++i)
621  {
622  keypointToROS(kpts[i], msg[i]);
623  }
624 }
625 
626 rtabmap::GlobalDescriptor globalDescriptorFromROS(const rtabmap_msgs::GlobalDescriptor & msg)
627 {
629 }
630 
631 void globalDescriptorToROS(const rtabmap::GlobalDescriptor & desc, rtabmap_msgs::GlobalDescriptor & msg)
632 {
633  msg.type = desc.type();
634  msg.info = rtabmap::compressData(desc.info());
635  msg.data = rtabmap::compressData(desc.data());
636 }
637 
638 std::vector<rtabmap::GlobalDescriptor> globalDescriptorsFromROS(const std::vector<rtabmap_msgs::GlobalDescriptor> & msg)
639 {
640  if(!msg.empty())
641  {
642  std::vector<rtabmap::GlobalDescriptor> v(msg.size());
643  for(unsigned int i=0; i<msg.size(); ++i)
644  {
646  }
647  return v;
648  }
649  return std::vector<rtabmap::GlobalDescriptor>();
650 }
651 
652 void globalDescriptorsToROS(const std::vector<rtabmap::GlobalDescriptor> & desc, std::vector<rtabmap_msgs::GlobalDescriptor> & msg)
653 {
654  msg.clear();
655  if(!desc.empty())
656  {
657  msg.resize(desc.size());
658  for(unsigned int i=0; i<msg.size(); ++i)
659  {
661  }
662  }
663 }
664 
665 rtabmap::EnvSensor envSensorFromROS(const rtabmap_msgs::EnvSensor & msg)
666 {
667  return rtabmap::EnvSensor((rtabmap::EnvSensor::Type)msg.type, msg.value, timestampFromROS(msg.header.stamp));
668 }
669 
670 void envSensorToROS(const rtabmap::EnvSensor & sensor, rtabmap_msgs::EnvSensor & msg)
671 {
672  msg.type = sensor.type();
673  msg.value = sensor.value();
674  msg.header.stamp = ros::Time(sensor.stamp());
675 }
676 
677 rtabmap::EnvSensors envSensorsFromROS(const std::vector<rtabmap_msgs::EnvSensor> & msg)
678 {
680  if(!msg.empty())
681  {
682  for(unsigned int i=0; i<msg.size(); ++i)
683  {
685  v.insert(std::make_pair(s.type(), envSensorFromROS(msg[i])));
686  }
687  }
688  return v;
689 }
690 
691 void envSensorsToROS(const rtabmap::EnvSensors & sensors, std::vector<rtabmap_msgs::EnvSensor> & msg)
692 {
693  msg.clear();
694  if(!sensors.empty())
695  {
696  msg.resize(sensors.size());
697  int i=0;
698  for(rtabmap::EnvSensors::const_iterator iter=sensors.begin(); iter!=sensors.end(); ++iter)
699  {
700  envSensorToROS(iter->second, msg[i++]);
701  }
702  }
703 }
704 
705 cv::Point2f point2fFromROS(const rtabmap_msgs::Point2f & msg)
706 {
707  return cv::Point2f(msg.x, msg.y);
708 }
709 
710 void point2fToROS(const cv::Point2f & kpt, rtabmap_msgs::Point2f & msg)
711 {
712  msg.x = kpt.x;
713  msg.y = kpt.y;
714 }
715 
716 std::vector<cv::Point2f> points2fFromROS(const std::vector<rtabmap_msgs::Point2f> & msg)
717 {
718  std::vector<cv::Point2f> v(msg.size());
719  for(unsigned int i=0; i<msg.size(); ++i)
720  {
721  v[i] = point2fFromROS(msg[i]);
722  }
723  return v;
724 }
725 
726 void points2fToROS(const std::vector<cv::Point2f> & kpts, std::vector<rtabmap_msgs::Point2f> & msg)
727 {
728  msg.resize(kpts.size());
729  for(unsigned int i=0; i<msg.size(); ++i)
730  {
731  point2fToROS(kpts[i], msg[i]);
732  }
733 }
734 
735 cv::Point3f point3fFromROS(const rtabmap_msgs::Point3f & msg)
736 {
737  return cv::Point3f(msg.x, msg.y, msg.z);
738 }
739 
740 void point3fToROS(const cv::Point3f & pt, rtabmap_msgs::Point3f & msg)
741 {
742  msg.x = pt.x;
743  msg.y = pt.y;
744  msg.z = pt.z;
745 }
746 
747 std::vector<cv::Point3f> points3fFromROS(const std::vector<rtabmap_msgs::Point3f> & msg, const rtabmap::Transform & transform)
748 {
749  bool transformPoints = !transform.isNull() && !transform.isIdentity();
750  std::vector<cv::Point3f> v(msg.size());
751  for(unsigned int i=0; i<msg.size(); ++i)
752  {
753  v[i] = point3fFromROS(msg[i]);
754  if(transformPoints)
755  {
757  }
758  }
759  return v;
760 }
761 
762 void points3fFromROS(const std::vector<rtabmap_msgs::Point3f> & msg, std::vector<cv::Point3f> & points3, const rtabmap::Transform & transform)
763 {
764  size_t currentIndex = points3.size();
765  points3.resize(points3.size()+msg.size());
766  bool transformPoint = !transform.isNull() && !transform.isIdentity();
767  for(unsigned int i=0; i<msg.size(); ++i)
768  {
769  points3[currentIndex+i] = point3fFromROS(msg[i]);
770  if(transformPoint)
771  {
772  points3[currentIndex+i] = rtabmap::util3d::transformPoint(points3[currentIndex+i], transform);
773  }
774  }
775 }
776 
777 void points3fToROS(const std::vector<cv::Point3f> & pts, std::vector<rtabmap_msgs::Point3f> & msg, const rtabmap::Transform & transform)
778 {
779  msg.resize(pts.size());
780  bool transformPoints = !transform.isNull() && !transform.isIdentity();
781  for(unsigned int i=0; i<msg.size(); ++i)
782  {
783  if(transformPoints)
784  {
785  cv::Point3f pt = rtabmap::util3d::transformPoint(pts[i], transform);
786  point3fToROS(pt, msg[i]);
787  }
788  else
789  {
790  point3fToROS(pts[i], msg[i]);
791  }
792  }
793 }
794 
796  const sensor_msgs::CameraInfo & camInfo,
797  const rtabmap::Transform & localTransform)
798 {
799  cv:: Mat K;
800  UASSERT(camInfo.K.empty() || camInfo.K.size() == 9);
801  if(!camInfo.K.empty())
802  {
803  K = cv::Mat(3, 3, CV_64FC1);
804  memcpy(K.data, camInfo.K.elems, 9*sizeof(double));
805  }
806 
807  cv::Mat D;
808  if(camInfo.D.size())
809  {
810  if(camInfo.D.size()>=4 &&
811  (uStrContains(camInfo.distortion_model, "fisheye") ||
812  uStrContains(camInfo.distortion_model, "equidistant") ||
813  uStrContains(camInfo.distortion_model, "Kannala Brandt4")))
814  {
815  D = cv::Mat::zeros(1, 6, CV_64FC1);
816  D.at<double>(0,0) = camInfo.D[0];
817  D.at<double>(0,1) = camInfo.D[1];
818  D.at<double>(0,4) = camInfo.D[2];
819  D.at<double>(0,5) = camInfo.D[3];
820  }
821  else if(camInfo.D.size()>8)
822  {
823  bool zerosAfter8 = true;
824  for(size_t i=8; i<camInfo.D.size() && zerosAfter8; ++i)
825  {
826  if(camInfo.D[i] != 0.0)
827  {
828  zerosAfter8 = false;
829  }
830  }
831  static bool warned = false;
832  if(!zerosAfter8 && !warned)
833  {
834  ROS_WARN("Camera info conversion: Distortion model is larger than 8, coefficients after 8 are ignored. This message is only shown once.");
835  warned = true;
836  }
837  D = cv::Mat(1, 8, CV_64FC1);
838  memcpy(D.data, camInfo.D.data(), D.cols*sizeof(double));
839  }
840  else
841  {
842  D = cv::Mat(1, camInfo.D.size(), CV_64FC1);
843  memcpy(D.data, camInfo.D.data(), D.cols*sizeof(double));
844  }
845  }
846 
847  cv:: Mat R;
848  UASSERT(camInfo.R.empty() || camInfo.R.size() == 9);
849  if(!camInfo.R.empty())
850  {
851  R = cv::Mat(3, 3, CV_64FC1);
852  memcpy(R.data, camInfo.R.elems, 9*sizeof(double));
853  }
854 
855  cv:: Mat P;
856  UASSERT(camInfo.P.empty() || camInfo.P.size() == 12);
857  if(!camInfo.P.empty())
858  {
859  P = cv::Mat(3, 4, CV_64FC1);
860  memcpy(P.data, camInfo.P.elems, 12*sizeof(double));
861  }
862 
863  return rtabmap::CameraModel(
864  "ros",
865  cv::Size(camInfo.width, camInfo.height),
866  K, D, R, P,
867  localTransform);
868 }
870  const rtabmap::CameraModel & model,
871  sensor_msgs::CameraInfo & camInfo)
872 {
873  UASSERT(model.K_raw().empty() || model.K_raw().total() == 9);
874  if(model.K_raw().empty())
875  {
876  memset(camInfo.K.elems, 0.0, 9*sizeof(double));
877  }
878  else
879  {
880  memcpy(camInfo.K.elems, model.K_raw().data, 9*sizeof(double));
881  }
882 
883  if(model.D_raw().total() == 6)
884  {
885  camInfo.D = std::vector<double>(4);
886  camInfo.D[0] = model.D_raw().at<double>(0,0);
887  camInfo.D[1] = model.D_raw().at<double>(0,1);
888  camInfo.D[2] = model.D_raw().at<double>(0,4);
889  camInfo.D[3] = model.D_raw().at<double>(0,5);
890  camInfo.distortion_model = "equidistant"; // fisheye
891  }
892  else
893  {
894  camInfo.D = std::vector<double>(model.D_raw().cols);
895  memcpy(camInfo.D.data(), model.D_raw().data, model.D_raw().cols*sizeof(double));
896  if(camInfo.D.size() > 5)
897  {
898  camInfo.distortion_model = "rational_polynomial";
899  }
900  else
901  {
902  camInfo.distortion_model = "plumb_bob";
903  }
904  }
905 
906  UASSERT(model.R().empty() || model.R().total() == 9);
907  if(model.R().empty())
908  {
909  memset(camInfo.R.elems, 0.0, 9*sizeof(double));
910  }
911  else
912  {
913  memcpy(camInfo.R.elems, model.R().data, 9*sizeof(double));
914  }
915 
916  UASSERT(model.P().empty() || model.P().total() == 12);
917  if(model.P().empty())
918  {
919  memset(camInfo.P.elems, 0.0, 12*sizeof(double));
920  }
921  else
922  {
923  memcpy(camInfo.P.elems, model.P().data, 12*sizeof(double));
924  }
925 
926  camInfo.binning_x = 1;
927  camInfo.binning_y = 1;
928  camInfo.roi.width = model.imageWidth();
929  camInfo.roi.height = model.imageHeight();
930 
931  camInfo.width = model.imageWidth();
932  camInfo.height = model.imageHeight();
933 }
935  const sensor_msgs::CameraInfo & leftCamInfo,
936  const sensor_msgs::CameraInfo & rightCamInfo,
937  const rtabmap::Transform & localTransform,
938  const rtabmap::Transform & stereoTransform)
939 {
941  "ros",
942  cameraModelFromROS(leftCamInfo, localTransform),
943  cameraModelFromROS(rightCamInfo, localTransform),
944  stereoTransform);
945 }
947  const sensor_msgs::CameraInfo & leftCamInfo,
948  const sensor_msgs::CameraInfo & rightCamInfo,
949  const std::string & frameId,
950  tf::TransformListener & listener,
951  double waitForTransform)
952 {
953  rtabmap::Transform localTransform = getTransform(
954  frameId,
955  leftCamInfo.header.frame_id,
956  leftCamInfo.header.stamp,
957  listener,
958  waitForTransform);
959  if(localTransform.isNull())
960  {
962  }
963 
964  rtabmap::Transform stereoTransform = getTransform(
965  leftCamInfo.header.frame_id,
966  rightCamInfo.header.frame_id,
967  leftCamInfo.header.stamp,
968  listener,
969  waitForTransform);
970  if(stereoTransform.isNull())
971  {
973  }
974  return stereoCameraModelFromROS(leftCamInfo, rightCamInfo, localTransform, stereoTransform);
975 }
976 
// Deserialize a rtabmap_msgs::MapData message: fills the optimized poses,
// links and map->odom correction (via mapGraphFromROS) plus the full node
// signatures. Existing entries in the output containers are kept (insert only).
// NOTE(review): the opening "void mapDataFromROS(" signature line was lost by
// the doc-extraction tool.
978  const rtabmap_msgs::MapData & msg,
979  std::map<int, rtabmap::Transform> & poses,
980  std::multimap<int, rtabmap::Link> & links,
981  std::map<int, rtabmap::Signature> & signatures,
982  rtabmap::Transform & mapToOdom)
983 {
984  //optimized graph
985  mapGraphFromROS(msg.graph, poses, links, mapToOdom);
986 
987  //Data
988  for(unsigned int i=0; i<msg.nodes.size(); ++i)
989  {
990  signatures.insert(std::make_pair(msg.nodes[i].id, nodeFromROS(msg.nodes[i])));
991  }
992 }
994  const std::map<int, rtabmap::Transform> & poses,
995  const std::multimap<int, rtabmap::Link> & links,
996  const std::map<int, rtabmap::Signature> & signatures,
997  const rtabmap::Transform & mapToOdom,
998  rtabmap_msgs::MapData & msg)
999 {
1000  //Optimized graph
1001  mapGraphToROS(poses, links, mapToOdom, msg.graph);
1002 
1003  //Data
1004  msg.nodes.resize(signatures.size());
1005  int index=0;
1006  for(std::multimap<int, rtabmap::Signature>::const_iterator iter = signatures.begin();
1007  iter!=signatures.end();
1008  ++iter)
1009  {
1010  nodeToROS(iter->second, msg.nodes[index++]);
1011  }
1012 }
1013 
// Deserialize a rtabmap_msgs::MapGraph message into poses, links and the
// map->odom correction. Outputs are inserted into (not cleared).
// NOTE(review): the opening "void mapGraphFromROS(" signature line and one
// line inside the links loop (embedded line 1028) were lost by the
// doc-extraction tool.
1015  const rtabmap_msgs::MapGraph & msg,
1016  std::map<int, rtabmap::Transform> & poses,
1017  std::multimap<int, rtabmap::Link> & links,
1018  rtabmap::Transform & mapToOdom)
1019 {
1020  //optimized graph
// Pose ids and poses are parallel arrays in the message.
1021  UASSERT(msg.posesId.size() == msg.poses.size());
1022  for(unsigned int i=0; i<msg.posesId.size(); ++i)
1023  {
1024  poses.insert(std::make_pair(msg.posesId[i], rtabmap_conversions::transformFromPoseMsg(msg.poses[i])));
1025  }
1026  for(unsigned int i=0; i<msg.links.size(); ++i)
1027  {
// Links are keyed by their "from" node id; multimap allows several per node.
1029  links.insert(std::make_pair(msg.links[i].fromId, linkFromROS(msg.links[i])));
1030  }
1031  mapToOdom = transformFromGeometryMsg(msg.mapToOdom);
1032 }
// Serialize poses, links and the map->odom correction into a
// rtabmap_msgs::MapGraph message (posesId/poses are parallel arrays).
// NOTE(review): the opening "void mapGraphToROS(" signature line was lost by
// the doc-extraction tool.
1034  const std::map<int, rtabmap::Transform> & poses,
1035  const std::multimap<int, rtabmap::Link> & links,
1036  const rtabmap::Transform & mapToOdom,
1037  rtabmap_msgs::MapGraph & msg)
1038 {
1039  //Optimized graph
1040  msg.posesId.resize(poses.size());
1041  msg.poses.resize(poses.size());
1042  int index = 0;
1043  for(std::map<int, rtabmap::Transform>::const_iterator iter = poses.begin();
1044  iter != poses.end();
1045  ++iter)
1046  {
1047  msg.posesId[index] = iter->first;
1048  transformToPoseMsg(iter->second, msg.poses[index]);
1049  ++index;
1050  }
1051 
1052  msg.links.resize(links.size());
1053  index=0;
1054  for(std::multimap<int, rtabmap::Link>::const_iterator iter = links.begin();
1055  iter!=links.end();
1056  ++iter)
1057  {
// The link's fromId/toId are serialized by linkToROS itself.
1058  linkToROS(iter->second, msg.links[index++]);
1059  }
1060 
1061  transformToGeometryMsg(mapToOdom, msg.mapToOdom);
1062 }
1063 
// Deserialize a rtabmap_msgs::SensorData message into a rtabmap::SensorData:
// camera calibrations (stereo or multi-camera), raw and compressed images,
// laser scans, features, descriptors, occupancy grid, GPS and IMU.
// NOTE(review): the "rtabmap::SensorData s(" constructor line (embedded line
// 1066) and the laserScanFromPointCloud argument line (embedded line 1214)
// were lost by the doc-extraction tool.
1064 rtabmap::SensorData sensorDataFromROS(const rtabmap_msgs::SensorData & msg)
1065 {
1067  cv::Mat(),
1068  msg.header.seq,
1069  msg.header.stamp.toSec(),
1070  compressedMatFromBytes(msg.user_data));
1071 
// A non-empty right_camera_info marks the message as stereo; otherwise the
// left_camera_info array describes a (possibly multi-) RGB-D camera setup.
1072  std::vector<rtabmap::StereoCameraModel> stereoModels;
1073  std::vector<rtabmap::CameraModel> models;
1074  bool isStereo = !msg.right_camera_info.empty();
1075  if(isStereo)
1076  {
1077  // stereo model
1078  if(msg.left_camera_info.size() == msg.right_camera_info.size() &&
1079  msg.local_transform.size() == msg.right_camera_info.size())
1080  {
1081  for(unsigned int i=0; i<msg.right_camera_info.size(); ++i)
1082  {
1083  stereoModels.push_back(stereoCameraModelFromROS(
1084  msg.left_camera_info[i],
1085  msg.right_camera_info[i],
1086  transformFromGeometryMsg(msg.local_transform[i])
1087  ));
1088  }
1089  }
1090  }
1091  else
1092  {
1093  // multi-cameras model
1094  if(msg.left_camera_info.size() &&
1095  msg.local_transform.size() == msg.left_camera_info.size())
1096  {
1097  for(unsigned int i=0; i<msg.left_camera_info.size(); ++i)
1098  {
1099  models.push_back(cameraModelFromROS(
1100  msg.left_camera_info[i],
1101  transformFromGeometryMsg(msg.local_transform[i])));
1102  }
1103  }
1104  }
1105 
1106  // Image data
// Raw (uncompressed) left/right images are optional; unsupported encodings
// are reported and the raw image is simply skipped.
1107  cv::Mat left, right;
1108  if(!msg.left.data.empty())
1109  {
1110  boost::shared_ptr<void const> trackedObject;
1111  cv_bridge::CvImageConstPtr leftRawPtr = cv_bridge::toCvShare(msg.left, trackedObject);
1112 
// NOTE(review): the last comparison below is missing "== 0", so a non-BAYER
// encoding (compare() != 0) satisfies the whole condition — presumably a
// pre-existing upstream quirk; verify against the original source.
1113  if(!(leftRawPtr->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) ==0 ||
1114  leftRawPtr->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
1115  leftRawPtr->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
1116  leftRawPtr->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
1117  leftRawPtr->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
1118  leftRawPtr->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
1119  leftRawPtr->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0 ||
1120  leftRawPtr->encoding.compare(sensor_msgs::image_encodings::BAYER_GRBG8)))
1121  {
1122  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8 (mono8 recommended), received type is %s. Will return data without left/rgb raw image.",
1123  leftRawPtr->encoding.c_str())
1124  }
1125  else
1126  {
// Grayscale images are kept as-is, mono16 is downscaled to mono8, everything
// else is converted to bgr8.
1127  if(leftRawPtr->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
1128  leftRawPtr->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0)
1129  {
1130  left = leftRawPtr->image.clone();
1131  }
1132  else if(leftRawPtr->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
1133  {
1134  left = cv_bridge::cvtColor(leftRawPtr, "mono8")->image;
1135  }
1136  else
1137  {
1138  left = cv_bridge::cvtColor(leftRawPtr, "bgr8")->image;
1139  }
1140  }
1141  }
1142  if(!msg.right.data.empty())
1143  {
1144  boost::shared_ptr<void const> trackedObject;
1145  cv_bridge::CvImageConstPtr rightRawPtr = cv_bridge::toCvShare(msg.right, trackedObject);
1146 
1147  if(!(rightRawPtr->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) ==0 ||
1148  rightRawPtr->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
1149  rightRawPtr->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
1150  rightRawPtr->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
1151  rightRawPtr->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
1152  rightRawPtr->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1) == 0 ||
1153  rightRawPtr->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1) == 0))
1154  {
1155  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,32FC1,16UC1, received type is %s. Will return data without right/depth raw image.",
1156  rightRawPtr->encoding.c_str());
1157  }
1158  else
1159  {
// Depth formats (mono16/16UC1/32FC1) are only kept as-is for non-stereo data;
// for stereo, the right image is converted to mono8.
1160  if(rightRawPtr->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
1161  rightRawPtr->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
1162  (!isStereo &&
1163  (rightRawPtr->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0||
1164  rightRawPtr->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1) == 0 ||
1165  rightRawPtr->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1) == 0)))
1166  {
1167  right = rightRawPtr->image.clone();
1168  }
1169  else
1170  {
1171  right = cv_bridge::cvtColor(rightRawPtr, "mono8")->image;
1172  }
1173  }
1174  }
1175 
// Compressed images are always set; raw images override them (keepRaw=false
// on the second call avoids re-compressing).
1176  if(isStereo)
1177  {
1178  s.setStereoImage(
1179  compressedMatFromBytes(msg.left_compressed),
1180  compressedMatFromBytes(msg.right_compressed),
1181  stereoModels);
1182  if(!left.empty() && !right.empty())
1183  {
1184  s.setStereoImage(left, right, stereoModels, false);
1185  }
1186  }
1187  else
1188  {
1189  s.setRGBDImage(
1190  compressedMatFromBytes(msg.left_compressed),
1191  compressedMatFromBytes(msg.right_compressed),
1192  models);
1193  if(!left.empty() && !right.empty())
1194  {
1195  s.setRGBDImage(left, right, models, false);
1196  }
1197  }
1198 
1199  // Laser scan data
1200  if(!msg.laser_scan_compressed.empty())
1201  {
1202  s.setLaserScan(rtabmap::LaserScan(
1203  compressedMatFromBytes(msg.laser_scan_compressed),
1204  msg.laser_scan_max_pts,
1205  msg.laser_scan_max_range,
1206  (rtabmap::LaserScan::Format)msg.laser_scan_format,
1207  transformFromGeometryMsg(msg.laser_scan_local_transform)));
1208  }
1209  if(!msg.laser_scan.data.empty())
1210  {
1211  pcl::PCLPointCloud2 cloud;
1212  pcl_conversions::toPCL(msg.laser_scan, cloud);
1213  s.setLaserScan(rtabmap::LaserScan(
1215  msg.laser_scan_max_pts,
1216  msg.laser_scan_max_range,
1217  transformFromGeometryMsg(msg.laser_scan_local_transform)),
1218  false);
// The scan format is inferred from the point cloud fields; it must agree with
// the format advertised in the message.
1219  UASSERT((rtabmap::LaserScan::Format)msg.laser_scan_format == s.laserScanRaw().format());
1220  }
1221 
1222  //convert features
1223  std::vector<cv::KeyPoint> keypoints;
1224  std::vector<cv::Point3f> keypoints3D;
1225  cv::Mat descriptors;
1226  if(!msg.key_points.empty())
1227  {
1228  keypoints = rtabmap_conversions::keypointsFromROS(msg.key_points);
1229  }
1230  if(!msg.points.empty())
1231  {
1232  keypoints3D = rtabmap_conversions::points3fFromROS(msg.points);
1233  }
1234  if(!msg.descriptors.empty())
1235  {
1236  descriptors = rtabmap::uncompressData(msg.descriptors);
1237  }
1238  s.setFeatures(keypoints, keypoints3D, descriptors);
1239 
1240  s.setGlobalDescriptors(rtabmap_conversions::globalDescriptorsFromROS(msg.global_descriptors));
1241  s.setEnvSensors(rtabmap_conversions::envSensorsFromROS(msg.env_sensors));
1242  s.setOccupancyGrid(
1243  compressedMatFromBytes(msg.grid_ground),
1244  compressedMatFromBytes(msg.grid_obstacles),
1245  compressedMatFromBytes(msg.grid_empty_cells),
1246  msg.grid_cell_size,
1247  point3fFromROS(msg.grid_view_point));
1248  s.setGPS(rtabmap::GPS(msg.gps.stamp, msg.gps.longitude, msg.gps.latitude, msg.gps.altitude, msg.gps.error, msg.gps.bearing));
1249  s.setIMU(rtabmap_conversions::imuFromROS(msg.imu, transformFromGeometryMsg(msg.imu_local_transform)));
1250  return s;
1251 }
// Serialize a rtabmap::SensorData into a rtabmap_msgs::SensorData message.
// Compressed payloads are always copied; raw images/scans/user data/grids are
// copied (or compressed on the fly) only when copyRawData is true.
// NOTE(review): the "cvImg.encoding = ..." line (embedded line 1298) and the
// IMU serialization line (embedded line 1395) were lost by the doc-extraction
// tool.
1252 void sensorDataToROS(const rtabmap::SensorData & data, rtabmap_msgs::SensorData & msg, const std::string & frameId, bool copyRawData)
1253 {
1254  // add data
// The message header stamp/seq carry the node stamp and id.
1255  msg.header.seq = data.id();
1256  msg.header.stamp = ros::Time(data.stamp());
1257  msg.header.frame_id = frameId;
1258  transformToPoseMsg(data.groundTruth(), msg.ground_truth_pose);
1259  msg.gps.stamp = data.gps().stamp();
1260  msg.gps.longitude = data.gps().longitude();
1261  msg.gps.latitude = data.gps().latitude();
1262  msg.gps.altitude = data.gps().altitude();
1263  msg.gps.error = data.gps().error();
1264  msg.gps.bearing = data.gps().bearing();
1265 
1266  //Calibration
// Mono/multi-camera models fill left_camera_info only; stereo models fill
// both left and right arrays (local transform taken from the left camera).
1267  if(data.cameraModels().size())
1268  {
1269  msg.left_camera_info.resize(data.cameraModels().size());
1270  msg.local_transform.resize(data.cameraModels().size());
1271  for(unsigned int i=0; i<data.cameraModels().size(); ++i)
1272  {
1273  cameraModelToROS(data.cameraModels()[i], msg.left_camera_info[i]);
1274  transformToGeometryMsg(data.cameraModels()[i].localTransform(), msg.local_transform[i]);
1275  }
1276  }
1277  else if(data.stereoCameraModels().size())
1278  {
1279  msg.left_camera_info.resize(data.stereoCameraModels().size());
1280  msg.right_camera_info.resize(data.stereoCameraModels().size());
1281  msg.local_transform.resize(data.stereoCameraModels().size());
1282  for(unsigned int i=0; i<data.stereoCameraModels().size(); ++i)
1283  {
1284  cameraModelToROS(data.stereoCameraModels()[i].left(), msg.left_camera_info[i]);
1285  cameraModelToROS(data.stereoCameraModels()[i].right(), msg.right_camera_info[i]);
1286  transformToGeometryMsg(data.stereoCameraModels()[i].left().localTransform(), msg.local_transform[i]);
1287  }
1288  }
1289 
1290  // Images
1291  if(copyRawData)
1292  {
1293  if(!data.imageRaw().empty())
1294  {
1295  cv_bridge::CvImage cvImg;
1296  cvImg.image = data.imageRaw();
1297  UASSERT(data.imageRaw().type()==CV_8UC1 || data.imageRaw().type()==CV_8UC3);
1299  cvImg.toImageMsg(msg.left);
1300  }
1301  if(!data.depthOrRightRaw().empty())
1302  {
1303  cv_bridge::CvImage cvDepth;
1304  cvDepth.image = data.depthOrRightRaw();
1305  UASSERT(data.depthOrRightRaw().type()==CV_8UC1 || data.depthOrRightRaw().type()==CV_16UC1 || data.depthOrRightRaw().type()==CV_32FC1);
// Encoding follows the cv::Mat depth: mono8, 16UC1 or 32FC1.
1306  cvDepth.encoding = data.depthOrRightRaw().type()==CV_8UC1?sensor_msgs::image_encodings::MONO8:data.depthOrRightRaw().type()==CV_16UC1?sensor_msgs::image_encodings::TYPE_16UC1:sensor_msgs::image_encodings::TYPE_32FC1;
1307  cvDepth.toImageMsg(msg.right);
1308  }
1309  }
1310  compressedMatToBytes(data.imageCompressed(), msg.left_compressed);
1311  compressedMatToBytes(data.depthOrRightCompressed(), msg.right_compressed);
1312 
1313  // Laser scan
// Scan metadata (max points/range, format, local transform) is taken from the
// compressed scan in both branches.
1314  if(copyRawData && !data.laserScanRaw().empty())
1315  {
1316  pcl::PCLPointCloud2::Ptr cloud = rtabmap::util3d::laserScanToPointCloud2(data.laserScanRaw());
1317  pcl_conversions::moveFromPCL(*cloud, msg.laser_scan);
1318  msg.laser_scan_max_pts = data.laserScanCompressed().maxPoints();
1319  msg.laser_scan_max_range = data.laserScanCompressed().rangeMax();
1320  msg.laser_scan_format = data.laserScanCompressed().format();
1321  transformToGeometryMsg(data.laserScanCompressed().localTransform(), msg.laser_scan_local_transform);
1322  }
1323  if(!data.laserScanCompressed().empty())
1324  {
1325  compressedMatToBytes(data.laserScanCompressed().data(), msg.laser_scan_compressed);
1326  msg.laser_scan_max_pts = data.laserScanCompressed().maxPoints();
1327  msg.laser_scan_max_range = data.laserScanCompressed().rangeMax();
1328  msg.laser_scan_format = data.laserScanCompressed().format();
1329  transformToGeometryMsg(data.laserScanCompressed().localTransform(), msg.laser_scan_local_transform);
1330  }
1331 
1332  // user data
// Prefer the already-compressed payload; compress the raw one only if asked.
1333  if(!data.userDataCompressed().empty())
1334  {
1335  compressedMatToBytes(data.userDataCompressed(), msg.user_data);
1336 
1337  }
1338  else if(copyRawData && !data.userDataRaw().empty())
1339  {
1340  compressedMatToBytes(rtabmap::compressData2(data.userDataRaw()), msg.user_data);
1341  }
1342 
1343  // oocupancy grid
1344  if(!data.gridGroundCellsCompressed().empty())
1345  {
1346  compressedMatToBytes(data.gridGroundCellsCompressed(), msg.grid_ground);
1347 
1348  }
1349  else if(copyRawData && !data.gridGroundCellsRaw().empty())
1350  {
1351  compressedMatToBytes(rtabmap::compressData2(data.gridGroundCellsRaw()), msg.grid_ground);
1352  }
1353  if(!data.gridObstacleCellsCompressed().empty())
1354  {
1355  compressedMatToBytes(data.gridObstacleCellsCompressed(), msg.grid_obstacles);
1356 
1357  }
1358  else if(copyRawData && !data.gridObstacleCellsRaw().empty())
1359  {
1360  compressedMatToBytes(rtabmap::compressData2(data.gridObstacleCellsRaw()), msg.grid_obstacles);
1361  }
1362  if(!data.gridEmptyCellsCompressed().empty())
1363  {
1364  compressedMatToBytes(data.gridEmptyCellsCompressed(), msg.grid_empty_cells);
1365 
1366  }
1367  else if(copyRawData && !data.gridEmptyCellsRaw().empty())
1368  {
1369  compressedMatToBytes(rtabmap::compressData2(data.gridEmptyCellsRaw()), msg.grid_empty_cells);
1370  }
1371 
1372  point3fToROS(data.gridViewPoint(), msg.grid_view_point);
1373  msg.grid_cell_size = data.gridCellSize();
1374 
1375  //convert features
1376  if(!data.keypoints().empty())
1377  {
1378  rtabmap_conversions::keypointsToROS(data.keypoints(), msg.key_points);
1379  }
1380  if(!data.keypoints3D().empty())
1381  {
1382  rtabmap_conversions::points3fToROS(data.keypoints3D(), msg.points);
1383  }
1384  if(!data.descriptors().empty())
1385  {
1386  msg.descriptors = rtabmap::compressData(data.descriptors());
1387  }
1388  if(!data.globalDescriptors().empty())
1389  {
1390  rtabmap_conversions::globalDescriptorsToROS(data.globalDescriptors(), msg.global_descriptors);
1391  }
1392 
// NOTE(review): this unconditional call duplicates the guarded call just
// above — redundant but harmless (same inputs, same output field); candidate
// for removal upstream.
1393  rtabmap_conversions::globalDescriptorsToROS(data.globalDescriptors(), msg.global_descriptors);
1394  rtabmap_conversions::envSensorsToROS(data.envSensors(), msg.env_sensors);
1396  transformToGeometryMsg(data.imu().localTransform(), msg.imu_local_transform);
1397 }
1398 
// Deserialize a rtabmap_msgs::Node message into a full rtabmap::Signature:
// node info (id, map id, weight, stamp, label, pose, ground truth), the
// visual words (id->index map, 2D keypoints, 3D points, descriptors) and the
// embedded sensor data. Size mismatches between the parallel word arrays are
// reported with ROS_ERROR and the inconsistent data is skipped.
// NOTE(review): the "rtabmap::Signature s(" constructor line (embedded line
// 1426) was lost by the doc-extraction tool.
1399 rtabmap::Signature nodeFromROS(const rtabmap_msgs::Node & msg)
1400 {
1401  //Features stuff...
1402  std::multimap<int, int> words;
1403  std::vector<cv::KeyPoint> wordsKpts;
1404  std::vector<cv::Point3f> words3D;
1405 
1406  cv::Mat wordsDescriptors = rtabmap::uncompressData(msg.word_descriptors);
1407 
// Sanity checks: all word-related arrays must be parallel to word_id_keys.
1408  if(msg.word_id_keys.size() != msg.word_id_values.size())
1409  {
1410  ROS_ERROR("Word ID keys and values should be the same size (%d, %d)!", (int)msg.word_id_keys.size(), (int)msg.word_id_values.size());
1411  }
1412  if(!msg.word_kpts.empty() && msg.word_kpts.size() != msg.word_id_keys.size())
1413  {
1414  ROS_ERROR("Word IDs and 2D keypoints should be the same size (%d, %d)!", (int)msg.word_id_keys.size(), (int)msg.word_kpts.size());
1415  }
1416  if(!msg.word_pts.empty() && msg.word_pts.size() != msg.word_id_keys.size())
1417  {
1418  ROS_ERROR("Word IDs and 3D points should be the same size (%d, %d)!", (int)msg.word_id_keys.size(), (int)msg.word_pts.size());
1419  }
1420  if(!wordsDescriptors.empty() && wordsDescriptors.rows != (int)msg.word_id_keys.size())
1421  {
1422  ROS_ERROR("Word IDs and descriptors should be the same size (%d, %d)!", (int)msg.word_id_keys.size(), wordsDescriptors.rows);
1423  wordsDescriptors = cv::Mat();
1424  }
1425 
1427  msg.id,
1428  msg.map_id,
1429  msg.weight,
1430  msg.stamp,
1431  msg.label,
1432  transformFromPoseMsg(msg.pose),
1433  transformFromPoseMsg(msg.data.ground_truth_pose));
1434 
// Words are only copied when the key/value arrays agree; keypoints/3D points
// are only copied when their arrays are parallel to the ids.
1435  if(msg.word_id_keys.size() == msg.word_id_values.size())
1436  {
1437  for(unsigned int i=0; i<msg.word_id_keys.size(); ++i)
1438  {
1439  words.insert(std::make_pair(msg.word_id_keys.at(i), msg.word_id_values.at(i))); // ID to index
1440  if(msg.word_id_keys.size() == msg.word_kpts.size())
1441  {
1442  if(wordsKpts.empty())
1443  {
1444  wordsKpts.reserve(msg.word_kpts.size());
1445  }
1446  wordsKpts.push_back(keypointFromROS(msg.word_kpts.at(i)));
1447  }
1448  if(msg.word_id_keys.size() == msg.word_pts.size())
1449  {
1450  if(words3D.empty())
1451  {
1452  words3D.reserve(msg.word_pts.size());
1453  }
1454  words3D.push_back(point3fFromROS(msg.word_pts[i]));
1455  }
1456  }
1457  }
1458  s.setWords(words, wordsKpts, words3D, wordsDescriptors);
1459  s.sensorData() = sensorDataFromROS(msg.data);
1460  s.sensorData().setId(msg.id);
1461  return s;
1462 }
1463 void nodeToROS(const rtabmap::Signature & signature, rtabmap_msgs::Node & msg)
1464 {
1465  // add data
1466  msg.id = signature.id();
1467  msg.map_id = signature.mapId();
1468  msg.weight = signature.getWeight();
1469  msg.stamp = signature.getStamp();
1470  msg.label = signature.getLabel();
1471  transformToPoseMsg(signature.getPose(), msg.pose);
1472 
1473  //Features stuff...
1474  if(!signature.getWordsKpts().empty() &&
1475  signature.getWords().size() != signature.getWordsKpts().size())
1476  {
1477  ROS_ERROR("Word IDs and 2D keypoints must have the same size (%d vs %d)!",
1478  (int)signature.getWords().size(),
1479  (int)signature.getWordsKpts().size());
1480  }
1481 
1482  if(!signature.getWords3().empty() &&
1483  signature.getWords().size() != signature.getWords3().size())
1484  {
1485  ROS_ERROR("Word IDs and 3D points must have the same size (%d vs %d)!",
1486  (int)signature.getWords().size(),
1487  (int)signature.getWords3().size());
1488  }
1489  int i=0;
1490  msg.word_id_keys.resize(signature.getWords().size());
1491  msg.word_id_values.resize(signature.getWords().size());
1492  for(std::multimap<int, int>::const_iterator iter=signature.getWords().begin();
1493  iter!=signature.getWords().end();
1494  ++iter)
1495  {
1496  msg.word_id_keys.at(i) = iter->first;
1497  msg.word_id_values.at(i) = iter->second;
1498  if(signature.getWordsKpts().size() == signature.getWords().size())
1499  {
1500  if(msg.word_kpts.empty())
1501  {
1502  msg.word_kpts.resize(signature.getWords().size());
1503  }
1504  keypointToROS(signature.getWordsKpts().at(i), msg.word_kpts.at(i));
1505  }
1506  if(signature.getWords3().size() == signature.getWords().size())
1507  {
1508  if(msg.word_pts.empty())
1509  {
1510  msg.word_pts.resize(signature.getWords().size());
1511  }
1512  point3fToROS(signature.getWords3().at(i), msg.word_pts.at(i));
1513  }
1514  ++i;
1515  }
1516 
1517  if(!signature.getWordsDescriptors().empty())
1518  {
1519  if(signature.getWordsDescriptors().rows == (int)signature.getWords().size())
1520  {
1521  msg.word_descriptors = rtabmap::compressData(signature.getWordsDescriptors());
1522  }
1523  else
1524  {
1525  ROS_ERROR("Word IDs and descriptors must have the same size (%d vs %d)!",
1526  (int)signature.getWords().size(),
1527  signature.getWordsDescriptors().rows);
1528  }
1529  }
1530 
1531  sensorDataToROS(signature.sensorData(), msg.data);
1532  transformToPoseMsg(signature.getGroundTruthPose(), msg.data.ground_truth_pose);
1533 }
1534 
1535 rtabmap::Signature nodeDataFromROS(const rtabmap_msgs::Node & msg)
1536 {
1537  return nodeFromROS(msg);
1538 }
1539 void nodeDataToROS(const rtabmap::Signature & signature, rtabmap_msgs::Node & msg)
1540 {
1541  nodeToROS(signature, msg);
1542 }
1543 
// Deserialize only the node "info" part of a Node message (id, map id,
// weight, stamp, label, pose, ground truth) — no words or sensor data.
// NOTE(review): the "rtabmap::Signature s(" constructor line (embedded line
// 1546) was lost by the doc-extraction tool.
1544 rtabmap::Signature nodeInfoFromROS(const rtabmap_msgs::Node & msg)
1545 {
1547  msg.id,
1548  msg.map_id,
1549  msg.weight,
1550  msg.stamp,
1551  msg.label,
1552  transformFromPoseMsg(msg.pose),
1553  transformFromPoseMsg(msg.data.ground_truth_pose));
1554  return s;
1555 }
1556 void nodeInfoToROS(const rtabmap::Signature & signature, rtabmap_msgs::Node & msg)
1557 {
1558  // add data
1559  msg.id = signature.id();
1560  msg.map_id = signature.mapId();
1561  msg.weight = signature.getWeight();
1562  msg.stamp = signature.getStamp();
1563  msg.label = signature.getLabel();
1564  transformToPoseMsg(signature.getPose(), msg.pose);
1565  transformToPoseMsg(signature.getGroundTruthPose(), msg.data.ground_truth_pose);
1566 }
1567 
1568 std::map<std::string, float> odomInfoToStatistics(const rtabmap::OdometryInfo & info)
1569 {
1570  std::map<std::string, float> stats;
1571 
1572  stats.insert(std::make_pair("Odometry/TimeRegistration/ms", info.reg.totalTime*1000.0f));
1573  stats.insert(std::make_pair("Odometry/RAM_usage/MB", info.memoryUsage));
1574 
1575  // Based on rtabmap/MainWindow.cpp
1576  stats.insert(std::make_pair("Odometry/Features/", info.features));
1577  stats.insert(std::make_pair("Odometry/Matches/", info.reg.matches));
1578  stats.insert(std::make_pair("Odometry/MatchesRatio/", info.features<=0?0.0f:float(info.reg.inliers)/float(info.features)));
1579  stats.insert(std::make_pair("Odometry/Inliers/", info.reg.inliers));
1580  stats.insert(std::make_pair("Odometry/InliersMeanDistance/m", info.reg.inliersMeanDistance));
1581  stats.insert(std::make_pair("Odometry/InliersDistribution/", info.reg.inliersDistribution));
1582  stats.insert(std::make_pair("Odometry/InliersRatio/", info.reg.inliers));
1583  stats.insert(std::make_pair("Odometry/ICPInliersRatio/", info.reg.icpInliersRatio));
1584  stats.insert(std::make_pair("Odometry/ICPRotation/rad", info.reg.icpRotation));
1585  stats.insert(std::make_pair("Odometry/ICPTranslation/m", info.reg.icpTranslation));
1586  stats.insert(std::make_pair("Odometry/ICPStructuralComplexity/", info.reg.icpStructuralComplexity));
1587  stats.insert(std::make_pair("Odometry/ICPStructuralDistribution/", info.reg.icpStructuralDistribution));
1588  stats.insert(std::make_pair("Odometry/ICPCorrespondences/", info.reg.icpCorrespondences));
1589  stats.insert(std::make_pair("Odometry/StdDevLin/", sqrt((float)info.reg.covariance.at<double>(0,0))));
1590  stats.insert(std::make_pair("Odometry/StdDevAng/", sqrt((float)info.reg.covariance.at<double>(5,5))));
1591  stats.insert(std::make_pair("Odometry/VarianceLin/", (float)info.reg.covariance.at<double>(0,0)));
1592  stats.insert(std::make_pair("Odometry/VarianceAng/", (float)info.reg.covariance.at<double>(5,5)));
1593  stats.insert(std::make_pair("Odometry/TimeEstimation/ms", info.timeEstimation*1000.0f));
1594  stats.insert(std::make_pair("Odometry/TimeFiltering/ms", info.timeParticleFiltering*1000.0f));
1595  stats.insert(std::make_pair("Odometry/LocalMapSize/", info.localMapSize));
1596  stats.insert(std::make_pair("Odometry/LocalScanMapSize/", info.localScanMapSize));
1597  stats.insert(std::make_pair("Odometry/LocalKeyFrames/", info.localKeyFrames));
1598  stats.insert(std::make_pair("Odometry/LocalBundleOutliers/", info.localBundleOutliers));
1599  stats.insert(std::make_pair("Odometry/LocalBundleConstraints/", info.localBundleConstraints));
1600  stats.insert(std::make_pair("Odometry/LocalBundleTime/ms", info.localBundleTime*1000.0f));
1601  stats.insert(std::make_pair("Odometry/localBundleAvgInlierDistance/pix", info.localBundleAvgInlierDistance));
1602  stats.insert(std::make_pair("Odometry/localBundleMaxKeyFramesForInlier/", info.localBundleMaxKeyFramesForInlier));
1603  stats.insert(std::make_pair("Odometry/KeyFrameAdded/", info.keyFrameAdded?1.0f:0.0f));
1604  stats.insert(std::make_pair("Odometry/Interval/ms", (float)info.interval));
1605  stats.insert(std::make_pair("Odometry/Distance/m", info.distanceTravelled));
1606 
1607  float x,y,z,roll,pitch,yaw;
1608  float dist = 0.0f, speed=0.0f;
1609  if(!info.transform.isNull())
1610  {
1611  info.transform.getTranslationAndEulerAngles(x,y,z,roll,pitch,yaw);
1612  dist = info.transform.getNorm();
1613  stats.insert(std::make_pair("Odometry/T/m", dist));
1614  stats.insert(std::make_pair("Odometry/Tx/m", x));
1615  stats.insert(std::make_pair("Odometry/Ty/m", y));
1616  stats.insert(std::make_pair("Odometry/Tz/m", z));
1617  stats.insert(std::make_pair("Odometry/Troll/deg", roll*180.0/CV_PI));
1618  stats.insert(std::make_pair("Odometry/Tpitch/deg", pitch*180.0/CV_PI));
1619  stats.insert(std::make_pair("Odometry/Tyaw/deg", yaw*180.0/CV_PI));
1620 
1621  if(info.interval>0.0)
1622  {
1623  speed = dist/info.interval;
1624  stats.insert(std::make_pair("Odometry/Speed/kph", speed*3.6));
1625  stats.insert(std::make_pair("Odometry/Speed/mph", speed*2.237));
1626  stats.insert(std::make_pair("Odometry/Speed/mps", speed));
1627  }
1628  }
1629  if(!info.transformGroundTruth.isNull())
1630  {
1631  if(!info.transform.isNull())
1632  {
1633  rtabmap::Transform diff = info.transformGroundTruth.inverse()*info.transform;
1634  stats.insert(std::make_pair("Odometry/TG_error_lin/m", diff.getNorm()));
1635  stats.insert(std::make_pair("Odometry/TG_error_ang/deg", diff.getAngle()*180.0/CV_PI));
1636  }
1637 
1638  info.transformGroundTruth.getTranslationAndEulerAngles(x,y,z,roll,pitch,yaw);
1639  dist = info.transformGroundTruth.getNorm();
1640  stats.insert(std::make_pair("Odometry/TG/m", dist));
1641  stats.insert(std::make_pair("Odometry/TGx/m", x));
1642  stats.insert(std::make_pair("Odometry/TGy/m", y));
1643  stats.insert(std::make_pair("Odometry/TGz/m", z));
1644  stats.insert(std::make_pair("Odometry/TGroll/deg", roll*180.0/CV_PI));
1645  stats.insert(std::make_pair("Odometry/TGpitch/deg", pitch*180.0/CV_PI));
1646  stats.insert(std::make_pair("Odometry/TGyaw/deg", yaw*180.0/CV_PI));
1647 
1648  if(info.interval>0.0)
1649  {
1650  speed = dist/info.interval;
1651  stats.insert(std::make_pair("Odometry/SpeedG/kph", speed*3.6));
1652  stats.insert(std::make_pair("Odometry/SpeedG/mph", speed*2.237));
1653  stats.insert(std::make_pair("Odometry/SpeedG/mps", speed));
1654  }
1655  }
1656  return stats;
1657 }
1658 
// Deserialize a rtabmap_msgs::OdomInfo message into a rtabmap::OdometryInfo.
// When ignoreData is true, only the statistics/registration fields are
// copied; the heavy payload (words, corners, transforms, local map/scan) is
// skipped.
// NOTE(review): the "rtabmap::OdometryInfo info;" declaration line (embedded
// line 1661) was lost by the doc-extraction tool.
1659 rtabmap::OdometryInfo odomInfoFromROS(const rtabmap_msgs::OdomInfo & msg, bool ignoreData)
1660 {
1662  info.lost = msg.lost;
1663  info.reg.matches = msg.matches;
1664  info.reg.inliers = msg.inliers;
1665  info.reg.icpInliersRatio = msg.icpInliersRatio;
1666  info.reg.icpRotation = msg.icpRotation;
1667  info.reg.icpTranslation = msg.icpTranslation;
1668  info.reg.icpStructuralComplexity = msg.icpStructuralComplexity;
1669  info.reg.icpStructuralDistribution = msg.icpStructuralDistribution;
1670  info.reg.icpCorrespondences = msg.icpCorrespondences;
// Clone so the OdometryInfo owns its 6x6 covariance instead of aliasing the
// message buffer.
1671  info.reg.covariance = cv::Mat(6,6,CV_64FC1, (void*)msg.covariance.data()).clone();
1672  info.features = msg.features;
1673  info.localMapSize = msg.localMapSize;
1674  info.localScanMapSize = msg.localScanMapSize;
1675  info.localKeyFrames = msg.localKeyFrames;
1676  info.localBundleOutliers = msg.localBundleOutliers;
1677  info.localBundleConstraints = msg.localBundleConstraints;
1678  info.localBundleTime = msg.localBundleTime;
1679  info.localBundleAvgInlierDistance = msg.localBundleAvgInlierDistance;
1680  info.localBundleMaxKeyFramesForInlier = msg.localBundleMaxKeyFramesForInlier;
// localBundleIds/Poses/Models are parallel arrays in the message.
1681  UASSERT(msg.localBundleModels.size() == msg.localBundleIds.size());
1682  UASSERT(msg.localBundleModels.size() == msg.localBundlePoses.size());
1683  for(size_t i=0; i<msg.localBundleIds.size(); ++i)
1684  {
1685  std::vector<rtabmap::CameraModel> models;
1686  for(size_t j=0; j<msg.localBundleModels[i].models.size(); ++j)
1687  {
1688  models.push_back(cameraModelFromROS(msg.localBundleModels[i].models[j].camera_info, transformFromGeometryMsg(msg.localBundleModels[i].models[j].local_transform)));
1689  }
1690  info.localBundleModels.insert(std::make_pair(msg.localBundleIds[i], models));
1691  info.localBundlePoses.insert(std::make_pair(msg.localBundleIds[i], transformFromPoseMsg(msg.localBundlePoses[i])));
1692  }
1693  info.keyFrameAdded = msg.keyFrameAdded;
1694  info.timeEstimation = msg.timeEstimation;
1695  info.timeParticleFiltering = msg.timeParticleFiltering;
1696  info.stamp = msg.stamp;
1697  info.interval = msg.interval;
1698  info.distanceTravelled = msg.distanceTravelled;
1699  info.memoryUsage = msg.memoryUsage;
1700  info.gravityRollError = msg.gravityRollError;
1701  info.gravityPitchError = msg.gravityPitchError;
1702 
1703  info.type = msg.type;
1704 
1705  info.reg.matchesIDs = msg.wordMatches;
1706  info.reg.inliersIDs = msg.wordInliers;
1707 
1708  if(!ignoreData)
1709  {
1710  UASSERT(msg.wordsKeys.size() == msg.wordsValues.size());
1711  for(unsigned int i=0; i<msg.wordsKeys.size(); ++i)
1712  {
1713  info.words.insert(std::make_pair(msg.wordsKeys[i], keypointFromROS(msg.wordsValues[i])));
1714  }
1715 
1716  info.refCorners = points2fFromROS(msg.refCorners);
1717  info.newCorners = points2fFromROS(msg.newCorners);
1718  info.cornerInliers = msg.cornerInliers;
1719 
1720  info.transform = transformFromGeometryMsg(msg.transform);
1721  info.transformFiltered = transformFromGeometryMsg(msg.transformFiltered);
1722  info.transformGroundTruth = transformFromGeometryMsg(msg.transformGroundTruth);
1723  info.guess = transformFromGeometryMsg(msg.guess);
1724 
1725  UASSERT(msg.localMapKeys.size() == msg.localMapValues.size());
1726  for(unsigned int i=0; i<msg.localMapKeys.size(); ++i)
1727  {
1728  info.localMap.insert(std::make_pair(msg.localMapKeys[i], point3fFromROS(msg.localMapValues[i])));
1729  }
1730 
// Local scan map travels as a PointCloud2; convert back to a LaserScan.
1731  pcl::PCLPointCloud2 cloud;
1732  pcl_conversions::toPCL(msg.localScanMap, cloud);
1733  info.localScanMap = rtabmap::util3d::laserScanFromPointCloud(cloud);
1734  }
1735  return info;
1736 }
1737 
/**
 * Serialize a rtabmap::OdometryInfo into a rtabmap_msgs::OdomInfo message.
 *
 * @param info       odometry statistics/result to convert.
 * @param msg        output message, filled in place (fields not set here keep
 *                   their message defaults).
 * @param ignoreData when true, the heavy payload (visual words, corners,
 *                   local feature map and local scan map) is NOT copied;
 *                   only scalar statistics, poses and bundle info are set.
 */
void odomInfoToROS(const rtabmap::OdometryInfo & info, rtabmap_msgs::OdomInfo & msg, bool ignoreData)
{
	// Scalar registration/ICP statistics.
	msg.lost = info.lost;
	msg.matches = info.reg.matches;
	msg.inliers = info.reg.inliers;
	msg.icpInliersRatio = info.reg.icpInliersRatio;
	msg.icpRotation = info.reg.icpRotation;
	msg.icpTranslation = info.reg.icpTranslation;
	msg.icpStructuralComplexity = info.reg.icpStructuralComplexity;
	msg.icpStructuralDistribution = info.reg.icpStructuralDistribution;
	msg.icpCorrespondences = info.reg.icpCorrespondences;
	// Copy the 6x6 double covariance only if it has the exact expected
	// layout; otherwise the message keeps its default (zeroed) covariance.
	if(info.reg.covariance.type() == CV_64FC1 && info.reg.covariance.cols == 6 && info.reg.covariance.rows == 6)
	{
		memcpy(msg.covariance.data(), info.reg.covariance.data, 36*sizeof(double));
	}
	msg.features = info.features;
	msg.localMapSize = info.localMapSize;
	msg.localScanMapSize = info.localScanMapSize;
	msg.localKeyFrames = info.localKeyFrames;
	msg.localBundleOutliers = info.localBundleOutliers;
	msg.localBundleConstraints = info.localBundleConstraints;
	msg.localBundleTime = info.localBundleTime;
	msg.localBundleAvgInlierDistance = info.localBundleAvgInlierDistance;
	msg.localBundleMaxKeyFramesForInlier = info.localBundleMaxKeyFramesForInlier;
	// Local bundle adjustment data: poses and camera models are keyed by the
	// same node ids; they are flattened into three parallel arrays
	// (localBundleIds / localBundlePoses / localBundleModels).
	UASSERT(info.localBundleModels.size() == info.localBundlePoses.size());
	for(std::map<int, std::vector<rtabmap::CameraModel> >::const_iterator iter=info.localBundleModels.begin();
		iter!=info.localBundleModels.end();
		++iter)
	{
		msg.localBundleIds.push_back(iter->first);

		UASSERT(info.localBundlePoses.find(iter->first)!=info.localBundlePoses.end());
		geometry_msgs::Pose pose;
		transformToPoseMsg(info.localBundlePoses.at(iter->first), pose);
		msg.localBundlePoses.push_back(pose);

		// One CameraModels entry per node, possibly holding several cameras.
		rtabmap_msgs::CameraModels models;
		for(size_t i=0; i<iter->second.size(); ++i)
		{
			rtabmap_msgs::CameraModel modelMsg;
			cameraModelToROS(iter->second[i], modelMsg.camera_info);
			transformToGeometryMsg(iter->second[i].localTransform(), modelMsg.local_transform);
			models.models.push_back(modelMsg);
		}
		msg.localBundleModels.push_back(models);
	}
	msg.keyFrameAdded = info.keyFrameAdded;
	msg.timeEstimation = info.timeEstimation;
	msg.timeParticleFiltering = info.timeParticleFiltering;
	msg.stamp = info.stamp;
	msg.interval = info.interval;
	msg.distanceTravelled = info.distanceTravelled;
	msg.memoryUsage = info.memoryUsage;
	msg.gravityRollError = info.gravityRollError;
	msg.gravityPitchError = info.gravityPitchError;

	msg.type = info.type;

	// Output poses: raw, filtered, ground truth (if available) and the guess
	// used for registration.
	transformToGeometryMsg(info.transform, msg.transform);
	transformToGeometryMsg(info.transformFiltered, msg.transformFiltered);
	transformToGeometryMsg(info.transformGroundTruth, msg.transformGroundTruth);
	transformToGeometryMsg(info.guess, msg.guess);

	if(!ignoreData)
	{
		// Visual words are split into parallel key/value arrays.
		msg.wordsKeys = uKeys(info.words);
		keypointsToROS(uValues(info.words), msg.wordsValues);

		msg.wordMatches = info.reg.matchesIDs;
		msg.wordInliers = info.reg.inliersIDs;

		points2fToROS(info.refCorners, msg.refCorners);
		points2fToROS(info.newCorners, msg.newCorners);
		msg.cornerInliers = info.cornerInliers;

		msg.localMap = uKeys(info.localMap);
		points3fToROS(uValues(info.localMap), msg.localMapValues);

		// Local scan map is shipped as a PointCloud2 in the scan's local frame.
		pcl_conversions::moveFromPCL(*rtabmap::util3d::laserScanToPointCloud2(info.localScanMap, info.localScanMap.localTransform()), msg.localScanMap);
	}
}
1819 
1820 cv::Mat userDataFromROS(const rtabmap_msgs::UserData & dataMsg)
1821 {
1822  cv::Mat data;
1823  if(!dataMsg.data.empty())
1824  {
1825  if(dataMsg.cols > 0 && dataMsg.rows > 0 && dataMsg.type >= 0)
1826  {
1827  data = cv::Mat(dataMsg.rows, dataMsg.cols, dataMsg.type, (void*)dataMsg.data.data()).clone();
1828  }
1829  else
1830  {
1831  if(dataMsg.cols != (int)dataMsg.data.size() || dataMsg.rows != 1 || dataMsg.type != CV_8UC1)
1832  {
1833  ROS_ERROR("cols, rows and type fields of the UserData msg "
1834  "are not correctly set (cols=%d, rows=%d, type=%d)! We assume that the data "
1835  "is compressed (cols=%d, rows=1, type=%d(CV_8UC1)).",
1836  dataMsg.cols, dataMsg.rows, dataMsg.type, (int)dataMsg.data.size(), CV_8UC1);
1837 
1838  }
1839  data = cv::Mat(1, dataMsg.data.size(), CV_8UC1, (void*)dataMsg.data.data()).clone();
1840  }
1841  }
1842  return data;
1843 }
1844 void userDataToROS(const cv::Mat & data, rtabmap_msgs::UserData & dataMsg, bool compress)
1845 {
1846  if(!data.empty())
1847  {
1848  if(compress)
1849  {
1850  dataMsg.data = rtabmap::compressData(data);
1851  dataMsg.rows = 1;
1852  dataMsg.cols = dataMsg.data.size();
1853  dataMsg.type = CV_8UC1;
1854  }
1855  else
1856  {
1857  dataMsg.data.resize(data.step[0] * data.rows); // use step for non-contiguous matrices
1858  memcpy(dataMsg.data.data(), data.data, dataMsg.data.size());
1859  dataMsg.rows = data.rows;
1860  dataMsg.cols = data.cols;
1861  dataMsg.type = data.type();
1862  }
1863  }
1864 }
1865 
1866 rtabmap::IMU imuFromROS(const sensor_msgs::Imu & msg, const rtabmap::Transform & localTransform)
1867 {
1868  return rtabmap::IMU(
1869  cv::Vec4d(msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w),
1870  cv::Mat(3,3,CV_64FC1,(void*)msg.orientation_covariance.data()).clone(),
1871  cv::Vec3d(msg.angular_velocity.x, msg.angular_velocity.y, msg.angular_velocity.z),
1872  cv::Mat(3,3,CV_64FC1,(void*)msg.angular_velocity_covariance.data()).clone(),
1873  cv::Vec3d(msg.linear_acceleration.x, msg.linear_acceleration.y, msg.linear_acceleration.z),
1874  cv::Mat(3,3,CV_64FC1,(void*)msg.linear_acceleration_covariance.data()).clone(),
1875  localTransform);
1876 }
1877 void imuToROS(const rtabmap::IMU & imu, sensor_msgs::Imu & msg)
1878 {
1879  msg.orientation.x = imu.orientation()[0];
1880  msg.orientation.y = imu.orientation()[1];
1881  msg.orientation.z = imu.orientation()[2];
1882  msg.orientation.w = imu.orientation()[3];
1883  if(!imu.orientationCovariance().empty())
1884  {
1885  memcpy((void*)msg.orientation_covariance.data(), imu.orientationCovariance().data, 9*sizeof(double));
1886  }
1887  msg.angular_velocity.x = imu.angularVelocity()[0];
1888  msg.angular_velocity.y = imu.angularVelocity()[1];
1889  msg.angular_velocity.z = imu.angularVelocity()[2];
1890  if(!imu.angularVelocityCovariance().empty())
1891  {
1892  memcpy((void*)msg.angular_velocity_covariance.data(), imu.angularVelocityCovariance().data, 9*sizeof(double));
1893  }
1894  msg.linear_acceleration.x = imu.linearAcceleration()[0];
1895  msg.linear_acceleration.y = imu.linearAcceleration()[1];
1896  msg.linear_acceleration.z = imu.linearAcceleration()[2];
1897  if(!imu.linearAccelerationCovariance().empty())
1898  {
1899  memcpy((void*)msg.linear_acceleration_covariance.data(), imu.linearAccelerationCovariance().data, 9*sizeof(double));
1900  }
1901 }
1902 
1904  const std::map<int, std::pair<geometry_msgs::PoseWithCovarianceStamped, float> > & tags,
1905  const std::string & frameId,
1906  const std::string & odomFrameId,
1907  const ros::Time & odomStamp,
1908  tf::TransformListener & listener,
1909  double waitForTransform,
1910  double defaultLinVariance,
1911  double defaultAngVariance)
1912 {
1913  //tag detections
1914  rtabmap::Landmarks landmarks;
1915  for(std::map<int, std::pair<geometry_msgs::PoseWithCovarianceStamped, float> >::const_iterator iter=tags.begin(); iter!=tags.end(); ++iter)
1916  {
1917  if(iter->first <=0)
1918  {
1919  ROS_ERROR("Invalid landmark received! IDs should be > 0 (it is %d). Ignoring this landmark.", iter->first);
1920  continue;
1921  }
1923  frameId,
1924  iter->second.first.header.frame_id,
1925  iter->second.first.header.stamp,
1926  listener,
1927  waitForTransform);
1928 
1929  if(baseToCamera.isNull())
1930  {
1931  ROS_ERROR("Cannot transform tag pose from \"%s\" frame to \"%s\" frame!",
1932  iter->second.first.header.frame_id.c_str(), frameId.c_str());
1933  continue;
1934  }
1935 
1936  rtabmap::Transform baseToTag = baseToCamera * transformFromPoseMsg(iter->second.first.pose.pose);
1937 
1938  if(!baseToTag.isNull())
1939  {
1940  // Correction of the global pose accounting the odometry movement since we received it
1942  frameId,
1943  odomFrameId,
1944  odomStamp,
1945  iter->second.first.header.stamp,
1946  listener,
1947  waitForTransform);
1948  if(!correction.isNull())
1949  {
1950  baseToTag = correction * baseToTag;
1951  }
1952  else
1953  {
1954  ROS_WARN("Could not adjust tag pose accordingly to latest odometry pose. "
1955  "If odometry is small since it received the tag pose and "
1956  "covariance is large, this should not be a problem.");
1957  }
1958  cv::Mat covariance = cv::Mat(6,6, CV_64FC1, (void*)iter->second.first.pose.covariance.data()).clone();
1959  if(covariance.empty() || !uIsFinite(covariance.at<double>(0,0)) || covariance.at<double>(0,0)<=0.0f)
1960  {
1961  covariance = cv::Mat::eye(6,6,CV_64FC1);
1962  covariance(cv::Range(0,3), cv::Range(0,3)) *= defaultLinVariance;
1963  covariance(cv::Range(3,6), cv::Range(3,6)) *= defaultAngVariance;
1964  }
1965  landmarks.insert(std::make_pair(iter->first, rtabmap::Landmark(iter->first, iter->second.second, baseToTag, covariance)));
1966  }
1967  }
1968  return landmarks;
1969 }
1970 
1972  const std::string & fromFrameId,
1973  const std::string & toFrameId,
1974  const ros::Time & stamp,
1975  tf::TransformListener & listener,
1976  double waitForTransform)
1977 {
1978  // TF ready?
1980  try
1981  {
1982  if(waitForTransform > 0.0 && !stamp.isZero())
1983  {
1984  //if(!tfBuffer_.canTransform(fromFrameId, toFrameId, stamp, ros::Duration(1)))
1985  std::string errorMsg;
1986  if(!listener.waitForTransform(fromFrameId, toFrameId, stamp, ros::Duration(waitForTransform), ros::Duration(0.01), &errorMsg))
1987  {
1988  ROS_WARN("Could not get transform from %s to %s after %f seconds (for stamp=%f)! Error=\"%s\".",
1989  fromFrameId.c_str(), toFrameId.c_str(), waitForTransform, stamp.toSec(), errorMsg.c_str());
1990  return transform;
1991  }
1992  }
1993 
1995  listener.lookupTransform(fromFrameId, toFrameId, stamp, tmp);
1997  }
1998  catch(tf::TransformException & ex)
1999  {
2000  ROS_WARN("(getting transform %s -> %s) %s", fromFrameId.c_str(), toFrameId.c_str(), ex.what());
2001  }
2002  return transform;
2003 }
2004 
2005 // get moving transform accordingly to a fixed frame. For example get
2006 // transform between moving /base_link between two stamps accordingly to /odom frame.
2008  const std::string & movingFrame,
2009  const std::string & fixedFrame,
2010  const ros::Time & stampFrom,
2011  const ros::Time & stampTo,
2012  tf::TransformListener & listener,
2013  double waitForTransform)
2014 {
2015  // TF ready?
2017  try
2018  {
2019  ros::Time stamp = stampTo>stampFrom?stampTo:stampFrom;
2020  if(waitForTransform > 0.0 && !stamp.isZero())
2021  {
2022  std::string errorMsg;
2023  if(!listener.waitForTransform(movingFrame, fixedFrame, stamp, ros::Duration(waitForTransform), ros::Duration(0.01), &errorMsg))
2024  {
2025  ROS_WARN("Could not get transform from %s to %s accordingly to %s after %f seconds (for stamps=%f -> %f)! Error=\"%s\".",
2026  movingFrame.c_str(), movingFrame.c_str(), fixedFrame.c_str(), waitForTransform, stampTo.toSec(), stampFrom.toSec(), errorMsg.c_str());
2027  return transform;
2028  }
2029  }
2030 
2032  listener.lookupTransform(movingFrame, stampFrom, movingFrame, stampTo, fixedFrame, tmp);
2034  }
2035  catch(tf::TransformException & ex)
2036  {
2037  ROS_WARN("(getting transform movement of %s according to fixed %s) %s", movingFrame.c_str(), fixedFrame.c_str(), ex.what());
2038  }
2039  return transform;
2040 }
2041 
2043  const std::vector<cv_bridge::CvImageConstPtr> & imageMsgs,
2044  const std::vector<cv_bridge::CvImageConstPtr> & depthMsgs,
2045  const std::vector<sensor_msgs::CameraInfo> & cameraInfoMsgs,
2046  const std::vector<sensor_msgs::CameraInfo> & depthCameraInfoMsgs,
2047  const std::string & frameId,
2048  const std::string & odomFrameId,
2049  const ros::Time & odomStamp,
2050  cv::Mat & rgb,
2051  cv::Mat & depth,
2052  std::vector<rtabmap::CameraModel> & cameraModels,
2053  std::vector<rtabmap::StereoCameraModel> & stereoCameraModels,
2054  tf::TransformListener & listener,
2055  double waitForTransform,
2056  bool alreadRectifiedImages,
2057  const std::vector<std::vector<rtabmap_msgs::KeyPoint> > & localKeyPointsMsgs,
2058  const std::vector<std::vector<rtabmap_msgs::Point3f> > & localPoints3dMsgs,
2059  const std::vector<cv::Mat> & localDescriptorsMsgs,
2060  std::vector<cv::KeyPoint> * localKeyPoints,
2061  std::vector<cv::Point3f> * localPoints3d,
2062  cv::Mat * localDescriptors)
2063 {
2064  UASSERT(!cameraInfoMsgs.empty() &&
2065  (cameraInfoMsgs.size() == imageMsgs.size() || imageMsgs.empty()) &&
2066  (cameraInfoMsgs.size() == depthMsgs.size() || depthMsgs.empty()) &&
2067  (cameraInfoMsgs.size() == depthCameraInfoMsgs.size() || depthCameraInfoMsgs.empty()));
2068 
2069  int imageWidth = imageMsgs.size()?imageMsgs[0]->image.cols:cameraInfoMsgs[0].width;
2070  int imageHeight = imageMsgs.size()?imageMsgs[0]->image.rows:cameraInfoMsgs[0].height;
2071  int depthWidth = depthMsgs.size()?depthMsgs[0]->image.cols:0;
2072  int depthHeight = depthMsgs.size()?depthMsgs[0]->image.rows:0;
2073 
2074  bool isDepth = depthMsgs.empty() || (depthMsgs[0].get() != 0 && (
2075  depthMsgs[0]->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1) == 0 ||
2076  depthMsgs[0]->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1) == 0 ||
2077  depthMsgs[0]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0));
2078 
2079  // Note that right image can be also MONO16, check the camera info if Tx is set, if so assume it is stereo instead
2080  if(isDepth &&
2081  !depthMsgs.empty() &&
2082  depthMsgs[0]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0 &&
2083  cameraInfoMsgs.size() == depthCameraInfoMsgs.size())
2084  {
2085  isDepth = cameraInfoMsgs[0].P.elems[3] == 0.0 && depthCameraInfoMsgs[0].P.elems[3] == 0.0;
2086  static bool warned = false;
2087  if(!warned && isDepth)
2088  {
2089  ROS_WARN("Input depth/left image has encoding \"mono16\" and "
2090  "camera info P[3] is null for both cameras, thus image is "
2091  "considered a depth image. If the depth image is in "
2092  "fact the right image, please convert the right image to "
2093  "\"mono8\". This warning is shown only once.");
2094  warned = true;
2095  }
2096  }
2097 
2098  if(isDepth && !depthMsgs.empty())
2099  {
2100  UASSERT_MSG(
2101  imageWidth/depthWidth == imageHeight/depthHeight,
2102  uFormat("rgb=%dx%d depth=%dx%d", imageWidth, imageHeight, depthWidth, depthHeight).c_str());
2103  }
2104 
2105  int cameraCount = cameraInfoMsgs.size();
2106  for(unsigned int i=0; i<cameraInfoMsgs.size(); ++i)
2107  {
2108  if(!imageMsgs.empty())
2109  {
2110  if(!(imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
2111  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
2112  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
2113  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
2114  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
2115  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
2116  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0 ||
2117  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BAYER_GRBG8) == 0 ||
2118  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BAYER_RGGB8) == 0))
2119  {
2120  ROS_ERROR("Input rgb/left type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8. Current rgb/left=%s",
2121  imageMsgs[i]->encoding.c_str());
2122  return false;
2123  }
2124 
2125  UASSERT_MSG(imageMsgs[i]->image.cols == imageWidth && imageMsgs[i]->image.rows == imageHeight,
2126  uFormat("imageWidth=%d vs %d imageHeight=%d vs %d",
2127  imageWidth,
2128  imageMsgs[i]->image.cols,
2129  imageHeight,
2130  imageMsgs[i]->image.rows).c_str());
2131  }
2132  if(!depthMsgs.empty())
2133  {
2134  if(isDepth &&
2135  !(depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1) == 0 ||
2136  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1) == 0 ||
2137  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0))
2138  {
2139  ROS_ERROR("Input depth type must be image_depth=32FC1,16UC1,mono16. Current depth=%s",
2140  depthMsgs[i]->encoding.c_str());
2141  return false;
2142  }
2143  else if(!isDepth &&
2144  !(depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
2145  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
2146  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0 ||
2147  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
2148  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
2149  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
2150  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0))
2151  {
2152  ROS_ERROR("Input right type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8. Current right=%s",
2153  depthMsgs[i]->encoding.c_str());
2154  return false;
2155  }
2156  }
2157 
2158 
2159  ros::Time stamp;
2160  if(isDepth && !depthMsgs.empty())
2161  {
2162  UASSERT_MSG(depthMsgs[i]->image.cols == depthWidth && depthMsgs[i]->image.rows == depthHeight,
2163  uFormat("depthWidth=%d vs %d imageHeight=%d vs %d",
2164  depthWidth,
2165  depthMsgs[i]->image.cols,
2166  depthHeight,
2167  depthMsgs[i]->image.rows).c_str());
2168  stamp = depthMsgs[i]->header.stamp;
2169  }
2170  else if(!imageMsgs.empty())
2171  {
2172  stamp = imageMsgs[i]->header.stamp;
2173  }
2174  else
2175  {
2176  stamp = cameraInfoMsgs[i].header.stamp;
2177  }
2178 
2179  // use depth's stamp so that geometry is sync to odom, use rgb frame as we assume depth is registered (normally depth msg should have same frame than rgb)
2180  rtabmap::Transform localTransform = rtabmap_conversions::getTransform(frameId, !imageMsgs.empty()?imageMsgs[i]->header.frame_id:cameraInfoMsgs[i].header.frame_id, stamp, listener, waitForTransform);
2181  if(localTransform.isNull())
2182  {
2183  ROS_ERROR("TF of received image %d at time %fs is not set!", i, stamp.toSec());
2184  return false;
2185  }
2186  // sync with odometry stamp
2187  if(!odomFrameId.empty() && odomStamp != stamp)
2188  {
2190  frameId,
2191  odomFrameId,
2192  odomStamp,
2193  stamp,
2194  listener,
2195  waitForTransform);
2196  if(sensorT.isNull())
2197  {
2198  ROS_WARN("Could not get odometry value for image stamp (%fs). Latest odometry "
2199  "stamp is %fs. The image pose will not be synchronized with odometry.", stamp.toSec(), odomStamp.toSec());
2200  }
2201  else
2202  {
2203  //ROS_WARN("RGBD correction = %s (time diff=%fs)", sensorT.prettyPrint().c_str(), fabs(stamp.toSec()-odomStamp.toSec()));
2204  localTransform = sensorT * localTransform;
2205  }
2206  }
2207 
2208  if(!imageMsgs.empty())
2209  {
2210  cv_bridge::CvImageConstPtr ptrImage = imageMsgs[i];
2211  if(imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1)==0 ||
2212  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
2213  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0)
2214  {
2215  // do nothing
2216  }
2217  else if(imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
2218  {
2219  ptrImage = cv_bridge::cvtColor(imageMsgs[i], "mono8");
2220  }
2221  else
2222  {
2223  ptrImage = cv_bridge::cvtColor(imageMsgs[i], "bgr8");
2224  }
2225 
2226  // initialize
2227  if(rgb.empty())
2228  {
2229  rgb = cv::Mat(imageHeight, imageWidth*cameraCount, ptrImage->image.type());
2230  }
2231  if(ptrImage->image.type() == rgb.type())
2232  {
2233  ptrImage->image.copyTo(cv::Mat(rgb, cv::Rect(i*imageWidth, 0, imageWidth, imageHeight)));
2234  }
2235  else
2236  {
2237  ROS_ERROR("Some RGB/left images are not the same type!");
2238  return false;
2239  }
2240  }
2241 
2242  if(!depthMsgs.empty())
2243  {
2244  if(isDepth)
2245  {
2246  cv_bridge::CvImageConstPtr ptrDepth = depthMsgs[i];
2247  cv::Mat subDepth = ptrDepth->image;
2248 
2249  if(depth.empty())
2250  {
2251  depth = cv::Mat(depthHeight, depthWidth*cameraCount, subDepth.type());
2252  }
2253 
2254  if(subDepth.type() == depth.type())
2255  {
2256  subDepth.copyTo(cv::Mat(depth, cv::Rect(i*depthWidth, 0, depthWidth, depthHeight)));
2257  }
2258  else
2259  {
2260  ROS_ERROR("Some Depth images are not the same type!");
2261  return false;
2262  }
2263  }
2264  else
2265  {
2266  cv_bridge::CvImageConstPtr ptrImage = depthMsgs[i];
2267  if( depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1)==0 ||
2268  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0)
2269  {
2270  // do nothing
2271  }
2272  else
2273  {
2274  ptrImage = cv_bridge::cvtColor(depthMsgs[i], "mono8");
2275  }
2276 
2277  // initialize
2278  if(depth.empty())
2279  {
2280  depth = cv::Mat(depthHeight, depthWidth*cameraCount, ptrImage->image.type());
2281  }
2282  if(ptrImage->image.type() == depth.type())
2283  {
2284  ptrImage->image.copyTo(cv::Mat(depth, cv::Rect(i*depthWidth, 0, depthWidth, depthHeight)));
2285  }
2286  else
2287  {
2288  ROS_ERROR("Some right images are not the same type!");
2289  return false;
2290  }
2291  }
2292  }
2293 
2294  if(isDepth)
2295  {
2296  cameraModels.push_back(rtabmap_conversions::cameraModelFromROS(cameraInfoMsgs[i], localTransform));
2297  }
2298  else //stereo
2299  {
2300  UASSERT(cameraInfoMsgs.size() == depthCameraInfoMsgs.size());
2301  rtabmap::Transform stereoTransform;
2302  if(!alreadRectifiedImages)
2303  {
2304  if(depthCameraInfoMsgs[i].header.frame_id.empty() || cameraInfoMsgs[i].header.frame_id.empty())
2305  {
2306  if(depthCameraInfoMsgs[i].P[3] == 0.0 && cameraInfoMsgs[i].P[3] == 0)
2307  {
2308  ROS_ERROR("Parameter %s is false but the frame_id in one of the camera_info "
2309  "topic is empty, so TF between the cameras cannot be computed!",
2310  rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str());
2311  return false;
2312  }
2313  else
2314  {
2315  static bool warned = false;
2316  if(!warned)
2317  {
2318  ROS_WARN("Parameter %s is false but the frame_id in one of the "
2319  "camera_info topic is empty, so TF between the cameras cannot be "
2320  "computed! However, the baseline can be computed from the calibration, "
2321  "we will use this one instead of TF. This message is only printed once...",
2322  rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str());
2323  warned = true;
2324  }
2325  }
2326  }
2327  else
2328  {
2329  stereoTransform = getTransform(
2330  depthCameraInfoMsgs[i].header.frame_id,
2331  cameraInfoMsgs[i].header.frame_id,
2332  cameraInfoMsgs[i].header.stamp,
2333  listener,
2334  waitForTransform);
2335  if(stereoTransform.isNull())
2336  {
2337  ROS_ERROR("Parameter %s is false but we cannot get TF between the two cameras!", rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str());
2338  return false;
2339  }
2340  else if(stereoTransform.isIdentity())
2341  {
2342  ROS_ERROR("Parameter %s is false but we cannot get a valid TF between the two cameras! "
2343  "Identity transform returned between left and right cameras. Verify that if TF between "
2344  "the cameras is valid: \"rosrun tf tf_echo %s %s\".",
2345  rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str(),
2346  depthCameraInfoMsgs[i].header.frame_id.c_str(),
2347  cameraInfoMsgs[i].header.frame_id.c_str());
2348  return false;
2349  }
2350  }
2351  }
2352 
2353  rtabmap::StereoCameraModel stereoModel = rtabmap_conversions::stereoCameraModelFromROS(cameraInfoMsgs[i], depthCameraInfoMsgs[i], localTransform, stereoTransform);
2354 
2355  if(stereoModel.baseline() > 10.0)
2356  {
2357  static bool shown = false;
2358  if(!shown)
2359  {
2360  ROS_WARN("Detected baseline (%f m) is quite large! Is your "
2361  "right camera_info P(0,3) correctly set? Note that "
2362  "baseline=-P(0,3)/P(0,0). You may need to calibrate your camera. "
2363  "This warning is printed only once.",
2364  stereoModel.baseline());
2365  shown = true;
2366  }
2367  }
2368  else if(stereoModel.baseline() == 0 && alreadRectifiedImages)
2369  {
2370  rtabmap::Transform stereoTransform;
2371  if( !cameraInfoMsgs[i].header.frame_id.empty() &&
2372  !depthCameraInfoMsgs[i].header.frame_id.empty())
2373  {
2374  stereoTransform = getTransform(
2375  cameraInfoMsgs[i].header.frame_id,
2376  depthCameraInfoMsgs[i].header.frame_id,
2377  cameraInfoMsgs[i].header.stamp,
2378  listener,
2379  waitForTransform);
2380  }
2381  if(stereoTransform.isNull() || stereoTransform.x()<=0)
2382  {
2383  if(cameraInfoMsgs[i].header.frame_id.empty() || depthCameraInfoMsgs[i].header.frame_id.empty())
2384  {
2385  ROS_WARN("We cannot estimated the baseline of the rectified images with tf! (camera_info topics have empty frame_id)");
2386  }
2387  else
2388  {
2389  ROS_WARN("We cannot estimated the baseline of the rectified images with tf! (%s->%s = %s)",
2390  depthCameraInfoMsgs[i].header.frame_id.c_str(), cameraInfoMsgs[i].header.frame_id.c_str(), stereoTransform.prettyPrint().c_str());
2391  }
2392  }
2393  else
2394  {
2395  static bool warned = false;
2396  if(!warned)
2397  {
2398  ROS_WARN("Right camera info doesn't have Tx set but we are assuming that stereo images are already rectified (see %s parameter). While not "
2399  "recommended, we used TF to get the baseline (%s->%s = %fm) for convenience (e.g., D400 ir stereo issue). It is preferred to feed "
2400  "a valid right camera info if stereo images are already rectified. This message is only printed once...",
2401  rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str(),
2402  depthCameraInfoMsgs[i].header.frame_id.c_str(), cameraInfoMsgs[i].header.frame_id.c_str(), stereoTransform.x());
2403  warned = true;
2404  }
2405  stereoModel = rtabmap::StereoCameraModel(
2406  stereoModel.left().fx(),
2407  stereoModel.left().fy(),
2408  stereoModel.left().cx(),
2409  stereoModel.left().cy(),
2410  stereoTransform.x(),
2411  stereoModel.localTransform(),
2412  stereoModel.left().imageSize());
2413  }
2414  }
2415  stereoCameraModels.push_back(stereoModel);
2416  }
2417 
2418  if(localKeyPoints && localKeyPointsMsgs.size() == cameraInfoMsgs.size())
2419  {
2420  rtabmap_conversions::keypointsFromROS(localKeyPointsMsgs[i], *localKeyPoints, imageWidth*i);
2421  }
2422  if(localPoints3d && localPoints3dMsgs.size() == cameraInfoMsgs.size())
2423  {
2424  // Points should be in base frame
2425  rtabmap_conversions::points3fFromROS(localPoints3dMsgs[i], *localPoints3d, localTransform);
2426  }
2427  if(localDescriptors && localDescriptorsMsgs.size() == cameraInfoMsgs.size())
2428  {
2429  localDescriptors->push_back(localDescriptorsMsgs[i]);
2430  }
2431  }
2432  return true;
2433 }
2434 
2436  const cv_bridge::CvImageConstPtr& leftImageMsg,
2437  const cv_bridge::CvImageConstPtr& rightImageMsg,
2438  const sensor_msgs::CameraInfo& leftCamInfoMsg,
2439  const sensor_msgs::CameraInfo& rightCamInfoMsg,
2440  const std::string & frameId,
2441  const std::string & odomFrameId,
2442  const ros::Time & odomStamp,
2443  cv::Mat & left,
2444  cv::Mat & right,
2445  rtabmap::StereoCameraModel & stereoModel,
2446  tf::TransformListener & listener,
2447  double waitForTransform,
2448  bool alreadyRectified)
2449 {
2450  UASSERT(leftImageMsg.get() && rightImageMsg.get());
2451 
2452  if(!(leftImageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
2453  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
2454  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0 ||
2455  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
2456  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
2457  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
2458  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0) ||
2459  !(rightImageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
2460  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
2461  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0 ||
2462  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
2463  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
2464  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
2465  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0))
2466  {
2467  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8");
2468  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8 Current left=%s and right=%s",
2469  leftImageMsg->encoding.c_str(),
2470  rightImageMsg->encoding.c_str());
2471  return false;
2472  }
2473 
2474  if(leftImageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
2475  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0)
2476  {
2477  left = leftImageMsg->image.clone();
2478  }
2479  else if(leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
2480  {
2481  left = cv_bridge::cvtColor(leftImageMsg, "mono8")->image;
2482  }
2483  else
2484  {
2485  left = cv_bridge::cvtColor(leftImageMsg, "bgr8")->image;
2486  }
2487  if(rightImageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
2488  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0)
2489  {
2490  right = rightImageMsg->image.clone();
2491  }
2492  else
2493  {
2494  right = cv_bridge::cvtColor(rightImageMsg, "mono8")->image;
2495  }
2496 
2497  rtabmap::Transform localTransform = getTransform(frameId, leftImageMsg->header.frame_id, leftImageMsg->header.stamp, listener, waitForTransform);
2498  if(localTransform.isNull())
2499  {
2500  return false;
2501  }
2502  // sync with odometry stamp
2503  if(!odomFrameId.empty() && odomStamp != leftImageMsg->header.stamp)
2504  {
2506  frameId,
2507  odomFrameId,
2508  odomStamp,
2509  leftImageMsg->header.stamp,
2510  listener,
2511  waitForTransform);
2512  if(sensorT.isNull())
2513  {
2514  ROS_WARN("Could not get odometry value for stereo msg stamp (%fs). Latest odometry "
2515  "stamp is %fs. The stereo image pose will not be synchronized with odometry.", leftImageMsg->header.stamp.toSec(), odomStamp.toSec());
2516  }
2517  else
2518  {
2519  localTransform = sensorT * localTransform;
2520  }
2521  }
2522 
2523  rtabmap::Transform stereoTransform;
2524  if(!alreadyRectified)
2525  {
2526  stereoTransform = getTransform(
2527  rightCamInfoMsg.header.frame_id,
2528  leftCamInfoMsg.header.frame_id,
2529  leftCamInfoMsg.header.stamp,
2530  listener,
2531  waitForTransform);
2532  if(stereoTransform.isNull())
2533  {
2534  ROS_ERROR("Parameter %s is false but we cannot get TF between the two cameras!", rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str());
2535  return false;
2536  }
2537  }
2538 
2539  stereoModel = rtabmap_conversions::stereoCameraModelFromROS(leftCamInfoMsg, rightCamInfoMsg, localTransform, stereoTransform);
2540 
2541  if(stereoModel.baseline() > 10.0)
2542  {
2543  static bool shown = false;
2544  if(!shown)
2545  {
2546  ROS_WARN("Detected baseline (%f m) is quite large! Is your "
2547  "right camera_info P(0,3) correctly set? Note that "
2548  "baseline=-P(0,3)/P(0,0). You may need to calibrate your camera. "
2549  "This warning is printed only once.",
2550  stereoModel.baseline());
2551  shown = true;
2552  }
2553  }
2554  else if(stereoModel.baseline() == 0 && alreadyRectified)
2555  {
2556  rtabmap::Transform stereoTransform = getTransform(
2557  leftCamInfoMsg.header.frame_id,
2558  rightCamInfoMsg.header.frame_id,
2559  leftCamInfoMsg.header.stamp,
2560  listener,
2561  waitForTransform);
2562  if(stereoTransform.isNull() || stereoTransform.x()<=0)
2563  {
2564  ROS_WARN("We cannot estimated the baseline of the rectified images with tf! (%s->%s = %s)",
2565  rightCamInfoMsg.header.frame_id.c_str(), leftCamInfoMsg.header.frame_id.c_str(), stereoTransform.prettyPrint().c_str());
2566  }
2567  else
2568  {
2569  static bool warned = false;
2570  if(!warned)
2571  {
2572  ROS_WARN("Right camera info doesn't have Tx set but we are assuming that stereo images are already rectified (see %s parameter). While not "
2573  "recommended, we used TF to get the baseline (%s->%s = %fm) for convenience (e.g., D400 ir stereo issue). It is preferred to feed "
2574  "a valid right camera info if stereo images are already rectified. This message is only printed once...",
2575  rtabmap::Parameters::kRtabmapImagesAlreadyRectified().c_str(),
2576  rightCamInfoMsg.header.frame_id.c_str(), leftCamInfoMsg.header.frame_id.c_str(), stereoTransform.x());
2577  warned = true;
2578  }
2579  stereoModel = rtabmap::StereoCameraModel(
2580  stereoModel.left().fx(),
2581  stereoModel.left().fy(),
2582  stereoModel.left().cx(),
2583  stereoModel.left().cy(),
2584  stereoTransform.x(),
2585  stereoModel.localTransform(),
2586  stereoModel.left().imageSize());
2587  }
2588  }
2589  return true;
2590 }
2591 
2593  const sensor_msgs::LaserScan & scan2dMsg,
2594  const std::string & frameId,
2595  const std::string & odomFrameId,
2596  const ros::Time & odomStamp,
2597  rtabmap::LaserScan & scan,
2598  tf::TransformListener & listener,
2599  double waitForTransform,
2600  bool outputInFrameId)
2601 {
2602  // scan message validation check
2603  if(scan2dMsg.angle_increment == 0.0f) {
2604  ROS_ERROR("convertScanMsg: angle_increment should not be 0!");
2605  return false;
2606  }
2607  if(scan2dMsg.range_min > scan2dMsg.range_max) {
2608  ROS_ERROR("convertScanMsg: range_min (%f) should be smaller than range_max (%f)!", scan2dMsg.range_min, scan2dMsg.range_max);
2609  return false;
2610  }
2611  if(scan2dMsg.angle_increment > 0 && scan2dMsg.angle_max < scan2dMsg.angle_min) {
2612  ROS_ERROR("convertScanMsg: Angle increment (%f) should be negative if angle_min(%f) > angle_max(%f)!", scan2dMsg.angle_increment, scan2dMsg.angle_min, scan2dMsg.angle_max);
2613  return false;
2614  }
2615  else if (scan2dMsg.angle_increment < 0 && scan2dMsg.angle_max > scan2dMsg.angle_min) {
2616  ROS_ERROR("convertScanMsg: Angle increment (%f) should positive if angle_min(%f) < angle_max(%f)!", scan2dMsg.angle_increment, scan2dMsg.angle_min, scan2dMsg.angle_max);
2617  return false;
2618  }
2619 
2620  // make sure the frame of the laser is updated during the whole scan time
2622  scan2dMsg.header.frame_id,
2623  odomFrameId.empty()?frameId:odomFrameId,
2624  scan2dMsg.header.stamp,
2625  scan2dMsg.header.stamp + ros::Duration().fromSec(scan2dMsg.ranges.size()*scan2dMsg.time_increment),
2626  listener,
2627  waitForTransform);
2628  if(tmpT.isNull())
2629  {
2630  return false;
2631  }
2632 
2633  rtabmap::Transform scanLocalTransform = getTransform(
2634  frameId,
2635  scan2dMsg.header.frame_id,
2636  scan2dMsg.header.stamp,
2637  listener,
2638  waitForTransform);
2639  if(scanLocalTransform.isNull())
2640  {
2641  return false;
2642  }
2643 
2644  //transform in frameId_ frame
2645  sensor_msgs::PointCloud2 scanOut;
2647  projection.transformLaserScanToPointCloud(odomFrameId.empty()?frameId:odomFrameId, scan2dMsg, scanOut, listener);
2648 
2649  //transform back in laser frame
2650  rtabmap::Transform laserToOdom = getTransform(
2651  scan2dMsg.header.frame_id,
2652  odomFrameId.empty()?frameId:odomFrameId,
2653  scan2dMsg.header.stamp,
2654  listener,
2655  waitForTransform);
2656  if(laserToOdom.isNull())
2657  {
2658  return false;
2659  }
2660 
2661  // sync with odometry stamp
2662  if(!odomFrameId.empty() && odomStamp != scan2dMsg.header.stamp)
2663  {
2665  frameId,
2666  odomFrameId,
2667  odomStamp,
2668  scan2dMsg.header.stamp,
2669  listener,
2670  waitForTransform);
2671  if(sensorT.isNull())
2672  {
2673  ROS_WARN("Could not get odometry value for laser scan stamp (%fs). Latest odometry "
2674  "stamp is %fs. The laser scan pose will not be synchronized with odometry.", scan2dMsg.header.stamp.toSec(), odomStamp.toSec());
2675  }
2676  else
2677  {
2678  //ROS_WARN("scan correction = %s (time diff=%fs)", sensorT.prettyPrint().c_str(), fabs(scan2dMsg->header.stamp.toSec()-odomStamp.toSec()));
2679  scanLocalTransform = sensorT * scanLocalTransform;
2680  }
2681  }
2682 
2683  if(outputInFrameId)
2684  {
2685  laserToOdom *= scanLocalTransform;
2686  }
2687 
2688  bool hasIntensity = false;
2689  for(unsigned int i=0; i<scanOut.fields.size(); ++i)
2690  {
2691  if(scanOut.fields[i].name.compare("intensity") == 0)
2692  {
2693  if(scanOut.fields[i].datatype == sensor_msgs::PointField::FLOAT32)
2694  {
2695  hasIntensity = true;
2696  }
2697  else
2698  {
2699  static bool warningShown = false;
2700  if(!warningShown)
2701  {
2702  ROS_WARN("The input scan cloud has an \"intensity\" field "
2703  "but the datatype (%d) is not supported. Intensity will be ignored. "
2704  "This message is only shown once.", scanOut.fields[i].datatype);
2705  warningShown = true;
2706  }
2707  }
2708  }
2709  }
2710 
2712  cv::Mat data;
2713  if(hasIntensity)
2714  {
2715  pcl::PointCloud<pcl::PointXYZI>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZI>);
2716  pcl::fromROSMsg(scanOut, *pclScan);
2717  pclScan->is_dense = true;
2718  data = rtabmap::util3d::laserScan2dFromPointCloud(*pclScan, laserToOdom).data(); // put back in laser frame
2720  }
2721  else
2722  {
2723  pcl::PointCloud<pcl::PointXYZ>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZ>);
2724  pcl::fromROSMsg(scanOut, *pclScan);
2725  pclScan->is_dense = true;
2726  data = rtabmap::util3d::laserScan2dFromPointCloud(*pclScan, laserToOdom).data(); // put back in laser frame
2728  }
2729 
2730  rtabmap::Transform zAxis(0,0,1,0,0,0);
2731  if((scanLocalTransform.rotation()*zAxis).z() < 0)
2732  {
2733  cv::Mat flipScan;
2734  cv::flip(data, flipScan, 1);
2735  data = flipScan;
2736  }
2737 
2738  scan = rtabmap::LaserScan(
2739  data,
2740  format,
2741  scan2dMsg.range_min,
2742  scan2dMsg.range_max,
2743  scan2dMsg.angle_min,
2744  scan2dMsg.angle_max,
2745  scan2dMsg.angle_increment,
2746  outputInFrameId?rtabmap::Transform::getIdentity():scanLocalTransform);
2747 
2748  return true;
2749 }
2750 
// Converts a sensor_msgs::PointCloud2 (3D lidar) into a rtabmap::LaserScan:
// looks up the base->laser transform at the cloud stamp, optionally corrects
// it for the robot motion between the odometry stamp and the cloud stamp,
// then builds the scan (optionally decimated to maxPoints / clipped to
// maxRange, and flagged as 2D when is2D is true).
// Returns false when the sensor TF is unavailable.
// NOTE(review): Doxygen stripped the signature line and the `sensorT`
// declaration line from this listing.
2752  const sensor_msgs::PointCloud2 & scan3dMsg,
2753  const std::string & frameId,
2754  const std::string & odomFrameId,
2755  const ros::Time & odomStamp,
2756  rtabmap::LaserScan & scan,
2757  tf::TransformListener & listener,
2758  double waitForTransform,
2759  int maxPoints,
2760  float maxRange,
2761  bool is2D)
2762 {
// Sanity check: the raw buffer size must match the declared layout.
2763  UASSERT_MSG(scan3dMsg.data.size() == scan3dMsg.row_step*scan3dMsg.height,
2764  uFormat("data=%d row_step=%d height=%d", scan3dMsg.data.size(), scan3dMsg.row_step, scan3dMsg.height).c_str());
2765 
2766  rtabmap::Transform scanLocalTransform = getTransform(frameId, scan3dMsg.header.frame_id, scan3dMsg.header.stamp, listener, waitForTransform);
2767  if(scanLocalTransform.isNull())
2768  {
2769  ROS_ERROR("TF of received scan cloud at time %fs is not set, aborting rtabmap update.", scan3dMsg.header.stamp.toSec());
2770  return false;
2771  }
2772 
2773  // sync with odometry stamp
// Correct the sensor pose by the robot displacement between the odometry
// stamp and the cloud stamp (the `sensorT = getTransform(` line was stripped
// by the doc generator).
2774  if(!odomFrameId.empty() && odomStamp != scan3dMsg.header.stamp)
2775  {
2777  frameId,
2778  odomFrameId,
2779  odomStamp,
2780  scan3dMsg.header.stamp,
2781  listener,
2782  waitForTransform);
2783  if(sensorT.isNull())
2784  {
2785  ROS_WARN("Could not get odometry value for laser scan stamp (%fs). Latest odometry "
2786  "stamp is %fs. The 3d laser scan pose will not be synchronized with odometry.", scan3dMsg.header.stamp.toSec(), odomStamp.toSec());
2787  }
2788  else
2789  {
2790  scanLocalTransform = sensorT * scanLocalTransform;
2791  }
2792  }
2793  scan = rtabmap::util3d::laserScanFromPointCloud(scan3dMsg, true, is2D);
2794  scan = rtabmap::LaserScan(scan, maxPoints, maxRange, scanLocalTransform);
2795  return true;
2796 }
2797 
// De-skews a rotating-lidar PointCloud2 using its per-point timestamp channel.
// Two modes, selected by `listener`:
//  - listener != 0: per-point (per-column/row) poses come from TF against
//    `fixedFrameId` (or endpoint poses only, interpolated, when slerp=true);
//  - listener == 0: a constant-velocity model (`velocity`, `previousStamp`)
//    is used; slerp must be true in this mode.
// The time channel may be named t/time/stamps/timestamp with datatype
// UINT32 (6, ns offsets), FLOAT32 (7, s offsets) or FLOAT64 (8, absolute
// seconds — with heuristics for ns/us/ms encodings). After correction the
// per-point time deltas are zeroed so downstream consumers can tell the
// cloud is de-skewed. Returns false on any validation or TF failure.
// NOTE(review): Doxygen stripped the signature line and a few declaration
// lines (e.g. `firstPose`/`lastPose` assignments, `rtabmap::Transform
// transform;`) from this listing.
2799  const sensor_msgs::PointCloud2 & input,
2800  sensor_msgs::PointCloud2 & output,
2801  const std::string & fixedFrameId,
2802  tf::TransformListener * listener,
2803  double waitForTransform,
2804  bool slerp,
2805  const rtabmap::Transform & velocity,
2806  double previousStamp)
2807 {
// Validate the arguments required by the selected mode.
2808  if(listener != 0)
2809  {
2810  if(input.header.frame_id.empty())
2811  {
2812  ROS_ERROR("Input cloud has empty frame_id!");
2813  return false;
2814  }
2815 
2816  if(fixedFrameId.empty())
2817  {
2818  ROS_ERROR("fixedFrameId parameter should be set!");
2819  return false;
2820  }
2821  }
2822  else
2823  {
2824  if(!slerp)
2825  {
2826  ROS_ERROR("slerp should be true when constant velocity model is used!");
2827  return false;
2828  }
2829 
2830  if(previousStamp <= 0.0)
2831  {
2832  ROS_ERROR("previousStamp should be >0 when constant velocity model is used!");
2833  return false;
2834  }
2835 
2836  if(velocity.isNull())
2837  {
2838  ROS_ERROR("velocity should be valid when constant velocity model is used!");
2839  return false;
2840  }
2841  }
2842 
// Locate the byte offsets of the time and x/y/z channels in the point layout.
// x/y/z are asserted to be FLOAT32 (datatype 7).
2843  int offsetTime = -1;
2844  int offsetX = -1;
2845  int offsetY = -1;
2846  int offsetZ = -1;
2847  int timeDatatype = 6;
2848  for(size_t i=0; i<input.fields.size(); ++i)
2849  {
2850  if(input.fields[i].name.compare("t") == 0 ||
2851  input.fields[i].name.compare("time") == 0 ||
2852  input.fields[i].name.compare("stamps") == 0 ||
2853  input.fields[i].name.compare("timestamp") == 0)
2854  {
2855  if(offsetTime != -1)
2856  {
2857  ROS_WARN("The input cloud should have only one of these fields: t, time, stamps or timestamp. Overriding with %s.", input.fields[i].name.c_str());
2858  }
2859  offsetTime = input.fields[i].offset;
2860  timeDatatype = input.fields[i].datatype;
2861  }
2862  else if(input.fields[i].name.compare("x") == 0)
2863  {
2864  ROS_ASSERT(input.fields[i].datatype==7);
2865  offsetX = input.fields[i].offset;
2866  }
2867  else if(input.fields[i].name.compare("y") == 0)
2868  {
2869  ROS_ASSERT(input.fields[i].datatype==7);
2870  offsetY = input.fields[i].offset;
2871  }
2872  else if(input.fields[i].name.compare("z") == 0)
2873  {
2874  ROS_ASSERT(input.fields[i].datatype==7);
2875  offsetZ = input.fields[i].offset;
2876  }
2877  }
2878 
2879  if(offsetTime < 0)
2880  {
2881  ROS_ERROR("Input cloud doesn't have \"t\", \"time\", \"stamps\" or \"timestamp\" field!");
2882  std::string fieldsReceived;
2883  for(size_t i=0; i<input.fields.size(); ++i)
2884  {
2885  fieldsReceived += input.fields[i].name + " ";
2886  }
2887  ROS_ERROR("Input cloud has these fields: %s", fieldsReceived.c_str());
2888  return false;
2889  }
2890  if(offsetX < 0)
2891  {
2892  ROS_ERROR("Input cloud doesn't have \"x\" field!");
2893  return false;
2894  }
2895  if(offsetY < 0)
2896  {
2897  ROS_ERROR("Input cloud doesn't have \"y\" field!");
2898  return false;
2899  }
2900  if(offsetZ < 0)
2901  {
2902  ROS_ERROR("Input cloud doesn't have \"z\" field!");
2903  return false;
2904  }
2905  if(input.height == 0)
2906  {
2907  ROS_ERROR("Input cloud height is zero!");
2908  return false;
2909  }
2910  if(input.width == 0)
2911  {
2912  ROS_ERROR("Input cloud width is zero!");
2913  return false;
2914  }
2915 
// Heuristic: in a wide cloud the time varies along columns (ouster layout),
// otherwise along rows (velodyne layout) — see the diagrams further below.
2916  bool timeOnColumns = input.width > input.height;
2917 
2918  // Get latest timestamp
// First/last stamps are read from the first and last points; if the channel
// is not monotonically ordered, every point is scanned for min/max (one-time
// warning). One branch per supported time datatype.
2919  ros::Time firstStamp;
2920  ros::Time lastStamp;
2921  if(timeDatatype == 6) // UINT32
2922  {
2923  unsigned int nsecFirst = *((const unsigned int*)(&input.data[0]+offsetTime));
2924  unsigned int nsecLast = *((const unsigned int*)(&input.data[(input.width-1)*input.point_step + input.row_step*(input.height-1)]+offsetTime));
2925 
2926  if(nsecFirst > nsecLast)
2927  {
2928  // scans are not ordered, we need to search min/max
2929  static bool warned = false;
2930  if(!warned) {
2931  ROS_WARN("Timestamp channel is not ordered, we will have to parse every scans to "
2932  "determinate first and last time offsets. This will add slightly computation "
2933  "time. This warning is only shown once.");
2934  warned = true;
2935  }
2936  if(timeOnColumns)
2937  {
2938  for(size_t i=0; i<input.width; ++i)
2939  {
2940  unsigned int nsec = *((const unsigned int*)(&input.data[(i)*input.point_step]+offsetTime));
2941  if(nsec < nsecFirst)
2942  {
2943  nsecFirst = nsec;
2944  }
2945  else if(nsec > nsecLast)
2946  {
2947  nsecLast = nsec;
2948  }
2949  }
2950  }
2951  else
2952  {
2953  for(size_t i=0; i<input.height; ++i)
2954  {
2955  unsigned int nsec = *((const unsigned int*)(&input.data[input.row_step*(i)]+offsetTime));
2956  if(nsec < nsecFirst)
2957  {
2958  nsecFirst = nsec;
2959  }
2960  else if(nsec > nsecLast)
2961  {
2962  nsecLast = nsec;
2963  }
2964  }
2965  }
2966  }
2967 
// UINT32 stamps are nanosecond offsets relative to the header stamp.
2968  firstStamp = input.header.stamp+ros::Duration(0, nsecFirst);
2969  lastStamp = input.header.stamp+ros::Duration(0, nsecLast);
2970  }
2971  else if(timeDatatype == 7) // FLOAT32
2972  {
2973  float secFirst = *((const float*)(&input.data[0]+offsetTime));
2974  float secLast = *((const float*)(&input.data[(input.width-1)*input.point_step + input.row_step*(input.height-1)]+offsetTime));
2975 
2976  if(secFirst > secLast)
2977  {
2978  // scans are not ordered, we need to search min/max
2979  static bool warned = false;
2980  if(!warned) {
2981  ROS_WARN("Timestamp channel is not ordered, we will have to parse every scans to "
2982  "determinate first and last time offsets. This will add slightly computation "
2983  "time. This warning is only shown once.");
2984  warned = true;
2985  }
2986  if(timeOnColumns)
2987  {
2988  for(size_t i=0; i<input.width; ++i)
2989  {
2990  float sec = *((const float*)(&input.data[(i)*input.point_step]+offsetTime));
2991  if(sec < secFirst)
2992  {
2993  secFirst = sec;
2994  }
2995  else if(sec > secLast)
2996  {
2997  secLast = sec;
2998  }
2999  }
3000  }
3001  else
3002  {
3003  for(size_t i=0; i<input.height; ++i)
3004  {
3005  float sec = *((const float*)(&input.data[input.row_step*(i)]+offsetTime));
3006  if(sec < secFirst)
3007  {
3008  secFirst = sec;
3009  }
3010  else if(sec > secLast)
3011  {
3012  secLast = sec;
3013  }
3014  }
3015  }
3016  }
3017 
// FLOAT32 stamps are second offsets relative to the header stamp.
3018  firstStamp = input.header.stamp+ros::Duration().fromSec(secFirst);
3019  lastStamp = input.header.stamp+ros::Duration().fromSec(secLast);
3020  }
3021  else if(timeDatatype == 8) // FLOAT64
3022  {
3023  double secFirst = *((const double*)(&input.data[0]+offsetTime));
3024  double secLast = *((const double*)(&input.data[(input.width-1)*input.point_step + input.row_step*(input.height-1)]+offsetTime));
3025  if(secFirst > secLast)
3026  {
3027  // scans are not ordered, we need to search min/max
3028  static bool warned = false;
3029  if(!warned) {
3030  ROS_WARN("Timestamp channel is not ordered, we will have to parse every scans to "
3031  "determinate first and last time offsets. This will add slightly computation "
3032  "time. This warning is only shown once.");
3033  warned = true;
3034  }
3035  if(timeOnColumns)
3036  {
3037  for(size_t i=0; i<input.width; ++i)
3038  {
3039  double sec = *((const double*)(&input.data[(i)*input.point_step]+offsetTime));
3040  if(sec < secFirst)
3041  {
3042  secFirst = sec;
3043  }
3044  else if(sec > secLast)
3045  {
3046  secLast = sec;
3047  }
3048  }
3049  }
3050  else
3051  {
3052  for(size_t i=0; i<input.height; ++i)
3053  {
3054  double sec = *((const double*)(&input.data[input.row_step*(i)]+offsetTime));
3055  if(sec < secFirst)
3056  {
3057  secFirst = sec;
3058  }
3059  else if(sec > secLast)
3060  {
3061  secLast = sec;
3062  }
3063  }
3064  }
3065  }
3066 
// FLOAT64 stamps are absolute times; magnitude heuristics detect clouds that
// encoded nanoseconds/microseconds/milliseconds instead of seconds.
3067  if(secFirst > 1.e18)
3068  {
3069  // convert nanoseconds to seconds
3070  secFirst /= 1.e9;
3071  secLast /= 1.e9;
3072  }
3073  else if(secFirst > 1.e15)
3074  {
3075  // convert microseconds to seconds
3076  secFirst /= 1.e6;
3077  secLast /= 1.e6;
3078  }
3079  else if(secFirst > 1.e12)
3080  {
3081  // convert milliseconds to seconds
3082  secFirst /= 1.e3;
3083  secLast /= 1.e3;
3084  }
3085 
3086  firstStamp = ros::Time(secFirst);
3087  lastStamp = ros::Time(secLast);
3088  }
3089  else
3090  {
3091  ROS_ERROR("Not supported time datatype %d!", timeDatatype);
3092  return false;
3093  }
// NOTE(review): this range check is unreachable — every datatype outside
// [6,8] already returned false in the else branch above. Kept as-is
// (doc-only pass); candidate for removal in the real source.
3094  if(!(timeDatatype >=6 && timeDatatype<=8))
3095  {
3096  ROS_ERROR("Only lidar timestamp channel data type 6, 7 or 8 is supported! (received %d)", timeDatatype);
3097  return false;
3098  }
3099  if(lastStamp < firstStamp)
3100  {
3101  ROS_ERROR("Last stamp (%f) is smaller than first stamp (%f) (header=%f)!", lastStamp.toSec(), firstStamp.toSec(), input.header.stamp.toSec());
3102  return false;
3103  }
3104  else if(lastStamp == firstStamp)
3105  {
3106  ROS_ERROR("First and last stamps in the scan are the same (%f) (header=%f)!", lastStamp.toSec(), input.header.stamp.toSec());
3107  return false;
3108  }
3109 
// Wait once for TF availability over the whole scan interval, so the
// per-point lookups below can use a zero timeout.
3110  std::string errorMsg;
3111  if(listener != 0 &&
3112  waitForTransform>0.0 &&
3113  !listener->waitForTransform(
3114  input.header.frame_id,
3115  firstStamp,
3116  input.header.frame_id,
3117  lastStamp,
3118  fixedFrameId,
3119  ros::Duration(waitForTransform),
3120  ros::Duration(0.01),
3121  &errorMsg))
3122  {
3123  ROS_ERROR("Could not estimate motion of %s accordingly to fixed frame %s between stamps %f and %f! (%s)",
3124  input.header.frame_id.c_str(),
3125  fixedFrameId.c_str(),
3126  firstStamp.toSec(),
3127  lastStamp.toSec(),
3128  errorMsg.c_str());
3129  return false;
3130  }
3131 
// slerp mode: compute only the sensor poses at the first and last stamps
// (relative to the msg stamp) and interpolate per point later.
3132  rtabmap::Transform firstPose;
3133  rtabmap::Transform lastPose;
3134  double scanTime = 0;
3135  if(slerp)
3136  {
3137  if(listener != 0)
3138  {
// NOTE(review): the assignment lines (`firstPose = ...` / `lastPose = ...`,
// hyperlinks to a moving-transform helper) were stripped by the doc
// generator; only their argument lists remain below.
3140  input.header.frame_id,
3141  fixedFrameId,
3142  input.header.stamp,
3143  firstStamp,
3144  *listener,
3145  0);
3147  input.header.frame_id,
3148  fixedFrameId,
3149  input.header.stamp,
3150  lastStamp,
3151  *listener,
3152  0);
3153  }
3154  else
3155  {
3156  float vx,vy,vz, vroll,vpitch,vyaw;
3157  velocity.getTranslationAndEulerAngles(vx,vy,vz, vroll,vpitch,vyaw);
3158 
3159  // We need three poses:
3160  // 1- The pose of base frame in odom frame at first stamp
3161  // 2- The pose of base frame in odom frame at msg stamp
3162  // 3- The pose of base frame in odom frame at last stamp
3163  UASSERT(firstStamp.toSec() >= previousStamp);
3164  UASSERT(lastStamp.toSec() > previousStamp);
3165  double dt1 = firstStamp.toSec() - previousStamp;
3166  double dt2 = input.header.stamp.toSec() - previousStamp;
3167  double dt3 = lastStamp.toSec() - previousStamp;
3168 
3169  rtabmap::Transform p1(vx*dt1, vy*dt1, vz*dt1, vroll*dt1, vpitch*dt1, vyaw*dt1);
3170  rtabmap::Transform p2(vx*dt2, vy*dt2, vz*dt2, vroll*dt2, vpitch*dt2, vyaw*dt2);
3171  rtabmap::Transform p3(vx*dt3, vy*dt3, vz*dt3, vroll*dt3, vpitch*dt3, vyaw*dt3);
3172 
3173  // First and last poses are relative to stamp of the msg
3174  firstPose = p2.inverse() * p1;
3175  lastPose = p2.inverse() * p3;
3176  }
3177 
3178  if(firstPose.isNull())
3179  {
3180  ROS_ERROR("Could not get transform of %s accordingly to %s between stamps %f and %f!",
3181  input.header.frame_id.c_str(),
3182  fixedFrameId.empty()?"velocity":fixedFrameId.c_str(),
3183  firstStamp.toSec(),
3184  input.header.stamp.toSec());
3185  return false;
3186  }
3187  if(lastPose.isNull())
3188  {
3189  ROS_ERROR("Could not get transform of %s accordingly to %s between stamps %f and %f!",
3190  input.header.frame_id.c_str(),
3191  fixedFrameId.empty()?"velocity":fixedFrameId.c_str(),
3192  lastStamp.toSec(),
3193  input.header.stamp.toSec());
3194  return false;
3195  }
3196  scanTime = lastStamp.toSec() - firstStamp.toSec();
3197  }
3198  //else tf will be used to get more accurate transforms
3199 
// Work on a copy of the input; points are corrected in place in `output`.
3200  output = input;
3201  ros::Time stamp;
3202  UTimer processingTime;
3203  if(timeOnColumns)
3204  {
3205  // ouster point cloud:
3206  // t1 t2 ...
3207  // ring1 ring1 ...
3208  // ring2 ring2 ...
3209  // ring3 ring4 ...
3210  // ring4 ring3 ...
// One pose per column: all rings in a column share the same stamp.
3211  for(size_t u=0; u<output.width; ++u)
3212  {
3213  if(timeDatatype == 6) // UINT32
3214  {
3215  unsigned int nsec = *((const unsigned int*)(&output.data[u*output.point_step]+offsetTime));
3216  stamp = input.header.stamp+ros::Duration(0, nsec);
3217  }
3218  else if(timeDatatype == 7) //float 32
3219  {
3220  float sec = *((const float*)(&output.data[u*output.point_step]+offsetTime));
3221  stamp = input.header.stamp+ros::Duration().fromSec(sec);
3222  }
3223  else if(timeDatatype == 8) //float64
3224  {
3225  double sec = *((const double*)(&output.data[u*output.point_step]+offsetTime));
3226  if(sec > 1.e18)
3227  {
3228  // convert nanoseconds to seconds
3229  sec /= 1.e9;
3230  }
3231  else if(sec > 1.e15)
3232  {
3233  // convert microseconds to seconds
3234  sec /= 1.e6;
3235  }
3236  else if(sec > 1.e12)
3237  {
3238  // sec milliseconds to seconds
3239  sec /= 1.e3;
3240  }
3241  stamp = ros::Time(sec);
3242  }
3243 
// NOTE(review): the `rtabmap::Transform transform;` declaration line was
// stripped by the doc generator.
3245  if(slerp)
3246  {
3247  transform = firstPose.interpolate((stamp-firstStamp).toSec() / scanTime, lastPose);
3248  }
3249  else
3250  {
// Per-column TF lookup (the assignment line was stripped; zero timeout since
// availability was checked above).
3252  output.header.frame_id,
3253  fixedFrameId,
3254  output.header.stamp,
3255  stamp,
3256  *listener,
3257  0);
3258  if(transform.isNull())
3259  {
3260  ROS_ERROR("Could not get transform of %s accordingly to %s between stamps %f and %f!",
3261  output.header.frame_id.c_str(),
3262  fixedFrameId.c_str(),
3263  stamp.toSec(),
3264  output.header.stamp.toSec());
3265  return false;
3266  }
3267  }
3268 
// Apply the correction to every point (ring) in the column.
3269  for(size_t v=0; v<input.height; ++v)
3270  {
3271  unsigned char * dataPtr = &output.data[v*output.row_step + u*output.point_step];
3272  float & x = *((float*)(dataPtr+offsetX));
3273  float & y = *((float*)(dataPtr+offsetY));
3274  float & z = *((float*)(dataPtr+offsetZ));
3275  pcl::PointXYZ pt(x,y,z);
3277  x = pt.x;
3278  y = pt.y;
3279  z = pt.z;
3280 
3281  // set delta stamp to zero so that on downstream they know the cloud is deskewed
3282  if(timeDatatype == 6) // UINT32
3283  {
3284  *((unsigned int*)(dataPtr+offsetTime)) = 0;
3285  }
3286  else if(timeDatatype == 7)
3287  {
3288  *((float*)(dataPtr+offsetTime)) = 0;
3289  }
3290  else if(timeDatatype == 8)
3291  {
3292  *((double*)(dataPtr+offsetTime)) = 0;
3293  }
3294  }
3295  }
3296  }
3297  else // time on rows
3298  {
3299  // velodyne point cloud:
3300  // t1 ring1 ring2 ring3 ring4
3301  // t2 ring1 ring2 ring3 ring4
3302  // t3 ring1 ring2 ring3 ring4
3303  // t4 ring1 ring2 ring3 ring4
3304  // ... ... ... ... ...
// One pose per row: all rings in a row share the same stamp (mirror of the
// column loop above).
3305  for(size_t v=0; v<output.height; ++v)
3306  {
3307  if(timeDatatype == 6) // UINT32
3308  {
3309  unsigned int nsec = *((const unsigned int*)(&output.data[v*output.row_step]+offsetTime));
3310  stamp = input.header.stamp+ros::Duration(0, nsec);
3311  }
3312  else if(timeDatatype == 7) //float 32
3313  {
3314  float sec = *((const float*)(&output.data[v*output.row_step]+offsetTime));
3315  stamp = input.header.stamp+ros::Duration().fromSec(sec);
3316  }
3317  else if(timeDatatype == 8)
3318  {
3319  double sec = *((const double*)(&output.data[v*output.row_step]+offsetTime));
3320  if(sec > 1.e18)
3321  {
3322  // convert nanoseconds to seconds
3323  sec /= 1.e9;
3324  }
3325  else if(sec > 1.e15)
3326  {
3327  // convert microseconds to seconds
3328  sec /= 1.e6;
3329  }
3330  else if(sec > 1.e12)
3331  {
3332  // sec milliseconds to seconds
3333  sec /= 1.e3;
3334  }
3335  stamp = ros::Time(sec);
3336  }
3337 
3339  if(slerp)
3340  {
3341  transform = firstPose.interpolate((stamp-firstStamp).toSec() / scanTime, lastPose);
3342  }
3343  else
3344  {
3346  output.header.frame_id,
3347  fixedFrameId,
3348  output.header.stamp,
3349  stamp,
3350  *listener,
3351  0);
3352  if(transform.isNull())
3353  {
3354  ROS_ERROR("Could not get transform of %s accordingly to %s between stamps %f and %f!",
3355  output.header.frame_id.c_str(),
3356  fixedFrameId.c_str(),
3357  stamp.toSec(),
3358  output.header.stamp.toSec());
3359  return false;
3360  }
3361  }
3362 
3363  for(size_t u=0; u<input.width; ++u)
3364  {
3365  unsigned char * dataPtr = &output.data[v*output.row_step + u*output.point_step];
3366  float & x = *((float*)(dataPtr+offsetX));
3367  float & y = *((float*)(dataPtr+offsetY));
3368  float & z = *((float*)(dataPtr+offsetZ));
3369  pcl::PointXYZ pt(x,y,z);
3371  x = pt.x;
3372  y = pt.y;
3373  z = pt.z;
3374 
3375  // set delta stamp to zero so that on downstream they know the cloud is deskewed
3376  if(timeDatatype == 6) // UINT32
3377  {
3378  *((unsigned int*)(dataPtr+offsetTime)) = 0;
3379  }
3380  else if(timeDatatype == 7)
3381  {
3382  *((float*)(dataPtr+offsetTime)) = 0;
3383  }
3384  else if(timeDatatype == 8)
3385  {
3386  *((double*)(dataPtr+offsetTime)) = 0;
3387  }
3388  }
3389  }
3390  }
3391  ROS_DEBUG("Lidar deskewing time=%fs", processingTime.elapsed());
3392  return true;
3393 }
3394 
// Public TF-based overload: de-skews `input` into `output` using TF lookups
// against `fixedFrameId` (per-point poses, or endpoint interpolation when
// slerp=true). Delegates to deskew_impl with a null velocity and no previous
// stamp (constant-velocity mode disabled).
3395 bool deskew(
3396  const sensor_msgs::PointCloud2 & input,
3397  sensor_msgs::PointCloud2 & output,
3398  const std::string & fixedFrameId,
3399  tf::TransformListener & listener,
3400  double waitForTransform,
3401  bool slerp)
3402 {
3403  return deskew_impl(input, output, fixedFrameId, &listener, waitForTransform, slerp, rtabmap::Transform(), 0);
3404 }
3405 
// Public constant-velocity overload: de-skews `input` into `output` without
// TF, extrapolating poses from `velocity` since `previousStamp`. Delegates to
// deskew_impl with no listener and slerp forced on (required by that mode).
3406 bool deskew(
3407  const sensor_msgs::PointCloud2 & input,
3408  sensor_msgs::PointCloud2 & output,
3409  double previousStamp,
3410  const rtabmap::Transform & velocity)
3411 {
3412  return deskew_impl(input, output, "", 0, 0, true, velocity, previousStamp);
3413 }
3414 
3415 }
rtabmap::SensorData
tf::poseMsgToEigen
void poseMsgToEigen(const geometry_msgs::Pose &m, Eigen::Affine3d &e)
rtabmap_conversions::convertScanMsg
bool convertScanMsg(const sensor_msgs::LaserScan &scan2dMsg, const std::string &frameId, const std::string &odomFrameId, const ros::Time &odomStamp, rtabmap::LaserScan &scan, tf::TransformListener &listener, double waitForTransform, bool outputInFrameId=false)
Definition: MsgConversion.cpp:2592
rtabmap_conversions::transformToTF
void transformToTF(const rtabmap::Transform &transform, tf::Transform &tfTransform)
Definition: MsgConversion.cpp:52
rtabmap::CameraModel::fx
double fx() const
rtabmap_conversions::transformFromGeometryMsg
rtabmap::Transform transformFromGeometryMsg(const geometry_msgs::Transform &msg)
Definition: MsgConversion.cpp:91
rtabmap_conversions::nodeFromROS
rtabmap::Signature nodeFromROS(const rtabmap_msgs::Node &msg)
Definition: MsgConversion.cpp:1399
rtabmap::CameraModel::cx
double cx() const
rtabmap_conversions::point3fToROS
void point3fToROS(const cv::Point3f &pt, rtabmap_msgs::Point3f &msg)
Definition: MsgConversion.cpp:740
rtabmap_conversions::globalDescriptorsToROS
void globalDescriptorsToROS(const std::vector< rtabmap::GlobalDescriptor > &desc, std::vector< rtabmap_msgs::GlobalDescriptor > &msg)
Definition: MsgConversion.cpp:652
rtabmap_conversions::deskew
bool deskew(const sensor_msgs::PointCloud2 &input, sensor_msgs::PointCloud2 &output, const std::string &fixedFrameId, tf::TransformListener &listener, double waitForTransform, bool slerp=false)
Definition: MsgConversion.cpp:3395
rtabmap::IMU::linearAcceleration
const cv::Vec3d linearAcceleration() const
sensor_msgs::image_encodings::BAYER_RGGB8
const std::string BAYER_RGGB8
point_cloud2_iterator.h
rtabmap::uncompressImage
cv::Mat RTABMAP_CORE_EXPORT uncompressImage(const cv::Mat &bytes)
sensor_msgs::image_encodings::TYPE_8UC1
const std::string TYPE_8UC1
rtabmap::StereoCameraModel
rtabmap_conversions::toCvShare
void toCvShare(const rtabmap_msgs::RGBDImageConstPtr &image, cv_bridge::CvImageConstPtr &rgb, cv_bridge::CvImageConstPtr &depth)
Definition: MsgConversion.cpp:166
rtabmap_conversions::mapGraphToROS
void mapGraphToROS(const std::map< int, rtabmap::Transform > &poses, const std::multimap< int, rtabmap::Link > &links, const rtabmap::Transform &mapToOdom, rtabmap_msgs::MapGraph &msg)
Definition: MsgConversion.cpp:1033
rtabmap::Statistics
rtabmap_conversions::globalDescriptorsFromROS
std::vector< rtabmap::GlobalDescriptor > globalDescriptorsFromROS(const std::vector< rtabmap_msgs::GlobalDescriptor > &msg)
Definition: MsgConversion.cpp:638
tf::transformEigenToTF
void transformEigenToTF(const Eigen::Affine3d &e, tf::Transform &t)
rtabmap::util3d::laserScanToPointCloud2
pcl::PCLPointCloud2::Ptr RTABMAP_CORE_EXPORT laserScanToPointCloud2(const LaserScan &laserScan, const Transform &transform=Transform())
rtabmap_conversions::globalDescriptorFromROS
rtabmap::GlobalDescriptor globalDescriptorFromROS(const rtabmap_msgs::GlobalDescriptor &msg)
Definition: MsgConversion.cpp:626
DurationBase< Duration >::fromSec
Duration & fromSec(double t)
yaw
GLM_FUNC_DECL T yaw(detail::tquat< T, P > const &x)
D
MatrixXcd D
image_encodings.h
bytes
format
std::string format(const std::string &str, const std::vector< std::string > &find, const std::vector< std::string > &replace)
rtabmap::LaserScan::Format
Format
encoding
encoding
Eigen::Transform
K
K
rtabmap_conversions::points2fToROS
void points2fToROS(const std::vector< cv::Point2f > &kpts, std::vector< rtabmap_msgs::Point2f > &msg)
Definition: MsgConversion.cpp:726
boost::shared_ptr
rtabmap_conversions::getTransform
rtabmap::Transform getTransform(const std::string &fromFrameId, const std::string &toFrameId, const ros::Time &stamp, tf::TransformListener &listener, double waitForTransform)
Definition: MsgConversion.cpp:1971
rtabmap_conversions::transformFromPoseMsg
rtabmap::Transform transformFromPoseMsg(const geometry_msgs::Pose &msg, bool ignoreRotationIfNotSet=false)
Definition: MsgConversion.cpp:118
s
RealScalar s
rtabmap_conversions::transformToPoseMsg
void transformToPoseMsg(const rtabmap::Transform &transform, geometry_msgs::Pose &msg)
Definition: MsgConversion.cpp:106
cv_bridge::CvImage::toImageMsg
sensor_msgs::ImagePtr toImageMsg() const
pinhole_camera_model.h
rtabmap_conversions::nodeInfoFromROS
rtabmap::Signature nodeInfoFromROS(const rtabmap_msgs::Node &msg)
Definition: MsgConversion.cpp:1544
rtabmap::Signature::getWords
const std::multimap< int, int > & getWords() const
laser_geometry::LaserProjection
rtabmap::CameraModel::cy
double cy() const
uFormat
std::string uFormat(const char *fmt,...)
ros.h
rtabmap::Transform::getIdentity
static Transform getIdentity()
size
Index size
rtabmap::CameraModel
rtabmap_conversions::mapGraphFromROS
void mapGraphFromROS(const rtabmap_msgs::MapGraph &msg, std::map< int, rtabmap::Transform > &poses, std::multimap< int, rtabmap::Link > &links, rtabmap::Transform &mapToOdom)
Definition: MsgConversion.cpp:1014
rtabmap::Signature::getWordsKpts
const std::vector< cv::KeyPoint > & getWordsKpts() const
Compression.h
rtabmap::LaserScan::data
const cv::Mat & data() const
projection
Expression< Point2 > projection(f, p_cam)
pcl_conversions::toPCL
void toPCL(const pcl_msgs::ModelCoefficients &mc, pcl::ModelCoefficients &pcl_mc)
rtabmap::Signature::getWordsDescriptors
const cv::Mat & getWordsDescriptors() const
rtabmap_conversions::cameraModelToROS
void cameraModelToROS(const rtabmap::CameraModel &model, sensor_msgs::CameraInfo &camInfo)
Definition: MsgConversion.cpp:869
rtabmap_conversions::points3fToROS
void points3fToROS(const std::vector< cv::Point3f > &pts, std::vector< rtabmap_msgs::Point3f > &msg, const rtabmap::Transform &transform=rtabmap::Transform())
Definition: MsgConversion.cpp:777
sensor_msgs::image_encodings::RGBA8
const std::string RGBA8
rtabmap::Signature::mapId
int mapId() const
rtabmap::Signature::getGroundTruthPose
const Transform & getGroundTruthPose() const
rtabmap::GPS
rtabmap::IMU::angularVelocityCovariance
const cv::Mat & angularVelocityCovariance() const
rtabmap_conversions::cameraModelFromROS
rtabmap::CameraModel cameraModelFromROS(const sensor_msgs::CameraInfo &camInfo, const rtabmap::Transform &localTransform=rtabmap::Transform::getIdentity())
Definition: MsgConversion.cpp:795
copy
int EIGEN_BLAS_FUNC() copy(int *n, RealScalar *px, int *incx, RealScalar *py, int *incy)
UStl.h
rtabmap_conversions::deskew_impl
bool deskew_impl(const sensor_msgs::PointCloud2 &input, sensor_msgs::PointCloud2 &output, const std::string &fixedFrameId, tf::TransformListener *listener, double waitForTransform, bool slerp, const rtabmap::Transform &velocity, double previousStamp)
Definition: MsgConversion.cpp:2798
sec
GLM_FUNC_DECL genType sec(genType const &angle)
TimeBase< Time, Duration >::toSec
double toSec() const
rtabmap::Statistics::setPosterior
void setPosterior(const std::map< int, float > &posterior)
rtabmap::Signature::getLabel
const std::string & getLabel() const
rtabmap::StereoCameraModel::baseline
double baseline() const
rtabmap_conversions::globalDescriptorToROS
void globalDescriptorToROS(const rtabmap::GlobalDescriptor &desc, rtabmap_msgs::GlobalDescriptor &msg)
Definition: MsgConversion.cpp:631
y
Matrix3f y
rtabmap_conversions::userDataToROS
void userDataToROS(const cv::Mat &data, rtabmap_msgs::UserData &dataMsg, bool compress)
Definition: MsgConversion.cpp:1844
rtabmap_conversions::odomInfoToStatistics
std::map< std::string, float > odomInfoToStatistics(const rtabmap::OdometryInfo &info)
Definition: MsgConversion.cpp:1568
rtabmap_conversions::linkToROS
void linkToROS(const rtabmap::Link &link, rtabmap_msgs::Link &msg)
Definition: MsgConversion.cpp:568
rtabmap::LaserScan
sensor_msgs::image_encodings::RGB8
const std::string RGB8
rtabmap_conversions::convertRGBDMsgs
bool convertRGBDMsgs(const std::vector< cv_bridge::CvImageConstPtr > &imageMsgs, const std::vector< cv_bridge::CvImageConstPtr > &depthMsgs, const std::vector< sensor_msgs::CameraInfo > &cameraInfoMsgs, const std::vector< sensor_msgs::CameraInfo > &depthCameraInfoMsgs, const std::string &frameId, const std::string &odomFrameId, const ros::Time &odomStamp, cv::Mat &rgb, cv::Mat &depth, std::vector< rtabmap::CameraModel > &cameraModels, std::vector< rtabmap::StereoCameraModel > &stereoCameraModels, tf::TransformListener &listener, double waitForTransform, bool alreadRectifiedImages, const std::vector< std::vector< rtabmap_msgs::KeyPoint > > &localKeyPointsMsgs=std::vector< std::vector< rtabmap_msgs::KeyPoint > >(), const std::vector< std::vector< rtabmap_msgs::Point3f > > &localPoints3dMsgs=std::vector< std::vector< rtabmap_msgs::Point3f > >(), const std::vector< cv::Mat > &localDescriptorsMsgs=std::vector< cv::Mat >(), std::vector< cv::KeyPoint > *localKeyPoints=0, std::vector< cv::Point3f > *localPoints3d=0, cv::Mat *localDescriptors=0)
Definition: MsgConversion.cpp:2042
tf::StampedTransform
pcl::fromROSMsg
void fromROSMsg(const sensor_msgs::PointCloud2 &cloud, pcl::PointCloud< T > &pcl_cloud)
rtabmap::Landmarks
std::map< int, Landmark > Landmarks
rtabmap::EnvSensor::value
const double & value() const
rtabmap::Signature::getWeight
int getWeight() const
rtabmap::Transform::isNull
bool isNull() const
tf::transformTFToEigen
void transformTFToEigen(const tf::Transform &t, Eigen::Affine3d &e)
stereo_camera_model.h
desc
desc
rtabmap_conversions::userDataFromROS
cv::Mat userDataFromROS(const rtabmap_msgs::UserData &dataMsg)
Definition: MsgConversion.cpp:1820
rtabmap::Statistics::setLocalPath
void setLocalPath(const std::vector< int > &localPath)
rtabmap::IMU::linearAccelerationCovariance
const cv::Mat & linearAccelerationCovariance() const
rtabmap_conversions::envSensorFromROS
rtabmap::EnvSensor envSensorFromROS(const rtabmap_msgs::EnvSensor &msg)
Definition: MsgConversion.cpp:665
rtabmap::Landmark
sensor_msgs::image_encodings::TYPE_16UC1
const std::string TYPE_16UC1
p3
p3
rtabmap_conversions::odomInfoFromROS
rtabmap::OdometryInfo odomInfoFromROS(const rtabmap_msgs::OdomInfo &msg, bool ignoreData=false)
Definition: MsgConversion.cpp:1659
rtabmap_conversions::point3fFromROS
cv::Point3f point3fFromROS(const rtabmap_msgs::Point3f &msg)
Definition: MsgConversion.cpp:735
rtabmap_conversions::landmarksFromROS
rtabmap::Landmarks landmarksFromROS(const std::map< int, std::pair< geometry_msgs::PoseWithCovarianceStamped, float > > &tags, const std::string &frameId, const std::string &odomFrameId, const ros::Time &odomStamp, tf::TransformListener &listener, double waitForTransform, double defaultLinVariance, double defaultAngVariance)
Definition: MsgConversion.cpp:1903
rtabmap::LaserScan::kXYI
kXYI
rtabmap_conversions::odomInfoToROS
void odomInfoToROS(const rtabmap::OdometryInfo &info, rtabmap_msgs::OdomInfo &msg, bool ignoreData=false)
Definition: MsgConversion.cpp:1738
rtabmap::compressData
std::vector< unsigned char > RTABMAP_CORE_EXPORT compressData(const cv::Mat &data)
rtabmap::IMU
compress
def compress(data)
rtabmap::Statistics::setRawLikelihood
void setRawLikelihood(const std::map< int, float > &rawLikelihood)
rtabmap_conversions::compressedMatToBytes
void compressedMatToBytes(const cv::Mat &compressed, std::vector< unsigned char > &bytes)
Definition: MsgConversion.cpp:433
rtabmap_conversions::nodeToROS
void nodeToROS(const rtabmap::Signature &signature, rtabmap_msgs::Node &msg)
Definition: MsgConversion.cpp:1463
rtabmap_conversions::stereoCameraModelFromROS
rtabmap::StereoCameraModel stereoCameraModelFromROS(const sensor_msgs::CameraInfo &leftCamInfo, const sensor_msgs::CameraInfo &rightCamInfo, const rtabmap::Transform &localTransform=rtabmap::Transform::getIdentity(), const rtabmap::Transform &stereoTransform=rtabmap::Transform())
Definition: MsgConversion.cpp:934
laser_geometry.h
rtabmap::Transform::prettyPrint
std::string prettyPrint() const
data
int data[]
rtabmap::Statistics::setLoopClosureId
void setLoopClosureId(int id)
vy
StridedVectorType vy(make_vector(y, *n, std::abs(*incy)))
tf_eigen.h
j
std::ptrdiff_t j
stats
bool stats
rtabmap_conversions::keypointsToROS
void keypointsToROS(const std::vector< cv::KeyPoint > &kpts, std::vector< rtabmap_msgs::KeyPoint > &msg)
Definition: MsgConversion.cpp:617
sqrt
GLM_FUNC_DECL vecType< T, P > sqrt(vecType< T, P > const &x)
cv_bridge::toCvCopy
CvImagePtr toCvCopy(const sensor_msgs::CompressedImage &source, const std::string &encoding=std::string())
cv_bridge::CvImage::header
std_msgs::Header header
ROS_DEBUG
#define ROS_DEBUG(...)
rtabmap::EnvSensor::stamp
const double & stamp() const
rtabmap_conversions::nodeDataFromROS
rtabmap::Signature nodeDataFromROS(const rtabmap_msgs::Node &msg)
Definition: MsgConversion.cpp:1535
TimeBase< Time, Duration >::isZero
bool isZero() const
frameId
std::string const * frameId(const M &m)
rtabmap::Transform::x
float & x()
rtabmap::Transform::interpolate
Transform interpolate(float t, const Transform &other) const
rtabmap_conversions::point2fToROS
void point2fToROS(const cv::Point2f &kpt, rtabmap_msgs::Point2f &msg)
Definition: MsgConversion.cpp:710
p2
p2
rtabmap::util3d::laserScanFromPointCloud
LaserScan RTABMAP_CORE_EXPORT laserScanFromPointCloud(const pcl::PointCloud< pcl::PointNormal > &cloud, const pcl::IndicesPtr &indices, const Transform &transform=Transform(), bool filterNaNs=true)
rtabmap::Signature::getWords3
const std::vector< cv::Point3f > & getWords3() const
rtabmap::EnvSensor::Type
Type
rtabmap_conversions::infoFromROS
void infoFromROS(const rtabmap_msgs::Info &info, rtabmap::Statistics &stat)
Definition: MsgConversion.cpp:458
rtabmap_conversions::point2fFromROS
cv::Point2f point2fFromROS(const rtabmap_msgs::Point2f &msg)
Definition: MsgConversion.cpp:705
eigen_msg.h
rtabmap_conversions::keypointToROS
void keypointToROS(const cv::KeyPoint &kpt, rtabmap_msgs::KeyPoint &msg)
Definition: MsgConversion.cpp:585
velocity
Velocity3 velocity(const NavState &X, OptionalJacobian< 3, 9 > H)
rtabmap::LaserScan::kXY
kXY
info
else if n * info
rtabmap::IMU::orientation
const cv::Vec4d & orientation() const
rtabmap::Statistics::setLabels
void setLabels(const std::map< int, std::string > &labels)
rtabmap::EnvSensor::type
const Type & type() const
rtabmap::StereoCameraModel::localTransform
const Transform & localTransform() const
rtabmap::CameraModel::fy
double fy() const
rtabmap::StereoCameraModel::left
const CameraModel & left() const
UASSERT
#define UASSERT(condition)
ROS_WARN
#define ROS_WARN(...)
z
z
uValues
std::vector< V > uValues(const std::map< K, V > &m)
uValue
V uValue(const std::map< K, V > &m, const K &key, const V &defaultValue=V())
rtabmap_conversions::sensorDataToROS
void sensorDataToROS(const rtabmap::SensorData &signature, rtabmap_msgs::SensorData &msg, const std::string &frameId="base_link", bool copyRawData=false)
Definition: MsgConversion.cpp:1252
x
x
rtabmap_conversions::envSensorsFromROS
rtabmap::EnvSensors envSensorsFromROS(const std::vector< rtabmap_msgs::EnvSensor > &msg)
Definition: MsgConversion.cpp:677
rtabmap::Statistics::setProximityDetectionId
void setProximityDetectionId(int id)
rtabmap::StereoCameraModel::isValidForProjection
bool isValidForProjection() const
rtabmap_conversions::toCvCopy
void toCvCopy(const rtabmap_msgs::RGBDImage &image, cv_bridge::CvImagePtr &rgb, cv_bridge::CvImagePtr &depth)
Definition: MsgConversion.cpp:136
tf::poseEigenToMsg
void poseEigenToMsg(const Eigen::Affine3d &e, geometry_msgs::Pose &m)
tf::transformMsgToEigen
void transformMsgToEigen(const geometry_msgs::Transform &m, Eigen::Affine3d &e)
uStrContains
bool uStrContains(const std::string &string, const std::string &substring)
rtabmap::GlobalDescriptor
rtabmap::Statistics::setWeights
void setWeights(const std::map< int, int > &weights)
tf::Transformer::waitForTransform
bool waitForTransform(const std::string &target_frame, const ros::Time &target_time, const std::string &source_frame, const ros::Time &source_time, const std::string &fixed_frame, const ros::Duration &timeout, const ros::Duration &polling_sleep_duration=ros::Duration(0.01), std::string *error_msg=NULL) const
rtabmap::Signature::getStamp
double getStamp() const
tf::Transform
rtabmap::Statistics::setLikelihood
void setLikelihood(const std::map< int, float > &likelihood)
slerp
GLM_FUNC_DECL detail::tquat< T, P > slerp(detail::tquat< T, P > const &x, detail::tquat< T, P > const &y, T const &a)
model
noiseModel::Diagonal::shared_ptr model
pitch
GLM_FUNC_DECL T pitch(detail::tquat< T, P > const &x)
rtabmap_conversions::nodeInfoToROS
void nodeInfoToROS(const rtabmap::Signature &signature, rtabmap_msgs::Node &msg)
Definition: MsgConversion.cpp:1556
rtabmap_conversions::infoToROS
void infoToROS(const rtabmap::Statistics &stats, rtabmap_msgs::Info &info)
Definition: MsgConversion.cpp:526
out
std::ofstream out("Result.txt")
p1
Vector3f p1
rtabmap::Transform::fromEigen3d
static Transform fromEigen3d(const Eigen::Affine3d &matrix)
rtabmap_conversions::nodeDataToROS
void nodeDataToROS(const rtabmap::Signature &signature, rtabmap_msgs::Node &msg)
Definition: MsgConversion.cpp:1539
rtabmap::Transform::inverse
Transform inverse() const
rtabmap::Statistics::setOdomCachePoses
void setOdomCachePoses(const std::map< int, Transform > &poses)
UASSERT_MSG
#define UASSERT_MSG(condition, msg_str)
rtabmap::compressData2
cv::Mat RTABMAP_CORE_EXPORT compressData2(const cv::Mat &data)
util3d_filtering.h
rtabmap::Statistics::setLoopClosureTransform
void setLoopClosureTransform(const Transform &loopClosureTransform)
rtabmap_conversions::compressedMatFromBytes
cv::Mat compressedMatFromBytes(const std::vector< unsigned char > &bytes, bool copy=true)
Definition: MsgConversion.cpp:444
rtabmap::Signature::getPose
const Transform & getPose() const
rtabmap_conversions::keypointFromROS
cv::KeyPoint keypointFromROS(const rtabmap_msgs::KeyPoint &msg)
Definition: MsgConversion.cpp:580
rtabmap::Transform
rtabmap_conversions::timestampFromROS
double timestampFromROS(const ros::Time &stamp)
Definition: MsgConversion.h:198
rtabmap::EnvSensors
std::map< EnvSensor::Type, EnvSensor > EnvSensors
rtabmap_conversions::getMovingTransform
rtabmap::Transform getMovingTransform(const std::string &movingFrame, const std::string &fixedFrame, const ros::Time &stampFrom, const ros::Time &stampTo, tf::TransformListener &listener, double waitForTransform)
Definition: MsgConversion.cpp:2007
ros::Time
rtabmap::Signature::id
int id() const
rtabmap::Statistics::setWmState
void setWmState(const std::vector< int > &state)
rtabmap::Statistics::setExtended
void setExtended(bool extended)
rtabmap::Statistics::setCurrentGoalId
void setCurrentGoalId(int goal)
rtabmap_conversions::transformFromTF
rtabmap::Transform transformFromTF(const tf::Transform &transform)
Definition: MsgConversion.cpp:64
sensor_msgs::image_encodings::BGRA8
const std::string BGRA8
rtabmap_conversions::imuFromROS
rtabmap::IMU imuFromROS(const sensor_msgs::Imu &msg, const rtabmap::Transform &localTransform=rtabmap::Transform::getIdentity())
Definition: MsgConversion.cpp:1866
cv_bridge::CvImage::encoding
std::string encoding
sensor_msgs::image_encodings::TYPE_32FC1
const std::string TYPE_32FC1
PointMatcherSupport::Parametrizable
iter
iterator iter(handle obj)
MsgConversion.h
rtabmap::Transform::rotation
Transform rotation() const
rtabmap_conversions
Definition: MsgConversion.h:64
sensor_msgs::image_encodings::MONO8
const std::string MONO8
rtabmap::Transform::isIdentity
bool isIdentity() const
c_str
const char * c_str(Args &&...args)
rtabmap_conversions::points3fFromROS
std::vector< cv::Point3f > points3fFromROS(const std::vector< rtabmap_msgs::Point3f > &msg, const rtabmap::Transform &transform=rtabmap::Transform())
Definition: MsgConversion.cpp:747
sensor_msgs::image_encodings::MONO16
const std::string MONO16
ROS_ERROR
#define ROS_ERROR(...)
diff
diff
rtabmap_conversions::transformToGeometryMsg
void transformToGeometryMsg(const rtabmap::Transform &transform, geometry_msgs::Transform &msg)
Definition: MsgConversion.cpp:71
cv_bridge::CvImage
v
Array< int, Dynamic, 1 > v
rtabmap_conversions::mapDataToROS
void mapDataToROS(const std::map< int, rtabmap::Transform > &poses, const std::multimap< int, rtabmap::Link > &links, const std::map< int, rtabmap::Signature > &signatures, const rtabmap::Transform &mapToOdom, rtabmap_msgs::MapData &msg)
Definition: MsgConversion.cpp:993
rtabmap::util3d::transformPoint
cv::Point3d RTABMAP_CORE_EXPORT transformPoint(const cv::Point3d &pt, const Transform &transform)
rtabmap::CameraModel::imageSize
const cv::Size & imageSize() const
tf::TransformListener
rtabmap::Statistics::setStamp
void setStamp(double stamp)
UTimer
rtabmap::OdometryInfo
uKeys
std::vector< K > uKeys(const std::map< K, V > &m)
rtabmap_conversions::imuToROS
void imuToROS(const rtabmap::IMU &imu, sensor_msgs::Imu &msg)
Definition: MsgConversion.cpp:1877
rtabmap_conversions::points2fFromROS
std::vector< cv::Point2f > points2fFromROS(const std::vector< rtabmap_msgs::Point2f > &msg)
Definition: MsgConversion.cpp:716
rtabmap::Statistics::addStatistic
void addStatistic(const std::string &name, float value)
rtabmap_conversions::envSensorsToROS
void envSensorsToROS(const rtabmap::EnvSensors &sensors, std::vector< rtabmap_msgs::EnvSensor > &msg)
Definition: MsgConversion.cpp:691
ULogger.h
rtabmap_conversions::rgbdImageFromROS
rtabmap::SensorData rgbdImageFromROS(const rtabmap_msgs::RGBDImageConstPtr &image)
Definition: MsgConversion.cpp:288
rtabmap::util3d::laserScan2dFromPointCloud
LaserScan RTABMAP_CORE_EXPORT laserScan2dFromPointCloud(const pcl::PointCloud< pcl::PointNormal > &cloud, const Transform &transform=Transform(), bool filterNaNs=true)
sensor_msgs::image_encodings::BGR8
const std::string BGR8
rtabmap::Statistics::setOdomCacheConstraints
void setOdomCacheConstraints(const std::multimap< int, Link > &constraints)
rtabmap::IMU::angularVelocity
const cv::Vec3d & angularVelocity() const
rtabmap_conversions::convertScan3dMsg
bool convertScan3dMsg(const sensor_msgs::PointCloud2 &scan3dMsg, const std::string &frameId, const std::string &odomFrameId, const ros::Time &odomStamp, rtabmap::LaserScan &scan, tf::TransformListener &listener, double waitForTransform, int maxPoints=0, float maxRange=0.0f, bool is2D=false)
Definition: MsgConversion.cpp:2751
sensor_msgs::image_encodings::BAYER_GRBG8
const std::string BAYER_GRBG8
transform
EIGEN_DONT_INLINE void transform(const Quaternion< Scalar > &t, Data &data)
util3d_surface.h
rtabmap_conversions::mapDataFromROS
void mapDataFromROS(const rtabmap_msgs::MapData &msg, std::map< int, rtabmap::Transform > &poses, std::multimap< int, rtabmap::Link > &links, std::map< int, rtabmap::Signature > &signatures, rtabmap::Transform &mapToOdom)
Definition: MsgConversion.cpp:977
cv_bridge::toCvShare
CvImageConstPtr toCvShare(const sensor_msgs::Image &source, const boost::shared_ptr< void const > &tracked_object, const std::string &encoding=std::string())
tf::Transformer::lookupTransform
void lookupTransform(const std::string &target_frame, const ros::Time &target_time, const std::string &source_frame, const ros::Time &source_time, const std::string &fixed_frame, StampedTransform &transform) const
UTimer.h
header
const std::string header
tf::transformEigenToMsg
void transformEigenToMsg(const Eigen::Affine3d &e, geometry_msgs::Transform &m)
dist
dist
rtabmap_conversions::envSensorToROS
void envSensorToROS(const rtabmap::EnvSensor &sensor, rtabmap_msgs::EnvSensor &msg)
Definition: MsgConversion.cpp:670
rtabmap_conversions::convertStereoMsg
bool convertStereoMsg(const cv_bridge::CvImageConstPtr &leftImageMsg, const cv_bridge::CvImageConstPtr &rightImageMsg, const sensor_msgs::CameraInfo &leftCamInfoMsg, const sensor_msgs::CameraInfo &rightCamInfoMsg, const std::string &frameId, const std::string &odomFrameId, const ros::Time &odomStamp, cv::Mat &left, cv::Mat &right, rtabmap::StereoCameraModel &stereoModel, tf::TransformListener &listener, double waitForTransform, bool alreadyRectified)
Definition: MsgConversion.cpp:2435
tf2::TransformException
t
Point2 t(10, 10)
rtabmap::Signature::sensorData
SensorData & sensorData()
ROS_ASSERT
#define ROS_ASSERT(cond)
rtabmap_conversions::rgbdImageToROS
void rgbdImageToROS(const rtabmap::SensorData &data, rtabmap_msgs::RGBDImage &msg, const std::string &sensorFrameId)
Definition: MsgConversion.cpp:212
util3d_transforms.h
UERROR
#define UERROR(...)
cv_bridge::CvImage::image
cv::Mat image
rtabmap::EnvSensor
ros::Duration
rtabmap::IMU::orientationCovariance
const cv::Mat & orientationCovariance() const
UTimer::elapsed
double elapsed()
rtabmap_conversions::linkFromROS
rtabmap::Link linkFromROS(const rtabmap_msgs::Link &msg)
Definition: MsgConversion.cpp:562
i
int i
uIsFinite
bool uIsFinite(const T &value)
rtabmap::Statistics::setRefImageId
void setRefImageId(int id)
util3d.h
rtabmap_conversions::sensorDataFromROS
rtabmap::SensorData sensorDataFromROS(const rtabmap_msgs::SensorData &msg)
Definition: MsgConversion.cpp:1064
roll
GLM_FUNC_DECL T roll(detail::tquat< T, P > const &x)
pcl_conversions::moveFromPCL
void moveFromPCL(pcl::ModelCoefficients &pcl_mc, pcl_msgs::ModelCoefficients &mc)
rtabmap::Signature
vx
StridedVectorType vx(make_vector(x, *n, std::abs(*incx)))
rtabmap::uncompressData
cv::Mat RTABMAP_CORE_EXPORT uncompressData(const cv::Mat &bytes)
msg
msg
cv_bridge::cvtColor
CvImagePtr cvtColor(const CvImageConstPtr &source, const std::string &encoding)
rtabmap_conversions::keypointsFromROS
std::vector< cv::KeyPoint > keypointsFromROS(const std::vector< rtabmap_msgs::KeyPoint > &msg)
Definition: MsgConversion.cpp:596
pcl_conversions.h
R
R


rtabmap_conversions
Author(s): Mathieu Labbe
autogenerated on Mon Apr 28 2025 02:35:04