MsgConversion.cpp
1 /*
2 Copyright (c) 2010-2016, Mathieu Labbe - IntRoLab - Universite de Sherbrooke
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7  * Redistributions of source code must retain the above copyright
8  notice, this list of conditions and the following disclaimer.
9  * Redistributions in binary form must reproduce the above copyright
10  notice, this list of conditions and the following disclaimer in the
11  documentation and/or other materials provided with the distribution.
12  * Neither the name of the Universite de Sherbrooke nor the
13  names of its contributors may be used to endorse or promote products
14  derived from this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
20 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27 
28 #include "rtabmap_ros/MsgConversion.h"
29 
30 #include <opencv2/highgui/highgui.hpp>
31 #include <zlib.h>
32 #include <ros/ros.h>
33 #include <rtabmap/core/util3d.h>
37 #include <rtabmap/utilite/UStl.h>
47 
48 namespace rtabmap_ros {
49 
50 void transformToTF(const rtabmap::Transform & transform, tf::Transform & tfTransform)
51 {
52  if(!transform.isNull())
53  {
54  tf::transformEigenToTF(transform.toEigen3d(), tfTransform);
55  }
56  else
57  {
58  tfTransform = tf::Transform();
59  }
60 }
61 
62 rtabmap::Transform transformFromTF(const tf::Transform & transform)
63 {
64  Eigen::Affine3d eigenTf;
65  tf::transformTFToEigen(transform, eigenTf);
66  return rtabmap::Transform::fromEigen3d(eigenTf);
67 }
68 
69 void transformToGeometryMsg(const rtabmap::Transform & transform, geometry_msgs::Transform & msg)
70 {
71  if(!transform.isNull())
72  {
73  tf::transformEigenToMsg(transform.toEigen3d(), msg);
74 
75  // make sure the quaternion is normalized
76  long double recipNorm = 1.0 / sqrt(msg.rotation.x * msg.rotation.x + msg.rotation.y * msg.rotation.y + msg.rotation.z * msg.rotation.z + msg.rotation.w * msg.rotation.w);
77  msg.rotation.x *= recipNorm;
78  msg.rotation.y *= recipNorm;
79  msg.rotation.z *= recipNorm;
80  msg.rotation.w *= recipNorm;
81  }
82  else
83  {
84  msg = geometry_msgs::Transform();
85  }
86 }
87 
88 
89 rtabmap::Transform transformFromGeometryMsg(const geometry_msgs::Transform & msg)
90 {
91  if(msg.rotation.w == 0 &&
92  msg.rotation.x == 0 &&
93  msg.rotation.y == 0 &&
94  msg.rotation.z ==0)
95  {
96  return rtabmap::Transform();
97  }
98 
99  Eigen::Affine3d tfTransform;
100  tf::transformMsgToEigen(msg, tfTransform);
101  return rtabmap::Transform::fromEigen3d(tfTransform);
102 }
103 
104 void transformToPoseMsg(const rtabmap::Transform & transform, geometry_msgs::Pose & msg)
105 {
106  if(!transform.isNull())
107  {
108  tf::poseEigenToMsg(transform.toEigen3d(), msg);
109  }
110  else
111  {
112  msg = geometry_msgs::Pose();
113  }
114 }
115 
116 rtabmap::Transform transformFromPoseMsg(const geometry_msgs::Pose & msg)
117 {
118  if(msg.orientation.w == 0 &&
119  msg.orientation.x == 0 &&
120  msg.orientation.y == 0 &&
121  msg.orientation.z ==0)
122  {
123  return rtabmap::Transform();
124  }
125  Eigen::Affine3d tfPose;
126  tf::poseMsgToEigen(msg, tfPose);
127  return rtabmap::Transform::fromEigen3d(tfPose);
128 }
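// Illustrative sketch (not part of the original file): round-tripping a pose through the
// helpers above. Names "t" and "poseMsg" are hypothetical.
//
//   rtabmap::Transform t = ...;                                            // some valid transform
//   geometry_msgs::Pose poseMsg;
//   rtabmap_ros::transformToPoseMsg(t, poseMsg);                           // rtabmap -> ROS msg
//   rtabmap::Transform back = rtabmap_ros::transformFromPoseMsg(poseMsg);  // ROS msg -> rtabmap
//   // A null transform is encoded as an all-zero quaternion and decoded back to a null transform.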
129 
130 void toCvCopy(const rtabmap_ros::RGBDImage & image, cv_bridge::CvImagePtr & rgb, cv_bridge::CvImagePtr & depth)
131 {
132  if(!image.rgb.data.empty())
133  {
134  rgb = cv_bridge::toCvCopy(image.rgb);
135  }
136  else if(!image.rgbCompressed.data.empty())
137  {
138 #ifdef CV_BRIDGE_HYDRO
139  ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");
140 #else
141  rgb = cv_bridge::toCvCopy(image.rgbCompressed);
142 #endif
143  }
144  else
145  {
146  // empty
147  rgb = boost::make_shared<cv_bridge::CvImage>();
148  }
149 
150  if(!image.depth.data.empty())
151  {
152  depth = cv_bridge::toCvCopy(image.depth);
153  }
154  else if(!image.depthCompressed.data.empty())
155  {
156  cv_bridge::CvImagePtr ptr = boost::make_shared<cv_bridge::CvImage>();
157  ptr->header = image.depthCompressed.header;
158  ptr->image = rtabmap::uncompressImage(image.depthCompressed.data);
159  ROS_ASSERT(ptr->image.empty() || ptr->image.type() == CV_32FC1 || ptr->image.type() == CV_16UC1);
160  ptr->encoding = ptr->image.empty()?"":ptr->image.type() == CV_32FC1?sensor_msgs::image_encodings::TYPE_32FC1:sensor_msgs::image_encodings::TYPE_16UC1;
161  depth = ptr;
162  }
163  else
164  {
165  // empty
166  depth = boost::make_shared<cv_bridge::CvImage>();
167  }
168 }
169 
170 void toCvShare(const rtabmap_ros::RGBDImageConstPtr & image, cv_bridge::CvImageConstPtr & rgb, cv_bridge::CvImageConstPtr & depth)
171 {
172  if(!image->rgb.data.empty())
173  {
174  rgb = cv_bridge::toCvShare(image->rgb, image);
175  }
176  else if(!image->rgbCompressed.data.empty())
177  {
178 #ifdef CV_BRIDGE_HYDRO
179  ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");
180 #else
181  rgb = cv_bridge::toCvCopy(image->rgbCompressed);
182 #endif
183  }
184  else
185  {
186  // empty
187  rgb = boost::make_shared<cv_bridge::CvImage>();
188  }
189 
190  if(!image->depth.data.empty())
191  {
192  depth = cv_bridge::toCvShare(image->depth, image);
193  }
194  else if(!image->depthCompressed.data.empty())
195  {
196  if(image->depthCompressed.format.compare("jpg")==0)
197  {
198 #ifdef CV_BRIDGE_HYDRO
199  ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");
200 #else
201  depth = cv_bridge::toCvCopy(image->depthCompressed);
202 #endif
203  }
204  else
205  {
206  cv_bridge::CvImagePtr ptr = boost::make_shared<cv_bridge::CvImage>();
207  ptr->header = image->depthCompressed.header;
208  ptr->image = rtabmap::uncompressImage(image->depthCompressed.data);
209  ROS_ASSERT(ptr->image.empty() || ptr->image.type() == CV_32FC1 || ptr->image.type() == CV_16UC1);
210  ptr->encoding = ptr->image.empty()?"":ptr->image.type() == CV_32FC1?sensor_msgs::image_encodings::TYPE_32FC1:sensor_msgs::image_encodings::TYPE_16UC1;
211  depth = ptr;
212  }
213  }
214  else
215  {
216  // empty
217  depth = boost::make_shared<cv_bridge::CvImage>();
218  }
219 }
220 
221 rtabmap::SensorData rgbdImageFromROS(const rtabmap_ros::RGBDImageConstPtr & image)
222 {
223  rtabmap::SensorData data;
224  cv_bridge::CvImageConstPtr imageMsg;
225  cv_bridge::CvImageConstPtr depthMsg;
226  toCvShare(image, imageMsg, depthMsg);
227 
228  rtabmap::StereoCameraModel stereoModel = stereoCameraModelFromROS(image->rgbCameraInfo, image->depthCameraInfo, rtabmap::Transform::getIdentity());
229 
230  if(stereoModel.isValidForProjection())
231  {
232  cv_bridge::CvImageConstPtr imageRectLeft = imageMsg;
233  cv_bridge::CvImageConstPtr imageRectRight = depthMsg;
234  if(!(imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
235  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
236  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
237  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0) ||
238  !(imageRectRight->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
239  imageRectRight->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
240  imageRectRight->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
241  imageRectRight->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0))
242  {
243  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8 (mono8 recommended), received types are %s (left) and %s (right)",
244  imageRectLeft->encoding.c_str(), imageRectRight->encoding.c_str());
245  return data;
246  }
247 
248  if(!imageRectLeft->image.empty() && !imageRectRight->image.empty())
249  {
250  if(stereoModel.baseline() > 10.0)
251  {
252  static bool shown = false;
253  if(!shown)
254  {
255  ROS_WARN("Detected baseline (%f m) is quite large! Is your "
256  "right camera_info P(0,3) correctly set? Note that "
257  "baseline=-P(0,3)/P(0,0). This warning is printed only once.",
258  stereoModel.baseline());
259  shown = true;
260  }
261  }
262 
263  cv::Mat left, right;
264  if(imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
265  imageRectLeft->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
266  {
267  left = cv_bridge::cvtColor(imageRectLeft, "mono8")->image;
268  }
269  else
270  {
271  left = cv_bridge::cvtColor(imageRectLeft, "bgr8")->image;
272  }
273  right = cv_bridge::cvtColor(imageRectRight, "mono8")->image;
274 
275  //
276 
277  data = rtabmap::SensorData(
278  left,
279  right,
280  stereoModel,
281  0,
282  rtabmap_ros::timestampFromROS(image->header.stamp));
283  }
284  else
285  {
286  ROS_WARN("Odom: input images empty?!?");
287  }
288  }
289  else //depth
290  {
291  ros::Time higherStamp;
292  int imageWidth = imageMsg->image.cols;
293  int imageHeight = imageMsg->image.rows;
294  int depthWidth = depthMsg->image.cols;
295  int depthHeight = depthMsg->image.rows;
296 
297  UASSERT_MSG(
298  imageWidth % depthWidth == 0 && imageHeight % depthHeight == 0 &&
299  imageWidth/depthWidth == imageHeight/depthHeight,
300  uFormat("rgb=%dx%d depth=%dx%d", imageWidth, imageHeight, depthWidth, depthHeight).c_str());
301 
302  cv::Mat rgb;
303  cv::Mat depth;
304  rtabmap::CameraModel cameraModels;
305 
306  if(!(imageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) ==0 ||
307  imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
308  imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
309  imageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
310  imageMsg->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
311  imageMsg->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
312  imageMsg->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0 ||
313  imageMsg->encoding.compare(sensor_msgs::image_encodings::BAYER_GRBG8) == 0) ||
314  !(depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1) == 0 ||
315  depthMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1) == 0 ||
316  depthMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0))
317  {
318  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8 and "
319  "image_depth=32FC1,16UC1,mono16. Current rgb=%s and depth=%s",
320  imageMsg->encoding.c_str(),
321  depthMsg->encoding.c_str());
322  return data;
323  }
324 
325  cv_bridge::CvImageConstPtr ptrImage = imageMsg;
326  if(imageMsg->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1)==0 ||
327  imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
328  imageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0)
329  {
330  // do nothing
331  }
332  else if(imageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
333  {
334  ptrImage = cv_bridge::cvtColor(imageMsg, "mono8");
335  }
336  else
337  {
338  ptrImage = cv_bridge::cvtColor(imageMsg, "bgr8");
339  }
340 
341  cv_bridge::CvImageConstPtr ptrDepth = depthMsg;
342  data = rtabmap::SensorData(
343  ptrImage->image,
344  ptrDepth->image,
345  rtabmap_ros::cameraModelFromROS(image->rgbCameraInfo),
346  0,
347  rtabmap_ros::timestampFromROS(image->header.stamp));
348  }
349 
350  return data;
351 }
352 
353 void compressedMatToBytes(const cv::Mat & compressed, std::vector<unsigned char> & bytes)
354 {
355  UASSERT(compressed.empty() || compressed.type() == CV_8UC1);
356  bytes.clear();
357  if(!compressed.empty())
358  {
359  bytes.resize(compressed.cols * compressed.rows);
360  memcpy(bytes.data(), compressed.data, bytes.size());
361  }
362 }
363 
364 cv::Mat compressedMatFromBytes(const std::vector<unsigned char> & bytes, bool copy)
365 {
366  cv::Mat out;
367  if(bytes.size())
368  {
369  out = cv::Mat(1, bytes.size(), CV_8UC1, (void*)bytes.data());
370  if(copy)
371  {
372  out = out.clone();
373  }
374  }
375  return out;
376 }
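// Illustrative sketch (not part of the original file): the two helpers above only copy an
// already-compressed 1xN CV_8UC1 buffer to/from a byte vector, without re-encoding.
//
//   std::vector<unsigned char> bytes;
//   rtabmap_ros::compressedMatToBytes(compressed, bytes);             // cv::Mat -> byte vector
//   cv::Mat same = rtabmap_ros::compressedMatFromBytes(bytes, true);  // byte vector -> cv::Mat (deep copy)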
377 
378 void infoFromROS(const rtabmap_ros::Info & info, rtabmap::Statistics & stat)
379 {
380  stat.setExtended(true); // Extended
381 
382  // rtabmap_ros::Info
383  stat.setRefImageId(info.refId);
384  stat.setLoopClosureId(info.loopClosureId);
385  stat.setProximityDetectionId(info.proximityDetectionId);
386  stat.setStamp(info.header.stamp.toSec());
387 
388  stat.setLoopClosureTransform(rtabmap_ros::transformFromGeometryMsg(info.loopClosureTransform));
389 
390  //Posterior, likelihood, childCount
391  std::map<int, float> mapIntFloat;
392  for(unsigned int i=0; i<info.posteriorKeys.size() && i<info.posteriorValues.size(); ++i)
393  {
394  mapIntFloat.insert(std::pair<int, float>(info.posteriorKeys.at(i), info.posteriorValues.at(i)));
395  }
396  stat.setPosterior(mapIntFloat);
397  mapIntFloat.clear();
398  for(unsigned int i=0; i<info.likelihoodKeys.size() && i<info.likelihoodValues.size(); ++i)
399  {
400  mapIntFloat.insert(std::pair<int, float>(info.likelihoodKeys.at(i), info.likelihoodValues.at(i)));
401  }
402  stat.setLikelihood(mapIntFloat);
403  mapIntFloat.clear();
404  for(unsigned int i=0; i<info.rawLikelihoodKeys.size() && i<info.rawLikelihoodValues.size(); ++i)
405  {
406  mapIntFloat.insert(std::pair<int, float>(info.rawLikelihoodKeys.at(i), info.rawLikelihoodValues.at(i)));
407  }
408  stat.setRawLikelihood(mapIntFloat);
409  std::map<int, int> mapIntInt;
410  for(unsigned int i=0; i<info.weightsKeys.size() && i<info.weightsValues.size(); ++i)
411  {
412  mapIntInt.insert(std::pair<int, int>(info.weightsKeys.at(i), info.weightsValues.at(i)));
413  }
414  stat.setWeights(mapIntInt);
415 
416  stat.setLocalPath(info.localPath);
417  stat.setCurrentGoalId(info.currentGoalId);
418 
419  // Statistics data
420  for(unsigned int i=0; i<info.statsKeys.size() && i<info.statsValues.size(); i++)
421  {
422  stat.addStatistic(info.statsKeys.at(i), info.statsValues.at(i));
423  }
424 }
425 
426 void infoToROS(const rtabmap::Statistics & stats, rtabmap_ros::Info & info)
427 {
428  info.refId = stats.refImageId();
429  info.loopClosureId = stats.loopClosureId();
430  info.proximityDetectionId = stats.proximityDetectionId();
431 
432  rtabmap_ros::transformToGeometryMsg(stats.loopClosureTransform(), info.loopClosureTransform);
433 
434  // Detailed info
435  if(stats.extended())
436  {
437  //Posterior, likelihood, childCount
438  info.posteriorKeys = uKeys(stats.posterior());
439  info.posteriorValues = uValues(stats.posterior());
440  info.likelihoodKeys = uKeys(stats.likelihood());
441  info.likelihoodValues = uValues(stats.likelihood());
442  info.rawLikelihoodKeys = uKeys(stats.rawLikelihood());
443  info.rawLikelihoodValues = uValues(stats.rawLikelihood());
444  info.weightsKeys = uKeys(stats.weights());
445  info.weightsValues = uValues(stats.weights());
446  info.localPath = stats.localPath();
447  info.currentGoalId = stats.currentGoalId();
448 
449  // Statistics data
450  info.statsKeys = uKeys(stats.data());
451  info.statsValues = uValues(stats.data());
452  }
453 }
454 
455 rtabmap::Link linkFromROS(const rtabmap_ros::Link & msg)
456 {
457  cv::Mat information = cv::Mat(6,6,CV_64FC1, (void*)msg.information.data()).clone();
458  return rtabmap::Link(msg.fromId, msg.toId, (rtabmap::Link::Type)msg.type, transformFromGeometryMsg(msg.transform), information);
459 }
460 
461 void linkToROS(const rtabmap::Link & link, rtabmap_ros::Link & msg)
462 {
463  msg.fromId = link.from();
464  msg.toId = link.to();
465  msg.type = link.type();
466  if(link.infMatrix().type() == CV_64FC1 && link.infMatrix().cols == 6 && link.infMatrix().rows == 6)
467  {
468  memcpy(msg.information.data(), link.infMatrix().data, 36*sizeof(double));
469  }
470  transformToGeometryMsg(link.transform(), msg.transform);
471 }
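// Note (added for clarity, not in the original file): rtabmap_ros::Link carries the 6x6
// information (inverse covariance) matrix as a flat array of 36 doubles in row-major order,
// which is why linkFromROS()/linkToROS() copy exactly 36*sizeof(double) bytes.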
472 
473 cv::KeyPoint keypointFromROS(const rtabmap_ros::KeyPoint & msg)
474 {
475  return cv::KeyPoint(msg.pt.x, msg.pt.y, msg.size, msg.angle, msg.response, msg.octave, msg.class_id);
476 }
477 
478 void keypointToROS(const cv::KeyPoint & kpt, rtabmap_ros::KeyPoint & msg)
479 {
480  msg.angle = kpt.angle;
481  msg.class_id = kpt.class_id;
482  msg.octave = kpt.octave;
483  msg.pt.x = kpt.pt.x;
484  msg.pt.y = kpt.pt.y;
485  msg.response = kpt.response;
486  msg.size = kpt.size;
487 }
488 
489 std::vector<cv::KeyPoint> keypointsFromROS(const std::vector<rtabmap_ros::KeyPoint> & msg)
490 {
491  std::vector<cv::KeyPoint> v(msg.size());
492  for(unsigned int i=0; i<msg.size(); ++i)
493  {
494  v[i] = keypointFromROS(msg[i]);
495  }
496  return v;
497 }
498 
499 void keypointsToROS(const std::vector<cv::KeyPoint> & kpts, std::vector<rtabmap_ros::KeyPoint> & msg)
500 {
501  msg.resize(kpts.size());
502  for(unsigned int i=0; i<msg.size(); ++i)
503  {
504  keypointToROS(kpts[i], msg[i]);
505  }
506 }
507 
508 cv::Point2f point2fFromROS(const rtabmap_ros::Point2f & msg)
509 {
510  return cv::Point2f(msg.x, msg.y);
511 }
512 
513 void point2fToROS(const cv::Point2f & kpt, rtabmap_ros::Point2f & msg)
514 {
515  msg.x = kpt.x;
516  msg.y = kpt.y;
517 }
518 
519 std::vector<cv::Point2f> points2fFromROS(const std::vector<rtabmap_ros::Point2f> & msg)
520 {
521  std::vector<cv::Point2f> v(msg.size());
522  for(unsigned int i=0; i<msg.size(); ++i)
523  {
524  v[i] = point2fFromROS(msg[i]);
525  }
526  return v;
527 }
528 
529 void points2fToROS(const std::vector<cv::Point2f> & kpts, std::vector<rtabmap_ros::Point2f> & msg)
530 {
531  msg.resize(kpts.size());
532  for(unsigned int i=0; i<msg.size(); ++i)
533  {
534  point2fToROS(kpts[i], msg[i]);
535  }
536 }
537 
538 cv::Point3f point3fFromROS(const rtabmap_ros::Point3f & msg)
539 {
540  return cv::Point3f(msg.x, msg.y, msg.z);
541 }
542 
543 void point3fToROS(const cv::Point3f & kpt, rtabmap_ros::Point3f & msg)
544 {
545  msg.x = kpt.x;
546  msg.y = kpt.y;
547  msg.z = kpt.z;
548 }
549 
550 std::vector<cv::Point3f> points3fFromROS(const std::vector<rtabmap_ros::Point3f> & msg)
551 {
552  std::vector<cv::Point3f> v(msg.size());
553  for(unsigned int i=0; i<msg.size(); ++i)
554  {
555  v[i] = point3fFromROS(msg[i]);
556  }
557  return v;
558 }
559 
560 void points3fToROS(const std::vector<cv::Point3f> & kpts, std::vector<rtabmap_ros::Point3f> & msg)
561 {
562  msg.resize(kpts.size());
563  for(unsigned int i=0; i<msg.size(); ++i)
564  {
565  point3fToROS(kpts[i], msg[i]);
566  }
567 }
568 
569 rtabmap::CameraModel cameraModelFromROS(
570  const sensor_msgs::CameraInfo & camInfo,
571  const rtabmap::Transform & localTransform)
572 {
573  cv::Mat K;
574  UASSERT(camInfo.K.empty() || camInfo.K.size() == 9);
575  if(!camInfo.K.empty())
576  {
577  K = cv::Mat(3, 3, CV_64FC1);
578  memcpy(K.data, camInfo.K.elems, 9*sizeof(double));
579  }
580 
581  cv::Mat D;
582  if(camInfo.D.size())
583  {
584  if(camInfo.D.size()>=4 &&
585  (uStrContains(camInfo.distortion_model, "fisheye") ||
586  uStrContains(camInfo.distortion_model, "equidistant")))
587  {
588  D = cv::Mat::zeros(1, 6, CV_64FC1);
589  D.at<double>(0,0) = camInfo.D[0];
590  D.at<double>(0,1) = camInfo.D[1];
591  D.at<double>(0,4) = camInfo.D[2];
592  D.at<double>(0,5) = camInfo.D[3];
593  }
594  else
595  {
596  D = cv::Mat(1, camInfo.D.size(), CV_64FC1);
597  memcpy(D.data, camInfo.D.data(), D.cols*sizeof(double));
598  }
599  }
600 
601  cv::Mat R;
602  UASSERT(camInfo.R.empty() || camInfo.R.size() == 9);
603  if(!camInfo.R.empty())
604  {
605  R = cv::Mat(3, 3, CV_64FC1);
606  memcpy(R.data, camInfo.R.elems, 9*sizeof(double));
607  }
608 
609  cv::Mat P;
610  UASSERT(camInfo.P.empty() || camInfo.P.size() == 12);
611  if(!camInfo.P.empty())
612  {
613  P = cv::Mat(3, 4, CV_64FC1);
614  memcpy(P.data, camInfo.P.elems, 12*sizeof(double));
615  }
616 
617  return rtabmap::CameraModel(
618  "ros",
619  cv::Size(camInfo.width, camInfo.height),
620  K, D, R, P,
621  localTransform);
622 }
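// Illustrative sketch (not part of the original file): building a calibration model from a
// sensor_msgs::CameraInfo; the local transform here is a hypothetical optical-frame rotation.
//
//   rtabmap::Transform localTransform(0,0,1,0, -1,0,0,0, 0,-1,0,0);
//   rtabmap::CameraModel model = rtabmap_ros::cameraModelFromROS(camInfoMsg, localTransform);
//   // K/R/P are copied as-is; a 4-coefficient fisheye/equidistant D is expanded to 6 values.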
623 void cameraModelToROS(
624  const rtabmap::CameraModel & model,
625  sensor_msgs::CameraInfo & camInfo)
626 {
627  UASSERT(model.K_raw().empty() || model.K_raw().total() == 9);
628  if(model.K_raw().empty())
629  {
630  memset(camInfo.K.elems, 0.0, 9*sizeof(double));
631  }
632  else
633  {
634  memcpy(camInfo.K.elems, model.K_raw().data, 9*sizeof(double));
635  }
636 
637  if(model.D_raw().total() == 6)
638  {
639  camInfo.D = std::vector<double>(4);
640  camInfo.D[0] = model.D_raw().at<double>(0,0);
641  camInfo.D[1] = model.D_raw().at<double>(0,1);
642  camInfo.D[2] = model.D_raw().at<double>(0,4);
643  camInfo.D[3] = model.D_raw().at<double>(0,5);
644  camInfo.distortion_model = "equidistant"; // fisheye
645  }
646  else
647  {
648  camInfo.D = std::vector<double>(model.D_raw().cols);
649  memcpy(camInfo.D.data(), model.D_raw().data, model.D_raw().cols*sizeof(double));
650  if(camInfo.D.size() > 5)
651  {
652  camInfo.distortion_model = "rational_polynomial";
653  }
654  else
655  {
656  camInfo.distortion_model = "plumb_bob";
657  }
658  }
659 
660  UASSERT(model.R().empty() || model.R().total() == 9);
661  if(model.R().empty())
662  {
663  memset(camInfo.R.elems, 0.0, 9*sizeof(double));
664  }
665  else
666  {
667  memcpy(camInfo.R.elems, model.R().data, 9*sizeof(double));
668  }
669 
670  UASSERT(model.P().empty() || model.P().total() == 12);
671  if(model.P().empty())
672  {
673  memset(camInfo.P.elems, 0.0, 12*sizeof(double));
674  }
675  else
676  {
677  memcpy(camInfo.P.elems, model.P().data, 12*sizeof(double));
678  }
679 
680  camInfo.binning_x = 1;
681  camInfo.binning_y = 1;
682  camInfo.roi.width = model.imageWidth();
683  camInfo.roi.height = model.imageHeight();
684 
685  camInfo.width = model.imageWidth();
686  camInfo.height = model.imageHeight();
687 }
688 rtabmap::StereoCameraModel stereoCameraModelFromROS(
689  const sensor_msgs::CameraInfo & leftCamInfo,
690  const sensor_msgs::CameraInfo & rightCamInfo,
691  const rtabmap::Transform & localTransform)
692 {
693  return rtabmap::StereoCameraModel(
694  "ros",
695  cameraModelFromROS(leftCamInfo, localTransform),
696  cameraModelFromROS(rightCamInfo, localTransform),
697  rtabmap::Transform());
698 }
699 
700 void mapDataFromROS(
701  const rtabmap_ros::MapData & msg,
702  std::map<int, rtabmap::Transform> & poses,
703  std::multimap<int, rtabmap::Link> & links,
704  std::map<int, rtabmap::Signature> & signatures,
705  rtabmap::Transform & mapToOdom)
706 {
707  //optimized graph
708  mapGraphFromROS(msg.graph, poses, links, mapToOdom);
709 
710  //Data
711  for(unsigned int i=0; i<msg.nodes.size(); ++i)
712  {
713  signatures.insert(std::make_pair(msg.nodes[i].id, nodeDataFromROS(msg.nodes[i])));
714  }
715 }
716 void mapDataToROS(
717  const std::map<int, rtabmap::Transform> & poses,
718  const std::multimap<int, rtabmap::Link> & links,
719  const std::map<int, rtabmap::Signature> & signatures,
720  const rtabmap::Transform & mapToOdom,
721  rtabmap_ros::MapData & msg)
722 {
723  //Optimized graph
724  mapGraphToROS(poses, links, mapToOdom, msg.graph);
725 
726  //Data
727  msg.nodes.resize(signatures.size());
728  int index=0;
729  for(std::multimap<int, rtabmap::Signature>::const_iterator iter = signatures.begin();
730  iter!=signatures.end();
731  ++iter)
732  {
733  nodeDataToROS(iter->second, msg.nodes[index++]);
734  }
735 }
736 
737 void mapGraphFromROS(
738  const rtabmap_ros::MapGraph & msg,
739  std::map<int, rtabmap::Transform> & poses,
740  std::multimap<int, rtabmap::Link> & links,
741  rtabmap::Transform & mapToOdom)
742 {
743  //optimized graph
744  UASSERT(msg.posesId.size() == msg.poses.size());
745  for(unsigned int i=0; i<msg.posesId.size(); ++i)
746  {
747  poses.insert(std::make_pair(msg.posesId[i], rtabmap_ros::transformFromPoseMsg(msg.poses[i])));
748  }
749  for(unsigned int i=0; i<msg.links.size(); ++i)
750  {
751  rtabmap::Transform t = rtabmap_ros::transformFromGeometryMsg(msg.links[i].transform);
752  links.insert(std::make_pair(msg.links[i].fromId, linkFromROS(msg.links[i])));
753  }
754  mapToOdom = transformFromGeometryMsg(msg.mapToOdom);
755 }
756 void mapGraphToROS(
757  const std::map<int, rtabmap::Transform> & poses,
758  const std::multimap<int, rtabmap::Link> & links,
759  const rtabmap::Transform & mapToOdom,
760  rtabmap_ros::MapGraph & msg)
761 {
762  //Optimized graph
763  msg.posesId.resize(poses.size());
764  msg.poses.resize(poses.size());
765  int index = 0;
766  for(std::map<int, rtabmap::Transform>::const_iterator iter = poses.begin();
767  iter != poses.end();
768  ++iter)
769  {
770  msg.posesId[index] = iter->first;
771  transformToPoseMsg(iter->second, msg.poses[index]);
772  ++index;
773  }
774 
775  msg.links.resize(links.size());
776  index=0;
777  for(std::multimap<int, rtabmap::Link>::const_iterator iter = links.begin();
778  iter!=links.end();
779  ++iter)
780  {
781  linkToROS(iter->second, msg.links[index++]);
782  }
783 
784  transformToGeometryMsg(mapToOdom, msg.mapToOdom);
785 }
786 
787 rtabmap::Signature nodeDataFromROS(const rtabmap_ros::NodeData & msg)
788 {
789  //Features stuff...
790  std::multimap<int, cv::KeyPoint> words;
791  std::multimap<int, cv::Point3f> words3D;
792  std::multimap<int, cv::Mat> wordsDescriptors;
793  pcl::PointCloud<pcl::PointXYZ> cloud;
794  cv::Mat descriptors;
795  if(msg.wordPts.data.size() &&
796  msg.wordPts.height*msg.wordPts.width == msg.wordIds.size())
797  {
798  pcl::fromROSMsg(msg.wordPts, cloud);
799  descriptors = rtabmap::uncompressData(msg.descriptors);
800  }
801 
802  for(unsigned int i=0; i<msg.wordIds.size() && i<msg.wordKpts.size(); ++i)
803  {
804  cv::KeyPoint pt = keypointFromROS(msg.wordKpts.at(i));
805  int wordId = msg.wordIds.at(i);
806  words.insert(std::make_pair(wordId, pt));
807  if(i< cloud.size())
808  {
809  words3D.insert(std::make_pair(wordId, cv::Point3f(cloud[i].x, cloud[i].y, cloud[i].z)));
810  }
811  if(i < descriptors.rows)
812  {
813  wordsDescriptors.insert(std::make_pair(wordId, descriptors.row(i).clone()));
814  }
815  }
816 
817  if(words3D.size() && words3D.size() != words.size())
818  {
819  ROS_ERROR("Words 2D and 3D should be the same size (%d, %d)!", (int)words.size(), (int)words3D.size());
820  }
821 
822  rtabmap::StereoCameraModel stereoModel;
823  std::vector<rtabmap::CameraModel> models;
824  if(msg.baseline > 0.0f)
825  {
826  // stereo model
827  if(msg.fx.size() == 1 &&
828  msg.fy.size() == 1 &&
829  msg.cx.size() == 1 &&
830  msg.cy.size() == 1 &&
831  msg.width.size() == 1 &&
832  msg.height.size() == 1 &&
833  msg.localTransform.size() == 1)
834  {
835  stereoModel = rtabmap::StereoCameraModel(
836  msg.fx[0],
837  msg.fy[0],
838  msg.cx[0],
839  msg.cy[0],
840  msg.baseline,
841  transformFromGeometryMsg(msg.localTransform[0]),
842  cv::Size(msg.width[0], msg.height[0]));
843  }
844  }
845  else
846  {
847  // multi-cameras model
848  if(msg.fx.size() &&
849  msg.fx.size() == msg.fy.size() &&
850  msg.fx.size() == msg.cx.size() &&
851  msg.fx.size() == msg.cy.size() &&
852  msg.fx.size() == msg.localTransform.size())
853  {
854  for(unsigned int i=0; i<msg.fx.size(); ++i)
855  {
856  models.push_back(rtabmap::CameraModel(
857  msg.fx[i],
858  msg.fy[i],
859  msg.cx[i],
860  msg.cy[i],
861  transformFromGeometryMsg(msg.localTransform[i]),
862  0.0,
863  cv::Size(msg.width[i], msg.height[i])));
864  }
865  }
866  }
867 
868  rtabmap::Signature s(
869  msg.id,
870  msg.mapId,
871  msg.weight,
872  msg.stamp,
873  msg.label,
874  transformFromPoseMsg(msg.pose),
875  transformFromPoseMsg(msg.groundTruthPose),
876  stereoModel.isValidForProjection()?
877  rtabmap::SensorData(
878  rtabmap::LaserScan(compressedMatFromBytes(msg.laserScan),
879  msg.laserScanMaxPts,
880  msg.laserScanMaxRange,
881  (rtabmap::LaserScan::Format)msg.laserScanFormat,
882  transformFromGeometryMsg(msg.laserScanLocalTransform)),
883  compressedMatFromBytes(msg.image),
884  compressedMatFromBytes(msg.depth),
885  stereoModel,
886  msg.id,
887  msg.stamp,
888  compressedMatFromBytes(msg.userData)):
889  rtabmap::SensorData(
890  rtabmap::LaserScan(compressedMatFromBytes(msg.laserScan),
891  msg.laserScanMaxPts,
892  msg.laserScanMaxRange,
893  (rtabmap::LaserScan::Format)msg.laserScanFormat,
894  transformFromGeometryMsg(msg.laserScanLocalTransform)),
895  compressedMatFromBytes(msg.image),
896  compressedMatFromBytes(msg.depth),
897  models,
898  msg.id,
899  msg.stamp,
900  compressedMatFromBytes(msg.userData)));
901  s.setWords(words);
902  s.setWords3(words3D);
903  s.setWordsDescriptors(wordsDescriptors);
904  s.sensorData().setOccupancyGrid(
905  compressedMatFromBytes(msg.grid_ground),
906  compressedMatFromBytes(msg.grid_obstacles),
907  compressedMatFromBytes(msg.grid_empty_cells),
908  msg.grid_cell_size,
909  point3fFromROS(msg.grid_view_point));
910  s.sensorData().setGPS(rtabmap::GPS(msg.gps.stamp, msg.gps.longitude, msg.gps.latitude, msg.gps.altitude, msg.gps.error, msg.gps.bearing));
911  return s;
912 }
913 void nodeDataToROS(const rtabmap::Signature & signature, rtabmap_ros::NodeData & msg)
914 {
915  // add data
916  msg.id = signature.id();
917  msg.mapId = signature.mapId();
918  msg.weight = signature.getWeight();
919  msg.stamp = signature.getStamp();
920  msg.label = signature.getLabel();
921  transformToPoseMsg(signature.getPose(), msg.pose);
922  transformToPoseMsg(signature.getGroundTruthPose(), msg.groundTruthPose);
923  msg.gps.stamp = signature.sensorData().gps().stamp();
924  msg.gps.longitude = signature.sensorData().gps().longitude();
925  msg.gps.latitude = signature.sensorData().gps().latitude();
926  msg.gps.altitude = signature.sensorData().gps().altitude();
927  msg.gps.error = signature.sensorData().gps().error();
928  msg.gps.bearing = signature.sensorData().gps().bearing();
929  compressedMatToBytes(signature.sensorData().imageCompressed(), msg.image);
930  compressedMatToBytes(signature.sensorData().depthOrRightCompressed(), msg.depth);
931  compressedMatToBytes(signature.sensorData().laserScanCompressed().data(), msg.laserScan);
932  compressedMatToBytes(signature.sensorData().userDataCompressed(), msg.userData);
933  compressedMatToBytes(signature.sensorData().gridGroundCellsCompressed(), msg.grid_ground);
934  compressedMatToBytes(signature.sensorData().gridObstacleCellsCompressed(), msg.grid_obstacles);
935  compressedMatToBytes(signature.sensorData().gridEmptyCellsCompressed(), msg.grid_empty_cells);
936  point3fToROS(signature.sensorData().gridViewPoint(), msg.grid_view_point);
937  msg.grid_cell_size = signature.sensorData().gridCellSize();
938  msg.laserScanMaxPts = signature.sensorData().laserScanCompressed().maxPoints();
939  msg.laserScanMaxRange = signature.sensorData().laserScanCompressed().maxRange();
940  msg.laserScanFormat = signature.sensorData().laserScanCompressed().format();
941  transformToGeometryMsg(signature.sensorData().laserScanCompressed().localTransform(), msg.laserScanLocalTransform);
942  msg.baseline = 0;
943  if(signature.sensorData().cameraModels().size())
944  {
945  msg.fx.resize(signature.sensorData().cameraModels().size());
946  msg.fy.resize(signature.sensorData().cameraModels().size());
947  msg.cx.resize(signature.sensorData().cameraModels().size());
948  msg.cy.resize(signature.sensorData().cameraModels().size());
949  msg.width.resize(signature.sensorData().cameraModels().size());
950  msg.height.resize(signature.sensorData().cameraModels().size());
951  msg.localTransform.resize(signature.sensorData().cameraModels().size());
952  for(unsigned int i=0; i<signature.sensorData().cameraModels().size(); ++i)
953  {
954  msg.fx[i] = signature.sensorData().cameraModels()[i].fx();
955  msg.fy[i] = signature.sensorData().cameraModels()[i].fy();
956  msg.cx[i] = signature.sensorData().cameraModels()[i].cx();
957  msg.cy[i] = signature.sensorData().cameraModels()[i].cy();
958  msg.width[i] = signature.sensorData().cameraModels()[i].imageWidth();
959  msg.height[i] = signature.sensorData().cameraModels()[i].imageHeight();
960  transformToGeometryMsg(signature.sensorData().cameraModels()[i].localTransform(), msg.localTransform[i]);
961  }
962  }
963  else if(signature.sensorData().stereoCameraModel().isValidForProjection())
964  {
965  msg.fx.push_back(signature.sensorData().stereoCameraModel().left().fx());
966  msg.fy.push_back(signature.sensorData().stereoCameraModel().left().fy());
967  msg.cx.push_back(signature.sensorData().stereoCameraModel().left().cx());
968  msg.cy.push_back(signature.sensorData().stereoCameraModel().left().cy());
969  msg.width.push_back(signature.sensorData().stereoCameraModel().left().imageWidth());
970  msg.height.push_back(signature.sensorData().stereoCameraModel().left().imageHeight());
971  msg.baseline = signature.sensorData().stereoCameraModel().baseline();
972  msg.localTransform.resize(1);
973  transformToGeometryMsg(signature.sensorData().stereoCameraModel().left().localTransform(), msg.localTransform[0]);
974  }
975 
976  //Features stuff...
977  msg.wordIds = uKeys(signature.getWords());
978  msg.wordKpts.resize(signature.getWords().size());
979  int index = 0;
980  for(std::multimap<int, cv::KeyPoint>::const_iterator jter=signature.getWords().begin();
981  jter!=signature.getWords().end();
982  ++jter)
983  {
984  keypointToROS(jter->second, msg.wordKpts.at(index++));
985  }
986 
987  if(signature.getWords3().size() && signature.getWords3().size() == signature.getWords().size())
988  {
989  pcl::PointCloud<pcl::PointXYZ> cloud;
990  cloud.resize(signature.getWords3().size());
991  index = 0;
992  for(std::multimap<int, cv::Point3f>::const_iterator jter=signature.getWords3().begin();
993  jter!=signature.getWords3().end();
994  ++jter)
995  {
996  cloud[index].x = jter->second.x;
997  cloud[index].y = jter->second.y;
998  cloud[index++].z = jter->second.z;
999  }
1000  pcl::toROSMsg(cloud, msg.wordPts);
1001  }
1002  else if(signature.getWords3().size())
1003  {
1004  ROS_ERROR("Words 2D and words 3D must have the same size (%d vs %d)!",
1005  (int)signature.getWords().size(),
1006  (int)signature.getWords3().size());
1007  }
1008 
1009  if(signature.getWordsDescriptors().size() && signature.getWordsDescriptors().size() == signature.getWords().size())
1010  {
1011  cv::Mat descriptors(
1012  signature.getWordsDescriptors().size(),
1013  signature.getWordsDescriptors().begin()->second.cols,
1014  signature.getWordsDescriptors().begin()->second.type());
1015  index = 0;
1016  bool valid = true;
1017  for(std::multimap<int, cv::Mat>::const_iterator jter=signature.getWordsDescriptors().begin();
1018  jter!=signature.getWordsDescriptors().end() && valid;
1019  ++jter)
1020  {
1021  if(jter->second.cols == descriptors.cols &&
1022  jter->second.type() == descriptors.type())
1023  {
1024  jter->second.copyTo(descriptors.row(index++));
1025  }
1026  else
1027  {
1028  valid = false;
1029  ROS_ERROR("Some descriptors have different type/size! Cannot copy them...");
1030  }
1031  }
1032 
1033  if(valid)
1034  {
1035  msg.descriptors = rtabmap::compressData(descriptors);
1036  }
1037  }
1038  else if(signature.getWordsDescriptors().size())
1039  {
1040  ROS_ERROR("Words and descriptors must have the same size (%d vs %d)!",
1041  (int)signature.getWords().size(),
1042  (int)signature.getWordsDescriptors().size());
1043  }
1044 }
1045 
1046 rtabmap::Signature nodeInfoFromROS(const rtabmap_ros::NodeData & msg)
1047 {
1048  rtabmap::Signature s(
1049  msg.id,
1050  msg.mapId,
1051  msg.weight,
1052  msg.stamp,
1053  msg.label,
1054  transformFromPoseMsg(msg.pose),
1055  transformFromPoseMsg(msg.groundTruthPose));
1056  return s;
1057 }
1058 void nodeInfoToROS(const rtabmap::Signature & signature, rtabmap_ros::NodeData & msg)
1059 {
1060  // add data
1061  msg.id = signature.id();
1062  msg.mapId = signature.mapId();
1063  msg.weight = signature.getWeight();
1064  msg.stamp = signature.getStamp();
1065  msg.label = signature.getLabel();
1066  transformToPoseMsg(signature.getPose(), msg.pose);
1067  transformToPoseMsg(signature.getGroundTruthPose(), msg.groundTruthPose);
1068 }
1069 
1070 rtabmap::OdometryInfo odomInfoFromROS(const rtabmap_ros::OdomInfo & msg)
1071 {
1072  rtabmap::OdometryInfo info;
1073  info.lost = msg.lost;
1074  info.reg.matches = msg.matches;
1075  info.reg.inliers = msg.inliers;
1076  info.reg.icpInliersRatio = msg.icpInliersRatio;
1077  info.reg.icpRotation = msg.icpRotation;
1078  info.reg.icpTranslation = msg.icpTranslation;
1079  info.reg.icpStructuralComplexity = msg.icpStructuralComplexity;
1080  info.reg.covariance = cv::Mat(6,6,CV_64FC1, (void*)msg.covariance.data()).clone();
1081  info.features = msg.features;
1082  info.localMapSize = msg.localMapSize;
1083  info.localScanMapSize = msg.localScanMapSize;
1084  info.localKeyFrames = msg.localKeyFrames;
1085  info.localBundleOutliers = msg.localBundleOutliers;
1086  info.localBundleConstraints = msg.localBundleConstraints;
1087  info.localBundleTime = msg.localBundleTime;
1088  info.keyFrameAdded = msg.keyFrameAdded;
1089  info.timeEstimation = msg.timeEstimation;
1090  info.timeParticleFiltering = msg.timeParticleFiltering;
1091  info.stamp = msg.stamp;
1092  info.interval = msg.interval;
1093  info.distanceTravelled = msg.distanceTravelled;
1094  info.memoryUsage = msg.memoryUsage;
1095 
1096  info.type = msg.type;
1097 
1098  UASSERT(msg.wordsKeys.size() == msg.wordsValues.size());
1099  for(unsigned int i=0; i<msg.wordsKeys.size(); ++i)
1100  {
1101  info.words.insert(std::make_pair(msg.wordsKeys[i], keypointFromROS(msg.wordsValues[i])));
1102  }
1103 
1104  info.reg.matchesIDs = msg.wordMatches;
1105  info.reg.inliersIDs = msg.wordInliers;
1106 
1107  info.refCorners = points2fFromROS(msg.refCorners);
1108  info.newCorners = points2fFromROS(msg.newCorners);
1109  info.cornerInliers = msg.cornerInliers;
1110 
1111  info.transform = transformFromGeometryMsg(msg.transform);
1112  info.transformFiltered = transformFromGeometryMsg(msg.transformFiltered);
1113 
1114  UASSERT(msg.localMapKeys.size() == msg.localMapValues.size());
1115  for(unsigned int i=0; i<msg.localMapKeys.size(); ++i)
1116  {
1117  info.localMap.insert(std::make_pair(msg.localMapKeys[i], point3fFromROS(msg.localMapValues[i])));
1118  }
1119 
1121 
1122  return info;
1123 }
1124 
1125 void odomInfoToROS(const rtabmap::OdometryInfo & info, rtabmap_ros::OdomInfo & msg)
1126 {
1127  msg.lost = info.lost;
1128  msg.matches = info.reg.matches;
1129  msg.inliers = info.reg.inliers;
1130  msg.icpInliersRatio = info.reg.icpInliersRatio;
1131  msg.icpRotation = info.reg.icpRotation;
1132  msg.icpTranslation = info.reg.icpTranslation;
1133  msg.icpStructuralComplexity = info.reg.icpStructuralComplexity;
1134  if(info.reg.covariance.type() == CV_64FC1 && info.reg.covariance.cols == 6 && info.reg.covariance.rows == 6)
1135  {
1136  memcpy(msg.covariance.data(), info.reg.covariance.data, 36*sizeof(double));
1137  }
1138  msg.features = info.features;
1139  msg.localMapSize = info.localMapSize;
1140  msg.localScanMapSize = info.localScanMapSize;
1141  msg.localKeyFrames = info.localKeyFrames;
1142  msg.localBundleOutliers = info.localBundleOutliers;
1143  msg.localBundleConstraints = info.localBundleConstraints;
1144  msg.localBundleTime = info.localBundleTime;
1145  msg.keyFrameAdded = info.keyFrameAdded;
1146  msg.timeEstimation = info.timeEstimation;
1147  msg.timeParticleFiltering = info.timeParticleFiltering;
1148  msg.stamp = info.stamp;
1149  msg.interval = info.interval;
1150  msg.distanceTravelled = info.distanceTravelled;
1151  msg.memoryUsage = info.memoryUsage;
1152 
1153  msg.type = info.type;
1154 
1155  msg.wordsKeys = uKeys(info.words);
1156  keypointsToROS(uValues(info.words), msg.wordsValues);
1157 
1158  msg.wordMatches = info.reg.matchesIDs;
1159  msg.wordInliers = info.reg.inliersIDs;
1160 
1161  points2fToROS(info.refCorners, msg.refCorners);
1162  points2fToROS(info.newCorners, msg.newCorners);
1163  msg.cornerInliers = info.cornerInliers;
1164 
1165  transformToGeometryMsg(info.transform, msg.transform);
1166  transformToGeometryMsg(info.transformFiltered, msg.transformFiltered);
1167 
1168  msg.localMapKeys = uKeys(info.localMap);
1169  points3fToROS(uValues(info.localMap), msg.localMapValues);
1170 
1172 }
1173 
1174 cv::Mat userDataFromROS(const rtabmap_ros::UserData & dataMsg)
1175 {
1176  cv::Mat data;
1177  if(!dataMsg.data.empty())
1178  {
1179  if(dataMsg.cols > 0 && dataMsg.rows > 0 && dataMsg.type >= 0)
1180  {
1181  data = cv::Mat(dataMsg.rows, dataMsg.cols, dataMsg.type, (void*)dataMsg.data.data()).clone();
1182  }
1183  else
1184  {
1185  if(dataMsg.cols != (int)dataMsg.data.size() || dataMsg.rows != 1 || dataMsg.type != CV_8UC1)
1186  {
1187  ROS_ERROR("cols, rows and type fields of the UserData msg "
1188  "are not correctly set (cols=%d, rows=%d, type=%d)! We assume that the data "
1189  "is compressed (cols=%d, rows=1, type=%d(CV_8UC1)).",
1190  dataMsg.cols, dataMsg.rows, dataMsg.type, (int)dataMsg.data.size(), CV_8UC1);
1191 
1192  }
1193  data = cv::Mat(1, dataMsg.data.size(), CV_8UC1, (void*)dataMsg.data.data()).clone();
1194  }
1195  }
1196  return data;
1197 }
1198 void userDataToROS(const cv::Mat & data, rtabmap_ros::UserData & dataMsg, bool compress)
1199 {
1200  if(!data.empty())
1201  {
1202  if(compress)
1203  {
1204  dataMsg.data = rtabmap::compressData(data);
1205  dataMsg.rows = 1;
1206  dataMsg.cols = dataMsg.data.size();
1207  dataMsg.type = CV_8UC1;
1208  }
1209  else
1210  {
1211  dataMsg.data.resize(data.step[0] * data.rows); // use step for non-contiguous matrices
1212  memcpy(dataMsg.data.data(), data.data, dataMsg.data.size());
1213  dataMsg.rows = data.rows;
1214  dataMsg.cols = data.cols;
1215  dataMsg.type = data.type();
1216  }
1217  }
1218 }
1219 
1220 rtabmap::Transform getTransform(
1221  const std::string & fromFrameId,
1222  const std::string & toFrameId,
1223  const ros::Time & stamp,
1224  tf::TransformListener & listener,
1225  double waitForTransform)
1226 {
1227  // TF ready?
1228  rtabmap::Transform transform;
1229  try
1230  {
1231  if(waitForTransform > 0.0 && !stamp.isZero())
1232  {
1233  //if(!tfBuffer_.canTransform(fromFrameId, toFrameId, stamp, ros::Duration(1)))
1234  std::string errorMsg;
1235  if(!listener.waitForTransform(fromFrameId, toFrameId, stamp, ros::Duration(waitForTransform), ros::Duration(0.01), &errorMsg))
1236  {
1237  ROS_WARN("Could not get transform from %s to %s after %f seconds (for stamp=%f)! Error=\"%s\".",
1238  fromFrameId.c_str(), toFrameId.c_str(), waitForTransform, stamp.toSec(), errorMsg.c_str());
1239  return transform;
1240  }
1241  }
1242 
1243  tf::StampedTransform tmp;
1244  listener.lookupTransform(fromFrameId, toFrameId, stamp, tmp);
1245  transform = rtabmap_ros::transformFromTF(tmp);
1246  }
1247  catch(tf::TransformException & ex)
1248  {
1249  ROS_WARN("(getting transform %s -> %s) %s", fromFrameId.c_str(), toFrameId.c_str(), ex.what());
1250  }
1251  return transform;
1252 }
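// Illustrative sketch (not part of the original file): typical use of getTransform() with a
// tf::TransformListener; "listener" and "stamp" are hypothetical names.
//
//   tf::TransformListener listener;
//   rtabmap::Transform t = rtabmap_ros::getTransform("base_link", "camera_link", stamp, listener, 0.1);
//   if(t.isNull()) { /* TF was not available within the wait time */ }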
1253 
1254 // Get the transform of a moving frame between two stamps, expressed with respect to a
1255 // fixed frame. For example, get how /base_link moved between two stamps according to /odom.
1256 rtabmap::Transform getTransform(
1257  const std::string & sourceTargetFrame,
1258  const std::string & fixedFrame,
1259  const ros::Time & stampSource,
1260  const ros::Time & stampTarget,
1261  tf::TransformListener & listener,
1262  double waitForTransform)
1263 {
1264  // TF ready?
1265  rtabmap::Transform transform;
1266  try
1267  {
1268  ros::Time stamp = stampSource>stampTarget?stampSource:stampTarget;
1269  if(waitForTransform > 0.0 && !stamp.isZero())
1270  {
1271  std::string errorMsg;
1272  if(!listener.waitForTransform(sourceTargetFrame, fixedFrame, stamp, ros::Duration(waitForTransform), ros::Duration(0.01), &errorMsg))
1273  {
1274  ROS_WARN("Could not get transform from %s to %s accordingly to %s after %f seconds (for stamps=%f -> %f)! Error=\"%s\".",
1275  sourceTargetFrame.c_str(), sourceTargetFrame.c_str(), fixedFrame.c_str(), waitForTransform, stampSource.toSec(), stampTarget.toSec(), errorMsg.c_str());
1276  return transform;
1277  }
1278  }
1279 
1280  tf::StampedTransform tmp;
1281  listener.lookupTransform(sourceTargetFrame, stampTarget, sourceTargetFrame, stampSource, fixedFrame, tmp);
1282  transform = rtabmap_ros::transformFromTF(tmp);
1283  }
1284  catch(tf::TransformException & ex)
1285  {
1286  ROS_WARN("(getting transform movement of %s according to fixed %s) %s", sourceTargetFrame.c_str(), fixedFrame.c_str(), ex.what());
1287  }
1288  return transform;
1289 }
1290 
1291 bool convertRGBDMsgs(
1292  const std::vector<cv_bridge::CvImageConstPtr> & imageMsgs,
1293  const std::vector<cv_bridge::CvImageConstPtr> & depthMsgs,
1294  const std::vector<sensor_msgs::CameraInfo> & cameraInfoMsgs,
1295  const std::string & frameId,
1296  const std::string & odomFrameId,
1297  const ros::Time & odomStamp,
1298  cv::Mat & rgb,
1299  cv::Mat & depth,
1300  std::vector<rtabmap::CameraModel> & cameraModels,
1301  tf::TransformListener & listener,
1302  double waitForTransform)
1303 {
1304  UASSERT(imageMsgs.size()>0 &&
1305  (imageMsgs.size() == depthMsgs.size() || depthMsgs.empty()) &&
1306  imageMsgs.size() == cameraInfoMsgs.size());
1307 
1308  int imageWidth = imageMsgs[0]->image.cols;
1309  int imageHeight = imageMsgs[0]->image.rows;
1310  int depthWidth = depthMsgs.size()?depthMsgs[0]->image.cols:0;
1311  int depthHeight = depthMsgs.size()?depthMsgs[0]->image.rows:0;
1312 
1313  if(depthMsgs.size())
1314  {
1315  UASSERT_MSG(
1316  imageWidth % depthWidth == 0 && imageHeight % depthHeight == 0 &&
1317  imageWidth/depthWidth == imageHeight/depthHeight,
1318  uFormat("rgb=%dx%d depth=%dx%d", imageWidth, imageHeight, depthWidth, depthHeight).c_str());
1319  }
1320 
1321  int cameraCount = imageMsgs.size();
1322  for(unsigned int i=0; i<imageMsgs.size(); ++i)
1323  {
1324  if(!(imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1) == 0 ||
1325  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO8) ==0 ||
1326  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO16) ==0 ||
1327  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
1328  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
1329  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
1330  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0 ||
1331  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BAYER_GRBG8) == 0))
1332  {
1333 
1334  ROS_ERROR("Input rgb type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8. Current rgb=%s",
1335  imageMsgs[i]->encoding.c_str());
1336  return false;
1337  }
1338  if(depthMsgs.size() &&
1339  !(depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1) == 0 ||
1340  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1) == 0 ||
1341  depthMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0))
1342  {
1343  ROS_ERROR("Input depth type must be image_depth=32FC1,16UC1,mono16. Current depth=%s",
1344  depthMsgs[i]->encoding.c_str());
1345  return false;
1346  }
1347 
1348  UASSERT_MSG(imageMsgs[i]->image.cols == imageWidth && imageMsgs[i]->image.rows == imageHeight,
1349  uFormat("imageWidth=%d vs %d imageHeight=%d vs %d",
1350  imageWidth,
1351  imageMsgs[i]->image.cols,
1352  imageHeight,
1353  imageMsgs[i]->image.rows).c_str());
1354  ros::Time stamp;
1355  if(depthMsgs.size())
1356  {
1357  UASSERT_MSG(depthMsgs[i]->image.cols == depthWidth && depthMsgs[i]->image.rows == depthHeight,
1358  uFormat("depthWidth=%d vs %d depthHeight=%d vs %d",
1359  depthWidth,
1360  depthMsgs[i]->image.cols,
1361  depthHeight,
1362  depthMsgs[i]->image.rows).c_str());
1363  stamp = depthMsgs[i]->header.stamp;
1364  }
1365  else
1366  {
1367  stamp = imageMsgs[i]->header.stamp;
1368  }
1369 
1370  // use the depth image stamp so the geometry is synchronized with odom; use the rgb frame since we assume depth is registered (the depth msg normally has the same frame as rgb)
1371  rtabmap::Transform localTransform = rtabmap_ros::getTransform(frameId, imageMsgs[i]->header.frame_id, stamp, listener, waitForTransform);
1372  if(localTransform.isNull())
1373  {
1374  ROS_ERROR("TF of received image %d at time %fs is not set!", i, stamp.toSec());
1375  return false;
1376  }
1377  // sync with odometry stamp
1378  if(!odomFrameId.empty() && odomStamp != stamp)
1379  {
1380  rtabmap::Transform sensorT = getTransform(
1381  frameId,
1382  odomFrameId,
1383  odomStamp,
1384  stamp,
1385  listener,
1386  waitForTransform);
1387  if(sensorT.isNull())
1388  {
1389  ROS_WARN("Could not get odometry value for depth image stamp (%fs). Latest odometry "
1390  "stamp is %fs. The depth image pose will not be synchronized with odometry.", stamp.toSec(), odomStamp.toSec());
1391  }
1392  else
1393  {
1394  //ROS_WARN("RGBD correction = %s (time diff=%fs)", sensorT.prettyPrint().c_str(), fabs(stamp.toSec()-odomStamp.toSec()));
1395  localTransform = sensorT * localTransform;
1396  }
1397  }
1398 
1399  cv_bridge::CvImageConstPtr ptrImage = imageMsgs[i];
1400  if(imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::TYPE_8UC1)==0 ||
1401  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
1402  imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0)
1403  {
1404  // do nothing
1405  }
1406  else if(imageMsgs[i]->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
1407  {
1408  ptrImage = cv_bridge::cvtColor(imageMsgs[i], "mono8");
1409  }
1410  else
1411  {
1412  ptrImage = cv_bridge::cvtColor(imageMsgs[i], "bgr8");
1413  }
1414 
1415  // initialize
1416  if(rgb.empty())
1417  {
1418  rgb = cv::Mat(imageHeight, imageWidth*cameraCount, ptrImage->image.type());
1419  }
1420  if(ptrImage->image.type() == rgb.type())
1421  {
1422  ptrImage->image.copyTo(cv::Mat(rgb, cv::Rect(i*imageWidth, 0, imageWidth, imageHeight)));
1423  }
1424  else
1425  {
1426  ROS_ERROR("Some RGB images are not the same type!");
1427  return false;
1428  }
1429 
1430  if(depthMsgs.size())
1431  {
1432  cv_bridge::CvImageConstPtr ptrDepth = depthMsgs[i];
1433  cv::Mat subDepth = ptrDepth->image;
1434 
1435  if(depth.empty())
1436  {
1437  depth = cv::Mat(depthHeight, depthWidth*cameraCount, subDepth.type());
1438  }
1439 
1440  if(subDepth.type() == depth.type())
1441  {
1442  subDepth.copyTo(cv::Mat(depth, cv::Rect(i*depthWidth, 0, depthWidth, depthHeight)));
1443  }
1444  else
1445  {
1446  ROS_ERROR("Some Depth images are not the same type!");
1447  return false;
1448  }
1449  }
1450 
1451  cameraModels.push_back(rtabmap_ros::cameraModelFromROS(cameraInfoMsgs[i], localTransform));
1452  }
1453  return true;
1454 }
1455 
1456 bool convertStereoMsg(
1457  const cv_bridge::CvImageConstPtr& leftImageMsg,
1458  const cv_bridge::CvImageConstPtr& rightImageMsg,
1459  const sensor_msgs::CameraInfo& leftCamInfoMsg,
1460  const sensor_msgs::CameraInfo& rightCamInfoMsg,
1461  const std::string & frameId,
1462  const std::string & odomFrameId,
1463  const ros::Time & odomStamp,
1464  cv::Mat & left,
1465  cv::Mat & right,
1466  rtabmap::StereoCameraModel & stereoModel,
1467  tf::TransformListener & listener,
1468  double waitForTransform)
1469 {
1470  UASSERT(leftImageMsg.get() && rightImageMsg.get());
1471 
1472  if(!(leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
1473  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0 ||
1474  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
1475  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
1476  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
1477  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0) ||
1478  !(rightImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
1479  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0 ||
1480  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::BGR8) == 0 ||
1481  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::RGB8) == 0 ||
1482  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::BGRA8) == 0 ||
1483  rightImageMsg->encoding.compare(sensor_msgs::image_encodings::RGBA8) == 0))
1484  {
1485  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8");
1486  ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8 Current left=%s and right=%s",
1487  leftImageMsg->encoding.c_str(),
1488  rightImageMsg->encoding.c_str());
1489  return false;
1490  }
1491 
1492  if(leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO8) == 0 ||
1493  leftImageMsg->encoding.compare(sensor_msgs::image_encodings::MONO16) == 0)
1494  {
1495  left = cv_bridge::cvtColor(leftImageMsg, "mono8")->image;
1496  }
1497  else
1498  {
1499  left = cv_bridge::cvtColor(leftImageMsg, "bgr8")->image;
1500  }
1501  right = cv_bridge::cvtColor(rightImageMsg, "mono8")->image;
1502 
1503  rtabmap::Transform localTransform = getTransform(frameId, leftImageMsg->header.frame_id, leftImageMsg->header.stamp, listener, waitForTransform);
1504  if(localTransform.isNull())
1505  {
1506  return false;
1507  }
1508  // sync with odometry stamp
1509  if(!odomFrameId.empty() && odomStamp != leftImageMsg->header.stamp)
1510  {
1511  rtabmap::Transform sensorT = getTransform(
1512  frameId,
1513  odomFrameId,
1514  odomStamp,
1515  leftImageMsg->header.stamp,
1516  listener,
1517  waitForTransform);
1518  if(sensorT.isNull())
1519  {
1520  ROS_WARN("Could not get odometry value for stereo msg stamp (%fs). Latest odometry "
1521  "stamp is %fs. The stereo image pose will not be synchronized with odometry.", leftImageMsg->header.stamp.toSec(), odomStamp.toSec());
1522  }
1523  else
1524  {
1525  localTransform = sensorT * localTransform;
1526  }
1527  }
1528 
1529  stereoModel = rtabmap_ros::stereoCameraModelFromROS(leftCamInfoMsg, rightCamInfoMsg, localTransform);
1530 
1531  if(stereoModel.baseline() > 10.0)
1532  {
1533  static bool shown = false;
1534  if(!shown)
1535  {
1536  ROS_WARN("Detected baseline (%f m) is quite large! Is your "
1537  "right camera_info P(0,3) correctly set? Note that "
1538  "baseline=-P(0,3)/P(0,0). You may need to calibrate your camera. "
1539  "This warning is printed only once.",
1540  stereoModel.baseline());
1541  shown = true;
1542  }
1543  }
1544  return true;
1545 }
1546 
1547 bool convertScanMsg(
1548  const sensor_msgs::LaserScanConstPtr& scan2dMsg,
1549  const std::string & frameId,
1550  const std::string & odomFrameId,
1551  const ros::Time & odomStamp,
1552  cv::Mat & scan,
1553  rtabmap::Transform & scanLocalTransform,
1554  tf::TransformListener & listener,
1555  double waitForTransform)
1556 {
1557  // make sure the frame of the laser is updated too
1558  rtabmap::Transform tmpT = getTransform(
1559  odomFrameId.empty()?frameId:odomFrameId,
1560  scan2dMsg->header.frame_id,
1561  scan2dMsg->header.stamp + ros::Duration().fromSec(scan2dMsg->ranges.size()*scan2dMsg->time_increment),
1562  listener,
1563  waitForTransform);
1564  if(tmpT.isNull())
1565  {
1566  return false;
1567  }
1568 
1569  scanLocalTransform = getTransform(
1570  frameId,
1571  scan2dMsg->header.frame_id,
1572  scan2dMsg->header.stamp,
1573  listener,
1574  waitForTransform);
1575  if(scanLocalTransform.isNull())
1576  {
1577  return false;
1578  }
1579 
1580  //transform in frameId_ frame
1581  sensor_msgs::PointCloud2 scanOut;
1582  laser_geometry::LaserProjection projection;
1583  projection.transformLaserScanToPointCloud(odomFrameId.empty()?frameId:odomFrameId, *scan2dMsg, scanOut, listener);
1584  pcl::PointCloud<pcl::PointXYZ>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZ>);
1585  pcl::fromROSMsg(scanOut, *pclScan);
1586  pclScan->is_dense = true;
1587 
1588  //transform back in laser frame
1589  rtabmap::Transform laserToOdom = getTransform(
1590  scan2dMsg->header.frame_id,
1591  odomFrameId.empty()?frameId:odomFrameId,
1592  scan2dMsg->header.stamp,
1593  listener,
1594  waitForTransform);
1595  if(laserToOdom.isNull())
1596  {
1597  return false;
1598  }
1599 
1600  // sync with odometry stamp
1601  if(!odomFrameId.empty() && odomStamp != scan2dMsg->header.stamp)
1602  {
1603  rtabmap::Transform sensorT = getTransform(
1604  frameId,
1605  odomFrameId,
1606  odomStamp,
1607  scan2dMsg->header.stamp,
1608  listener,
1609  waitForTransform);
1610  if(sensorT.isNull())
1611  {
1612  ROS_WARN("Could not get odometry value for laser scan stamp (%fs). Latest odometry "
1613  "stamp is %fs. The laser scan pose will not be synchronized with odometry.", scan2dMsg->header.stamp.toSec(), odomStamp.toSec());
1614  }
1615  else
1616  {
1617  //ROS_WARN("scan correction = %s (time diff=%fs)", sensorT.prettyPrint().c_str(), fabs(scan2dMsg->header.stamp.toSec()-odomStamp.toSec()));
1618  scanLocalTransform = sensorT * scanLocalTransform;
1619  }
1620  }
1621 
1622  scan = rtabmap::util3d::laserScan2dFromPointCloud(*pclScan, laserToOdom); // put back in laser frame
1623 
1624  return true;
1625 }
1626 
1627 bool convertScan3dMsg(
1628  const sensor_msgs::PointCloud2ConstPtr & scan3dMsg,
1629  const std::string & frameId,
1630  const std::string & odomFrameId,
1631  const ros::Time & odomStamp,
1632  cv::Mat & scan,
1633  rtabmap::Transform & scanLocalTransform,
1634  tf::TransformListener & listener,
1635  double waitForTransform)
1636 {
1637  bool containNormals = false;
1638  bool containColors = false;
1639  for(unsigned int i=0; i<scan3dMsg->fields.size(); ++i)
1640  {
1641  if(scan3dMsg->fields[i].name.compare("normal_x") == 0)
1642  {
1643  containNormals = true;
1644  }
1645  if(scan3dMsg->fields[i].name.compare("rgb") == 0 || scan3dMsg->fields[i].name.compare("rgba") == 0)
1646  {
1647  containColors = true;
1648  }
1649  }
1650 
1651  scanLocalTransform = getTransform(frameId, scan3dMsg->header.frame_id, scan3dMsg->header.stamp, listener, waitForTransform);
1652  if(scanLocalTransform.isNull())
1653  {
1654  ROS_ERROR("TF of received scan cloud at time %fs is not set, aborting rtabmap update.", scan3dMsg->header.stamp.toSec());
1655  return false;
1656  }
1657 
1658  // sync with odometry stamp
1659  if(!odomFrameId.empty() && odomStamp != scan3dMsg->header.stamp)
1660  {
1661  rtabmap::Transform sensorT = getTransform(
1662  frameId,
1663  odomFrameId,
1664  odomStamp,
1665  scan3dMsg->header.stamp,
1666  listener,
1667  waitForTransform);
1668  if(sensorT.isNull())
1669  {
1670  ROS_WARN("Could not get odometry value for laser scan stamp (%fs). Latest odometry "
1671  "stamp is %fs. The 3d laser scan pose will not be synchronized with odometry.", scan3dMsg->header.stamp.toSec(), odomStamp.toSec());
1672  }
1673  else
1674  {
1675  scanLocalTransform = sensorT * scanLocalTransform;
1676  }
1677  }
1678 
1679  if(containNormals)
1680  {
1681  if(containColors)
1682  {
1683  pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZRGBNormal>);
1684  pcl::fromROSMsg(*scan3dMsg, *pclScan);
1685  if(!pclScan->is_dense)
1686  {
1687  pclScan = rtabmap::util3d::removeNaNNormalsFromPointCloud(pclScan);
1688  }
1689  scan = rtabmap::util3d::laserScanFromPointCloud(*pclScan);
1690  }
1691  else
1692  {
1693  pcl::PointCloud<pcl::PointNormal>::Ptr pclScan(new pcl::PointCloud<pcl::PointNormal>);
1694  pcl::fromROSMsg(*scan3dMsg, *pclScan);
1695  if(!pclScan->is_dense)
1696  {
1697  pclScan = rtabmap::util3d::removeNaNNormalsFromPointCloud(pclScan);
1698  }
1699  scan = rtabmap::util3d::laserScanFromPointCloud(*pclScan);
1700  }
1701  }
1702  else
1703  {
1704  if(containColors)
1705  {
1706  pcl::PointCloud<pcl::PointXYZRGB>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZRGB>);
1707  pcl::fromROSMsg(*scan3dMsg, *pclScan);
1708  if(!pclScan->is_dense)
1709  {
1710  pclScan = rtabmap::util3d::removeNaNFromPointCloud(pclScan);
1711  }
1712 
1713  scan = rtabmap::util3d::laserScanFromPointCloud(*pclScan);
1714  }
1715  else
1716  {
1717  pcl::PointCloud<pcl::PointXYZ>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZ>);
1718  pcl::fromROSMsg(*scan3dMsg, *pclScan);
1719  if(!pclScan->is_dense)
1720  {
1721  pclScan = rtabmap::util3d::removeNaNFromPointCloud(pclScan);
1722  }
1723 
1724  scan = rtabmap::util3d::laserScanFromPointCloud(*pclScan);
1725  }
1726  }
1727  return true;
1728 }
1729 
1730 }
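// Editor's sketch, not part of the original file: the same pattern for a 3D
// scan, calling convertScan3dMsg() from a sensor_msgs::PointCloud2 callback.
// Frame names and the 0.1 s TF timeout are assumptions for illustration only.
static void exampleCloudCallback(const sensor_msgs::PointCloud2ConstPtr & cloudMsg,
  tf::TransformListener & listener)
{
 cv::Mat scan;
 rtabmap::Transform scanLocalTransform;
 if(rtabmap_ros::convertScan3dMsg(cloudMsg, "base_link", "odom", cloudMsg->header.stamp,
   scan, scanLocalTransform, listener, 0.1))
 {
  ROS_INFO("Converted 3D scan: %d points", scan.cols);
 }
}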


rtabmap_ros
Author(s): Mathieu Labbe
autogenerated on Fri Jun 7 2019 21:55:04