#include <opencv2/highgui/highgui.hpp>

Eigen::Affine3d eigenTf;
long double recipNorm = 1.0 / sqrt(msg.rotation.x * msg.rotation.x + msg.rotation.y * msg.rotation.y + msg.rotation.z * msg.rotation.z + msg.rotation.w * msg.rotation.w);
msg.rotation.x *= recipNorm;
msg.rotation.y *= recipNorm;
msg.rotation.z *= recipNorm;
msg.rotation.w *= recipNorm;
msg = geometry_msgs::Transform();

if(msg.rotation.w == 0 &&
   msg.rotation.x == 0 &&
   msg.rotation.y == 0 &&
   msg.rotation.z == 0)

Eigen::Affine3d tfTransform;
msg = geometry_msgs::Pose();

if(msg.orientation.w == 0 &&
   msg.orientation.x == 0 &&
   msg.orientation.y == 0 &&
   msg.orientation.z == 0)

Eigen::Affine3d tfPose;
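// Usage sketch (not from the original listing): round-tripping a pose through these
// helpers, assuming the declarations above are available via rtabmap_ros/MsgConversion.h.
// A message with an all-zero quaternion is mapped to a null rtabmap::Transform.
#include <rtabmap_ros/MsgConversion.h>
#include <geometry_msgs/Pose.h>

void poseRoundTripExample()
{
	geometry_msgs::Pose poseMsg;
	poseMsg.position.x = 1.0;
	poseMsg.orientation.w = 1.0; // identity rotation; w=x=y=z=0 would yield a null transform

	rtabmap::Transform t = rtabmap_ros::transformFromPoseMsg(poseMsg);
	if(!t.isNull())
	{
		geometry_msgs::Pose back;
		rtabmap_ros::transformToPoseMsg(t, back);
	}
}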
if(!image.rgb.data.empty())

else if(!image.rgbCompressed.data.empty())

#ifdef CV_BRIDGE_HYDRO
	ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");

rgb = boost::make_shared<cv_bridge::CvImage>();

if(!image.depth.data.empty())

else if(!image.depthCompressed.data.empty())

	ptr->header = image.depthCompressed.header;

ROS_ASSERT(ptr->image.empty() || ptr->image.type() == CV_32FC1 || ptr->image.type() == CV_16UC1);

depth = boost::make_shared<cv_bridge::CvImage>();
if(!image->rgb.data.empty())

else if(!image->rgbCompressed.data.empty())

#ifdef CV_BRIDGE_HYDRO
	ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");

rgb = boost::make_shared<cv_bridge::CvImage>();

if(!image->depth.data.empty())

else if(!image->depthCompressed.data.empty())

	if(image->depthCompressed.format.compare("jpg")==0)

#ifdef CV_BRIDGE_HYDRO
		ROS_ERROR("Unsupported compressed image copy, please upgrade at least to ROS Indigo to use this.");

	ptr->header = image->depthCompressed.header;

ROS_ASSERT(ptr->image.empty() || ptr->image.type() == CV_32FC1 || ptr->image.type() == CV_16UC1);

depth = boost::make_shared<cv_bridge::CvImage>();
ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8 (mono8 recommended), received types are %s (left) and %s (right)",
	imageRectLeft->encoding.c_str(), imageRectRight->encoding.c_str());

if(!imageRectLeft->image.empty() && !imageRectRight->image.empty())

	static bool shown = false;

	ROS_WARN("Detected baseline (%f m) is quite large! Is your "
		"right camera_info P(0,3) correctly set? Note that "
		"baseline=-P(0,3)/P(0,0). This warning is printed only once.",
ROS_WARN("Odom: input images empty?!?");

int imageWidth = imageMsg->image.cols;
int imageHeight = imageMsg->image.rows;
int depthWidth = depthMsg->image.cols;
int depthHeight = depthMsg->image.rows;

UASSERT_MSG(
	imageWidth % depthWidth == 0 && imageHeight % depthHeight == 0 &&
	imageWidth/depthWidth == imageHeight/depthHeight,
	uFormat("rgb=%dx%d depth=%dx%d", imageWidth, imageHeight, depthWidth, depthHeight).c_str());

ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8 and "
	"image_depth=32FC1,16UC1,mono16. Current rgb=%s and depth=%s",
	imageMsg->encoding.c_str(),
	depthMsg->encoding.c_str());
UASSERT(compressed.empty() || compressed.type() == CV_8UC1);

if(!compressed.empty())

	bytes.resize(compressed.cols * compressed.rows);
	memcpy(bytes.data(), compressed.data, bytes.size());

out = cv::Mat(1, bytes.size(), CV_8UC1, (void*)bytes.data());
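// Usage sketch (not from the original listing): flattening an already-compressed
// 1xN CV_8UC1 blob into a byte vector and wrapping it back; header path assumed
// to be rtabmap_ros/MsgConversion.h.
#include <rtabmap_ros/MsgConversion.h>
#include <opencv2/core/core.hpp>
#include <vector>

void compressedBytesRoundTrip(const cv::Mat & compressed)
{
	std::vector<unsigned char> bytes;
	rtabmap_ros::compressedMatToBytes(compressed, bytes);          // cv::Mat -> std::vector
	cv::Mat restored = rtabmap_ros::compressedMatFromBytes(bytes); // copies by default
}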
stat.setStamp(info.header.stamp.toSec());

std::map<int, float> mapIntFloat;
for(unsigned int i=0; i<info.posteriorKeys.size() && i<info.posteriorValues.size(); ++i)

	mapIntFloat.insert(std::pair<int, float>(info.posteriorKeys.at(i), info.posteriorValues.at(i)));

for(unsigned int i=0; i<info.likelihoodKeys.size() && i<info.likelihoodValues.size(); ++i)

	mapIntFloat.insert(std::pair<int, float>(info.likelihoodKeys.at(i), info.likelihoodValues.at(i)));

for(unsigned int i=0; i<info.rawLikelihoodKeys.size() && i<info.rawLikelihoodValues.size(); ++i)

	mapIntFloat.insert(std::pair<int, float>(info.rawLikelihoodKeys.at(i), info.rawLikelihoodValues.at(i)));

std::map<int, int> mapIntInt;
for(unsigned int i=0; i<info.weightsKeys.size() && i<info.weightsValues.size(); ++i)

	mapIntInt.insert(std::pair<int, int>(info.weightsKeys.at(i), info.weightsValues.at(i)));

for(unsigned int i=0; i<info.statsKeys.size() && i<info.statsValues.size(); i++)

	stat.addStatistic(info.statsKeys.at(i), info.statsValues.at(i));
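// Usage sketch (not from the original listing): rebuilding rtabmap::Statistics from an
// incoming Info message and serializing it back, using the conversions shown above.
#include <rtabmap_ros/MsgConversion.h>
#include <rtabmap_ros/Info.h>

void infoRoundTrip(const rtabmap_ros::Info & infoMsg)
{
	rtabmap::Statistics stat;
	rtabmap_ros::infoFromROS(infoMsg, stat); // fills posterior/likelihood/weights/statistics maps

	rtabmap_ros::Info back;
	rtabmap_ros::infoToROS(stat, back);
}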
cv::Mat information = cv::Mat(6,6,CV_64FC1, (void*)msg.information.data()).clone();

msg.fromId = link.from();
msg.toId = link.to();
msg.type = link.type();

memcpy(msg.information.data(), link.infMatrix().data, 36*sizeof(double));
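// Usage sketch (not from the original listing): a graph link keeps its 6x6 information
// matrix across the ROS message round trip; message header path assumed.
#include <rtabmap_ros/MsgConversion.h>
#include <rtabmap_ros/Link.h>

void linkRoundTrip(const rtabmap_ros::Link & msg)
{
	rtabmap::Link link = rtabmap_ros::linkFromROS(msg); // copies the 36 doubles into a cv::Mat
	rtabmap_ros::Link back;
	rtabmap_ros::linkToROS(link, back);
}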
return cv::KeyPoint(msg.pt.x, msg.pt.y, msg.size, msg.angle, msg.response, msg.octave, msg.class_id);

msg.angle = kpt.angle;
msg.class_id = kpt.class_id;
msg.octave = kpt.octave;
msg.response = kpt.response;

std::vector<cv::KeyPoint> v(msg.size());
for(unsigned int i=0; i<msg.size(); ++i)

void keypointsToROS(const std::vector<cv::KeyPoint> & kpts, std::vector<rtabmap_ros::KeyPoint> & msg)

	msg.resize(kpts.size());
	for(unsigned int i=0; i<msg.size(); ++i)
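// Usage sketch (not from the original listing): converting OpenCV keypoints to
// rtabmap_ros::KeyPoint messages and back with the vector helpers above.
#include <rtabmap_ros/MsgConversion.h>
#include <opencv2/features2d/features2d.hpp>
#include <vector>

void keypointExample(const std::vector<cv::KeyPoint> & kpts)
{
	std::vector<rtabmap_ros::KeyPoint> msg;
	rtabmap_ros::keypointsToROS(kpts, msg);
	std::vector<cv::KeyPoint> back = rtabmap_ros::keypointsFromROS(msg);
}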
return cv::Point2f(msg.x, msg.y);

std::vector<cv::Point2f> points2fFromROS(const std::vector<rtabmap_ros::Point2f> & msg)

	std::vector<cv::Point2f> v(msg.size());
	for(unsigned int i=0; i<msg.size(); ++i)

void points2fToROS(const std::vector<cv::Point2f> & kpts, std::vector<rtabmap_ros::Point2f> & msg)

	msg.resize(kpts.size());
	for(unsigned int i=0; i<msg.size(); ++i)
return cv::Point3f(msg.x, msg.y, msg.z);

std::vector<cv::Point3f> points3fFromROS(const std::vector<rtabmap_ros::Point3f> & msg)

	std::vector<cv::Point3f> v(msg.size());
	for(unsigned int i=0; i<msg.size(); ++i)

void points3fToROS(const std::vector<cv::Point3f> & kpts, std::vector<rtabmap_ros::Point3f> & msg)

	msg.resize(kpts.size());
	for(unsigned int i=0; i<msg.size(); ++i)
	const sensor_msgs::CameraInfo & camInfo,

UASSERT(camInfo.K.empty() || camInfo.K.size() == 9);
if(!camInfo.K.empty())

	K = cv::Mat(3, 3, CV_64FC1);
	memcpy(K.data, camInfo.K.elems, 9*sizeof(double));

if(camInfo.D.size()>=4 &&

	D = cv::Mat::zeros(1, 6, CV_64FC1);
	D.at<double>(0,0) = camInfo.D[0];
	D.at<double>(0,1) = camInfo.D[1];
	D.at<double>(0,4) = camInfo.D[2];
	D.at<double>(0,5) = camInfo.D[3];

	D = cv::Mat(1, camInfo.D.size(), CV_64FC1);
	memcpy(D.data, camInfo.D.data(), D.cols*sizeof(double));

UASSERT(camInfo.R.empty() || camInfo.R.size() == 9);
if(!camInfo.R.empty())

	R = cv::Mat(3, 3, CV_64FC1);
	memcpy(R.data, camInfo.R.elems, 9*sizeof(double));

UASSERT(camInfo.P.empty() || camInfo.P.size() == 12);
if(!camInfo.P.empty())

	P = cv::Mat(3, 4, CV_64FC1);
	memcpy(P.data, camInfo.P.elems, 12*sizeof(double));

	cv::Size(camInfo.width, camInfo.height),
	sensor_msgs::CameraInfo & camInfo)

if(model.K_raw().empty())

	memset(camInfo.K.elems, 0.0, 9*sizeof(double));

	memcpy(camInfo.K.elems, model.K_raw().data, 9*sizeof(double));
if(model.D_raw().cols == 6)
	camInfo.D = std::vector<double>(4);
	camInfo.D[0] = model.D_raw().at<double>(0,0);
	camInfo.D[1] = model.D_raw().at<double>(0,1);
	camInfo.D[2] = model.D_raw().at<double>(0,4);
	camInfo.D[3] = model.D_raw().at<double>(0,5);
	camInfo.distortion_model = "equidistant";

	camInfo.D = std::vector<double>(model.D_raw().cols);
	memcpy(camInfo.D.data(), model.D_raw().data, model.D_raw().cols*sizeof(double));
	if(camInfo.D.size() > 5)

		camInfo.distortion_model = "rational_polynomial";

		camInfo.distortion_model = "plumb_bob";
UASSERT(model.R().empty() || model.R().total() == 9);
if(model.R().empty())

	memset(camInfo.R.elems, 0.0, 9*sizeof(double));

	memcpy(camInfo.R.elems, model.R().data, 9*sizeof(double));

UASSERT(model.P().empty() || model.P().total() == 12);
if(model.P().empty())

	memset(camInfo.P.elems, 0.0, 12*sizeof(double));

	memcpy(camInfo.P.elems, model.P().data, 12*sizeof(double));

camInfo.binning_x = 1;
camInfo.binning_y = 1;
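// Usage sketch (not from the original listing): converting a sensor_msgs::CameraInfo to a
// rtabmap::CameraModel and back; the local transform argument defaults to identity.
#include <rtabmap_ros/MsgConversion.h>
#include <sensor_msgs/CameraInfo.h>

void cameraInfoExample(const sensor_msgs::CameraInfo & camInfo)
{
	rtabmap::CameraModel model = rtabmap_ros::cameraModelFromROS(camInfo);

	sensor_msgs::CameraInfo back;
	rtabmap_ros::cameraModelToROS(model, back); // fills K/D/R/P and the distortion_model string
}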
	const sensor_msgs::CameraInfo & leftCamInfo,
	const sensor_msgs::CameraInfo & rightCamInfo,
	const rtabmap_ros::MapData & msg,
	std::map<int, rtabmap::Transform> & poses,
	std::multimap<int, rtabmap::Link> & links,
	std::map<int, rtabmap::Signature> & signatures,

for(unsigned int i=0; i<msg.nodes.size(); ++i)

	signatures.insert(std::make_pair(msg.nodes[i].id, nodeDataFromROS(msg.nodes[i])));
	const std::map<int, rtabmap::Transform> & poses,
	const std::multimap<int, rtabmap::Link> & links,
	const std::map<int, rtabmap::Signature> & signatures,
	rtabmap_ros::MapData & msg)

msg.nodes.resize(signatures.size());
for(std::map<int, rtabmap::Signature>::const_iterator iter = signatures.begin();
	iter!=signatures.end();
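// Usage sketch (not from the original listing): packing an optimized graph and its
// signatures into a MapData message before publishing; the publisher is hypothetical.
#include <rtabmap_ros/MsgConversion.h>
#include <rtabmap_ros/MapData.h>
#include <ros/ros.h>

void publishMapData(
		const std::map<int, rtabmap::Transform> & poses,
		const std::multimap<int, rtabmap::Link> & links,
		const std::map<int, rtabmap::Signature> & signatures,
		const rtabmap::Transform & mapToOdom,
		ros::Publisher & mapDataPub)
{
	rtabmap_ros::MapData msg;
	rtabmap_ros::mapDataToROS(poses, links, signatures, mapToOdom, msg);
	mapDataPub.publish(msg);
}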
	const rtabmap_ros::MapGraph & msg,
	std::map<int, rtabmap::Transform> & poses,
	std::multimap<int, rtabmap::Link> & links,

UASSERT(msg.posesId.size() == msg.poses.size());
for(unsigned int i=0; i<msg.posesId.size(); ++i)

for(unsigned int i=0; i<msg.links.size(); ++i)

	links.insert(std::make_pair(msg.links[i].fromId, linkFromROS(msg.links[i])));

	const std::map<int, rtabmap::Transform> & poses,
	const std::multimap<int, rtabmap::Link> & links,
	rtabmap_ros::MapGraph & msg)

msg.posesId.resize(poses.size());
msg.poses.resize(poses.size());
for(std::map<int, rtabmap::Transform>::const_iterator iter = poses.begin();

	msg.posesId[index] = iter->first;

msg.links.resize(links.size());
for(std::multimap<int, rtabmap::Link>::const_iterator iter = links.begin();

	linkToROS(iter->second, msg.links[index++]);
std::multimap<int, cv::KeyPoint> words;
std::multimap<int, cv::Point3f> words3D;
std::multimap<int, cv::Mat> wordsDescriptors;
pcl::PointCloud<pcl::PointXYZ> cloud;

if(msg.wordPts.data.size() &&
   msg.wordPts.height*msg.wordPts.width == msg.wordIds.size())

for(unsigned int i=0; i<msg.wordIds.size() && i<msg.wordKpts.size(); ++i)

	int wordId = msg.wordIds.at(i);
	words.insert(std::make_pair(wordId, pt));

	words3D.insert(std::make_pair(wordId, cv::Point3f(cloud[i].x, cloud[i].y, cloud[i].z)));

	if(i < descriptors.rows)

		wordsDescriptors.insert(std::make_pair(wordId, descriptors.row(i).clone()));

if(words3D.size() && words3D.size() != words.size())

	ROS_ERROR("Words 2D and 3D should be the same size (%d, %d)!", (int)words.size(), (int)words3D.size());
std::vector<rtabmap::CameraModel> models;
if(msg.baseline > 0.0f)

	if(msg.fx.size() == 1 &&
	   msg.fy.size() == 1 &&
	   msg.cx.size() == 1 &&
	   msg.cy.size() == 1 &&
	   msg.width.size() == 1 &&
	   msg.height.size() == 1 &&
	   msg.localTransform.size() == 1)

		cv::Size(msg.width[0], msg.height[0]));
	msg.fx.size() == msg.fy.size() &&
	msg.fx.size() == msg.cx.size() &&
	msg.fx.size() == msg.cy.size() &&
	msg.fx.size() == msg.localTransform.size())
for(unsigned int i=0; i<msg.fx.size(); ++i)

		cv::Size(msg.width[i], msg.height[i])));

stereoModel.isValidForProjection()?

	msg.laserScanMaxRange,

	msg.laserScanMaxRange,

s.setWords3(words3D);
s.setWordsDescriptors(wordsDescriptors);
s.sensorData().setOccupancyGrid(

s.sensorData().setGPS(rtabmap::GPS(msg.gps.stamp, msg.gps.longitude, msg.gps.latitude, msg.gps.altitude, msg.gps.error, msg.gps.bearing));
msg.id = signature.id();
msg.mapId = signature.mapId();

msg.localTransform.resize(1);

msg.wordKpts.resize(signature.getWords().size());
for(std::multimap<int, cv::KeyPoint>::const_iterator jter=signature.getWords().begin();

pcl::PointCloud<pcl::PointXYZ> cloud;
cloud.resize(signature.getWords3().size());
for(std::multimap<int, cv::Point3f>::const_iterator jter=signature.getWords3().begin();

	cloud[index].x = jter->second.x;
	cloud[index].y = jter->second.y;
	cloud[index++].z = jter->second.z;

ROS_ERROR("Words 2D and words 3D must have the same size (%d vs %d)!",

cv::Mat descriptors(

for(std::multimap<int, cv::Mat>::const_iterator jter=signature.getWordsDescriptors().begin();

	if(jter->second.cols == descriptors.cols &&
	   jter->second.type() == descriptors.type())

		jter->second.copyTo(descriptors.row(index++));

	ROS_ERROR("Some descriptors have different type/size! Cannot copy them...");

ROS_ERROR("Words and descriptors must have the same size (%d vs %d)!",

msg.id = signature.id();
msg.mapId = signature.mapId();
info.lost = msg.lost;

info.reg.covariance = cv::Mat(6,6,CV_64FC1, (void*)msg.covariance.data()).clone();

info.stamp = msg.stamp;

info.type = msg.type;

UASSERT(msg.wordsKeys.size() == msg.wordsValues.size());
for(unsigned int i=0; i<msg.wordsKeys.size(); ++i)

UASSERT(msg.localMapKeys.size() == msg.localMapValues.size());
for(unsigned int i=0; i<msg.localMapKeys.size(); ++i)

msg.lost = info.lost;

memcpy(msg.covariance.data(), info.reg.covariance.data, 36*sizeof(double));

msg.stamp = info.stamp;

msg.type = info.type;
if(!dataMsg.data.empty())

	if(dataMsg.cols > 0 && dataMsg.rows > 0 && dataMsg.type >= 0)

		data = cv::Mat(dataMsg.rows, dataMsg.cols, dataMsg.type, (void*)dataMsg.data.data()).clone();

		if(dataMsg.cols != (int)dataMsg.data.size() || dataMsg.rows != 1 || dataMsg.type != CV_8UC1)

			ROS_ERROR("cols, rows and type fields of the UserData msg "
				"are not correctly set (cols=%d, rows=%d, type=%d)! We assume that the data "
				"is compressed (cols=%d, rows=1, type=%d(CV_8UC1)).",
				dataMsg.cols, dataMsg.rows, dataMsg.type, (int)dataMsg.data.size(), CV_8UC1);

		data = cv::Mat(1, dataMsg.data.size(), CV_8UC1, (void*)dataMsg.data.data()).clone();

void userDataToROS(const cv::Mat & data, rtabmap_ros::UserData & dataMsg, bool compress)

	dataMsg.cols = dataMsg.data.size();
	dataMsg.type = CV_8UC1;

	dataMsg.data.resize(data.step[0] * data.rows);
	memcpy(dataMsg.data.data(), data.data, dataMsg.data.size());
	dataMsg.rows = data.rows;
	dataMsg.cols = data.cols;
	dataMsg.type = data.type();
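// Usage sketch (not from the original listing): wrapping arbitrary user data in a
// rtabmap_ros::UserData message; with compress=true the payload is stored as a
// 1xN CV_8UC1 blob, and userDataFromROS recovers a cv::Mat either way.
#include <rtabmap_ros/MsgConversion.h>
#include <rtabmap_ros/UserData.h>

void userDataRoundTrip(const cv::Mat & data)
{
	rtabmap_ros::UserData msg;
	rtabmap_ros::userDataToROS(data, msg, true);
	cv::Mat back = rtabmap_ros::userDataFromROS(msg);
}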
	const std::string & fromFrameId,
	const std::string & toFrameId,
	double waitForTransform)

if(waitForTransform > 0.0 && !stamp.isZero())

	std::string errorMsg;

	ROS_WARN("Could not get transform from %s to %s after %f seconds (for stamp=%f)! Error=\"%s\".",
		fromFrameId.c_str(), toFrameId.c_str(), waitForTransform, stamp.toSec(), errorMsg.c_str());

ROS_WARN("(getting transform %s -> %s) %s", fromFrameId.c_str(), toFrameId.c_str(), ex.what());
	const std::string & sourceTargetFrame,
	const std::string & fixedFrame,
	double waitForTransform)

ros::Time stamp = stampSource>stampTarget?stampSource:stampTarget;
if(waitForTransform > 0.0 && !stamp.isZero())

	std::string errorMsg;

	ROS_WARN("Could not get transform from %s to %s according to %s after %f seconds (for stamps=%f -> %f)! Error=\"%s\".",
		sourceTargetFrame.c_str(), sourceTargetFrame.c_str(), fixedFrame.c_str(), waitForTransform, stampSource.toSec(), stampTarget.toSec(), errorMsg.c_str());

listener.lookupTransform(sourceTargetFrame, stampTarget, sourceTargetFrame, stampSource, fixedFrame, tmp);

ROS_WARN("(getting transform movement of %s according to fixed %s) %s", sourceTargetFrame.c_str(), fixedFrame.c_str(), ex.what());
	const std::vector<cv_bridge::CvImageConstPtr> & imageMsgs,
	const std::vector<cv_bridge::CvImageConstPtr> & depthMsgs,
	const std::vector<sensor_msgs::CameraInfo> & cameraInfoMsgs,
	const std::string & frameId,
	const std::string & odomFrameId,
	std::vector<rtabmap::CameraModel> & cameraModels,
	double waitForTransform)

	(imageMsgs.size() == depthMsgs.size() || depthMsgs.empty()) &&
	imageMsgs.size() == cameraInfoMsgs.size());

int imageWidth = imageMsgs[0]->image.cols;
int imageHeight = imageMsgs[0]->image.rows;
int depthWidth = depthMsgs.size()?depthMsgs[0]->image.cols:0;
int depthHeight = depthMsgs.size()?depthMsgs[0]->image.rows:0;

if(depthMsgs.size())

	UASSERT_MSG(
		imageWidth % depthWidth == 0 && imageHeight % depthHeight == 0 &&
		imageWidth/depthWidth == imageHeight/depthHeight,
		uFormat("rgb=%dx%d depth=%dx%d", imageWidth, imageHeight, depthWidth, depthHeight).c_str());

int cameraCount = imageMsgs.size();
for(unsigned int i=0; i<imageMsgs.size(); ++i)

	ROS_ERROR("Input rgb type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8. Current rgb=%s",
		imageMsgs[i]->encoding.c_str());

	if(depthMsgs.size() &&

	ROS_ERROR("Input depth type must be image_depth=32FC1,16UC1,mono16. Current depth=%s",
		depthMsgs[i]->encoding.c_str());
	UASSERT_MSG(imageMsgs[i]->image.cols == imageWidth && imageMsgs[i]->image.rows == imageHeight,
		uFormat("imageWidth=%d vs %d imageHeight=%d vs %d",
			imageWidth,
			imageMsgs[i]->image.cols,
			imageHeight,
			imageMsgs[i]->image.rows).c_str());

	if(depthMsgs.size())

		UASSERT_MSG(depthMsgs[i]->image.cols == depthWidth && depthMsgs[i]->image.rows == depthHeight,
			uFormat("depthWidth=%d vs %d depthHeight=%d vs %d",
				depthWidth,
				depthMsgs[i]->image.cols,
				depthHeight,
				depthMsgs[i]->image.rows).c_str());
	stamp = depthMsgs[i]->header.stamp;

	stamp = imageMsgs[i]->header.stamp;

	if(localTransform.isNull())

		ROS_ERROR("TF of received image %d at time %fs is not set!", i, stamp.toSec());

	if(!odomFrameId.empty() && odomStamp != stamp)

		ROS_WARN("Could not get odometry value for depth image stamp (%fs). Latest odometry "
			"stamp is %fs. The depth image pose will not be synchronized with odometry.", stamp.toSec(), odomStamp.toSec());

		localTransform = sensorT * localTransform;

		rgb = cv::Mat(imageHeight, imageWidth*cameraCount, ptrImage->image.type());

	if(ptrImage->image.type() == rgb.type())

		ptrImage->image.copyTo(cv::Mat(rgb, cv::Rect(i*imageWidth, 0, imageWidth, imageHeight)));

		ROS_ERROR("Some RGB images are not the same type!");

	if(depthMsgs.size())

		cv::Mat subDepth = ptrDepth->image;

			depth = cv::Mat(depthHeight, depthWidth*cameraCount, subDepth.type());

		if(subDepth.type() == depth.type())

			subDepth.copyTo(cv::Mat(depth, cv::Rect(i*depthWidth, 0, depthWidth, depthHeight)));

			ROS_ERROR("Some Depth images are not the same type!");
	const sensor_msgs::CameraInfo& leftCamInfoMsg,
	const sensor_msgs::CameraInfo& rightCamInfoMsg,
	const std::string & frameId,
	const std::string & odomFrameId,
	double waitForTransform)

UASSERT(leftImageMsg.get() && rightImageMsg.get());

	ROS_ERROR("Input type must be image=mono8,mono16,rgb8,bgr8,bgra8,rgba8. Current left=%s and right=%s",
		leftImageMsg->encoding.c_str(),
		rightImageMsg->encoding.c_str());

rtabmap::Transform localTransform = getTransform(frameId, leftImageMsg->header.frame_id, leftImageMsg->header.stamp, listener, waitForTransform);
if(localTransform.isNull())

if(!odomFrameId.empty() && odomStamp != leftImageMsg->header.stamp)

		leftImageMsg->header.stamp,

		ROS_WARN("Could not get odometry value for stereo msg stamp (%fs). Latest odometry "
			"stamp is %fs. The stereo image pose will not be synchronized with odometry.", leftImageMsg->header.stamp.toSec(), odomStamp.toSec());

	localTransform = sensorT * localTransform;

static bool shown = false;

	ROS_WARN("Detected baseline (%f m) is quite large! Is your "
		"right camera_info P(0,3) correctly set? Note that "
		"baseline=-P(0,3)/P(0,0). You may need to calibrate your camera. "
		"This warning is printed only once.",
	const sensor_msgs::LaserScanConstPtr& scan2dMsg,
	const std::string & frameId,
	const std::string & odomFrameId,
	double waitForTransform)

	odomFrameId.empty()?frameId:odomFrameId,
	scan2dMsg->header.frame_id,
	scan2dMsg->header.stamp + ros::Duration().fromSec(scan2dMsg->ranges.size()*scan2dMsg->time_increment),

	scan2dMsg->header.frame_id,
	scan2dMsg->header.stamp,

if(scanLocalTransform.isNull())

sensor_msgs::PointCloud2 scanOut;

pcl::PointCloud<pcl::PointXYZ>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZ>);

pclScan->is_dense = true;

	scan2dMsg->header.frame_id,
	odomFrameId.empty()?frameId:odomFrameId,
	scan2dMsg->header.stamp,

if(!odomFrameId.empty() && odomStamp != scan2dMsg->header.stamp)

		scan2dMsg->header.stamp,

	ROS_WARN("Could not get odometry value for laser scan stamp (%fs). Latest odometry "
		"stamp is %fs. The laser scan pose will not be synchronized with odometry.", scan2dMsg->header.stamp.toSec(), odomStamp.toSec());

	scanLocalTransform = sensorT * scanLocalTransform;
	const sensor_msgs::PointCloud2ConstPtr & scan3dMsg,
	const std::string & frameId,
	const std::string & odomFrameId,
	double waitForTransform)

bool containNormals = false;
bool containColors = false;
for(unsigned int i=0; i<scan3dMsg->fields.size(); ++i)

	if(scan3dMsg->fields[i].name.compare("normal_x") == 0)

		containNormals = true;

	if(scan3dMsg->fields[i].name.compare("rgb") == 0 || scan3dMsg->fields[i].name.compare("rgba") == 0)

		containColors = true;

scanLocalTransform = getTransform(frameId, scan3dMsg->header.frame_id, scan3dMsg->header.stamp, listener, waitForTransform);
if(scanLocalTransform.isNull())

	ROS_ERROR("TF of received scan cloud at time %fs is not set, aborting rtabmap update.", scan3dMsg->header.stamp.toSec());

if(!odomFrameId.empty() && odomStamp != scan3dMsg->header.stamp)

		scan3dMsg->header.stamp,

	ROS_WARN("Could not get odometry value for laser scan stamp (%fs). Latest odometry "
		"stamp is %fs. The 3d laser scan pose will not be synchronized with odometry.", scan3dMsg->header.stamp.toSec(), odomStamp.toSec());

	scanLocalTransform = sensorT * scanLocalTransform;

	pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZRGBNormal>);
	if(!pclScan->is_dense)

	pcl::PointCloud<pcl::PointNormal>::Ptr pclScan(new pcl::PointCloud<pcl::PointNormal>);
	if(!pclScan->is_dense)

	pcl::PointCloud<pcl::PointXYZRGB>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZRGB>);
	if(!pclScan->is_dense)

	pcl::PointCloud<pcl::PointXYZ>::Ptr pclScan(new pcl::PointCloud<pcl::PointXYZ>);
	if(!pclScan->is_dense)
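// Usage sketch (not from the original listing): feeding an incoming PointCloud2 through
// convertScan3dMsg; the frame names are placeholders, and an empty odometry frame skips
// the odometry-correction branch shown above.
#include <rtabmap_ros/MsgConversion.h>
#include <tf/transform_listener.h>

void scanCallback(const sensor_msgs::PointCloud2ConstPtr & cloudMsg, tf::TransformListener & listener)
{
	cv::Mat scan;
	rtabmap::Transform scanLocalTransform;
	if(rtabmap_ros::convertScan3dMsg(cloudMsg, "base_link", "", ros::Time(0), scan, scanLocalTransform, listener, 0.1))
	{
		// scan now holds the cloud in rtabmap's scan format, with its local transform in base_link
	}
}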