CameraMobile.cpp (rtabmap) - source listing.
const rtabmap::Transform CameraMobile::opticalRotation(
     0.0f,  0.0f,  1.0f, 0.0f,
    -1.0f,  0.0f,  0.0f, 0.0f,
     0.0f, -1.0f,  0.0f, 0.0f);
const rtabmap::Transform CameraMobile::opticalRotationInv(
     0.0f, -1.0f,  0.0f, 0.0f,
     0.0f,  0.0f, -1.0f, 0.0f,
     1.0f,  0.0f,  0.0f, 0.0f);
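// Illustrative sketch (not part of this file): the two constants above are
// mutual inverses, so composing them must yield identity. Assumes
// rtabmap::Transform's operator* and isIdentity(), both public API.
static void checkOpticalRotations()
{
    rtabmap::Transform composed =
        CameraMobile::opticalRotation * CameraMobile::opticalRotationInv;
    UASSERT(composed.isIdentity());
}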
CameraMobile::CameraMobile(bool smoothing, float upstreamRelocalizationAccThr) :
    deviceTColorCamera_(Transform::getIdentity()),
    // ...
    uvs_initialized_(false),
    stampEpochOffset_(0.0),
    smoothing_(smoothing),
    // ...
    upstreamRelocalizationAccThr_(upstreamRelocalizationAccThr),
    previousAnchorStamp_(0.0),
    dataGoodTracking_(true)
{
    // ...
}
bool CameraMobile::getPose(double epochStamp, Transform & pose, cv::Mat & covariance, double maxWaitTime)
{
    // ...
    int maxWaitTimeMs = maxWaitTime * 1000;
    // ...
    // Poll until a pose at or after the requested stamp arrives, or timeout.
    while(maxWaitTimeMs > 0 &&
          poseBuffer_.rbegin()->first < epochStamp &&
          waitTry < maxWaitTimeMs)
    {
        // ...
    }
    // ...
    if(maxWaitTimeMs > 0)
    {
        UWARN("Could not find poses to interpolate at time %f after waiting %d ms (latest is %f)...",
              epochStamp, maxWaitTimeMs, poseBuffer_.rbegin()->first);
    }
    else
    {
        UWARN("Could not find poses to interpolate at time %f (latest is %f)...",
              epochStamp, poseBuffer_.rbegin()->first);
    }
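// Standalone sketch of the poll-with-timeout pattern used above (waitForStamp()
// is a hypothetical helper; uSleep() is the utility this file uses). Each retry
// sleeps ~1 ms, so maxWaitTimeMs doubles as the retry budget.
static bool waitForStamp(const std::map<double, rtabmap::Transform> & buffer,
                         double epochStamp, int maxWaitTimeMs)
{
    int waitTry = 0;
    while(maxWaitTimeMs > 0 && !buffer.empty() &&
          buffer.rbegin()->first < epochStamp && waitTry < maxWaitTimeMs)
    {
        uSleep(1); // the real code unlocks the buffer mutex around the sleep
        ++waitTry;
    }
    return !buffer.empty() && buffer.rbegin()->first >= epochStamp;
}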
    std::map<double, Transform>::const_iterator iterB = poseBuffer_.lower_bound(epochStamp);
    std::map<double, Transform>::const_iterator iterA = iterB;
    // ...
    if(iterA == iterB && epochStamp == iterA->first)
    {
        // Exact hit: no interpolation needed.
        pose = iterA->second;
    }
    else if(epochStamp >= iterA->first && epochStamp <= iterB->first)
    {
        // Linearly interpolate between the two bracketing poses.
        pose = iterA->second.interpolate(
            (epochStamp - iterA->first) / (iterB->first - iterA->first),
            iterB->second);
    }
    else
    {
        UWARN("Could not find pose data to interpolate at time %f (earliest is %f). Are sensors synchronized?",
              epochStamp, iterA->first);
    }
    bool showLog = false;
    // ...
    std::vector<float> currentLinearVelocity(3);
    // ...
    currentLinearVelocity[0] = dx / dt;
    currentLinearVelocity[1] = dy / dt;
    currentLinearVelocity[2] = dz / dt;
    // ...
    float acceleration = sqrt(ax*ax + ay*ay + az*az);
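// Worked sketch of the quantity computed above (accelerationBetween() is a
// hypothetical helper): given two consecutive linear-velocity samples dt
// seconds apart, the acceleration magnitude is |v1 - v0| / dt, which is what
// gets compared against upstreamRelocalizationAccThr_.
#include <cmath>
#include <vector>

static float accelerationBetween(const std::vector<float> & v0,
                                 const std::vector<float> & v1,
                                 float dt)
{
    float ax = (v1[0] - v0[0]) / dt;
    float ay = (v1[1] - v0[1]) / dt;
    float az = (v1[2] - v0[2]) / dt;
    return std::sqrt(ax*ax + ay*ay + az*az); // m/s^2
}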
    UERROR("Upstream re-localization has been suppressed because of "
           "high acceleration detected (%f m/s^2) causing a jump!",
           acceleration);
    // Dump the re-localization debug buffer: one line per entry with the
    // timestamp, the original position and the corrected position.
    stream << iter->first
           << " " << iter->second.first.x()
           << " " << iter->second.first.y()
           << " " << iter->second.first.z()
           << " " << iter->second.second.x()
           << " " << iter->second.second.y()
           << " " << iter->second.second.z() << std::endl;
    // ...
    UERROR("timestamp original_xyz corrected_xyz:\n%s", stream.str().c_str());
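// Each buffered entry therefore prints as one whitespace-separated line of
// seven numbers (values below illustrative):
//
//   1733023362.042 1.203 0.514 0.880 1.198 0.507 0.874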
    // Convert the captured frame to RGBA and upload it as an OpenGL texture.
    cv::cvtColor(data.imageRaw(), rgbImage, cv::COLOR_BGR2RGBA);
    // ...
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // ...
    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
    // ...
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, rgbImage.cols, rgbImage.rows, 0, GL_RGBA, GL_UNSIGNED_BYTE, rgbImage.data);
    // ...
    GLint error = glGetError();
    if(error != GL_NO_ERROR)
    {
        LOGE("OpenGL: Could not allocate texture (0x%x)\n", error);
    }
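// Minimal sketch of the setup the calls above rely on (done elsewhere in the
// real code; makeVideoTexture() is hypothetical): a texture object must be
// generated and bound before glTexParameteri() and glTexImage2D() apply to it.
static GLuint makeVideoTexture()
{
    GLuint textureId = 0;
    glGenTextures(1, &textureId);            // allocate a texture name
    glBindTexture(GL_TEXTURE_2D, textureId); // make it the GL_TEXTURE_2D target
    return textureId;
}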
    LOGE("To use CameraMobile::updateOnRender(), CameraMobile::updateDataOnRender() "
         "should be overridden by inherited classes. Returning empty data!\n");
    // Branches below (labels inferred) rotate the data to match
    // colorCameraToDisplayRotation_.

    // 90° case: transpose images, swap the model's width/height, and
    // post-rotate the camera local transform.
    cv::transpose(rgb, rgbt);
    // ...
    cv::transpose(depth, deptht);
    // ...
    cv::Size sizet(model.imageHeight(), model.imageWidth());
    model = CameraModel(
        /* ...rotated intrinsics... */
        model.localTransform() * rtabmap::Transform(0,-1,0,0, 1,0,0,0, 0,0,1,0));
    model.setImageSize(sizet);
    // ...
    for(size_t i=0; i<keypoints.size(); ++i)
    {
        // ... remap each keypoint into the rotated image frame
    }

    // 180° case: flip the images; the image size is unchanged.
    cv::flip(depth, depth, 0);
    // ...
    cv::Size sizet(model.imageWidth(), model.imageHeight());
    // ...
    model.setImageSize(sizet);
    // ...
    for(size_t i=0; i<keypoints.size(); ++i)
    {
        // ...
    }

    // 270° case: mirror of the 90° case, with the opposite in-plane rotation.
    cv::flip(depth, depth, 1);
    // ...
    cv::Size sizet(model.imageHeight(), model.imageWidth());
    model = CameraModel(
        /* ...rotated intrinsics... */
        model.localTransform() * rtabmap::Transform(0,1,0,0, -1,0,0,0, 0,0,1,0));
    model.setImageSize(sizet);
    // ...
    for(size_t i=0; i<keypoints.size(); ++i)
    {
        // ...
    }
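// Illustrative sketch (plain OpenCV, not from this file) of the classic
// transpose/flip identities the branches above build on: 90° clockwise is
// transpose + horizontal flip, 180° is a flip around both axes, and 90°
// counter-clockwise is transpose + vertical flip.
static void rotateExamples(const cv::Mat & img)
{
    cv::Mat r90, r180, r270;
    cv::transpose(img, r90);
    cv::flip(r90, r90, 1);   // flipCode 1: around the y axis -> 90° clockwise
    cv::flip(img, r180, -1); // flipCode -1: both axes -> 180°
    cv::transpose(img, r270);
    cv::flip(r270, r270, 0); // flipCode 0: around the x axis -> 90° counter-clockwise
}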
    bool firstFrame = true;
    bool dataGoodTracking = true;
    // ...
    // Very large covariance on the first frame (don't trust the first link),
    // tiny afterwards.
    info->odomCovariance = cv::Mat::eye(6,6,CV_64FC1) * (firstFrame?9999.0:0.00001);
    // ...
    info->odomCovariance.at<double>(3,3) *= 0.01;
    info->odomCovariance.at<double>(4,4) *= 0.01;
    if(!dataGoodTracking)
    {
        UERROR("not good tracking!");
        // ...
        // Inflate translation and yaw when the platform reports bad tracking.
        info->odomCovariance.at<double>(0,0) *= 10;
        info->odomCovariance.at<double>(1,1) *= 10;
        info->odomCovariance.at<double>(2,2) *= 10;
        info->odomCovariance.at<double>(5,5) *= 10;
    }
    // ...
    info->odomPose = dataPose;
    // ...
    UWARN("CameraMobile::captureImage() invalid data!");
LaserScan CameraMobile::scanFromPointCloudData(
    const cv::Mat & pointCloudData,
    const Transform & pose,
    const CameraModel & model,
    const cv::Mat & rgb,
    std::vector<cv::KeyPoint> * kpts,
    std::vector<cv::Point3f> * kpts3D,
    int kptsSize)
{
    if(!pointCloudData.empty())
    {
        // Output scan: one row of float4 points (x, y, z, packed RGB).
        cv::Mat scanData(1, pointCloudData.cols, CV_32FC4);
        float * ptr = scanData.ptr<float>();
        const float * inPtr = pointCloudData.ptr<float>();
        int ic = pointCloudData.channels();
        UASSERT(pointCloudData.depth() == CV_32F && ic >= 3);
        // ...
        for(unsigned int i=0; i<(unsigned int)pointCloudData.cols; ++i)
        {
            cv::Point3f pt(inPtr[i*ic], inPtr[i*ic + 1], inPtr[i*ic + 2]);
            // ...
            ptr[oi*4 + 1] = pt.y;
            ptr[oi*4 + 2] = pt.z;
            // ...
            // Project the 3D point into the color image to fetch its color.
            model.reproject(pt.x, pt.y, pt.z, u, v);
            unsigned char r=255, g=255, b=255;
            // ...
            b = rgb.at<cv::Vec3b>(v,u).val[0];
            g = rgb.at<cv::Vec3b>(v,u).val[1];
            r = rgb.at<cv::Vec3b>(v,u).val[2];
            // ...
            kpts->push_back(cv::KeyPoint(u, v, kptsSize));
            // ...
            kpts3D->push_back(org);
            // ...
            // Pack the 8-bit BGR color into the bit pattern of the fourth float.
            *(int*)&ptr[oi*4 + 3] = int(b) | (int(g) << 8) | (int(r) << 16);
        }
    }
    // ...
}
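// Inverse of the packing above, as a self-contained sketch (unpackColor() is
// hypothetical): the color lives in the bit pattern of the fourth float, so it
// must be read back with memcpy, not a value conversion.
#include <cstdint>
#include <cstring>

static void unpackColor(float packed, unsigned char & r, unsigned char & g, unsigned char & b)
{
    std::int32_t bits;
    std::memcpy(&bits, &packed, sizeof(bits)); // reinterpret, avoiding aliasing UB
    b = bits & 0xFF;
    g = (bits >> 8) & 0xFF;
    r = (bits >> 16) & 0xFF;
}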
rtabmap - Author(s): Mathieu Labbe - autogenerated on Sun Dec 1 2024 03:42:42