reconstruction.hpp

#ifndef _RECONSTRUCTION_H_
#define _RECONSTRUCTION_H_

#include "general_resources.hpp"
#include "opencv_resources.hpp"
#include "ros_resources.hpp"
#include "camera.hpp"
#include "geometry.hpp"
#include "tracks.hpp"

#include <visualization_msgs/MarkerArray.h>

// SBA Includes
#include <sba/sba.h>
#include <sba/sba_file_io.h>
#include <sba/visualization.h>

#define _USE_MATH_DEFINES
#include <math.h>

#define DEFAULT_NEAR_RADIUS 0.3

using namespace sba;

// #include <pcl/visualization/cloud_viewer.h>

#define POSITION_LIMIT              1000
#define DEFAULT_MEAN_MAX_DIST       0.20
#define DEFAULT_MEAN_MODE           0
#define CLUSTER_MEAN_MODE           1
#define MAX_REPROJECTION_DISPARITY  20.0

#define EPSILON 0.00001
// #define __SFM__DEBUG__


void getTranslationBetweenCameras(cv::Mat& C1, cv::Mat& C2, double *translations);

void triangulateTracks(vector<featureTrack>& tracks, vector<unsigned int>& indices, cameraParameters& cameraData, cv::Mat *cameras, unsigned int earliest_index, unsigned int latest_index);

unsigned int putativelyTriangulateNewTracks(vector<featureTrack>& tracks, vector<unsigned int>& indices, cameraParameters& cameraData, cv::Mat *cameras, unsigned int earliest_index, unsigned int latest_index);
unsigned int addNewPoints(vector<featureTrack>& tracks, vector<unsigned int>& indices, cameraParameters& cameraData, cv::Mat *cameras, unsigned int earliest_index, unsigned int latest_index);

void findTriangulatableTracks(vector<featureTrack>& tracks, vector<unsigned int>& indices, vector<unsigned int>& cameras, unsigned int min_length = 10);
void findTriangulatableTracks3(vector<featureTrack>& tracks, vector<unsigned int>& indices, int latest_index = -1, unsigned int min_length = 10);

void filterNearPoints(vector<featureTrack>& featureTrackVector, double x, double y, double z, double limit = DEFAULT_NEAR_RADIUS);

// For the odometry node; avoids triangulating if pairs of camera views are not sufficiently spaced.
int initialTrackTriangulationDummy(vector<featureTrack>& tracks, vector<unsigned int>& indices, cameraParameters& cameraData, geometry_msgs::PoseStamped *keyframePoses, unsigned int keyframeCount, double minSeparation = 0.2, double maxSeparation = 1.0, int minEstimates = 3, double maxStandardDev = 0.1, bool handedness = false, int xCode = 0);
int initialTrackTriangulation(vector<featureTrack>& tracks, vector<unsigned int>& indices, cameraParameters& cameraData, geometry_msgs::PoseStamped *keyframePoses, unsigned int keyframeCount, double minSeparation = 0.2, double maxSeparation = 1.0, int minEstimates = 3, double maxStandardDev = 0.1, double maxReprojectionDisparity = MAX_REPROJECTION_DISPARITY);

void findP1Matrix(cv::Mat& P1, const cv::Mat& R, const cv::Mat& t);
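
// Illustrative sketch only (not the library implementation, which may differ):
// a 3x4 matrix of the form [R|t] can be assembled by horizontal concatenation,
// assuming R is 3x3 and t is 3x1, both CV_64F.
inline void exampleComposeP1(cv::Mat& P1, const cv::Mat& R, const cv::Mat& t) {
    cv::hconcat(R, t, P1);   // P1 = [R | t], size 3x4
}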

void reduceVectorsToTrackedPoints(const vector<cv::Point2f>& points1, vector<cv::Point2f>& trackedPoints1, const vector<cv::Point2f>& points2, vector<cv::Point2f>& trackedPoints2, vector<uchar>& statusVec);

int TriangulateNewTracks(vector<featureTrack>& trackVector, const int index1, const int index2, const cv::Mat& K, const cv::Mat& K_inv, const cv::Mat& P0, const cv::Mat& P1, bool initializeOnFocalPlane = false);
void ExtractPointCloud(vector<cv::Point3d>& cloud, vector<featureTrack>& trackVector);
void Triangulate(const cv::Point2f& pt1, const cv::Point2f& pt2, const cv::Mat& K, const cv::Mat& Kinv, const cv::Mat& P1, const cv::Mat& P2, cv::Point3d& xyzPoint, bool debug = false);

void LinearLSTriangulation(cv::Mat& dst, const cv::Point3d& u1, const cv::Mat& P1, const cv::Point3d& u2, const cv::Mat& P2);
void Triangulate_1(const cv::Point2d& pt1, const cv::Point2d& pt2, const cv::Mat& K, const cv::Mat& Kinv, const cv::Mat& P1, const cv::Mat& P2, cv::Point3d& xyzPoint, bool iterate = true);
void IterativeLinearLSTriangulation(cv::Mat& dst, const cv::Point3d& u1, const cv::Mat& P1, const cv::Point3d& u2, const cv::Mat& P2);
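
// Minimal sketch of DLT-style linear least-squares triangulation, shown as an
// illustration of the general technique (an assumption; not taken from the
// original LinearLSTriangulation code). u1 and u2 are points in normalized
// camera coordinates (z = 1); P1 and P2 are 3x4 CV_64F projection matrices.
inline cv::Point3d exampleTriangulateDLT(const cv::Point3d& u1, const cv::Mat& P1, const cv::Point3d& u2, const cv::Mat& P2) {
    cv::Mat A(4, 4, CV_64F);
    cv::Mat(u1.x * P1.row(2) - P1.row(0)).copyTo(A.row(0));
    cv::Mat(u1.y * P1.row(2) - P1.row(1)).copyTo(A.row(1));
    cv::Mat(u2.x * P2.row(2) - P2.row(0)).copyTo(A.row(2));
    cv::Mat(u2.y * P2.row(2) - P2.row(1)).copyTo(A.row(3));
    cv::Mat w, u, vt;
    cv::SVD::compute(A, w, u, vt);      // null-space vector of A is the homogeneous 3D point
    cv::Mat X = vt.row(3).t();
    double s = X.at<double>(3);         // dehomogenize
    return cv::Point3d(X.at<double>(0) / s, X.at<double>(1) / s, X.at<double>(2) / s);
}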


bool findClusterMean(const vector<cv::Point3d>& estimatedLocations, cv::Point3d& pt3d, int mode = DEFAULT_MEAN_MODE, int minEstimates = 3, double maxStandardDev = 0.1);
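
// Minimal sketch of the kind of gated mean such a function might compute (an
// assumption about its intent, not the original code): average the estimates,
// and reject the cluster if there are too few of them or the spread is too large.
inline bool exampleGatedMean(const vector<cv::Point3d>& estimates, cv::Point3d& mean, int minEstimates = 3, double maxStandardDev = 0.1) {
    if (((int) estimates.size()) < minEstimates) return false;
    cv::Point3d sum(0.0, 0.0, 0.0);
    for (size_t i = 0; i < estimates.size(); i++) sum += estimates[i];
    mean = sum * (1.0 / ((double) estimates.size()));
    double var = 0.0;
    for (size_t i = 0; i < estimates.size(); i++) {
        cv::Point3d d = estimates[i] - mean;
        var += d.dot(d);                 // accumulate squared distance from the mean
    }
    return (sqrt(var / ((double) estimates.size())) <= maxStandardDev);
}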

void TriangulatePoints(const vector<cv::Point2f>& pt_set1,
                       const vector<cv::Point2f>& pt_set2,
                       const cv::Mat& K,
                       const cv::Mat& Kinv,
                       const cv::Mat& P,
                       const cv::Mat& P1,
                       vector<cv::Point3d>& pointcloud,
                       vector<cv::Point2f>& correspImg1Pt);

void initializeP0(cv::Mat& P);
void getWandZ(cv::Mat& W, cv::Mat& Winv, cv::Mat& Z);
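
// For reference, the standard matrices used when decomposing an essential matrix
// (Hartley & Zisserman); getWandZ is assumed to produce something equivalent, but
// this sketch is illustrative only:
//   W = [0 -1 0; 1 0 0; 0 0 1],  Winv = W^T,  Z = [0 1 0; -1 0 0; 0 0 0]
inline void exampleGetWandZ(cv::Mat& W, cv::Mat& Winv, cv::Mat& Z) {
    W    = (cv::Mat_<double>(3, 3) <<  0.0, -1.0, 0.0,   1.0, 0.0, 0.0,   0.0, 0.0, 1.0);
    Winv = (cv::Mat_<double>(3, 3) <<  0.0,  1.0, 0.0,  -1.0, 0.0, 0.0,   0.0, 0.0, 1.0);
    Z    = (cv::Mat_<double>(3, 3) <<  0.0,  1.0, 0.0,  -1.0, 0.0, 0.0,   0.0, 0.0, 0.0);
}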

bool findBestReconstruction(const cv::Mat& P0, cv::Mat& P1, cv::Mat& R, cv::Mat& t, const cv::SVD& svd, const cv::Mat& K, const vector<cv::Point2f>& pts1, const vector<cv::Point2f>& pts2, bool useDefault = false);

int addToTracks(SysSBA& sys, int im1, vector<cv::Point2f>& pts1, int im2, vector<cv::Point2f>& pts2);

void addFixedCamera(SysSBA& sys, cameraParameters& cameraData, const cv::Mat& C);
void addNewCamera(SysSBA& sys, cameraParameters& cameraData, const cv::Mat& C);
void updateCameraNode_2(SysSBA& sys, const cv::Mat& R, const cv::Mat& t, int image_index);
void updateCameraNode_2(SysSBA& sys, const cv::Mat& C, int image_index);
void addBlankCamera(SysSBA& sys, cameraParameters& cameraData, bool isFixed = false);

double retrieveCameraPose(const SysSBA& sys, unsigned int idx, geometry_msgs::Pose& pose);
void retrieveCameraPose(const SysSBA& sys, unsigned int idx, cv::Mat& camera);
void retrieveAllCameras(cv::Mat *allCameraPoses, const SysSBA& sys);
void retrieveAllPoints(vector<cv::Point3d>& pts, const SysSBA& sys);
void updateTracks(vector<featureTrack>& trackVector, const SysSBA& sys);

void updateCameraNode(SysSBA& sys, cv::Mat R, cv::Mat t, int img1, int img2);
void printSystemSummary(SysSBA& sys);


float ReciprocalSqrt(float x);

// maxIndex specifies the latest frame for which tracks should be included
void assignTracksToSBA(SysSBA& sys, vector<featureTrack>& trackVector, int maxIndex);

void transfer3dPoint(const cv::Point3d& src, cv::Point3d& dst, const cv::Mat& C);

void transfer3DPoints(const vector<cv::Point3d>& src, vector<cv::Point3d>& dst, const cv::Mat& C);

void addNewPoints(SysSBA& sys, const vector<cv::Point3d>& pc);

double getRotationInDegrees(const cv::Mat& R);
double getDistanceInUnits(const cv::Mat& t);

double getQuaternionAngle(const Quaterniond& q1, const Quaterniond& q2);
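
// Sketch of the usual relative-rotation angle between two unit quaternions
// (assumed to match the intent of getQuaternionAngle, but not taken from it):
inline double exampleQuaternionAngle(const Quaterniond& q1, const Quaterniond& q2) {
    double d = fabs(q1.dot(q2));   // |cos(theta / 2)| of the relative rotation
    if (d > 1.0) d = 1.0;          // guard against numerical drift beyond the unit range
    return 2.0 * acos(d);
}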

void convertPoint3dToMat(const cv::Point3d& src, cv::Mat& dst);

Quaterniond defaultQuaternion();

void convertProjectionMatCVToEigen(const cv::Mat& mat, Eigen::Matrix<double, 3, 4>& m);
void convertProjectionMatEigenToCV(const Eigen::Matrix<double, 3, 4>& m, cv::Mat& mat);

double dotProduct(const cv::Mat& vec1, const cv::Mat& vec2);
double dotProduct(const Quaterniond& q1, const Quaterniond& q2);

void constrainDodgyPoints(SysSBA& sys);

//void convertFromMatToQuaternion(const Mat& mat, Quaterniond& quat);

void convertVec4dToMat(const Vector4d& vec4, cv::Mat& mat);

void findFourTransformations(cv::Mat *C, const cv::Mat& E, const cv::Mat& K, const vector<cv::Point2f>& pts1, const vector<cv::Point2f>& pts2);

// Cheirality tests: count how many of the points lie in front of the camera(s)
int pointsInFront(const cv::Mat& C1, const cv::Mat& C2, const vector<cv::Point3d>& pts);
int pointsInFront(const cv::Mat& C1, const cv::Mat& K, const vector<cv::Point2f>& pts1, const vector<cv::Point2f>& pts2);
void compileTransform(cv::Mat& c, const cv::Mat& R, const cv::Mat& t);
void combineTransforms(cv::Mat& CN, const cv::Mat& C0, const cv::Mat& C1);
void decomposeTransform(const cv::Mat& c, cv::Mat& R, cv::Mat& t);
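
// Sketch of one common convention for packing and unpacking a rigid-body
// transform (an assumption, not necessarily the convention used above): a 4x4
// homogeneous matrix with R (3x3, CV_64F) in the top-left block and t (3x1,
// CV_64F) in the last column.
inline void examplePackTransform(cv::Mat& C, const cv::Mat& R, const cv::Mat& t) {
    C = cv::Mat::eye(4, 4, CV_64F);
    R.copyTo(C(cv::Rect(0, 0, 3, 3)));   // rotation block
    t.copyTo(C(cv::Rect(3, 0, 1, 3)));   // translation column
}
inline void exampleUnpackTransform(const cv::Mat& C, cv::Mat& R, cv::Mat& t) {
    C(cv::Rect(0, 0, 3, 3)).copyTo(R);
    C(cv::Rect(3, 0, 1, 3)).copyTo(t);
}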

double calcFrameScore(double geomScore, int numFeatures, int numTracks);

//double calcGeometryScore(vector<Point2f>& points1, vector<Point2f>& points2, Mat& F, Mat& Fmask, Mat& H, Mat& Hmask);
double calcGeometryScore(int numInliers_H, int numInliers_F, double sampsonError_H, double sampsonError_F);

double calcSampsonError(vector<cv::Point2f>& points1, vector<cv::Point2f>& points2, cv::Mat& H, cv::Mat& Hmask);

double calcSampsonDistance(cv::Point2f& pt1, cv::Point2f& pt2, cv::Mat& F);
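
// Reference formula for the Sampson distance of a correspondence under a
// fundamental matrix F (Hartley & Zisserman), with F assumed to be 3x3 CV_64F;
// calcSampsonDistance is assumed to compute something equivalent, but this
// sketch is not taken from it.
inline double exampleSampsonDistance(const cv::Point2f& pt1, const cv::Point2f& pt2, const cv::Mat& F) {
    cv::Mat x1 = (cv::Mat_<double>(3, 1) << pt1.x, pt1.y, 1.0);
    cv::Mat x2 = (cv::Mat_<double>(3, 1) << pt2.x, pt2.y, 1.0);
    cv::Mat Fx1 = F * x1, Ftx2 = F.t() * x2;
    double num = cv::Mat(x2.t() * F * x1).at<double>(0);   // epipolar constraint residual
    double den = Fx1.at<double>(0) * Fx1.at<double>(0) + Fx1.at<double>(1) * Fx1.at<double>(1)
               + Ftx2.at<double>(0) * Ftx2.at<double>(0) + Ftx2.at<double>(1) * Ftx2.at<double>(1);
    return (num * num) / den;
}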

double calcInlierGeometryDistance(vector<cv::Point2f>& points1, vector<cv::Point2f>& points2, cv::Mat& mat, cv::Mat& mask, int distMethod = SAMPSON_DISTANCE);

void assignIntrinsicsToP0(cv::Mat& P0, const cv::Mat& K);

// Retrieves all of the points that were successfully tracked from idx1 to idx2
void getPointsFromTracks(vector<featureTrack>& tracks, vector<cv::Point2f>& pts1, vector<cv::Point2f>& pts2, int idx1, int idx2);
void getPoints3dFromTracks(vector<featureTrack>& tracks, vector<cv::Point3d>& cloud, int idx1 = -1, int idx2 = -1);

// Gets the 2D locations of points that have been tracked
void getCorrespondingPoints(vector<featureTrack>& tracks, const vector<cv::Point2f>& pts1, vector<cv::Point2f>& pts2, int idx0, int idx1);

// Given 3D co-ordinates and two image indices, finds the corresponding image points that observe as many
// of these as possible (all of them, if this is an intermediate point)
void findFeaturesForPoints(vector<featureTrack>& tracks, vector<cv::Point2f>& pts1, vector<cv::Point2f>& pts2, vector<cv::Point3d>& pts3d, int idx1, int idx2);

void estimateNewPose(vector<featureTrack>& tracks, cv::Mat& K, int idx, cv::Mat& pose);

void obtainAppropriateBaseTransformation(cv::Mat& C0, vector<featureTrack>& tracks);

void findCentroidAndSpread(vector<featureTrack>& tracks, cv::Point3d& centroid, cv::Point3d& deviations);

void filterToCompleteTracks(vector<unsigned int>& dst, vector<unsigned int>& src, vector<featureTrack>& tracks, int idx1, int idx2);
void getActiveTracks(vector<unsigned int>& indices, vector<featureTrack>& tracks, int idx1, int idx2);

int countTriangulatedTracks(const vector<featureTrack>& tracks);
int countActiveTriangulatedTracks(vector<unsigned int>& indices, vector<featureTrack>& tracks);

void updateTriangulatedPoints(vector<featureTrack>& tracks, vector<unsigned int>& indices, vector<cv::Point3d>& cloud);

void getActive3dPoints(vector<featureTrack>& tracks, vector<unsigned int>& indices, vector<cv::Point3d>& cloud);

// Gets 2D locations of active tracks into simple vectors
void filterToActivePoints(vector<featureTrack>& tracks, vector<cv::Point2f>& pts1, vector<cv::Point2f>& pts2, vector<unsigned int>& indices, int idx1, int idx2);

void reduceActiveToTriangulated(vector<featureTrack>& tracks, vector<unsigned int>& indices, vector<unsigned int>& untriangulated);

void updateSystemTracks(SysSBA& sys, vector<featureTrack>& tracks, unsigned int start_index);

int findBestCandidate(const cv::Mat *CX, const cv::Mat& K, const vector<cv::Point2f>& pts1, const vector<cv::Point2f>& pts2, cv::Mat& C);

void summarizeTransformation(const cv::Mat& C, char *summary);

void reverseTranslation(cv::Mat& C);

void removeShortTracks(vector<featureTrack>& tracks, int idx1, int idx2);

bool pointIsInFront(const cv::Mat& C, const cv::Point3d& pt);

void getIndicesForTriangulation(vector<unsigned int>& dst, vector<unsigned int>& src, vector<unsigned int>& already_triangulated);

void reconstructSubsequence(vector<featureTrack>& tracks, vector<cv::Point3d>& ptCloud, int idx1, int idx2);


void findCentroid(vector<featureTrack>& tracks, cv::Point3d& centroid, cv::Point3d& stdDeviation);

// Between two images, gets all 2D point locations and 3D locations needed for BA
void getTriangulatedFullSpanPoints(vector<featureTrack>& tracks, vector<cv::Point2f>& pts1, vector<cv::Point2f>& pts2, int idx1, int idx2, vector<cv::Point3f>& points3);

bool estimatePoseFromKnownPoints(cv::Mat& dst, cameraParameters camData, vector<featureTrack>& tracks, unsigned int index, const cv::Mat& guide, unsigned int minAppearances = 1, unsigned int iterCount = 100, double maxReprojErr = 4.0, double inliersPercentage = 0.9, double *reprojError = 0, double *pnpInlierProp = 0, bool debug = false);
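
// Minimal sketch of the kind of RANSAC PnP step such a function might wrap (an
// assumption about its approach, not the original implementation); exampleSolvePnP
// and its parameters are illustrative names only, and the OpenCV calib3d module is
// assumed to be available through the headers above.
inline bool exampleSolvePnP(const vector<cv::Point3f>& objectPoints, const vector<cv::Point2f>& imagePoints, const cv::Mat& K, const cv::Mat& distCoeffs, cv::Mat& rvec, cv::Mat& tvec) {
    if (objectPoints.size() < 4) return false;    // PnP needs at least 4 correspondences
    cv::solvePnPRansac(objectPoints, imagePoints, K, distCoeffs, rvec, tvec);
    return true;
}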

#endif

