#include <cstring>
#include <stdexcept>

#include <boost/format.hpp>

#include <LinearMath/btMatrix3x3.h>
#include <LinearMath/btQuaternion.h>
#include <tf/transform_datatypes.h>

#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>
#include <sensor_msgs/distortion_models.h>

#include <visp/vpImage.h>

// vpMbEdgeTracker keeps its gain (lambda) and goodness-of-fit threshold
// (percentageGdPt) protected; expose them so the conversion helpers below
// can read them back when filling configuration messages.
#define protected public
#include <visp/vpMbEdgeTracker.h>
#undef protected

#include "conversion.hh"

void rosImageToVisp(vpImage<unsigned char>& dst,
                    const sensor_msgs::Image::ConstPtr& src)
{
  using sensor_msgs::image_encodings::RGB8;
  using sensor_msgs::image_encodings::RGBA8;
  using sensor_msgs::image_encodings::BGR8;
  using sensor_msgs::image_encodings::BGRA8;
  using sensor_msgs::image_encodings::MONO8;
  using sensor_msgs::image_encodings::MONO16;
  using sensor_msgs::image_encodings::numChannels;

  // Resize the destination image if necessary.
  if (src->width != dst.getWidth() || src->height != dst.getHeight())
  {
    ROS_INFO("src is %ux%u but dst size is %ux%u, resizing.",
             src->width, src->height,
             dst.getWidth(), dst.getHeight());
    dst.resize(src->height, src->width);
  }

  if (src->encoding == MONO8)
  {
    // Copy row by row so that a padded row stride (step > width) is handled.
    for (unsigned j = 0; j < dst.getHeight(); ++j)
      std::memcpy(dst[j], &src->data[j * src->step],
                  dst.getWidth() * sizeof(unsigned char));
  }
  else if (src->encoding == RGB8 || src->encoding == RGBA8
           || src->encoding == BGR8 || src->encoding == BGRA8)
  {
    // Convert to gray level by averaging the color channels,
    // ignoring the alpha channel when there is one.
    unsigned nc = numChannels(src->encoding);
    unsigned cEnd =
      (src->encoding == RGBA8 || src->encoding == BGRA8) ? nc - 1 : nc;

    for (unsigned i = 0; i < dst.getWidth(); ++i)
      for (unsigned j = 0; j < dst.getHeight(); ++j)
      {
        int acc = 0;
        for (unsigned c = 0; c < cEnd; ++c)
          acc += src->data[j * src->step + i * nc + c];
        dst[j][i] = acc / cEnd;
      }
  }
  else
  {
    boost::format fmt("bad encoding '%1%'");
    fmt % src->encoding;
    throw std::runtime_error(fmt.str());
  }
}

void vispImageToRos(sensor_msgs::Image& dst,
                    const vpImage<unsigned char>& src)
{
  // Fill a mono8 ROS image from the ViSP gray-level image.
  dst.width = src.getWidth();
  dst.height = src.getHeight();
  dst.encoding = sensor_msgs::image_encodings::MONO8;
  dst.step = src.getWidth();
  dst.data.resize(dst.height * dst.step);
  for (unsigned i = 0; i < src.getWidth(); ++i)
    for (unsigned j = 0; j < src.getHeight(); ++j)
      dst.data[j * dst.step + i] = src[j][i];
}

void vpHomogeneousMatrixToTransform(geometry_msgs::Transform& dst,
                                    const vpHomogeneousMatrix& src)
{
  // Extract the rotation block and convert it to a quaternion.
  btMatrix3x3 rotation;
  btQuaternion quaternion;
  for (unsigned i = 0; i < 3; ++i)
    for (unsigned j = 0; j < 3; ++j)
      rotation[i][j] = src[i][j];
  rotation.getRotation(quaternion);

  dst.translation.x = src[0][3];
  dst.translation.y = src[1][3];
  dst.translation.z = src[2][3];

  dst.rotation.x = quaternion.x();
  dst.rotation.y = quaternion.y();
  dst.rotation.z = quaternion.z();
  dst.rotation.w = quaternion.w();
}

void transformToVpHomogeneousMatrix(vpHomogeneousMatrix& dst,
                                    const geometry_msgs::Transform& src)
{
  btQuaternion quaternion
    (src.rotation.x, src.rotation.y, src.rotation.z, src.rotation.w);
  btMatrix3x3 rotation(quaternion);

  // Copy the rotation component.
  for (unsigned i = 0; i < 3; ++i)
    for (unsigned j = 0; j < 3; ++j)
      dst[i][j] = rotation[i][j];

  // Copy the translation component.
  dst[0][3] = src.translation.x;
  dst[1][3] = src.translation.y;
  dst[2][3] = src.translation.z;
}

void transformToVpHomogeneousMatrix(vpHomogeneousMatrix& dst,
                                    const geometry_msgs::Pose& src)
{
  btQuaternion quaternion
    (src.orientation.x, src.orientation.y, src.orientation.z,
     src.orientation.w);
  btMatrix3x3 rotation(quaternion);

  // Copy the rotation component.
  for (unsigned i = 0; i < 3; ++i)
    for (unsigned j = 0; j < 3; ++j)
      dst[i][j] = rotation[i][j];

  // Copy the translation component.
  dst[0][3] = src.position.x;
  dst[1][3] = src.position.y;
  dst[2][3] = src.position.z;
}

void transformToVpHomogeneousMatrix(vpHomogeneousMatrix& dst,
                                    const tf::Transform& src)
{
  // Copy the rotation component.
  for (unsigned i = 0; i < 3; ++i)
    for (unsigned j = 0; j < 3; ++j)
      dst[i][j] = src.getBasis()[i][j];

  // Copy the translation component.
  for (unsigned i = 0; i < 3; ++i)
    dst[i][3] = src.getOrigin()[i];
  dst[3][3] = 1.;
}

void convertMovingEdgeConfigToVpMe(const visp_tracker::MovingEdgeConfig& config,
                                   vpMe& moving_edge,
                                   vpMbEdgeTracker& tracker)
{
  moving_edge.mask_size = config.mask_size;
  moving_edge.n_mask = config.n_mask;
  moving_edge.range = config.range;
  moving_edge.threshold = config.threshold;
  moving_edge.mu1 = config.mu1;
  moving_edge.mu2 = config.mu2;
  moving_edge.sample_step = config.sample_step;
  moving_edge.ntotal_sample = config.ntotal_sample;

  moving_edge.strip = config.strip;
  moving_edge.min_samplestep = config.min_samplestep;
  moving_edge.aberration = config.aberration;
  moving_edge.init_aberration = config.init_aberration;

  tracker.setLambda(config.lambda);
  tracker.setFirstThreshold(config.first_threshold);
}

void convertVpMeToMovingEdgeConfig(const vpMe& moving_edge,
                                   const vpMbEdgeTracker& tracker,
                                   visp_tracker::MovingEdgeConfig& config)
{
  config.mask_size = moving_edge.mask_size;
  config.n_mask = moving_edge.n_mask;
  config.range = moving_edge.range;
  config.threshold = moving_edge.threshold;
  config.mu1 = moving_edge.mu1;
  config.mu2 = moving_edge.mu2;
  config.sample_step = moving_edge.sample_step;
  config.ntotal_sample = moving_edge.ntotal_sample;

  config.strip = moving_edge.strip;
  config.min_samplestep = moving_edge.min_samplestep;
  config.aberration = moving_edge.aberration;
  config.init_aberration = moving_edge.init_aberration;

  // These members are protected in vpMbEdgeTracker, hence the #define hack above.
  config.lambda = tracker.lambda;
  config.first_threshold = tracker.percentageGdPt;
}

void convertVpMeToInitRequest(const vpMe& moving_edge,
                              const vpMbEdgeTracker& tracker,
                              visp_tracker::Init& srv)
{
  srv.request.moving_edge.mask_size = moving_edge.mask_size;
  srv.request.moving_edge.n_mask = moving_edge.n_mask;
  srv.request.moving_edge.range = moving_edge.range;
  srv.request.moving_edge.threshold = moving_edge.threshold;
  srv.request.moving_edge.mu1 = moving_edge.mu1;
  srv.request.moving_edge.mu2 = moving_edge.mu2;
  srv.request.moving_edge.sample_step = moving_edge.sample_step;
  srv.request.moving_edge.ntotal_sample = moving_edge.ntotal_sample;

  srv.request.moving_edge.strip = moving_edge.strip;
  srv.request.moving_edge.min_samplestep = moving_edge.min_samplestep;
  srv.request.moving_edge.aberration = moving_edge.aberration;
  srv.request.moving_edge.init_aberration = moving_edge.init_aberration;

  srv.request.moving_edge.lambda = tracker.lambda;
  srv.request.moving_edge.first_threshold = tracker.percentageGdPt;
}

void convertInitRequestToVpMe(const visp_tracker::Init::Request& req,
                              vpMbEdgeTracker& tracker,
                              vpMe& moving_edge)
{
  moving_edge.mask_size = req.moving_edge.mask_size;
  moving_edge.n_mask = req.moving_edge.n_mask;
  moving_edge.range = req.moving_edge.range;
  moving_edge.threshold = req.moving_edge.threshold;
  moving_edge.mu1 = req.moving_edge.mu1;
  moving_edge.mu2 = req.moving_edge.mu2;
  moving_edge.sample_step = req.moving_edge.sample_step;
  moving_edge.ntotal_sample = req.moving_edge.ntotal_sample;

  moving_edge.strip = req.moving_edge.strip;
  moving_edge.min_samplestep = req.moving_edge.min_samplestep;
  moving_edge.aberration = req.moving_edge.aberration;
  moving_edge.init_aberration = req.moving_edge.init_aberration;

  tracker.setLambda(req.moving_edge.lambda);
  tracker.setFirstThreshold(req.moving_edge.first_threshold);
}

void initializeVpCameraFromCameraInfo(vpCameraParameters& cam,
                                      sensor_msgs::CameraInfoConstPtr info)
{
  if (!info)
    throw std::runtime_error("missing camera calibration data");

  // Check that the camera is calibrated, as specified in the
  // sensor_msgs/CameraInfo message documentation.
  if (info->K.size() != 3 * 3 || info->K[0] == 0.)
    throw std::runtime_error("uncalibrated camera");

  // Check that the projection matrix P has the expected 3x4 size.
  if (info->P.size() != 3 * 4)
    throw std::runtime_error
      ("camera calibration P matrix has an incorrect size");

  if (info->distortion_model.empty())
  {
    // No distortion model: use the intrinsic matrix K directly.
    const double& px = info->K[0 * 3 + 0];
    const double& py = info->K[1 * 3 + 1];
    const double& u0 = info->K[0 * 3 + 2];
    const double& v0 = info->K[1 * 3 + 2];
    cam.initPersProjWithoutDistortion(px, py, u0, v0);
    return;
  }

  if (info->distortion_model == sensor_msgs::distortion_models::PLUMB_BOB)
  {
    // Plumb-bob model: use the projection matrix P of the rectified image.
    const double& px = info->P[0 * 4 + 0];
    const double& py = info->P[1 * 4 + 1];
    const double& u0 = info->P[0 * 4 + 2];
    const double& v0 = info->P[1 * 4 + 2];
    cam.initPersProjWithoutDistortion(px, py, u0, v0);
    return;
  }

  throw std::runtime_error("unsupported distortion model");
}
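A minimal usage sketch, assuming a ROS node that already runs a vpMbEdgeTracker: it shows how rosImageToVisp and vpHomogeneousMatrixToTransform from this file might be chained in an image callback. The callback name, the static image buffer, and the elided tracker call are illustrative placeholders, not part of conversion.cpp.

// Hypothetical usage sketch (not part of conversion.cpp).
#include <geometry_msgs/TransformStamped.h>
#include <sensor_msgs/Image.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpImage.h>

#include "conversion.hh"

void exampleImageCallback(const sensor_msgs::Image::ConstPtr& rosImage,
                          vpHomogeneousMatrix& cMo,
                          geometry_msgs::TransformStamped& cMoMsg)
{
  // Reuse one buffer; rosImageToVisp resizes it when the source size changes.
  static vpImage<unsigned char> image;
  rosImageToVisp(image, rosImage);

  // ... feed `image` to the tracker here, updating the pose cMo ...

  // Export the estimated pose for publication or tf broadcasting.
  vpHomogeneousMatrixToTransform(cMoMsg.transform, cMo);
  cMoMsg.header = rosImage->header;
}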