// NOTE(review): garbled extraction — original line numbers are fused into the
// text and several original lines are missing (incl. the function signature of
// features2keypoint and the loop's closing braces). Code kept byte-identical.
// Header block: OpenCV (with a CV_MAJOR_VERSION>=4 conditional), posedetection
// and geometry message headers, boost, and dynamic_reconfigure for this node.
// The trailing fragment converts a posedetection_msgs::Feature0D message into
// a std::vector<cv::KeyPoint> plus a CV_32FC1 descriptor cv::Mat
// (rows = number of features, cols = descriptor_dim).
39 #if ( CV_MAJOR_VERSION >= 4) 40 #include <opencv2/opencv.hpp> 41 #include <opencv2/highgui/highgui_c.h> 42 #include <opencv2/calib3d/calib3d_c.h> 44 #include <opencv/highgui.h> 45 #include <opencv/cv.hpp> 47 #include <posedetection_msgs/Feature0DDetect.h> 48 #include <posedetection_msgs/ImageFeature0D.h> 49 #include <posedetection_msgs/ObjectDetection.h> 50 #include <posedetection_msgs/Object6DPose.h> 51 #include <geometry_msgs/PoseStamped.h> 52 #include <geometry_msgs/Pose.h> 53 #include <geometry_msgs/Quaternion.h> 58 #include <boost/shared_ptr.hpp> 59 #include <boost/foreach.hpp> 60 #include <boost/filesystem.hpp> 64 #include <dynamic_reconfigure/server.h> 65 #include <jsk_perception/point_pose_extractorConfig.h> 66 #include <jsk_perception/SetTemplate.h> 73 std::vector<cv::KeyPoint>& keypoints,
74 cv::Mat& descriptors){
// One keypoint per detected feature; descriptor matrix sized to match.
75 keypoints.resize(features.scales.size());
76 descriptors.create(features.scales.size(),features.descriptor_dim,CV_32FC1);
77 std::vector<cv::KeyPoint>::iterator keypoint_it = keypoints.begin();
78 for (
int i=0; keypoint_it != keypoints.end(); ++keypoint_it, ++i ) {
// positions[] is a flat (x0,y0,x1,y1,...) array; descriptor_dim is reused
// as the KeyPoint "size" field here (looks intentional but odd — verify).
79 *keypoint_it = cv::KeyPoint(cv::Point2f(features.positions[i*2+0],
80 features.positions[i*2+1]),
81 features.descriptor_dim,
82 features.orientations[i],
// Copy the i-th feature's descriptor row element-by-element.
86 for (
int j = 0; j < features.descriptor_dim; j++){
87 descriptors.at<
float>(i,j) =
88 features.descriptors[i*features.descriptor_dim+j];
// NOTE(review): fragment of the Matching_Template constructor — the leading
// parameters (template image, relativepose, autosize flag) and the closing
// brace fall outside this extraction. Kept byte-identical.
// The constructor deep-copies the template image and stores the matching
// frame id, template pixel/metric dimensions, the affine rectification
// matrix, RANSAC/match-ratio thresholds and the viewer window name, then
// clears any previously accumulated keypoints and mouse correspondences.
118 std::string matching_frame,
119 int original_width_size,
120 int original_height_size,
121 double template_width,
122 double template_height,
124 cv::Mat affine_matrix,
125 double reprojection_threshold,
126 double distanceratio_threshold,
127 std::string window_name,
// clone() so the stored template does not alias the caller's buffer.
130 _template_img = img.clone();
131 _matching_frame = matching_frame;
132 _original_width_size = original_width_size;
133 _original_height_size = original_height_size;
134 _template_width = template_width;
135 _template_height = template_height;
136 _relativepose = relativepose;
137 _affine_matrix = affine_matrix;
138 _window_name = window_name;
139 _reprojection_threshold = reprojection_threshold;
140 _distanceratio_threshold = distanceratio_threshold;
// Fresh template: features will be (re)extracted by set_template().
141 _template_keypoints.clear();
142 _correspondances.clear();
// NOTE(review): destructor fragment — only the debug trace survives the
// extraction; any window cleanup around it is not visible here.
148 std::cerr <<
"delete " << _window_name << std::endl;
161 if (_template_img.empty() && _template_keypoints.size() == 0 &&
162 _template_descriptors.empty()){
172 posedetection_msgs::Feature0DDetect srv;
174 if ( _template_img.empty()) {
175 ROS_ERROR (
"template picture is empty.");
180 cv_img.
image = cv::Mat(_template_img);
181 cv_img.encoding = std::string(
"bgr8");
182 srv.request.image = *(cv_img.toImageMsg());
183 if (client.
call(srv)) {
184 ROS_INFO_STREAM(
"get features with " << srv.response.features.scales.size() <<
" descriptoins");
185 features2keypoint (srv.response.features, _template_keypoints, _template_descriptors);
190 cv::invert(_affine_matrix, M_inv);
196 for (
int i = 0; i < (
int)_template_keypoints.size(); i++){
198 cv::Mat pt_mat(cv::Size(1,1), CV_32FC2, &pt);
199 cv::Mat pt_src_mat(cv::Size(1,1), CV_32FC2, &_template_keypoints.at(i).pt);
200 cv::perspectiveTransform (pt_src_mat, pt_mat, M_inv);
201 _template_keypoints.at(i).pt = pt_mat.at<cv::Point2f>(0,0);
207 ROS_ERROR(
"Failed to call service Feature0DDetect");
// NOTE(review): fragment of a memoized log-factorial helper (presumably
// log_fac(n) — the signature line is missing). Maintains a growing static
// table where slog_table[i] = log(i!), extended lazily up to n.
// Not thread-safe: the static table is mutated without synchronization.
215 static std::vector<double> slog_table;
216 int osize = slog_table.size();
218 slog_table.resize(n+1);
// log(1!) = log(1.0) = 0; base case of the running sum.
221 slog_table[1] =
log(1.0);
224 for(
int i = osize; i <= n; i++ ){
// log(i!) = log((i-1)!) + log(i)
225 slog_table[i] = slog_table[i-1] +
log(i);
228 return slog_table[n];
// NOTE(review): fragment of min_inlier — declarations of j, i, pi, sum and
// the return path are missing from the extraction. Kept byte-identical.
// Computes the minimum number of RANSAC inliers needed so that the
// probability of a spurious model is below p_badxform, given n matches,
// model size m, and per-point bad-support probability p_badsupp
// (binomial tail evaluated in log space — same scheme as Lowe/Hess RANSAC).
231 int min_inlier(
int n,
int m,
double p_badsupp,
double p_badxform )
235 double lp_badsupp =
log( p_badsupp );
236 double lp_badsupp1 =
log( 1.0 - p_badsupp );
// Outer loop: candidate inlier counts j; inner loop sums the binomial tail.
237 for( j = m+1; j <= n; j++ ){
239 for( i = j; i <= n; i++ ){
// log of one binomial term; the log-binomial-coefficient part of this
// expression is on a line lost to the extraction.
240 pi = (i-m) * lp_badsupp + (n-i+m) * lp_badsupp1 +
244 if( sum < p_badxform )
// NOTE(review): large fragment of Matching_Template::estimate_od — the
// leading parameters (client, src_img, pcam), many declarations (H, chr,
// err_sum, coords, resulttf/checktf setup, solvePnP camera-matrix args) and
// most closing braces are missing from the extraction. Kept byte-identical.
// Visible pipeline: match template descriptors against the source image via
// a FLANN index, estimate a homography with RANSAC, gate on an adaptive
// minimum inlier count, recover the 6D pose with solvePnP, sanity-check the
// reprojection error, fill the Object6DPose, and draw a debug "stack image"
// (template stacked above the source frame).
252 std::vector<cv::KeyPoint> sourceimg_keypoints,
254 double err_thr, cv::Mat &stack_img,
255 cv::flann::Index* ft, posedetection_msgs::Object6DPose* o6p){
// Lazily (re)extract template features; bail out if still unset afterwards.
257 if ( _template_keypoints.size()== 0 &&
258 _template_descriptors.empty() ){
261 if ( _template_keypoints.size()== 0 &&
262 _template_descriptors.empty() ) {
263 ROS_ERROR (
"Template image was not set.");
// Build the debug canvas: template on top, source image below.
267 cv::Size stack_size = cv::Size(
MAX(src_img.cols,_template_img.cols),
268 src_img.rows+_template_img.rows);
269 stack_img = cv::Mat(stack_size,CV_8UC3);
270 stack_img = cv::Scalar(0);
271 cv::Mat stack_img_tmp(stack_img,cv::Rect(0,0,_template_img.cols,_template_img.rows));
272 cv::add(stack_img_tmp, _template_img, stack_img_tmp);
273 cv::Mat stack_img_src(stack_img,cv::Rect(0,_template_img.rows,src_img.cols,src_img.rows));
274 cv::add(stack_img_src, src_img, stack_img_src);
// Saved so mouse-made templates can be cut from the last shown frame.
275 _previous_stack_img = stack_img.clone();
// 2-NN search; ratio test below follows Lowe's scheme.
278 cv::Mat m_indices(_template_descriptors.rows, 2, CV_32S);
279 cv::Mat m_dists(_template_descriptors.rows, 2, CV_32F);
280 ft->knnSearch(_template_descriptors, m_indices, m_dists, 2, cv::flann::SearchParams(-1) );
283 std::vector<cv::Point2f> pt1, pt2;
284 std::vector<int> queryIdxs,trainIdxs;
285 for (
unsigned int j = 0; j < _template_keypoints.size(); j++ ) {
// Keep a match only if best distance is sufficiently better than 2nd best.
286 if ( m_dists.at<
float>(j,0) < m_dists.at<
float>(j,1) * _distanceratio_threshold) {
287 queryIdxs.push_back(j);
288 trainIdxs.push_back(m_indices.at<
int>(j,0));
291 if ( queryIdxs.size() == 0 ) {
292 ROS_WARN_STREAM(
"could not find matched points with distanceratio(" <<_distanceratio_threshold <<
")");
298 ROS_INFO (
"Found %d total matches among %d template keypoints", (
int)pt2.size(), (
int)_template_keypoints.size());
301 std::vector<uchar> mask((
int)pt2.size());
// Need >4 correspondences for a homography; RANSAC fills the inlier mask.
303 if ( pt1.size() > 4 ) {
305 H = cv::findHomography(cv::Mat(pt1), cv::Mat(pt2), mask, CV_RANSAC, _reprojection_threshold);
// Draw every match: green for inliers, magenta for outliers (branch lines
// separating the two colors were lost in the extraction).
309 for (
int j = 0; j < (
int)pt1.size(); j++){
310 cv::Point2f pt, pt_orig;
311 cv::Mat pt_mat(cv::Size(1,1), CV_32FC2, &pt);
312 cv::Mat pt_src_mat(cv::Size(1,1), CV_32FC2, &pt1.at(j));
313 cv::perspectiveTransform (pt_src_mat, pt_mat, _affine_matrix);
314 pt_orig = pt_mat.at<cv::Point2f>(0,0);
316 cv::line(stack_img, pt_orig, pt2.at(j)+cv::Point2f(0,_template_img.rows),
317 CV_RGB(0,255,0), 1,8,0);
320 cv::line(stack_img, pt_orig, pt2.at(j)+cv::Point2f(0,_template_img.rows),
321 CV_RGB(255,0,255), 1,8,0);
// Count RANSAC inliers from the mask.
325 for (
int k=0; k < (
int)mask.size(); k++){
326 inlier_sum += (
int)mask.at(k);
// Overlay inlier/match statistics in the top-right corner.
329 double text_scale = 1.5;
331 int fontFace = 0, thickness = 0, baseLine;
336 text =
"inlier: " + boost::lexical_cast<std::string>((
int)inlier_sum) +
" / " + boost::lexical_cast<std::string>((
int)pt2.size());
337 text_size = cv::getTextSize(text, fontFace, text_scale, thickness, &baseLine);
338 x = stack_img.size().width - text_size.width;
339 y = text_size.height + thickness + 10;
340 cv::putText (stack_img, text, cv::Point(x, y),
341 fontFace, text_scale, CV_RGB(0, 255, 0),
342 thickness, 8,
false);
344 text =
"template: " + boost::lexical_cast<std::string>((
int)_template_keypoints.size());
345 text_size = cv::getTextSize(text, fontFace, text_scale, thickness, &baseLine);
346 x = stack_img.size().width - text_size.width;
347 y += text_size.height + thickness + 10;
348 cv::putText (stack_img, text, cv::Point(x, y),
349 fontFace, text_scale, CV_RGB(0, 255, 0),
350 thickness, 8,
false);
// Red dots: user-clicked correspondences (template half of the canvas).
354 ROS_INFO(
" _correspondances.size: %d", (
int)_correspondances.size());
355 for (
int j = 0; j < (
int)_correspondances.size(); j++){
356 cv::circle(stack_img, cv::Point2f(_correspondances.at(j).x, _correspondances.at(j).y + _template_img.size().height),
357 8, CV_RGB(255,0,0), -1);
// Reject the model when H is degenerate or inliers fall below the
// statistically-derived minimum (min_inlier with m=4, Pb=0.10, Pf=0.01).
360 ROS_INFO(
" inlier_sum:%d min_lier:%d", inlier_sum,
min_inlier((
int)pt2.size(), 4, 0.10, 0.01));
361 if ((cv::countNonZero( H ) == 0) || (inlier_sum <
min_inlier((
int)pt2.size(), 4, 0.10, 0.01))){
362 ROS_INFO(
" inlier_sum < min_lier return-from estimate-od");
363 if( _window_name !=
"" )
364 cv::imshow(_window_name, stack_img);
// Encode match/inlier counts into _type (diagnostic label for this result).
372 sprintf(chr,
"%d", (
int)pt2.size());
373 _type = _matching_frame +
"_" + std::string(chr);
375 sprintf(chr,
"%d", inlier_sum);
376 _type = _type +
"_" + std::string(chr);
// Template corners in pixels (2D) and in meters (3D, z=0 plane) — note the
// 3D ordering swaps width/height axes relative to the 2D ordering.
378 cv::Point2f corners2d[4] = {cv::Point2f(0,0),
379 cv::Point2f(_original_width_size,0),
380 cv::Point2f(_original_width_size,_original_height_size),
381 cv::Point2f(0,_original_height_size)};
382 cv::Mat corners2d_mat (cv::Size(4, 1), CV_32FC2, corners2d);
383 cv::Point3f corners3d[4] = {cv::Point3f(0,0,0),
384 cv::Point3f(0,_template_width,0),
385 cv::Point3f(_template_height,_template_width,0),
386 cv::Point3f(_template_height,0,0)};
387 cv::Mat corners3d_mat (cv::Size(4, 1), CV_32FC3, corners3d);
389 cv::Mat corners2d_mat_trans;
// Project template corners into the source image through H.
391 cv::perspectiveTransform (corners2d_mat, corners2d_mat_trans, H);
// Solve the 3D->2D pose; rvec/tvec share storage with fR3/fT3.
393 double fR3[3], fT3[3];
394 cv::Mat rvec(3, 1, CV_64FC1, fR3);
395 cv::Mat tvec(3, 1, CV_64FC1, fT3);
396 cv::Mat zero_distortion_mat = cv::Mat::zeros(4, 1, CV_64FC1);
398 cv::solvePnP (corners3d_mat, corners2d_mat_trans,
// Convert Rodrigues vector to axis-angle for the tf quaternion (the
// quaternion construction lines are missing from the extraction).
407 double rx = fR3[0], ry = fR3[1], rz = fR3[2];
409 double angle = cv::norm(rvec);
411 checktf.setRotation( quat );
415 ROS_INFO(
" tx: (%0.2lf,%0.2lf,%0.2lf) rx: (%0.2lf,%0.2lf,%0.2lf)",
416 resulttf.getOrigin().getX(),
417 resulttf.getOrigin().getY(),
418 resulttf.getOrigin().getZ(),
419 resulttf.getRotation().getAxis().x() * resulttf.getRotation().getAngle(),
420 resulttf.getRotation().getAxis().y() * resulttf.getRotation().getAngle(),
421 resulttf.getRotation().getAxis().z() * resulttf.getRotation().getAngle());
// Fill the output pose message from the final transform.
423 o6p->pose.position.x = resulttf.getOrigin().getX();
424 o6p->pose.position.y = resulttf.getOrigin().getY();
425 o6p->pose.position.z = resulttf.getOrigin().getZ();
426 o6p->pose.orientation.w = resulttf.getRotation().w();
427 o6p->pose.orientation.x = resulttf.getRotation().x();
428 o6p->pose.orientation.y = resulttf.getRotation().y();
429 o6p->pose.orientation.z = resulttf.getRotation().z();
// Project an 8-corner 3D box (template plane extruded by -0.03 m) through
// the estimated pose for the visual sanity check below.
433 std::vector<cv::Point2f> projected_top;
441 tf::Vector3(_template_height, _template_width, -0.03),
444 projected_top = std::vector<cv::Point2f>(8);
446 for(
int i=0; i<8; i++) {
447 coords[i] = checktf * coords[i];
448 cv::Point3f pt(coords[i].
getX(), coords[i].
getY(), coords[i].
getZ());
// Bounding box of the warped template corners in the source image;
// reject regions that are implausibly small or large.
454 float max_x, max_y, min_x, min_y;
455 max_x = max_y = -1e9;
457 for (
int j = 0; j < 4; j++){
458 cv::Point2f pt = corners2d_mat_trans.at<cv::Point2f>(0,j);
459 max_x = std::max(max_x, pt.x), max_y = std::max(max_y, pt.y);
460 min_x = std::min(min_x, pt.x), min_y = std::min(min_y, pt.y);
// NOTE(review): rows is compared against an x-extent and cols against a
// y-extent here — looks transposed; confirm against upstream source.
462 if((max_x - min_x) < 30 || (max_y - min_y) < 30 ||
463 src_img.rows < (max_x - min_x)/2 || src_img.cols < (max_y - min_y)/2){
464 ROS_INFO(
" matched region is too big or small (2< && <30) width:%f height:%f return-from estimate-od", max_x - min_x, max_y - min_y);
// Accumulate per-corner reprojection error between H-warped corners and
// pose-projected corners; reject when above err_thr.
470 bool err_success =
true;
471 for (
int j = 0; j < 4; j++){
472 double err =
sqrt(
pow((corners2d_mat_trans.at<cv::Point2f>(0,j).x - projected_top.at(j).x), 2) +
473 pow((corners2d_mat_trans.at<cv::Point2f>(0,j).y - projected_top.at(j).y), 2));
476 if (err_sum > err_thr){
477 ROS_INFO(
" err_sum:%f > err_thr:%f return-from estimate-od", err_sum, err_thr);
480 o6p->reliability = 1.0 - (err_sum / err_thr);
// Draw the matched template outline (red) on the source half of the canvas.
483 for (
int j = 0; j < corners2d_mat_trans.cols; j++){
484 cv::Point2f p1(corners2d_mat_trans.at<cv::Point2f>(0,j).x,
485 corners2d_mat_trans.at<cv::Point2f>(0,j).y+_template_img.rows);
486 cv::Point2f p2(corners2d_mat_trans.at<cv::Point2f>(0,(j+1)%corners2d_mat_trans.cols).x,
487 corners2d_mat_trans.at<cv::Point2f>(0,(j+1)%corners2d_mat_trans.cols).y+_template_img.rows);
488 cv::line (stack_img, p1, p2, CV_RGB(255, 0, 0),
// Draw the projected 3D box (blue wireframe); thicker if error check passed.
494 if(projected_top.size() == 8) {
496 std::vector<cv::Point2f> ps(cnt);
497 for(
int i=0; i<cnt; i++)
498 ps[i] = cv::Point2f(projected_top[i].
x,
499 projected_top[i].
y+_template_img.rows);
501 int draw_width = ( err_success ? 3 : 1);
502 for(
int i=0; i<4; i++) {
503 cv::line (stack_img, ps[i], ps[(i+1)%4], CV_RGB(0, 0, 255), draw_width, CV_AA, 0);
504 cv::line (stack_img, ps[i+4], ps[(i+1)%4+4], CV_RGB(0, 0, 255), draw_width, CV_AA, 0);
505 cv::line (stack_img, ps[i], ps[i+4], CV_RGB(0, 0, 255), draw_width, CV_AA, 0);
// Draw the estimated pose axes (x=red, y=green, z=blue) from the origin.
516 std::vector<cv::Point2f> ps(4);
518 for(
int i=0; i<4; i++) {
519 coords[i] = resulttf * coords[i];
520 cv::Point3f pt(coords[i].
getX(), coords[i].
getY(), coords[i].
getZ());
522 ps[i].y += _template_img.rows;
525 cv::line (stack_img, ps[0], ps[1], CV_RGB(255, 0, 0), 3, CV_AA, 0);
526 cv::line (stack_img, ps[0], ps[2], CV_RGB(0, 255, 0), 3, CV_AA, 0);
527 cv::line (stack_img, ps[0], ps[3], CV_RGB(0, 0, 255), 3, CV_AA, 0);
// Error readout overlay, then show the debug window if one is configured.
534 text =
"error: " + boost::lexical_cast<std::string>(err_sum);
535 x = stack_img.size().width - 16*17*text_scale;
536 y = _template_img.size().height - (16 + 2)*text_scale*6;
537 cv::putText (stack_img, text, cv::Point(x, y),
538 0, text_scale, CV_RGB(0, 255, 0),
540 ROS_INFO(
" %s < %f (threshold)", text.c_str(), err_thr );
543 if( _window_name !=
"" )
544 cv::imshow(_window_name, stack_img);
// NOTE(review): fragment of the node's constructor/setup — presumably
// PointPoseExtractor(); surrounding declarations are not visible.
// Wires up the Feature0DDetect service client, the ObjectDetection /
// aggregated / pose publishers, and an image_transport debug publisher.
576 _client = _n.
serviceClient<posedetection_msgs::Feature0DDetect>(
"Feature0DDetect");
577 _pub = _n.
advertise<posedetection_msgs::ObjectDetection>(
"ObjectDetection", 10);
578 _pub_agg = _n.
advertise<posedetection_msgs::ObjectDetection>(
"ObjectDetection_agg", 10);
579 _pub_pose = _n.
advertise<geometry_msgs::PoseStamped>(
"object_pose", 10);
580 _debug_pub = it.
advertise(
"debug_image", 1);
// Templates are loaded lazily on the first image callback.
582 _initialized =
false;
// NOTE(review): fragment of make_template_from_mousecb — the signature,
// stdin reads for width/height/filename, and several intermediate lines are
// missing. Kept byte-identical.
// Interactive flow (as visible): take the 4 corners the user clicked,
// rectify that quad out of the last stack image via a homography, save both
// the raw crop and the warped template to disk, then register the warped
// image as a new Matching_Template with a mouse callback of its own.
594 cv::Mat tmp_template, tmp_warp_template;
595 std::vector<cv::Point2f>pt1, pt2;
598 std::cout <<
"input template's [width]" << std::endl;
600 std::cout <<
"input template's [height]" << std::endl;
602 std::cout <<
"input template's [filename]" << std::endl;
605 for (
int i = 0; i < 4; i++){
// Axis-aligned bounds of the clicked quad; target size capped near 500 px.
609 cv::Rect rect = cv::boundingRect(cv::Mat(pt1));
610 double scale = std::max(width, height) / 500.0;
611 int iwidth = width / scale, iheight = height / scale;
612 pt2.push_back(cv::Point2d(0,0));
613 pt2.push_back(cv::Point2d(iwidth,0));
614 pt2.push_back(cv::Point2d(iwidth,iheight));
615 pt2.push_back(cv::Point2d(0, iheight));
616 H = cv::findHomography(cv::Mat(pt1), cv::Mat(pt2));
619 cv::Point2f((rect.tl().x + rect.br().x)/2.0,(rect.tl().y + rect.br().y)/2.0),
621 cv::warpPerspective(mt->
_previous_stack_img, tmp_warp_template, H, cv::Size(iwidth, iheight));
// Persist both crops; "_wrap" suffix marks the perspective-corrected one.
624 cv::imwrite(filename,tmp_template);
625 boost::filesystem::path fname(filename);
626 std::stringstream ss;
627 ss << fname.stem() <<
"_wrap" << fname.extension();
628 cv::imwrite(ss.str(),tmp_warp_template);
629 }
// imwrite failures (bad path/extension) are reported but non-fatal.
catch (cv::Exception e) {
630 std::cerr << e.what() << std::endl;
633 for (
int i = 0; i < (
int)pt1.size(); i++){
634 pt2.push_back(cv::Point2d((
int)pt1.at(i).x - rect.x,
// Identity affine: the warped template needs no further rectification.
643 cv::Mat M = (cv::Mat_<double>(3,3) << 1,0,0, 0,1,0, 0,0,1);
644 std::string window_name =
"sample" + boost::lexical_cast<std::string>((
int)_templates.size());
648 tmp_warp_template.size().width, tmp_warp_template.size().height,
655 cv::getWindowProperty(mt->
_window_name, CV_WND_PROP_AUTOSIZE));
658 _templates.push_back(tmplt);
660 cv::getWindowProperty(mt->
_window_name, CV_WND_PROP_AUTOSIZE));
// Route mouse events on the new window to the template just added.
662 &cvmousecb,
static_cast<void *
>(_templates.back()));
// NOTE(review): fragment of the static HighGUI mouse callback — param is the
// Matching_Template* the window belongs to; the cast and most case bodies
// are missing from the extraction.
// Left-button up: record a clicked correspondence and (once enough points
// exist, per the invisible surrounding logic) build a template from them.
// Right-button up: visible case label only; its body is lost here.
666 static void cvmousecb (
int event,
int x,
int y,
int flags,
void* param){
670 case CV_EVENT_LBUTTONUP: {
672 ROS_INFO(
"add correspondence (%d, %d)", (
int)pt.x, (
int)pt.y);
675 make_template_from_mousecb(mt);
681 case CV_EVENT_RBUTTONUP: {
// NOTE(review): fragment of the node's initialize() — the signature, the
// rospack try/if structure, the pose-string parsing tail, and camera-info
// subscription lines are missing. Kept byte-identical.
// Reads all node parameters (frame id, template metric size, relative pose,
// template file, thresholds, warp steps, viewer options), resolves a default
// template image from the jsk_perception package (via rospack, with both the
// ROSPACK_EXPORT and crawl/find API variants guarded), loads the template
// image, parses the relative pose string, and registers the first template.
691 std::string matching_frame;
692 std::string _pose_str;
693 double template_width;
694 double template_height;
695 std::string template_filename;
696 std::string window_name;
699 local_nh.
param(
"child_frame_id", matching_frame, std::string(
"matching"));
700 local_nh.
param(
"object_width", template_width, 0.06);
701 local_nh.
param(
"object_height", template_height, 0.0739);
// Pose format: "x y z qx qy qz qw" (7 space-separated values).
702 local_nh.
param(
"relative_pose", _pose_str, std::string(
"0 0 0 0 0 0 1"));
703 std::string default_template_file_name;
// Two rospack API generations are supported behind ROSPACK_EXPORT.
705 #ifdef ROSPACK_EXPORT 707 rospack::Package *
p = rp.get_pkg(
"jsk_perception");
708 if (p!=
NULL) default_template_file_name = p->path + std::string(
"/sample/opencv-logo2.png");
711 std::vector<std::string> search_path;
713 rp.
crawl(search_path, 1);
715 if (rp.
find(
"jsk_perception",path)==
true) default_template_file_name = path + std::string(
"/sample/opencv-logo2.png");
717 }
catch (std::runtime_error &e) {
719 local_nh.
param(
"template_filename", template_filename, default_template_file_name);
720 local_nh.
param(
"reprojection_threshold", _reprojection_threshold, 3.0);
721 local_nh.
param(
"distanceratio_threshold", _distanceratio_threshold, 0.49);
722 local_nh.
param(
"error_threshold", _err_thr, 50.0);
// th/phi steps drive the synthetic viewpoint warps in make_warped_images.
723 local_nh.
param(
"theta_step", _th_step, 5.0);
724 local_nh.
param(
"phi_step", _phi_step, 5.0);
725 local_nh.
param(
"viewer_window", _viewer,
true);
726 local_nh.
param(
"window_name", window_name, std::string(
"sample1"));
727 local_nh.
param(
"autosize", _autosize,
false);
728 local_nh.
param(
"publish_null_object_detection", pnod,
false);
733 cv::Mat template_img;
734 template_img = cv::imread (template_filename, 1);
735 if ( template_img.empty()) {
736 ROS_ERROR (
"template picture <%s> cannot read. template picture is not found or uses unsuported format.", template_filename.c_str());
// Parse the 7-component relative pose into rv (loop tail not visible).
741 std::vector<double> rv(7);
742 std::istringstream iss(_pose_str);
744 for(
int i=0; i<6; i++)
757 add_new_template(template_img, window_name, transform,
758 template_width, template_height, _th_step, _phi_step);
// NOTE(review): fragment of make_homography — the leading parameters (src
// image, rvec, tvec) and a few lines (y_min/y_max init, projectPoints camera
// args) are missing from the extraction. Kept byte-identical.
// Projects the template's 4 metric corners (centered, z=0) through the given
// rotation/translation with zero distortion, shifts the projection so its
// bounding box starts at (0,0), reports that box via `size`, and returns the
// homography mapping the source image corners onto the projected quad.
764 double template_width,
double template_height, cv::Size &size){
766 cv::Point3f coner[4] = {cv::Point3f(-(template_width/2.0), -(template_height/2.0),0),
767 cv::Point3f(template_width/2.0,-(template_height/2.0),0),
768 cv::Point3f(template_width/2.0,template_height/2.0,0),
769 cv::Point3f(-(template_width/2.0),template_height/2.0,0)};
770 cv::Mat coner_mat (cv::Size(4, 1), CV_32FC3, coner);
771 std::vector<cv::Point2f> coner_img_points;
773 cv::Mat zero_distortion_mat = cv::Mat::zeros(4, 1, CV_64FC1);
774 cv::projectPoints(coner_mat, rvec, tvec,
// Track the projected quad's axis-aligned extent.
778 float x_min = 10000, x_max = 0;
780 for (
int i = 0; i < (
int)coner_img_points.size(); i++){
781 x_min = std::min(x_min, coner_img_points.at(i).x);
782 x_max = std::max(x_max, coner_img_points.at(i).x);
783 y_min = std::min(y_min, coner_img_points.at(i).y);
784 y_max = std::max(y_max, coner_img_points.at(i).y);
// Normalize so the warped output fits in a (0,0)-anchored image.
787 std::vector<cv::Point2f> coner_img_points_trans;
788 for (
int i = 0; i < (
int)coner_img_points.size(); i++){
789 cv::Point2f pt_tmp(coner_img_points.at(i).x - x_min,
790 coner_img_points.at(i).y - y_min);
791 coner_img_points_trans.push_back(pt_tmp);
794 cv::Point2f template_points[4] = {cv::Point2f(0,0),
795 cv::Point2f(src.size().width,0),
796 cv::Point2f(src.size().width,src.size().height),
797 cv::Point2f(0,src.size().height)};
798 cv::Mat template_points_mat (cv::Size(4, 1), CV_32FC2, template_points);
800 size = cv::Size(x_max - x_min,
y_max - y_min);
// 4 exact correspondences -> direct (method 0) homography fit.
801 return cv::findHomography(template_points_mat, cv::Mat(coner_img_points_trans), 0, 3);
// NOTE(review): fragment of make_warped_images — the src/imgs parameters,
// tvec setup, and Mvec.push_back lines are missing. Kept byte-identical.
// Sweeps synthetic viewpoints over theta/phi in [-pi/4, pi/4) at the given
// step sizes, builds a homography per viewpoint via make_homography, then
// warps the source template once per homography so matching is tolerant to
// out-of-plane rotation.
806 std::vector<cv:: Mat> &Mvec,
807 double template_width,
double template_height,
808 double th_step,
double phi_step){
810 std::vector<cv::Size> sizevec;
// NOTE(review): 3.14 is used as pi here (inherited approximation).
812 for (
int i = (
int)((-3.14/4.0)/th_step); i*th_step < 3.14/4.0; i++){
813 for (
int j = (
int)((-3.14/4.0)/phi_step); j*phi_step < 3.14/4.0; j++){
814 double fR3[3], fT3[3];
815 cv::Mat rvec(3, 1, CV_64FC1, fR3);
816 cv::Mat tvec(3, 1, CV_64FC1, fT3);
// Viewpoint rotation from (roll=0, pitch=theta, yaw=phi) Euler angles.
819 quat.
setEuler(0, th_step*i, phi_step*j);
829 M = make_homography(src, rvec, tvec, template_width, template_height, size);
831 sizevec.push_back(size);
// Second pass: actually render each warped variant of the template.
835 for (
int i = 0; i < (
int)Mvec.size(); i++){
837 cv::warpPerspective(src, dst, Mvec.at(i), sizevec.at(i),
838 CV_INTER_LINEAR, IPL_BORDER_CONSTANT, 0);
// NOTE(review): fragment of add_new_template — the leading parameters (img,
// typestr, relative_pose) and the Matching_Template constructor call's first
// arguments are missing. Kept byte-identical.
// Generates the viewpoint-warped variants of the template and registers one
// Matching_Template per variant (suffix "_<i>" when more than one), opening
// a viewer window with a mouse callback only for the last one added.
845 double template_width,
double template_height,
846 double theta_step=5.0,
double phi_step=5.0)
848 std::vector<cv::Mat> imgs;
849 std::vector<cv::Mat> Mvec;
850 make_warped_images(img, imgs, Mvec,
851 template_width, template_height, theta_step, phi_step);
852 for (
int i = 0; i < (
int)imgs.size(); i++){
854 std::string
type = typestr;
// Disambiguate names when multiple warped variants exist.
855 if(imgs.size() > 1) {
857 sprintf(chr,
"%d", i);
858 type +=
"_" + std::string(chr);
863 img.size().width, img.size().height,
864 template_width, template_height,
865 relative_pose, Mvec.at(i),
// Empty window name disables this template's viewer.
868 (_viewer ? type :
""), _autosize);
869 _templates.push_back(tmplt);
872 cv::namedWindow(type, _autosize ? CV_WINDOW_AUTOSIZE : 0);
873 cvSetMouseCallback (type.c_str(), &cvmousecb,
static_cast<void *
>(_templates.back()));
// NOTE(review): fragment of the SetTemplate service handler — the request
// parameter, cv_bridge conversion, and the tf::Transform construction around
// the quaternion/vector arguments are missing. Kept byte-identical.
// Converts the request image to cv::Mat, builds the relative-pose transform
// from the request, and registers it as a new template (no viewpoint sweep:
// th/phi steps of 1.0).
880 jsk_perception::SetTemplate::Response &res){
883 cv::Mat
img(cv_ptr->image);
886 req.relativepose.orientation.y,
887 req.relativepose.orientation.z,
888 req.relativepose.orientation.w),
890 req.relativepose.position.y,
891 req.relativepose.position.z));
894 add_new_template(img, req.type, transform,
895 req.dimx, req.dimy, 1.0, 1.0);
// NOTE(review): fragment of the ImageFeature0D subscriber callback — the
// signature, template initialization branch internals, publish calls and
// several gating lines are missing. Kept byte-identical.
// Per frame: convert msg features to keypoints/descriptors, validate camera
// intrinsics, lazily initialize templates, build a FLANN KD-tree over the
// source descriptors, run estimate_od for every template, publish the debug
// stack image, and publish ObjectDetection/PoseStamped results (the pose of
// the first detection) when anything matched or pnod is set.
901 std::vector<cv::KeyPoint> sourceimg_keypoints;
902 cv::Mat sourceimg_descriptors;
903 std::vector<posedetection_msgs::Object6DPose> vo6p;
904 posedetection_msgs::ObjectDetection od;
908 cv::Mat src_img(cv_ptr->image);
914 ROS_FATAL(
"intrinsic matrix is zero, your camera info looks invalid");
916 if ( !_initialized ) {
919 std::cerr <<
"initialize templates done" << std::endl;
// FLANN needs at least 2 points to build a meaningful index.
923 if ( sourceimg_keypoints.size () < 2 ) {
924 ROS_INFO (
"The number of keypoints in source image is less than 2");
// NOTE(review): raw `new` with no visible matching delete in this fragment —
// check the full source for a leak per callback.
928 cv::flann::Index *ft =
new cv::flann::Index(sourceimg_descriptors, cv::flann::KDTreeIndexParams(1));
931 ROS_INFO(
"_templates size: %d", (
int)_templates.size());
932 for (
int i = 0; i < (
int)_templates.size(); i++){
933 posedetection_msgs::Object6DPose o6p;
936 if (_templates.at(i)->estimate_od(_client, src_img, sourceimg_keypoints, pcam, _err_thr, debug_img, ft, &o6p))
// Republish the annotated stack image for debugging.
941 out_msg.
header = msg->image.header;
943 out_msg.
image = debug_img;
947 if (((
int)vo6p.size() != 0) || pnod) {
948 od.header.stamp = msg->image.header.stamp;
949 od.header.frame_id = msg->image.header.frame_id;
954 geometry_msgs::PoseStamped pose_msg;
955 pose_msg.header = od.header;
// Only the first detected object's pose is re-published as PoseStamped.
956 pose_msg.pose = od.objects[0].pose;
// Periodic keep-alive / housekeeping every 100 frames (body not visible).
975 if ( i++ % 100 == 0 ) {
// NOTE(review): fragment of the dynamic_reconfigure callback — the signature
// and threshold-propagation branches are partly missing. Kept byte-identical.
// Logs the requested template id/level, clamps an out-of-range template_id
// back to 0, reflects the selected template's frame id into the config, and
// syncs the error threshold between config and the node in both directions.
988 std::cout <<
"id = " << config.template_id << std::endl;
989 std::cout <<
"lvl = " << level << std::endl;
990 if((
int)_templates.size() <= config.template_id) {
992 config.template_id = 0;
993 if(_templates.size() != 0)
994 config.frame_id = _templates[0]->_matching_frame;
1001 _err_thr = config.error_threshold;
1007 config.error_threshold = _err_thr;
// NOTE(review): fragment of main() — node object construction, the callback
// binding, and ros::spin() are not visible in this extraction.
// Initializes ROS and wires a dynamic_reconfigure server for the
// point_pose_extractor config.
1018 ros::init (argc, argv,
"PointPoseExtractor");
1022 dynamic_reconfigure::Server<jsk_perception::point_pose_extractorConfig>
server;
1023 dynamic_reconfigure::Server<jsk_perception::point_pose_extractorConfig>::CallbackType
f;
1025 server.setCallback(f);
ServiceClient serviceClient(const std::string &service_name, bool persistent=false, const M_string &header_values=M_string())
const cv::Matx33d & intrinsicMatrix() const
static tf::Quaternion createQuaternionFromRPY(double roll, double pitch, double yaw)
tfScalar getAngle() const
void publish(const boost::shared_ptr< M > &message) const
TFSIMD_FORCE_INLINE const tfScalar & getX() const
virtual ~Matching_Template()
Subscriber subscribe(const std::string &topic, uint32_t queue_size, void(T::*fp)(M), T *obj, const TransportHints &transport_hints=TransportHints())
INLINE Rall1d< T, V, S > log(const Rall1d< T, V, S > &arg)
ROSCONSOLE_DECL void initialize()
def convert(filename_in, filename_out, ffmpeg_executable="ffmpeg")
ROSCPP_DECL void init(int &argc, char **argv, const std::string &name, uint32_t options=0)
Publisher advertise(const std::string &base_topic, uint32_t queue_size, bool latch=false)
bool call(MReq &req, MRes &res)
std::string get_window_name()
int _original_height_size
ServiceServer advertiseService(const std::string &service, bool(T::*srv_func)(MReq &, MRes &), T *obj)
Matching_Template(cv::Mat img, std::string matching_frame, int original_width_size, int original_height_size, double template_width, double template_height, tf::Transform relativepose, cv::Mat affine_matrix, double reprojection_threshold, double distanceratio_threshold, std::string window_name, bool autosize)
int set_template(ros::ServiceClient client)
TFSIMD_FORCE_INLINE const tfScalar & x() const
TFSIMD_FORCE_INLINE const tfScalar & z() const
double _reprojection_threshold
void publish(const sensor_msgs::Image &message) const
bool fromCameraInfo(const sensor_msgs::CameraInfo &msg)
bool param(const std::string ¶m_name, T ¶m_val, const T &default_val) const
std::vector< cv::Point2d > _correspondances
CvImagePtr toCvCopy(const sensor_msgs::ImageConstPtr &source, const std::string &encoding=std::string())
std::string _matching_frame
TFSIMD_FORCE_INLINE const tfScalar & y() const
Publisher advertise(const std::string &topic, uint32_t queue_size, bool latch=false)
#define ROS_WARN_STREAM(args)
bool getSearchPathFromEnv(std::vector< std::string > &sp)
pointer MAX(context *ctx, int n, argv)
void setEuler(const tfScalar &yaw, const tfScalar &pitch, const tfScalar &roll)
INLINE Rall1d< T, V, S > exp(const Rall1d< T, V, S > &arg)
INLINE Rall1d< T, V, S > pow(const Rall1d< T, V, S > &arg, double m)
tf::Transform _relativepose
uint32_t getNumSubscribers() const
#define ROS_INFO_STREAM(args)
cv::Mat _template_descriptors
void crawl(std::vector< std::string > search_path, bool force)
int min_inlier(int n, int m, double p_badsupp, double p_badxform)
cv::Mat _previous_stack_img
bool find(const std::string &name, std::string &path)
std::vector< cv::KeyPoint > _template_keypoints
cv::Point2d project3dToPixel(const cv::Point3d &xyz) const
bool estimate_od(ros::ServiceClient client, cv::Mat src_img, std::vector< cv::KeyPoint > sourceimg_keypoints, image_geometry::PinholeCameraModel pcam, double err_thr, cv::Mat &stack_img, cv::flann::Index *ft, posedetection_msgs::Object6DPose *o6p)
std::string getTopic() const
TFSIMD_FORCE_INLINE const tfScalar & getZ() const
double _distanceratio_threshold
ROSCPP_DECL void spinOnce()
void setRotation(const Vector3 &axis, const tfScalar &angle)
std::vector< cv::Point2d > * correspondances()
sensor_msgs::ImagePtr toImageMsg() const
TFSIMD_FORCE_INLINE const tfScalar & getY() const