#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>
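
// LINE-MOD lives in opencv2/objdetect in OpenCV 2.x and was moved to
// opencv_contrib in OpenCV 3, so everything below is guarded on CV_MAJOR_VERSION.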
#if CV_MAJOR_VERSION < 3

void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f);

std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
                                      int num_modalities, cv::Point offset, cv::Size size,
                                      cv::Mat& mask, cv::Mat& dst);
 
                        int num_modalities, cv::Point offset, cv::Size size,

void drawResponse(const std::vector<cv::linemod::Template>& templates,
                  int num_modalities, cv::Mat& dst, cv::Point offset, int T);

  static void start(const std::string& a_img_name)

  static void cv_on_mouse(int a_event, int a_x, int a_y, int, void *)
 
  printf("Usage: openni_demo [templates.yml]\n\n"
         "Place your object on a planar, featureless surface. With the mouse,\n"
         "frame it in the 'color' window and right click to learn a first template.\n"
         "Then press 'l' to enter online learning mode, and move the camera around.\n"
         "When the match score falls between 90%% and 95%%, the demo will add a new template.\n\n"
         "\t h   -- This help page\n"
         "\t l   -- Toggle online learning\n"
         "\t m   -- Toggle printing match result\n"
         "\t t   -- Toggle printing timings\n"
         "\t w   -- Write learned templates to disk\n"
         "\t [ ] -- Adjust matching threshold: '[' down,  ']' up\n"
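
// The timing helpers below (extract_timer / match_timer) wrap
// cv::getTickCount()/cv::getTickFrequency() to report elapsed seconds for
// template extraction and matching.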
      start_ = cv::getTickCount();

      int64 end = cv::getTickCount();

      double ret = time_ / cv::getTickFrequency();
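
// Loading / saving the detector: the classes and their templates are
// serialized through cv::FileStorage (cf. the templates.yml file in the usage text).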
 
  cv::Ptr<cv::linemod::Detector> detector = new cv::linemod::Detector;
  cv::FileStorage fs(filename, cv::FileStorage::READ);

  cv::FileNode fn = fs["classes"];
  for (cv::FileNodeIterator i = fn.begin(), iend = fn.end(); i != iend; ++i)

  cv::FileStorage fs(filename, cv::FileStorage::WRITE);

  std::vector<std::string> ids = detector->classIds();
  fs << "classes" << "[";
  for (int i = 0; i < (int)ids.size(); ++i)
 
void callback(const sensor_msgs::Image::ConstPtr& rgb_image,
              const sensor_msgs::Image::ConstPtr& depth_image,
              const sensor_msgs::CameraInfo::ConstPtr& rgb_camera_info,
              const sensor_msgs::CameraInfo::ConstPtr& depth_camera_info)
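  // Invoked with a time-synchronized RGB frame, depth frame, and their two
  // CameraInfo messages (see the message_filters::Synchronizer setup in main()).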
 
  bool show_match_result = true;
  bool show_timings = false;
  bool learn_online = false;

  cv::Size roi_size(200, 200);
  int learning_lower_bound = 90;
  int learning_upper_bound = 95;
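  // A new template is learned online only while the best match similarity lies
  // within [learning_lower_bound, learning_upper_bound], the 90-95% band
  // mentioned in the usage text.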
 
  int num_modalities = (int)detector->getModalities().size();

  cv::Mat color = cv_rgb->image;

  cv::Mat depth = cv_depth->image;
 
  double focal_length = depth_camera_info->K[0];
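  // K[0] is fx of the row-major 3x3 intrinsic matrix in sensor_msgs/CameraInfo;
  // it is used as the focal length when depth pixels are reprojected to 3D.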
 
  std::vector<cv::Mat> sources;
  sources.push_back(color);
  sources.push_back(depth);
  cv::Mat display = color.clone();

    cv::Point roi_offset(roi_size.width / 2, roi_size.height / 2);
    cv::Point pt1 = mouse - roi_offset;
    cv::Point pt2 = mouse + roi_offset;

    if (event == CV_EVENT_RBUTTONDOWN)
 
      std::vector<CvPoint> chain(4);

      chain[1] = cv::Point(pt2.x, pt1.y);

      chain[3] = cv::Point(pt1.x, pt2.y);
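      // The four ROI corners; subtractPlane() fits a plane to the depth values
      // inside this polygon and removes it from the learned object mask.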
 
      cv::imshow("mask", mask);

      std::string class_id = cv::format("class%d", num_classes);

      extract_timer.start();
      int template_id = detector->addTemplate(sources, class_id, mask, &bb);
      extract_timer.stop();
      if (template_id != -1)

        printf("*** Added template (id %d) for new object class %d***\n",
 
    cv::rectangle(display, pt1, pt2, CV_RGB(0,0,0), 3);
    cv::rectangle(display, pt1, pt2, CV_RGB(255,255,0), 1);
 
  std::vector<cv::linemod::Match> matches;
  std::vector<std::string> class_ids;
  std::vector<cv::Mat> quantized_images;

  int classes_visited = 0;
  std::set<std::string> visited;
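  // At most one match per class is processed below: 'visited' records the
  // class ids already handled for the current frame.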
 
  for (int i = 0; (i < (int)matches.size()) && (classes_visited < num_classes); ++i)

    cv::linemod::Match m = matches[i];

    if (visited.insert(m.class_id).second)

      if (show_match_result)

        printf("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
               m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);

      const std::vector<cv::linemod::Template>& templates = detector->getTemplates(m.class_id, m.template_id);
 
      if (learn_online == true)

        cv::Mat color_mask, depth_mask;

                                                      cv::Point(m.x, m.y), color.size(),

        cv::imshow("mask", depth_mask);

        if (learning_lower_bound < m.similarity && m.similarity < learning_upper_bound)

          extract_timer.start();
          int template_id = detector->addTemplate(sources, m.class_id, depth_mask);
          extract_timer.stop();
          if (template_id != -1)

            printf("*** Added template (id %d) for existing object class %s***\n",
                   template_id, m.class_id.c_str());
 
  if (show_match_result && matches.empty())
    printf("No matches found...\n");

    printf("Training: %.2fs\n", extract_timer.time());
    printf("Matching: %.2fs\n", match_timer.time());

  if (show_match_result || show_timings)
    printf("------------------------------------------------------------\n");

  cv::imshow("normals", quantized_images[1]);
 
  char key = (char)cvWaitKey(10);

    show_match_result = !show_match_result;
    printf("Show match result %s\n", show_match_result ? "ON" : "OFF");

    show_timings = !show_timings;
    printf("Show timings %s\n", show_timings ? "ON" : "OFF");

    learn_online = !learn_online;
    printf("Online learning %s\n", learn_online ? "ON" : "OFF");

    printf("Wrote detector and templates to %s\n", filename.c_str());
 
  sensor_msgs::CameraInfo,
  sensor_msgs::CameraInfo

int main(int argc, char** argv)

  ROS_ERROR("linemod has been moved to opencv_contrib in OpenCV3");
#if CV_MAJOR_VERSION < 3

  cv::namedWindow("color");
  cv::namedWindow("normals");
 
    detector = cv::linemod::getDefaultLINEMOD();

    std::vector<std::string> ids = detector->classIds();

    printf("Loaded %s with %d classes and %d templates\n",

      printf("Class ids:\n");
      std::copy(ids.begin(), ids.end(), std::ostream_iterator<std::string>(std::cout, "\n"));
 
  sub_rgb_image.subscribe(nh, "/camera/rgb/image_rect_color", 1);
  sub_depth_image.subscribe(nh, "/camera/depth/image_rect_raw", 1);
  sub_rgb_camera_info.subscribe(nh, "/camera/rgb/camera_info", 1);
  sub_depth_camera_info.subscribe(nh, "/camera/depth/camera_info", 1);

    = boost::make_shared<message_filters::Synchronizer<SyncPolicy> >(1000);
  sync_->connectInput(sub_rgb_image, sub_depth_image,
                      sub_rgb_camera_info, sub_depth_camera_info);
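  // The two image topics and two camera_info topics are time-synchronized
  // (queue size 1000) and delivered together to callback().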
 
#if CV_MAJOR_VERSION < 3
static void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point3d>& real, double f)

  real.resize(proj.size());
  double f_inv = 1.0 / f;
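  // Pinhole back-projection with the principal point assumed at (320, 240):
  //   X = (u - 320) * Z / f,  Y = (v - 240) * Z / f,  Z unchanged.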
 
  for (int i = 0; i < (int)proj.size(); ++i)

    double Z = proj[i].z;
    real[i].x = (proj[i].x - 320.) * (f_inv * Z);
    real[i].y = (proj[i].y - 240.) * (f_inv * Z);
 
static void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::vector<CvPoint> & a_chain, double f)

  const int l_num_cost_pts = 200;

  IplImage * lp_mask = cvCreateImage(cvGetSize(ap_depth), IPL_DEPTH_8U, 1);
  cvSet(lp_mask, cvRealScalar(0));

  std::vector<CvPoint> l_chain_vector;

  float l_chain_length = 0;
  float * lp_seg_length = new float[a_chain.size()];
 
  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)

    float x_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x);
    float y_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y);
    lp_seg_length[l_i] = sqrt(x_diff*x_diff + y_diff*y_diff);
    l_chain_length += lp_seg_length[l_i];
 
  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)

    if (lp_seg_length[l_i] > 0)

      int l_cur_num = cvRound(l_num_cost_pts * lp_seg_length[l_i] / l_chain_length);
      float l_cur_len = lp_seg_length[l_i] / l_cur_num;

      for (int l_j = 0; l_j < l_cur_num; ++l_j)

        float l_ratio = (l_cur_len * l_j / lp_seg_length[l_i]);

        l_pts.x = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x) + a_chain[l_i].x);
        l_pts.y = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y) + a_chain[l_i].y);

        l_chain_vector.push_back(l_pts);
 
  std::vector<cv::Point3d> lp_src_3Dpts(l_chain_vector.size());

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)

    lp_src_3Dpts[l_i].x = l_chain_vector[l_i].x;
    lp_src_3Dpts[l_i].y = l_chain_vector[l_i].y;
    lp_src_3Dpts[l_i].z = CV_IMAGE_ELEM(ap_depth, unsigned short, cvRound(lp_src_3Dpts[l_i].y), cvRound(lp_src_3Dpts[l_i].x));
 
  CvMat * lp_pts = cvCreateMat((int)l_chain_vector.size(), 4, CV_32F);
  CvMat * lp_v = cvCreateMat(4, 4, CV_32F);
  CvMat * lp_w = cvCreateMat(4, 1, CV_32F);

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)

    CV_MAT_ELEM(*lp_pts, float, l_i, 0) = (float)lp_src_3Dpts[l_i].x;
    CV_MAT_ELEM(*lp_pts, float, l_i, 1) = (float)lp_src_3Dpts[l_i].y;
    CV_MAT_ELEM(*lp_pts, float, l_i, 2) = (float)lp_src_3Dpts[l_i].z;
    CV_MAT_ELEM(*lp_pts, float, l_i, 3) = 1.0f;
 
  cvSVD(lp_pts, lp_w, 0, lp_v);

  float l_n[4] = {CV_MAT_ELEM(*lp_v, float, 0, 3),
                  CV_MAT_ELEM(*lp_v, float, 1, 3),
                  CV_MAT_ELEM(*lp_v, float, 2, 3),
                  CV_MAT_ELEM(*lp_v, float, 3, 3)};

  float l_norm = sqrt(l_n[0] * l_n[0] + l_n[1] * l_n[1] + l_n[2] * l_n[2]);
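  // The last column of V (singular vector for the smallest singular value) is
  // the total-least-squares plane [a b c d] through the homogeneous boundary
  // points; l_norm = |(a, b, c)| lets the plane be normalized so the products
  // below are metric point-to-plane distances.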
 
  float l_max_dist = 0;

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)

    float l_dist = l_n[0] * CV_MAT_ELEM(*lp_pts, float, l_i, 0) +
      l_n[1] * CV_MAT_ELEM(*lp_pts, float, l_i, 1) +
      l_n[2] * CV_MAT_ELEM(*lp_pts, float, l_i, 2) +
      l_n[3] * CV_MAT_ELEM(*lp_pts, float, l_i, 3);

    if (fabs(l_dist) > l_max_dist)
 
  int l_minx = ap_depth->width;
  int l_miny = ap_depth->height;

  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)

    l_minx = std::min(l_minx, a_chain[l_i].x);
    l_miny = std::min(l_miny, a_chain[l_i].y);
    l_maxx = std::max(l_maxx, a_chain[l_i].x);
    l_maxy = std::max(l_maxy, a_chain[l_i].y);

  int l_w = l_maxx - l_minx + 1;
  int l_h = l_maxy - l_miny + 1;
  int l_nn = (int)a_chain.size();

  CvPoint * lp_chain = new CvPoint[l_nn];

  for (int l_i = 0; l_i < l_nn; ++l_i)
    lp_chain[l_i] = a_chain[l_i];
 
  cvFillPoly(lp_mask, &lp_chain, &l_nn, 1, cvScalar(255, 255, 255));
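  // Rasterize the sampled boundary polygon into lp_mask; only pixels inside it
  // are tested against the fitted plane below.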
 
  std::vector<cv::Point3d> lp_dst_3Dpts(l_h * l_w);

  for (int l_r = 0; l_r < l_h; ++l_r)

    for (int l_c = 0; l_c < l_w; ++l_c)

      lp_dst_3Dpts[l_ind].x = l_c + l_minx;
      lp_dst_3Dpts[l_ind].y = l_r + l_miny;
      lp_dst_3Dpts[l_ind].z = CV_IMAGE_ELEM(ap_depth, unsigned short, l_r + l_miny, l_c + l_minx);
 
  for (int l_r = 0; l_r < l_h; ++l_r)

    for (int l_c = 0; l_c < l_w; ++l_c)

      float l_dist = (float)(l_n[0] * lp_dst_3Dpts[l_ind].x + l_n[1] * lp_dst_3Dpts[l_ind].y + lp_dst_3Dpts[l_ind].z * l_n[2] + l_n[3]);

      if (CV_IMAGE_ELEM(lp_mask, unsigned char, l_r + l_miny, l_c + l_minx) != 0)

        if (fabs(l_dist) < std::max(l_thres, (l_max_dist * 2.0f)))

          for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)

            int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
            int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));

            CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 0;

          for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)

            int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
            int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));

            CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 255;
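            // Pixels close to the fitted plane are cleared (0) in every mask,
            // the rest of the polygon interior is kept (255); the (l_p + 1.0)
            // divisor presumably rescales coordinates for successively smaller
            // mask levels.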
 
  cvReleaseImage(&lp_mask);
  cvReleaseMat(&lp_pts);

void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f)
 
  mask = cv::Mat::zeros(depth.size(), CV_8U);
  std::vector<IplImage*> tmp;
  IplImage mask_ipl = mask;
  tmp.push_back(&mask_ipl);
  IplImage depth_ipl = depth;
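  // The IplImage variables are header-only views of the cv::Mat data (no copy),
  // required by the legacy C-API plane-filtering code above, which works on IplImage*.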
 
                                      int num_modalities, cv::Point offset, cv::Size size,
                                      cv::Mat& mask, cv::Mat& dst)

  cv::dilate(mask, mask, cv::Mat(), cv::Point(-1,-1), OFFSET);

  CvMemStorage * lp_storage = cvCreateMemStorage(0);
  CvTreeNodeIterator l_iterator;
  CvSeqReader l_reader;
  CvSeq * lp_contour = 0;

  cv::Mat mask_copy = mask.clone();
  IplImage mask_copy_ipl = mask_copy;
  cvFindContours(&mask_copy_ipl, lp_storage, &lp_contour, sizeof(CvContour),
                 CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
 
  std::vector<CvPoint> l_pts1;

  cvInitTreeNodeIterator(&l_iterator, lp_contour, 1);
  while ((lp_contour = (CvSeq *)cvNextTreeNode(&l_iterator)) != 0)

    cvStartReadSeq(lp_contour, &l_reader, 0);
    CV_READ_SEQ_ELEM(l_pt0, l_reader);
    l_pts1.push_back(l_pt0);

    for (int i = 0; i < lp_contour->total; ++i)

      CV_READ_SEQ_ELEM(l_pt1, l_reader);

      cv::line(dst, l_pt0, l_pt1, CV_RGB(0, 255, 0), 2);

      l_pts1.push_back(l_pt0);

  cvReleaseMemStorage(&lp_storage);
 
  cv::Mat color(quantized.size(), CV_8UC3);
  for (int r = 0; r < quantized.rows; ++r)

    const uchar* quant_r = quantized.ptr(r);
    cv::Vec3b* color_r = color.ptr<cv::Vec3b>(r);

    for (int c = 0; c < quantized.cols; ++c)

      cv::Vec3b& bgr = color_r[c];
 
      case 0:   bgr[0]=  0; bgr[1]=  0; bgr[2]=  0;    break;
      case 1:   bgr[0]= 55; bgr[1]= 55; bgr[2]= 55;    break;
      case 2:   bgr[0]= 80; bgr[1]= 80; bgr[2]= 80;    break;
      case 4:   bgr[0]=105; bgr[1]=105; bgr[2]=105;    break;
      case 8:   bgr[0]=130; bgr[1]=130; bgr[2]=130;    break;
      case 16:  bgr[0]=155; bgr[1]=155; bgr[2]=155;    break;
      case 32:  bgr[0]=180; bgr[1]=180; bgr[2]=180;    break;
      case 64:  bgr[0]=205; bgr[1]=205; bgr[2]=205;    break;
      case 128: bgr[0]=230; bgr[1]=230; bgr[2]=230;    break;
      case 255: bgr[0]=  0; bgr[1]=  0; bgr[2]=255;    break;
      default:  bgr[0]=  0; bgr[1]=255; bgr[2]=  0;    break;
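      // Each single-bit value (1, 2, 4, ..., 128) is one of the 8 quantized
      // orientation labels and gets its own gray level; 0 means no response,
      // 255 is shown in red and any other (unexpected) value in green.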
 
                        int num_modalities, cv::Point offset, cv::Size size,

  std::vector<cv::Point> points;
  for (int m = 0; m < num_modalities; ++m)

    for (int i = 0; i < (int)templates[m].features.size(); ++i)

      cv::linemod::Feature f = templates[m].features[i];
      points.push_back(cv::Point(f.x, f.y) + offset);

  std::vector<cv::Point> hull;
  cv::convexHull(points, hull);
 
  dst = cv::Mat::zeros(size, CV_8U);
  const int hull_count = (int)hull.size();
  const cv::Point* hull_pts = &hull[0];
  cv::fillPoly(dst, &hull_pts, &hull_count, 1, cv::Scalar(255));
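  // The filled convex hull of all template features acts as a coarse object
  // mask at the given offset.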
 
                  int num_modalities, cv::Mat& dst, cv::Point offset, int T)

  static const cv::Scalar COLORS[5] = { CV_RGB(0, 0, 255),

  for (int m = 0; m < num_modalities; ++m)

    cv::Scalar color = COLORS[m];

    for (int i = 0; i < (int)templates[m].features.size(); ++i)

      cv::linemod::Feature f = templates[m].features[i];