#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iterator>
#include <set>
#include <cstdio>
#include <iostream>

#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>

#if CV_MAJOR_VERSION < 3

void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f);

std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
                                      int num_modalities, cv::Point offset, cv::Size size,
                                      cv::Mat& mask, cv::Mat& dst);

void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
                        int num_modalities, cv::Point offset, cv::Size size,
                        cv::Mat& dst);

void drawResponse(const std::vector<cv::linemod::Template>& templates,
                  int num_modalities, cv::Mat& dst, cv::Point offset, int T);

cv::Mat displayQuantized(const cv::Mat& quantized);

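// Caches the most recent HighGUI mouse event and cursor position for the window
// passed to start(); the image callback polls these to frame and learn templates.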
class Mouse
{
public:
  static void start(const std::string& a_img_name)
  {
    cvSetMouseCallback(a_img_name.c_str(), Mouse::cv_on_mouse, 0);
  }
  static int event(void)
  {
    int l_event = m_event;
    m_event = -1;
    return l_event;
  }
  static int x(void)
  {
    return m_x;
  }
  static int y(void)
  {
    return m_y;
  }

private:
  static void cv_on_mouse(int a_event, int a_x, int a_y, int, void *)
  {
    m_event = a_event;
    m_x = a_x;
    m_y = a_y;
  }

  static int m_event;
  static int m_x;
  static int m_y;
};
int Mouse::m_event;
int Mouse::m_x;
int Mouse::m_y;

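// Print usage and the interactive key bindings.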
static void help()
{
  printf("Usage: openni_demo [templates.yml]\n\n"
         "Place your object on a planar, featureless surface. With the mouse,\n"
         "frame it in the 'color' window and right click to learn a first template.\n"
         "Then press 'l' to enter online learning mode, and move the camera around.\n"
         "When the match score falls between 90-95%% the demo will add a new template.\n\n"
         "Keys:\n"
         "\t h -- This help page\n"
         "\t l -- Toggle online learning\n"
         "\t m -- Toggle printing match result\n"
         "\t t -- Toggle printing timings\n"
         "\t w -- Write learned templates to disk\n"
         "\t [ ] -- Adjust matching threshold: '[' down, ']' up\n"
         "\t q -- Quit\n\n");
}

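// Accumulating stopwatch based on cv::getTickCount(); time() returns the total
// in seconds and resets the counter.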
class Timer
{
public:
  Timer() : start_(0), time_(0) {}

  void start()
  {
    start_ = cv::getTickCount();
  }

  void stop()
  {
    CV_Assert(start_ != 0);
    int64 end = cv::getTickCount();
    time_ += end - start_;
    start_ = 0;
  }

  double time()
  {
    double ret = time_ / cv::getTickFrequency();
    time_ = 0;
    return ret;
  }

private:
  int64 start_, time_;
};

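// Read a detector plus its per-class templates back from a single YAML/XML file
// produced by writeLinemod() below.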
static cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
{
  cv::Ptr<cv::linemod::Detector> detector = new cv::linemod::Detector;
  cv::FileStorage fs(filename, cv::FileStorage::READ);
  detector->read(fs.root());

  cv::FileNode fn = fs["classes"];
  for (cv::FileNodeIterator i = fn.begin(), iend = fn.end(); i != iend; ++i)
    detector->readClass(*i);

  return detector;
}

static void writeLinemod(const cv::Ptr<cv::linemod::Detector>& detector, const std::string& filename)
{
  cv::FileStorage fs(filename, cv::FileStorage::WRITE);
  detector->write(fs);

  std::vector<std::string> ids = detector->classIds();
  fs << "classes" << "[";
  for (int i = 0; i < (int)ids.size(); ++i)
  {
    fs << "{";
    detector->writeClass(ids[i], fs);
    fs << "}";
  }
  fs << "]";
}

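// State shared between main() and the synchronized image callback.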
cv::Ptr<cv::linemod::Detector> detector;
std::string filename;
int num_classes = 0;
int matching_threshold = 80;

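// Synchronized RGB/depth callback: converts the ROS images with cv_bridge,
// learns templates from the mouse-selected ROI, runs LINE-MOD matching on the
// color+depth sources, and handles the keyboard commands listed in help().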
void callback(const sensor_msgs::Image::ConstPtr& rgb_image,
              const sensor_msgs::Image::ConstPtr& depth_image,
              const sensor_msgs::CameraInfo::ConstPtr& rgb_camera_info,
              const sensor_msgs::CameraInfo::ConstPtr& depth_camera_info)
{
  // Static so the 'm'/'t'/'l' key toggles in the switch below persist across callbacks.
  static bool show_match_result = true;
  static bool show_timings = false;
  static bool learn_online = false;

  cv::Size roi_size(200, 200);
  int learning_lower_bound = 90;
  int learning_upper_bound = 95;

  Timer extract_timer;
  Timer match_timer;
  int num_modalities = (int)detector->getModalities().size();
  cv_bridge::CvImagePtr cv_rgb = cv_bridge::toCvCopy(rgb_image, sensor_msgs::image_encodings::BGR8);
  cv::Mat color = cv_rgb->image;

  cv_bridge::CvImagePtr cv_depth = cv_bridge::toCvCopy(
    depth_image, sensor_msgs::image_encodings::TYPE_16UC1);
  cv::Mat depth = cv_depth->image;

  // fx from the depth camera intrinsic matrix K (row-major 3x3, so K[0] = fx)
  double focal_length = depth_camera_info->K[0];

  std::vector<cv::Mat> sources;
  sources.push_back(color);
  sources.push_back(depth);
  cv::Mat display = color.clone();

  if (!learn_online)
  {
    cv::Point mouse(Mouse::x(), Mouse::y());
    int event = Mouse::event();

    // Compute ROI centered on the current mouse location
    cv::Point roi_offset(roi_size.width / 2, roi_size.height / 2);
    cv::Point pt1 = mouse - roi_offset; // top left
    cv::Point pt2 = mouse + roi_offset; // bottom right

    if (event == CV_EVENT_RBUTTONDOWN)
    {
      // Compute object mask by subtracting the dominant plane inside the ROI
      std::vector<CvPoint> chain(4);
      chain[0] = pt1;
      chain[1] = cv::Point(pt2.x, pt1.y);
      chain[2] = pt2;
      chain[3] = cv::Point(pt1.x, pt2.y);
      cv::Mat mask;
      subtractPlane(depth, mask, chain, focal_length);

      cv::imshow("mask", mask);

      // Extract a first template for a new object class
      std::string class_id = cv::format("class%d", num_classes);
      cv::Rect bb;
      extract_timer.start();
      int template_id = detector->addTemplate(sources, class_id, mask, &bb);
      extract_timer.stop();
      if (template_id != -1)
      {
        printf("*** Added template (id %d) for new object class %d***\n",
               template_id, num_classes);
      }

      ++num_classes;
    }

    // Draw the ROI for display
    cv::rectangle(display, pt1, pt2, CV_RGB(0,0,0), 3);
    cv::rectangle(display, pt1, pt2, CV_RGB(255,255,0), 1);
  }

  // Perform matching on the color+depth sources
  std::vector<cv::linemod::Match> matches;
  std::vector<std::string> class_ids;
  std::vector<cv::Mat> quantized_images;
  match_timer.start();
  detector->match(sources, (float)matching_threshold, matches, class_ids, quantized_images);
  match_timer.stop();

  int classes_visited = 0;
  std::set<std::string> visited;

  for (int i = 0; (i < (int)matches.size()) && (classes_visited < num_classes); ++i)
  {
    cv::linemod::Match m = matches[i];

    // Only report the best match per class
    if (visited.insert(m.class_id).second)
    {
      ++classes_visited;

      if (show_match_result)
      {
        printf("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
               m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
      }

      // Draw matching template
      const std::vector<cv::linemod::Template>& templates = detector->getTemplates(m.class_id, m.template_id);
      drawResponse(templates, num_modalities, display, cv::Point(m.x, m.y), detector->getT(0));

      if (learn_online == true)
      {
        // Compute masks from the convex hull of the matched template
        cv::Mat color_mask, depth_mask;
        std::vector<CvPoint> chain = maskFromTemplate(templates, num_modalities,
                                                      cv::Point(m.x, m.y), color.size(),
                                                      color_mask, display);
        subtractPlane(depth, depth_mask, chain, focal_length);

        cv::imshow("mask", depth_mask);

        // Add a new template only for moderately good matches (see help())
        if (learning_lower_bound < m.similarity && m.similarity < learning_upper_bound)
        {
          extract_timer.start();
          int template_id = detector->addTemplate(sources, m.class_id, depth_mask);
          extract_timer.stop();
          if (template_id != -1)
          {
            printf("*** Added template (id %d) for existing object class %s***\n",
                   template_id, m.class_id.c_str());
          }
        }
      }
    }
  }

  if (show_match_result && matches.empty())
    printf("No matches found...\n");
  if (show_timings)
  {
    printf("Training: %.2fs\n", extract_timer.time());
    printf("Matching: %.2fs\n", match_timer.time());
  }
  if (show_match_result || show_timings)
    printf("------------------------------------------------------------\n");

  cv::imshow("color", display);
  cv::imshow("normals", quantized_images[1]);

  char key = (char)cvWaitKey(10);

  switch (key)
  {
    case 'h':
      help();
      break;
    case 'm':
      // toggle printing match result
      show_match_result = !show_match_result;
      printf("Show match result %s\n", show_match_result ? "ON" : "OFF");
      break;
    case 't':
      // toggle printing timings
      show_timings = !show_timings;
      printf("Show timings %s\n", show_timings ? "ON" : "OFF");
      break;
    case 'l':
      // toggle online learning
      learn_online = !learn_online;
      printf("Online learning %s\n", learn_online ? "ON" : "OFF");
      break;
    case '[':
      // decrement matching threshold
      matching_threshold = std::max(matching_threshold - 1, -100);
      printf("New threshold: %d\n", matching_threshold);
      break;
    case ']':
      // increment matching threshold
      matching_threshold = std::min(matching_threshold + 1, +100);
      printf("New threshold: %d\n", matching_threshold);
      break;
    case 'w':
      // write detector and templates to disk
      writeLinemod(detector, filename);
      printf("Wrote detector and templates to %s\n", filename.c_str());
      break;
    case 'q':
      // quit (listed in help()); shutting down ROS lets ros::spin() return
      ros::shutdown();
      break;
    default:
      ;
  }
}

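// Approximately synchronize the RGB image, the depth image, and their
// CameraInfo messages by timestamp.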
typedef message_filters::sync_policies::ApproximateTime<
  sensor_msgs::Image,
  sensor_msgs::Image,
  sensor_msgs::CameraInfo,
  sensor_msgs::CameraInfo
  > SyncPolicy;

#endif

int main(int argc, char** argv)
{
  ros::init(argc, argv, "linemod");
  ROS_ERROR("linemod has been moved to opencv_contrib in OpenCV3");
#if CV_MAJOR_VERSION < 3

  help();
  cv::namedWindow("color");
  cv::namedWindow("normals");
  Mouse::start("color");

  if (argc == 1)
  {
    filename = "linemod_templates.yml";
    detector = cv::linemod::getDefaultLINEMOD();
  }
  else
  {
    detector = readLinemod(argv[1]);
    filename = argv[1];  // so 'w' writes back to the loaded template file

    std::vector<std::string> ids = detector->classIds();
    num_classes = detector->numClasses();
    printf("Loaded %s with %d classes and %d templates\n",
           argv[1], num_classes, detector->numTemplates());
    if (!ids.empty())
    {
      printf("Class ids:\n");
      std::copy(ids.begin(), ids.end(), std::ostream_iterator<std::string>(std::cout, "\n"));
    }
  }

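  // Subscribe to the RGB/depth image and camera-info topics and deliver
  // approximately synchronized message sets to callback().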
  ros::NodeHandle nh;
  message_filters::Subscriber<sensor_msgs::Image> sub_rgb_image;
  message_filters::Subscriber<sensor_msgs::Image> sub_depth_image;
  message_filters::Subscriber<sensor_msgs::CameraInfo> sub_rgb_camera_info;
  message_filters::Subscriber<sensor_msgs::CameraInfo> sub_depth_camera_info;
  sub_rgb_image.subscribe(nh, "/camera/rgb/image_rect_color", 1);
  sub_depth_image.subscribe(nh, "/camera/depth/image_rect_raw", 1);
  sub_rgb_camera_info.subscribe(nh, "/camera/rgb/camera_info", 1);
  sub_depth_camera_info.subscribe(nh, "/camera/depth/camera_info", 1);
  boost::shared_ptr<message_filters::Synchronizer<SyncPolicy> > sync_
    = boost::make_shared<message_filters::Synchronizer<SyncPolicy> >(SyncPolicy(1000));
  sync_->connectInput(sub_rgb_image, sub_depth_image,
                      sub_rgb_camera_info, sub_depth_camera_info);
  sync_->registerCallback(callback);
#endif
  ros::spin();

  return 0;
}

#if CV_MAJOR_VERSION < 3
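// Back-project pixel coordinates with depth into 3D using focal length f;
// the principal point is hard-coded to (320, 240), i.e. a 640x480 image is assumed.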
static void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point3d>& real, double f)
{
  real.resize(proj.size());
  double f_inv = 1.0 / f;

  for (int i = 0; i < (int)proj.size(); ++i)
  {
    double Z = proj[i].z;
    real[i].x = (proj[i].x - 320.) * (f_inv * Z);
    real[i].y = (proj[i].y - 240.) * (f_inv * Z);
    real[i].z = Z;
  }
}

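// Estimate the dominant plane from depth samples taken along the ROI boundary
// chain (plane fit via SVD), then, for pixels inside the chain polygon, write
// 0 (close to the plane, i.e. background) or 255 (object) into each mask.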
static void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::vector<CvPoint> & a_chain, double f)
{
  const int l_num_cost_pts = 200;

  float l_thres = 4;

  IplImage * lp_mask = cvCreateImage(cvGetSize(ap_depth), IPL_DEPTH_8U, 1);
  cvSet(lp_mask, cvRealScalar(0));

  std::vector<CvPoint> l_chain_vector;

  float l_chain_length = 0;
  float * lp_seg_length = new float[a_chain.size()];

  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
  {
    float x_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x);
    float y_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y);
    lp_seg_length[l_i] = sqrt(x_diff*x_diff + y_diff*y_diff);
    l_chain_length += lp_seg_length[l_i];
  }
  // Distribute roughly l_num_cost_pts sample points along the chain, proportionally to segment length
  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
  {
    if (lp_seg_length[l_i] > 0)
    {
      int l_cur_num = cvRound(l_num_cost_pts * lp_seg_length[l_i] / l_chain_length);
      float l_cur_len = lp_seg_length[l_i] / l_cur_num;

      for (int l_j = 0; l_j < l_cur_num; ++l_j)
      {
        float l_ratio = (l_cur_len * l_j / lp_seg_length[l_i]);

        CvPoint l_pts;

        l_pts.x = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x) + a_chain[l_i].x);
        l_pts.y = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y) + a_chain[l_i].y);

        l_chain_vector.push_back(l_pts);
      }
    }
  }
  delete[] lp_seg_length;

  std::vector<cv::Point3d> lp_src_3Dpts(l_chain_vector.size());

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
  {
    lp_src_3Dpts[l_i].x = l_chain_vector[l_i].x;
    lp_src_3Dpts[l_i].y = l_chain_vector[l_i].y;
    lp_src_3Dpts[l_i].z = CV_IMAGE_ELEM(ap_depth, unsigned short, cvRound(lp_src_3Dpts[l_i].y), cvRound(lp_src_3Dpts[l_i].x));
  }

  reprojectPoints(lp_src_3Dpts, lp_src_3Dpts, f);

  // Fit a plane to the boundary points: the right singular vector for the smallest
  // singular value of [x y z 1] gives the plane coefficients
  CvMat * lp_pts = cvCreateMat((int)l_chain_vector.size(), 4, CV_32F);
  CvMat * lp_v = cvCreateMat(4, 4, CV_32F);
  CvMat * lp_w = cvCreateMat(4, 1, CV_32F);

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
  {
    CV_MAT_ELEM(*lp_pts, float, l_i, 0) = (float)lp_src_3Dpts[l_i].x;
    CV_MAT_ELEM(*lp_pts, float, l_i, 1) = (float)lp_src_3Dpts[l_i].y;
    CV_MAT_ELEM(*lp_pts, float, l_i, 2) = (float)lp_src_3Dpts[l_i].z;
    CV_MAT_ELEM(*lp_pts, float, l_i, 3) = 1.0f;
  }
  cvSVD(lp_pts, lp_w, 0, lp_v);

  float l_n[4] = {CV_MAT_ELEM(*lp_v, float, 0, 3),
                  CV_MAT_ELEM(*lp_v, float, 1, 3),
                  CV_MAT_ELEM(*lp_v, float, 2, 3),
                  CV_MAT_ELEM(*lp_v, float, 3, 3)};

  float l_norm = sqrt(l_n[0] * l_n[0] + l_n[1] * l_n[1] + l_n[2] * l_n[2]);

  l_n[0] /= l_norm;
  l_n[1] /= l_norm;
  l_n[2] /= l_norm;
  l_n[3] /= l_norm;

  float l_max_dist = 0;

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
  {
    float l_dist = l_n[0] * CV_MAT_ELEM(*lp_pts, float, l_i, 0) +
                   l_n[1] * CV_MAT_ELEM(*lp_pts, float, l_i, 1) +
                   l_n[2] * CV_MAT_ELEM(*lp_pts, float, l_i, 2) +
                   l_n[3] * CV_MAT_ELEM(*lp_pts, float, l_i, 3);

    // keep the largest absolute point-to-plane distance among the boundary samples
    if (fabs(l_dist) > l_max_dist)
      l_max_dist = fabs(l_dist);
  }

  int l_minx = ap_depth->width;
  int l_miny = ap_depth->height;
  int l_maxx = 0;
  int l_maxy = 0;

  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
  {
    l_minx = std::min(l_minx, a_chain[l_i].x);
    l_miny = std::min(l_miny, a_chain[l_i].y);
    l_maxx = std::max(l_maxx, a_chain[l_i].x);
    l_maxy = std::max(l_maxy, a_chain[l_i].y);
  }
  int l_w = l_maxx - l_minx + 1;
  int l_h = l_maxy - l_miny + 1;
  int l_nn = (int)a_chain.size();

  CvPoint * lp_chain = new CvPoint[l_nn];

  for (int l_i = 0; l_i < l_nn; ++l_i)
    lp_chain[l_i] = a_chain[l_i];

  // Rasterize the chain polygon; only pixels inside it are classified below
  cvFillPoly(lp_mask, &lp_chain, &l_nn, 1, cvScalar(255, 255, 255));

  delete[] lp_chain;

  std::vector<cv::Point3d> lp_dst_3Dpts(l_h * l_w);

  int l_ind = 0;

  for (int l_r = 0; l_r < l_h; ++l_r)
  {
    for (int l_c = 0; l_c < l_w; ++l_c)
    {
      lp_dst_3Dpts[l_ind].x = l_c + l_minx;
      lp_dst_3Dpts[l_ind].y = l_r + l_miny;
      lp_dst_3Dpts[l_ind].z = CV_IMAGE_ELEM(ap_depth, unsigned short, l_r + l_miny, l_c + l_minx);
      ++l_ind;
    }
  }
  reprojectPoints(lp_dst_3Dpts, lp_dst_3Dpts, f);

  l_ind = 0;

  for (int l_r = 0; l_r < l_h; ++l_r)
  {
    for (int l_c = 0; l_c < l_w; ++l_c)
    {
      float l_dist = (float)(l_n[0] * lp_dst_3Dpts[l_ind].x + l_n[1] * lp_dst_3Dpts[l_ind].y + lp_dst_3Dpts[l_ind].z * l_n[2] + l_n[3]);

      ++l_ind;

      if (CV_IMAGE_ELEM(lp_mask, unsigned char, l_r + l_miny, l_c + l_minx) != 0)
      {
        // Close to the plane: background (0); otherwise: object (255)
        if (fabs(l_dist) < std::max(l_thres, (l_max_dist * 2.0f)))
        {
          for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
          {
            int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
            int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));

            CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 0;
          }
        }
        else
        {
          for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
          {
            int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
            int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));

            CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 255;
          }
        }
      }
    }
  }
  cvReleaseImage(&lp_mask);
  cvReleaseMat(&lp_pts);
  cvReleaseMat(&lp_w);
  cvReleaseMat(&lp_v);
}

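// Convenience wrapper around filterPlane(): allocates a CV_8U mask matching the
// depth image and fills it with the object (non-plane) pixels inside the chain.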
void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f)
{
  mask = cv::Mat::zeros(depth.size(), CV_8U);
  std::vector<IplImage*> tmp;
  IplImage mask_ipl = mask;
  tmp.push_back(&mask_ipl);
  IplImage depth_ipl = depth;
  filterPlane(&depth_ipl, tmp, chain, f);
}

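// Build a dilated convex-hull mask of the matched template's features, draw the
// resulting contour(s) on dst, and return the contour points as a chain for
// subtractPlane().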
std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
                                      int num_modalities, cv::Point offset, cv::Size size,
                                      cv::Mat& mask, cv::Mat& dst)
{
  templateConvexHull(templates, num_modalities, offset, size, mask);

  const int OFFSET = 30;
  cv::dilate(mask, mask, cv::Mat(), cv::Point(-1,-1), OFFSET);

  CvMemStorage * lp_storage = cvCreateMemStorage(0);
  CvTreeNodeIterator l_iterator;
  CvSeqReader l_reader;
  CvSeq * lp_contour = 0;

  cv::Mat mask_copy = mask.clone();
  IplImage mask_copy_ipl = mask_copy;
  cvFindContours(&mask_copy_ipl, lp_storage, &lp_contour, sizeof(CvContour),
                 CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

  std::vector<CvPoint> l_pts1;

  cvInitTreeNodeIterator(&l_iterator, lp_contour, 1);
  while ((lp_contour = (CvSeq *)cvNextTreeNode(&l_iterator)) != 0)
  {
    CvPoint l_pt0;
    cvStartReadSeq(lp_contour, &l_reader, 0);
    CV_READ_SEQ_ELEM(l_pt0, l_reader);
    l_pts1.push_back(l_pt0);

    for (int i = 0; i < lp_contour->total; ++i)
    {
      CvPoint l_pt1;
      CV_READ_SEQ_ELEM(l_pt1, l_reader);
      cv::line(dst, l_pt0, l_pt1, CV_RGB(0, 255, 0), 2);

      l_pt0 = l_pt1;
      l_pts1.push_back(l_pt0);
    }
  }
  cvReleaseMemStorage(&lp_storage);

  return l_pts1;
}

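// Map the quantized orientation codes (single-bit values, plus the 0 and 255
// specials) to gray levels for visualization; unexpected values show up green.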
cv::Mat displayQuantized(const cv::Mat& quantized)
{
  cv::Mat color(quantized.size(), CV_8UC3);
  for (int r = 0; r < quantized.rows; ++r)
  {
    const uchar* quant_r = quantized.ptr(r);
    cv::Vec3b* color_r = color.ptr<cv::Vec3b>(r);

    for (int c = 0; c < quantized.cols; ++c)
    {
      cv::Vec3b& bgr = color_r[c];
      switch (quant_r[c])
      {
        case 0:   bgr[0] =   0; bgr[1] =   0; bgr[2] =   0; break;
        case 1:   bgr[0] =  55; bgr[1] =  55; bgr[2] =  55; break;
        case 2:   bgr[0] =  80; bgr[1] =  80; bgr[2] =  80; break;
        case 4:   bgr[0] = 105; bgr[1] = 105; bgr[2] = 105; break;
        case 8:   bgr[0] = 130; bgr[1] = 130; bgr[2] = 130; break;
        case 16:  bgr[0] = 155; bgr[1] = 155; bgr[2] = 155; break;
        case 32:  bgr[0] = 180; bgr[1] = 180; bgr[2] = 180; break;
        case 64:  bgr[0] = 205; bgr[1] = 205; bgr[2] = 205; break;
        case 128: bgr[0] = 230; bgr[1] = 230; bgr[2] = 230; break;
        case 255: bgr[0] =   0; bgr[1] =   0; bgr[2] = 255; break;
        default:  bgr[0] =   0; bgr[1] = 255; bgr[2] =   0; break;
      }
    }
  }

  return color;
}

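// Rasterize the convex hull of all modality features (shifted by offset) into a
// binary mask of the given size.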
void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
                        int num_modalities, cv::Point offset, cv::Size size,
                        cv::Mat& dst)
{
  std::vector<cv::Point> points;
  for (int m = 0; m < num_modalities; ++m)
  {
    for (int i = 0; i < (int)templates[m].features.size(); ++i)
    {
      cv::linemod::Feature f = templates[m].features[i];
      points.push_back(cv::Point(f.x, f.y) + offset);
    }
  }

  std::vector<cv::Point> hull;
  cv::convexHull(points, hull);

  dst = cv::Mat::zeros(size, CV_8U);
  const int hull_count = (int)hull.size();
  const cv::Point* hull_pts = &hull[0];
  cv::fillPoly(dst, &hull_pts, &hull_count, 1, cv::Scalar(255));
}

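// Draw each modality's features as circles of radius T/2, one color per modality.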
void drawResponse(const std::vector<cv::linemod::Template>& templates,
                  int num_modalities, cv::Mat& dst, cv::Point offset, int T)
{
  static const cv::Scalar COLORS[5] = { CV_RGB(0, 0, 255),
                                        CV_RGB(0, 255, 0),
                                        CV_RGB(255, 255, 0),
                                        CV_RGB(255, 140, 0),
                                        CV_RGB(255, 0, 0) };

  for (int m = 0; m < num_modalities; ++m)
  {
    cv::Scalar color = COLORS[m];

    for (int i = 0; i < (int)templates[m].features.size(); ++i)
    {
      cv::linemod::Feature f = templates[m].features[i];
      cv::Point pt(f.x + offset.x, f.y + offset.y);
      cv::circle(dst, pt, T / 2, color);
    }
  }
}
#endif