#include <FaceFind.h>
#include <Global.h>

#include <cv_bridge/CvBridge.h>
#include <opencv2/highgui/highgui.hpp>

#include <pcl/io/pcd_io.h>
#include <pcl_ros/transforms.h>
#include <sensor_msgs/point_cloud_conversion.h>
#include <sensor_msgs/image_encodings.h>
#include <pr2_msgs/SetPeriodicCmd.h>

#include <actionlib/client/simple_action_client.h>
#include <pr2_controllers_msgs/PointHeadAction.h>
#include <cv_bridge/cv_bridge.h>

typedef actionlib::SimpleActionClient<pr2_controllers_msgs::PointHeadAction> PointHeadClient;

// Pixel colors in BGR channel order, matching the bgr8-encoded camera images.
typedef cv::Vec<uchar, 3> Vec3b;
cv::Vec3b const BLACK = Vec3b(0, 0, 0);
cv::Vec3b const WHITE = Vec3b(255, 255, 255);
cv::Vec3b const BLUE = Vec3b(255, 0, 0);
cv::Vec3b const GREEN = Vec3b(0, 255, 0);

FaceFind::FaceFind() {
  facesQueue_ = std::deque<FaceWrapper>();

  model_ = image_geometry::PinholeCameraModel();
  prosilicamodel_ = image_geometry::PinholeCameraModel();

  headBusy_ = false;
  mouseClicked_ = 0;
  mirror_ = false;
  mirroredMaskExists_ = false;
  drawingCorners_ = 0;

  lastCameraImage_ = ros::Time(0);
}

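// Callback for images from the high-resolution Prosilica camera: converts the
// ROS image to an OpenCV matrix and keeps a copy for later photo taking.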
void FaceFind::ProsilicaCBAll(const sensor_msgs::Image::ConstPtr &image) {
  try {
    cv::Mat cv_image_ = cv_bridge::toCvCopy(image,
        sensor_msgs::image_encodings::BGR8)->image;

    storedProsilicaImage_ = cv_image_.clone();
  } catch (...) {
    ROS_ERROR("error in prosilica image receiving, image ignored");
    return;
  }
}

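// Callback for images from the wide_stereo camera: stores the newest frame,
// draws a box around every tracked face, triggers StartPhoto() on a mouse
// click inside a box, and forwards the annotated image to the GUI.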
void FaceFind::ImageCBAll(const sensor_msgs::Image::ConstPtr &limage) {
  try {
    cv::Mat cv_image_left_ = cv_bridge::toCvCopy(limage,
        sensor_msgs::image_encodings::BGR8)->image;
    try {
      // Only keep the image if it is not older than the last one we stored.
      if (limage->header.stamp - lastCameraImage_ >= ros::Duration(0)) {
        storedImage_ = cv_image_left_.clone();
        lastCameraImage_ = limage->header.stamp;
      }
    } catch (...) {
      return;
    }
  } catch (...) {
    ROS_ERROR("error in wide_stereo image receiving, ignoring image");
    return;
  }

  // Draw a box around every face that is currently tracked.
  for (uint index = 0; index < facesQueue_.size(); ++index) {
    ros::Time begin = ros::Time::now();
    ros::Duration age = begin - facesQueue_[index].GetTime();

    geometry_msgs::Point p = facesQueue_[index].GetPosition();
    cv::Point3d frame_pos = cv::Point3d(p.x, p.y, p.z);

    // Drop detections that are older than two seconds while the head is idle.
    if (age.sec >= 2 && !headBusy_) {
      facesQueue_.erase(facesQueue_.begin() + index);
      index--;
    } else {
      cv::Point2d pixel_pos;
      pixel_pos = model_.project3dToPixel(frame_pos);

      // Box size shrinks with the distance to the face.
      int sizex = 35 / frame_pos.z * 1.6;
      int sizey = 45 / frame_pos.z * 1.6;

      CvScalar edgeColor;
      if (CheckFaceBox((int) pixel_pos.x - sizex,
          (int) pixel_pos.y - sizey, (int) pixel_pos.x + sizex,
          (int) pixel_pos.y + sizey)) {
        edgeColor = CV_RGB(0, 0, 150);

        if (mouseClicked_ != 0) {
          StartPhoto(index);
        }
      } else
        edgeColor = CV_RGB(0, 0, 0);

      cv::rectangle(
          storedImage_,
          cv::Point((int) pixel_pos.x - sizex,
              (int) pixel_pos.y - sizey),
          cv::Point((int) pixel_pos.x + sizex,
              (int) pixel_pos.y + sizey), edgeColor, 2);
    }
  }

  Q_EMIT newCameraImage(cvMat2QImage(storedImage_, 0));
}

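// Callback for face detections: appends each reported position to the queue
// of faces that ImageCBAll() visualizes.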
void FaceFind::FacePosCBAll(
    const people_msgs::PositionMeasurement::ConstPtr &position) {
  facesQueue_.push_back(FaceWrapper(position->pos, position->header.stamp));
}

void FaceFind::CameraModelCBAll(
    const sensor_msgs::CameraInfoConstPtr& cam_info) {
  model_.fromCameraInfo(cam_info);
}

void FaceFind::ProsilicaCameraModelCBAll(
    const sensor_msgs::CameraInfoConstPtr& cam_info) {
  prosilicamodel_.fromCameraInfo(cam_info);
}

bool FaceFind::StatusMessageCBAll(
    portrait_robot_msgs::alubsc_status_msg::Request &req,
    portrait_robot_msgs::alubsc_status_msg::Response &res) {
  Q_EMIT newStatusMessage((int) req.ID, QString(req.message.c_str()));

  return true;
}

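// Loads a previously saved photo and mask from disk, resizes them to the
// portrait dimensions and pushes them to the GUI.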
void FaceFind::PhotoFromFile(const char* filename1, const char* filename2) {
  storedPhoto_ = cv::imread(filename1, 1);
  cv::Mat resizedPhoto, resizedMask;
  cv::resize(storedPhoto_, resizedPhoto, cv::Size(PORTRAIT_X, PORTRAIT_Y));

  if (mirror_) {
    storedMirroredMaskImage_ = cv::imread(filename2, 1);
    cv::resize(storedMirroredMaskImage_, resizedMask,
        cv::Size(PORTRAIT_X, PORTRAIT_Y));
  } else {
    storedMaskImage_ = cv::imread(filename2, 1);
    cv::resize(storedMaskImage_, resizedMask,
        cv::Size(PORTRAIT_X, PORTRAIT_Y));
  }

  Q_EMIT newPhoto(cvMat2QImage(resizedPhoto, 1));
  Q_EMIT newMask(cvMat2QImage(resizedMask, 2));
  Q_EMIT activateButton(2);
}

void FaceFind::EdgeImageFromFile(const char* filename) {
  edgeImage_ = cv::imread(filename, 1);
  cv::Mat resizedImage;
  cv::resize(edgeImage_, resizedImage, cv::Size(PORTRAIT_X, PORTRAIT_Y));
  Q_EMIT newMask(cvMat2QImage(resizedImage, 3));
  Q_EMIT activateButton(3);
}

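// Service callback that receives the edge image computed by the contour
// detector node, converts it with the old CvBridge API, shows it in the GUI
// and writes it to disk as last_edge_image.png.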
bool FaceFind::EdgeImageCBAll(
    portrait_robot_msgs::alubsc_node_instr::Request &req,
    portrait_robot_msgs::alubsc_node_instr::Response &res) {
  if (req.images.size() > 0) {
    sensor_msgs::Image& image = req.images.at(0);

    try {
      sensor_msgs::CvBridge newbridge;

      if (!newbridge.fromImage(image, "bgr8"))
        throw sensor_msgs::CvBridgeException(
            "Conversion to OpenCV image failed");
      else {
        edgeImage_ = cv::Mat(newbridge.toIpl(), true);
        cv::Mat resizedImage;
        cv::resize(edgeImage_, resizedImage,
            cv::Size(PORTRAIT_X, PORTRAIT_Y));
        Q_EMIT newMask(cvMat2QImage(resizedImage, 3));
        Q_EMIT activateButton(3);
        cv::imwrite("last_edge_image.png", edgeImage_);
        res.success = true;
        return true;
      }
    } catch (...) {
      ROS_ERROR("ERROR: Could not convert the edge image for receiving.");
      Q_EMIT setGUIStatus(
          "ERROR: Could not convert the edge image for receiving.");
      res.success = false;
      return false;
    }
  } else {
    res.success = false;
    return false;
  }
}

void FaceFind::PenButton() {
  portrait_robot_msgs::alubsc_node_instr srv;
  srv.request.abort = false;

  if (pen_gripper_client.exists()) {
    pen_gripper_client.call(srv);
    Q_EMIT activateButton(1);
  } else
    Q_EMIT newStatusMessage(1,
        QString("<font color=red>Client is not ready.</font>"));
}

void FaceFind::EdgeButton() {
  portrait_robot_msgs::alubsc_node_instr srv;
  srv.request.abort = false;
  std::vector<sensor_msgs::Image> imageArray(2);
  IplImage image1 = storedPhoto_;
  IplImage image2;
  if (mirror_)
    image2 = storedMirroredMaskImage_;
  else
    image2 = storedMaskImage_;

  // Keep copies on disk so they can be reloaded later with PhotoFromFile().
  cv::imwrite("last_photo.png", cv::Mat(&image1));
  cv::imwrite("last_mask.png", cv::Mat(&image2));
  try {
    sensor_msgs::CvBridge newbridge;

    // Convert photo and mask to ROS image messages and send them to the
    // contour detector node.
    imageArray.at(0) = *(newbridge.cvToImgMsg(&image1));
    imageArray.at(1) = *(newbridge.cvToImgMsg(&image2));

    srv.request.images = imageArray;

    if (contour_detector_client.exists()) {
      contour_detector_client.call(srv);
    } else
      Q_EMIT newStatusMessage(2,
          QString("<font color=red>Client is not ready.</font>"));
  } catch (...) {
    ROS_ERROR("ERROR: Could not convert the photo and mask image.");
    Q_EMIT setGUIStatus("ERROR: Could not convert the photo and mask image.");
    return;
  }
}

void FaceFind::CornerButton() {
  portrait_robot_msgs::alubsc_node_instr srv;
  srv.request.abort = false;

  // The corner node is called once per corner; after the fourth call the
  // counter is reset and the paint button is enabled.
  switch (drawingCorners_) {
  case 0:
  case 1:
  case 2:
    if (corner_client.exists()) {
      corner_client.call(srv);
      ++drawingCorners_;
    } else
      Q_EMIT newStatusMessage(3,
          QString("<font color=red>Client is not ready.</font>"));
    break;

  case 3:
    if (corner_client.exists()) {
      corner_client.call(srv);
      drawingCorners_ = 0;
      Q_EMIT newStatusMessage(
          3,
          QString(
              "<font color=red>Please make sure that the robot has a pen before you start painting.</font>"));
      Q_EMIT activateButton(4);
    } else
      Q_EMIT newStatusMessage(3,
          QString("<font color=red>Client is not ready.</font>"));
    break;

  default:
    Q_EMIT setGUIStatus(QString("ERROR: Wrong parameter!"));
    break;
  }
}

void FaceFind::PaintButton() {
  portrait_robot_msgs::alubsc_node_instr srv;
  srv.request.abort = false;

  if (drawingCorners_ == 0) {
    std::vector<sensor_msgs::Image> imageArray(1);
    IplImage image1 = edgeImage_;
    try {
      sensor_msgs::CvBridge newbridge;

      imageArray.at(0) = *(newbridge.cvToImgMsg(&image1));

      srv.request.images = imageArray;

      if (painter_client.exists()) {
        painter_client.call(srv);
      } else
        Q_EMIT newStatusMessage(3,
            QString("<font color=red>Client is not ready.</font>"));
    } catch (...) {
      ROS_ERROR("ERROR: Could not convert the edge image for sending.");
      Q_EMIT setGUIStatus("ERROR: Could not convert edge image for sending.");
      return;
    }
  } else {
    Q_EMIT setGUIStatus(QString("ERROR: Wrong parameter!"));
  }
}

void FaceFind::HeadButton() {
  Q_EMIT setGUIInfo("Turning the head.");

  pr2_controllers_msgs::PointHeadGoal goal;

  // Point the head at a fixed spot in front of the robot.
  geometry_msgs::PointStamped point;
  point.header.frame_id = "/torso_lift_link";
  point.point.x = 2;
  point.point.y = 0;
  point.point.z = 0.5;
  goal.target = point;

  goal.pointing_frame = "high_def_optical_frame";

  goal.min_duration = ros::Duration(0.5);
  goal.max_velocity = 1.0;

  PointHeadClient* point_head_client_ = new PointHeadClient(
      "/head_traj_controller/point_head_action", true);
  // Block until the point_head action server is available.
  while (!point_head_client_->waitForServer(ros::Duration(5.0))) {
  }

  point_head_client_->sendGoal(goal);
  point_head_client_->waitForResult(ros::Duration(2));

  delete point_head_client_;
}

void FaceFind::AbortButton() {
  portrait_robot_msgs::alubsc_node_instr srv;
  srv.request.abort = true;

  if (contour_detector_client.exists())
    contour_detector_client.call(srv);

  if (painter_client.exists())
    painter_client.call(srv);
  drawingCorners_ = 0;

  if (corner_client.exists())
    corner_client.call(srv);

  Q_EMIT setGUIStatus(QString("Aborted the application."));
}

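// Reacts to a mouse click on a face box: a click value of 1 adjusts the
// tilting laser and turns the head towards the face, a value of 2 takes the
// photo. The head is marked busy while either action runs.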
void FaceFind::StartPhoto(uint index) {
  // Consume the click so the callback does not trigger twice.
  int myclicked = mouseClicked_;
  mouseClicked_ = 0;

  if (facesQueue_.size() == 0) {
    Q_EMIT setGUIStatus("There was no face found. Please try again.");
    return;
  } else if (index >= facesQueue_.size()) {
    Q_EMIT setGUIStatus(
        "The face is not available any more. Please try again.");
    return;
  }

  if (!headBusy_) {
    headBusy_ = true;

    if (myclicked == 1) {
      cv::Point2d pixel_pos;
      geometry_msgs::Point p = facesQueue_[index].GetPosition();
      cv::Point3d frame_pos = cv::Point3d(p.x, p.y, p.z);

      AdjustLaser(p);
      TurnHead(frame_pos);
    } else if (myclicked == 2) {
      TakePhoto(index);
    }

    headBusy_ = false;
  }
}

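// Takes the portrait photo of the selected face: transforms the face position
// into the camera frame, crops a region of interest around it, assembles a
// laser point cloud and projects it into the image to build a foreground
// mask, then hands photo and mask to the GUI.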
void FaceFind::TakePhoto(uint index) {
  // Use the high-resolution Prosilica camera instead of the wide_stereo one.
  bool prosilica = true;

  cv::Mat currentImage;
  cv::Point2d pixel_pos;
  geometry_msgs::Point p = facesQueue_[index].GetPosition();
  cv::Point3d frame_pos = cv::Point3d(p.x, p.y, p.z);

  tf::TransformListener tf;

  // The face position is reported in the wide_stereo frame; transform it into
  // the Prosilica frame before projecting it.
  if (prosilica) {
    try {
      geometry_msgs::PointStamped ps;
      geometry_msgs::PointStamped ps2;
      ps.header.frame_id = "wide_stereo_optical_frame";
      ps.point = p;
      ros::Time time = ros::Time(0);
      tf.waitForTransform("high_def_optical_frame",
          "/wide_stereo_optical_frame", time, ros::Duration(1.5));
      tf.transformPoint("high_def_optical_frame", time, ps, "base_link",
          ps2);
      frame_pos = cv::Point3d(ps2.point.x, ps2.point.y, ps2.point.z);
    } catch (...) {
      ROS_ERROR("error transforming");
      Q_EMIT setGUIStatus("* Error in the transformation! *");
      return;
    }
  }

  if (prosilica) {
    pixel_pos = prosilicamodel_.project3dToPixel(frame_pos);
  } else {
    pixel_pos = model_.project3dToPixel(frame_pos);
  }
  currentImage = (
      prosilica ? storedProsilicaImage_.clone() : storedImage_.clone());

  // Size of the face region, scaled with distance and camera resolution.
  int sizex = 35 / facesQueue_[index].GetPosition().z * (prosilica ? 9 : 1.6);
  int sizey = 45 / facesQueue_[index].GetPosition().z * (prosilica ? 9 : 1.6);

  CvRect roi;
  roi.x = pixel_pos.x - sizex;
  roi.y = pixel_pos.y - (sizey * 1.2);
  roi.width = 2 * sizex;
  roi.height = 2 * sizey;

  pcl::PointCloud<pcl::PointXYZ> storedPointCloud2;
  pcl::PointCloud<pcl::PointXYZ> newPointCloud;

  try {
    // Ask the laser assembler for the scans of the last five seconds.
    sensor_msgs::PointCloud2 cloud2_msg;
    laser_assembler::AssembleScans srv;
    srv.request.begin = ros::Time::now() - ros::Duration(5, 0);
    srv.request.end = ros::Time::now();

    if (laserscanner_client.call(srv)) {
      sensor_msgs::convertPointCloudToPointCloud2(srv.response.cloud,
          cloud2_msg);
      pcl::fromROSMsg(cloud2_msg, storedPointCloud2);
    } else {
      ROS_ERROR("error pointcloud");
      Q_EMIT setGUIStatus("* Error in the pointcloud! *");
    }

    // Transform the assembled cloud into the camera frame.
    tf::StampedTransform transform;
    try {
      ros::Time time = ros::Time::now();

      if (prosilica) {
        tf.waitForTransform("/high_def_optical_frame", "/base_link",
            time, ros::Duration(0.5));
        tf.lookupTransform("/high_def_optical_frame", "/base_link",
            time, transform);
      } else {
        tf.waitForTransform("/wide_stereo_optical_frame", "/base_link",
            time, ros::Duration(0.5));
        tf.lookupTransform("/wide_stereo_optical_frame", "/base_link",
            time, transform);
      }
    } catch (tf::TransformException ex) {
      ROS_ERROR("%s", ex.what());
      Q_EMIT setGUIStatus(tr(ex.what()));
      return;
    } catch (...) {
      ROS_ERROR("error in transform acquisition");
      Q_EMIT setGUIStatus("* Error in the transform acquisition! *");
      return;
    }

    try {
      pcl_ros::transformPointCloud(storedPointCloud2, newPointCloud,
          transform);
    } catch (...) {
      ROS_ERROR("error in transforming");
      Q_EMIT setGUIStatus("* Error in the pointcloud transformation! *");
      return;
    }
  } catch (cv::Exception& e) {
    ROS_ERROR("%s", e.what());
    Q_EMIT setGUIStatus(tr(e.what()));
    return;
  } catch (...) {
    ROS_ERROR("real error in transforming");
    Q_EMIT setGUIStatus(
        "* Error (big one) in the pointcloud transformation! *");
  }

  if (roi.x < 0 || roi.y < 0 || roi.x + roi.width >= currentImage.cols
      || roi.y + roi.height >= currentImage.rows) {
    Q_EMIT setGUIStatus("* The image values breach the border! *");
  } else {
    storedPhoto_ = currentImage(roi);

    // Start with a completely white mask and mark every laser point that
    // falls into the face region at roughly the same depth as the face.
    storedMaskImage_ = storedPhoto_.clone();
    storedMaskImage_ = CV_RGB(255, 255, 255);

    std::vector<pcl::PointXYZ, Eigen::aligned_allocator<pcl::PointXYZ> >::iterator iter;
    for (iter = newPointCloud.begin(); iter != newPointCloud.end();
        ++iter) {
      try {
        cv::Point2d pixel;
        pcl::PointXYZ p = *iter;
        cv::Point3d point = cv::Point3d(p.x, p.y, p.z);

        if (prosilica) {
          pixel = prosilicamodel_.project3dToPixel(point);
        } else {
          pixel = model_.project3dToPixel(point);
        }

        // Skip points that project outside the region of interest.
        if (pixel.x < roi.x + 2 || pixel.x >= roi.x + roi.width - 2
            || pixel.y < roi.y + 2
            || pixel.y >= roi.y + roi.height - 2) {
          continue;
        } else {
          // Only points within +/- 0.5 m of the face depth count as
          // foreground.
          if (point.z >= frame_pos.z - 0.5
              && point.z < frame_pos.z + 0.5) {
            cv::Point p = cv::Point(pixel.x - roi.x,
                pixel.y - roi.y);

            cv::line(storedMaskImage_, p, p, CV_RGB(0, 0, 0));
          }
        }
      } catch (...) {
        ROS_ERROR("ERROR: error in pointcloud stuff");
        Q_EMIT setGUIStatus("* Error in pointcloud! *");
        return;
      }
    }

    MakeMask(storedMaskImage_);
    mirroredMaskExists_ = false;

    cv::Mat resizedPhoto, resizedMask;

    cv::resize(storedPhoto_, resizedPhoto,
        cv::Size(PORTRAIT_X, PORTRAIT_Y));

    if (mirror_) {
      MirrorPointCloud();

      cv::resize(storedMirroredMaskImage_, resizedMask,
          cv::Size(PORTRAIT_X, PORTRAIT_Y));
    } else {
      cv::resize(storedMaskImage_, resizedMask,
          cv::Size(PORTRAIT_X, PORTRAIT_Y));
    }

    Q_EMIT newPhoto(cvMat2QImage(resizedPhoto, 1));
    Q_EMIT newMask(cvMat2QImage(resizedMask, 2));
    Q_EMIT setGUIInfo("The photo was successfully taken.");
    Q_EMIT activateButton(2);
  }
}

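// Points the head (and with it the Prosilica camera) at the given face
// position via the point_head action.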
void FaceFind::TurnHead(cv::Point3d facecenter) {
  Q_EMIT setGUIInfo("Turning the head.");

  pr2_controllers_msgs::PointHeadGoal goal;

  geometry_msgs::PointStamped point;
  point.header.frame_id = "/wide_stereo_optical_frame";
  point.point.x = facecenter.x;
  point.point.y = facecenter.y;
  point.point.z = facecenter.z;
  goal.target = point;

  goal.pointing_frame = "high_def_optical_frame";

  goal.min_duration = ros::Duration(0.5);
  goal.max_velocity = 1.0;

  PointHeadClient* point_head_client_ = new PointHeadClient(
      "/head_traj_controller/point_head_action", true);
  // Block until the point_head action server is available.
  while (!point_head_client_->waitForServer(ros::Duration(5.0))) {
  }

  point_head_client_->sendGoal(goal);
  point_head_client_->waitForResult(ros::Duration(2));

  delete point_head_client_;
}

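// Turns the sparse set of projected laser points into a filled binary mask:
// every pixel close to a black (laser) point is marked, white pixels that are
// mostly surrounded by marked pixels are filled in by Cluster(), and finally
// all marked pixels are painted black so the result is a black-on-white mask.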
void FaceFind::MakeMask(cv::Mat& maskImage) {
  cv::Mat_<cv::Vec3b>& copy = (cv::Mat_<cv::Vec3b>&) maskImage;

  for (int x = 0; x < copy.cols; ++x) {
    for (int y = 0; y < copy.rows; ++y) {
      bool found = false;
      int blobsize = 8 * copy.cols / 350;

      // Look for a black laser point within a circle of radius blobsize.
      for (int deltax = -blobsize; deltax < blobsize; ++deltax) {
        if (found)
          break;

        for (int deltay = -blobsize; deltay < blobsize; ++deltay) {
          if (found)
            break;

          if (deltax * deltax + deltay * deltay > blobsize * blobsize)
            continue;

          if (x + deltax < 0 || x + deltax >= copy.cols
              || y + deltay < 0 || y + deltay >= copy.rows)
            continue;
          if (copy(y + deltay, x + deltax) == BLACK)
            found = true;
        }
      }

      // Mark pixels near a laser point blue (black pixels stay black).
      if (found)
        copy(y, x) = (copy(y, x) == BLACK ? BLACK : BLUE);
    }
  }

  // Fill remaining white holes, scanning from all four corners.
  Cluster(copy, 0, copy.cols, 1, 0, copy.rows, 1);
  Cluster(copy, copy.cols - 1, -1, -1, 0, copy.rows, 1);
  Cluster(copy, 0, copy.cols, 1, copy.rows - 1, -1, -1);
  Cluster(copy, copy.cols - 1, -1, -1, copy.rows - 1, -1, -1);

  // Everything that was marked becomes black, the rest stays white.
  for (int x = 0; x < copy.cols; ++x) {
    for (int y = 0; y < copy.rows; ++y) {
      if (copy(y, x) != WHITE)
        copy(y, x) = BLACK;
    }
  }
}

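// Scans the mask in the given direction and marks every white pixel green if
// at least 70% of its circular neighborhood is already non-white, closing
// small white holes inside the foreground region.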
void FaceFind::Cluster(cv::Mat_<cv::Vec3b>& copy, int n_x, int border_x,
    int inc_x, int n_y, int border_y, int inc_y) {
  for (int x = n_x; x != border_x; x += inc_x) {
    for (int y = n_y; y != border_y; y += inc_y) {
      if (copy(y, x) != WHITE)
        continue;

      int found = 0;
      int maximum = 0;
      int blobsize = 10 * copy.cols / 350;

      // Count non-white pixels within a circle of radius blobsize.
      for (int deltax = -blobsize; deltax < blobsize; ++deltax) {
        for (int deltay = -blobsize; deltay < blobsize; ++deltay) {
          if (deltax * deltax + deltay * deltay > blobsize * blobsize)
            continue;

          if (x + deltax < 0 || x + deltax >= copy.cols
              || y + deltay < 0 || y + deltay >= copy.rows)
            continue;

          ++maximum;
          if (copy(y + deltay, x + deltax) != WHITE)
            ++found;
        }
      }
      if (found >= 0.7 * maximum)
        copy(y, x) = GREEN;
    }
  }
}

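// Reconfigures the tilting laser scanner so it sweeps a narrow band around
// the detected face: the sweep is first halted, the face position is
// transformed into the laser frame, and a new periodic command with a small
// amplitude centered on the face is sent.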
void FaceFind::AdjustLaser(geometry_msgs::Point p) {
  Q_EMIT setGUIInfo("Adjusting the laser.");

  // Halt the tilting laser first (zero-amplitude sweep).
  pr2_msgs::SetPeriodicCmd srv;
  pr2_msgs::PeriodicCmd pc;
  pc.profile = "linear";
  pc.period = 0.5;
  pc.amplitude = 0.;
  pc.offset = 0.;
  srv.request.command = pc;

  try {
    if (lasertilt_client.call(srv)) {
    } else {
      ROS_ERROR(
          (laserscanner_client.exists()?"but scanner client exists":"and scanner client doesn't exist"));
    }
  } catch (...) {
    ROS_ERROR("error halting laser scanner");
    Q_EMIT setGUIStatus("* Error in the laser scanner (1)! *");
    return;
  }

  // Transform the face position into the laser frame.
  cv::Point3d pos;

  try {
    tf::TransformListener tf;
    geometry_msgs::PointStamped ps;
    geometry_msgs::PointStamped ps2;
    ps.header.frame_id = "wide_stereo_optical_frame";
    ps.point = p;
    ros::Time time = ros::Time(0);
    tf.waitForTransform("/laser_tilt_mount_link",
        "/wide_stereo_optical_frame", time, ros::Duration(1.5));
    tf.transformPoint("/laser_tilt_mount_link", time, ps, "base_link", ps2);
    pos = cv::Point3d(ps2.point.x, ps2.point.y, ps2.point.z);
  } catch (...) {
    ROS_ERROR("error transforming");
    Q_EMIT setGUIStatus("* Error in the laser scanner transformation! *");
    return;
  }

  // z/x is the tangent of the face elevation; since acos(t) is close to
  // pi/2 - t for small t, the offset below ends up near -z/x.
  float tangens_alpha = pos.z / pos.x;
  float alpha = acos(tangens_alpha);
  float amplitude;
  if (pos.x < 1) {
    amplitude = 0.2;
  } else {
    amplitude = 0.2 / pos.x;
  }

  // Sweep slowly with a small amplitude centered on the face.
  pc.profile = "linear";
  pc.period = 5;
  pc.amplitude = amplitude;
  pc.offset = -(1.57 - alpha);
  srv.request.command = pc;
  try {
    if (lasertilt_client.call(srv)) {
    } else {
      ROS_ERROR(
          (laserscanner_client.exists()?"but scanner client exists2":"and scanner client doesn't exist2"));
    }
  } catch (...) {
    ROS_ERROR("error starting laser scanner");
    Q_EMIT setGUIStatus("* Error in the laser scanner (2)! *");
    return;
  }
}

void FaceFind::ReceiveMouseEvent(int x, int y, int clicked) {
  lastMousePosition_ = cv::Point(x, y);

  mouseClicked_ = clicked;
}

bool FaceFind::CheckFaceBox(int x1, int y1, int x2, int y2) {
  return (x1 < lastMousePosition_.x && y1 < lastMousePosition_.y
      && x2 > lastMousePosition_.x && y2 > lastMousePosition_.y);
}

void FaceFind::Mirror() {
  mirror_ = !mirror_;

  if (storedMaskImage_.total() == 0)
    return;

  cv::Mat newResizedMaskImage;

  if (mirror_) {
    MirrorPointCloud();

    cv::resize(storedMirroredMaskImage_, newResizedMaskImage,
        cv::Size(PORTRAIT_X, PORTRAIT_Y));
  } else {
    cv::resize(storedMaskImage_, newResizedMaskImage,
        cv::Size(PORTRAIT_X, PORTRAIT_Y));
  }

  Q_EMIT newMask(cvMat2QImage(newResizedMaskImage, 2));
}

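// Builds the mirrored mask on demand: the mask is cloned and every black
// pixel is additionally painted at its horizontally mirrored position, making
// the stored mirrored mask symmetric.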
void FaceFind::MirrorPointCloud() {
  if (storedMaskImage_.total() == 0)
    return;

  if (!mirroredMaskExists_) {
    cv::Mat_<cv::Vec3b>& copy = (cv::Mat_<cv::Vec3b>&) storedMaskImage_;
    storedMirroredMaskImage_ = storedMaskImage_.clone();
    cv::Point p;

    for (int x = 0; x < storedMirroredMaskImage_.cols; ++x) {
      for (int y = 0; y < storedMirroredMaskImage_.rows; ++y) {
        if ((copy(y, x) == BLACK)
            && (copy(y, storedMaskImage_.cols - x - 1) != BLACK)) {
          p = cv::Point(storedMirroredMaskImage_.cols - x - 1, y);
          cv::line(storedMirroredMaskImage_, p, p, CV_RGB(0, 0, 0));
        }
      }
    }

    mirroredMaskExists_ = true;
  }
}

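// Converts a BGR cv::Mat into a QImage. The image is expanded into a
// persistent BGRA buffer (one per idx) so the returned QImage can keep
// referencing the pixel data after this function returns.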
QImage FaceFind::cvMat2QImage(const cv::Mat& image, unsigned int idx) {
  try {
    if (rgbaBuffer_.size() <= idx) {
      rgbaBuffer_.resize(idx + 1);
    }

    if (image.rows != rgbaBuffer_[idx].rows
        || image.cols != rgbaBuffer_[idx].cols) {
      rgbaBuffer_[idx] = cv::Mat(image.rows, image.cols, CV_8UC4);
    }
    cv::Mat alpha(image.rows, image.cols, CV_8UC1, cv::Scalar(255));
    cv::Mat in[] = { image, alpha };

    // Copy B, G and R and put the constant alpha plane (source channel 3)
    // into the fourth channel of the buffer.
    int from_to[] = { 0, 0, 1, 1, 2, 2, 3, 3 };
    mixChannels(in, 2, &rgbaBuffer_[idx], 1, from_to, 4);
  } catch (...) {
    ROS_ERROR("error in cvMat2QImage");
    return QImage();
  }

  return QImage((unsigned char *) (rgbaBuffer_[idx].data),
      rgbaBuffer_[idx].cols, rgbaBuffer_[idx].rows, rgbaBuffer_[idx].step,
      QImage::Format_RGB32);
}