ar_confirm.cpp
Go to the documentation of this file.
00001 #include <ros/ros.h>
00002 
00003 #include <tf/transform_listener.h>
00004 #include <pcl/point_cloud.h>
00005 #include <pcl/point_types.h>
00006 #include <pcl_ros/point_cloud.h>
00007 #include <pcl_ros/transforms.h>
00008 #include <cv_bridge/cv_bridge.h>
00009 #include <opencv/cv.h>
00010 #include <sensor_msgs/image_encodings.h>
00011 #include <image_transport/image_transport.h>
00012 #include <image_geometry/pinhole_camera_model.h>
00013 #include <geometry_msgs/PoseStamped.h>
00014 
00015 #include <ar_pose/ARMarker.h>
00016 
// Shorthand aliases for a colored PCL point cloud. Not referenced elsewhere
// in this file; presumably shared convention with sibling nodes -- TODO confirm.
typedef pcl::PointXYZRGB PRGB;
typedef pcl::PointCloud<PRGB> PCRGB;

// Most recent left/right AR-tag detections and the wall-clock time (seconds)
// each was last received; doOverlay() uses the timestamps to skip stale tags.
ar_pose::ARMarker::Ptr ar_tag_l, ar_tag_r;
ros::Subscriber ar_tag_l_sub, ar_tag_r_sub;
double last_l_time, last_r_time;

// Camera input, overlay output, camera intrinsics model, TF listener, and the
// working image buffer shared between doOverlay() and writeTag().
image_transport::CameraSubscriber camera_sub;
image_transport::Publisher overlay_pub;
image_geometry::PinholeCameraModel cam_model;
boost::shared_ptr<tf::TransformListener> tf_list;
cv_bridge::CvImagePtr cv_img;
00029 
00030 void subARTagLCallback(const ar_pose::ARMarker::Ptr& ar_tag_)
00031 {
00032     ar_tag_l = ar_tag_;
00033     last_l_time = ros::Time::now().toSec();
00034 }
00035 
00036 void subARTagRCallback(const ar_pose::ARMarker::Ptr& ar_tag_)
00037 {
00038     ar_tag_r = ar_tag_;
00039     last_r_time = ros::Time::now().toSec();
00040 }
00041 
00042 void writeTag(const ar_pose::ARMarker::Ptr& ar_tag, const sensor_msgs::ImageConstPtr& img_msg,
00043               const cv::Scalar& color)
00044 {
00045     if(!tf_list->waitForTransform(img_msg->header.frame_id, ar_tag->header.frame_id,
00046                                  img_msg->header.stamp, ros::Duration(3)))
00047         return;
00048     geometry_msgs::PoseStamped ar_tag_pose, ar_tag_tf;
00049     ar_tag_pose.header = ar_tag->header;
00050     ar_tag_pose.pose = ar_tag->pose.pose;
00051     tf_list->transformPose(img_msg->header.frame_id, ar_tag_pose, ar_tag_tf);
00052 
00053     double marker_width;
00054     ros::param::param<double>("~marker_width", marker_width, 0.135);
00055     const cv::Point** cv_poly_list = new const cv::Point*[4];
00056     std::vector<cv::Point> cv_pts;
00057     tf::Pose tf_pose;
00058     tf::poseMsgToTF(ar_tag_tf.pose, tf_pose);
00059     for(int i=0;i<2;i++) {
00060         for(int j=0;j<2;j++) {
00061             btVector4 tag_pt;
00062             tag_pt.setX(i * marker_width - marker_width/2.);
00063             tag_pt.setY(j * marker_width - marker_width/2.);
00064             tag_pt.setZ(0);
00065             tag_pt.setW(1);
00066             tf::Vector3 tag_pt_rot = tf_pose * tag_pt;
00067             cv::Point3d proj_pt_cv(tag_pt_rot.getX(), tag_pt_rot.getY(), tag_pt_rot.getZ());
00068             cv_pts.push_back(cam_model.project3dToPixel(proj_pt_cv));
00069         }
00070     }
00071     cv::Point* tag_pts = new cv::Point[4];
00072     tag_pts[0] = cv_pts[0]; tag_pts[1] = cv_pts[1]; 
00073     tag_pts[2] = cv_pts[3]; tag_pts[3] = cv_pts[2]; 
00074     cv_poly_list[0] = tag_pts;
00075     int npts[1] = {4};
00076     cv::polylines(cv_img->image, cv_poly_list, npts, 1, 1, color, 4);
00077 }
00078 
00079 void doOverlay(const sensor_msgs::ImageConstPtr& img_msg,
00080                const sensor_msgs::CameraInfoConstPtr& info_msg) {
00081 
00082     // convert camera image into opencv
00083     cam_model.fromCameraInfo(info_msg);
00084     cv_img = cv_bridge::toCvCopy(img_msg, sensor_msgs::image_encodings::RGB8);
00085 
00086     double timeout_time;
00087     ros::param::param<double>("~timeout_time", timeout_time, 1.0);
00088 
00089     double cur_time = ros::Time::now().toSec();
00090     if(ar_tag_l && cur_time - last_l_time < timeout_time) {
00091         cv::Scalar color = CV_RGB(255, 0, 0);
00092         writeTag(ar_tag_l, img_msg, color);
00093     }
00094     if(ar_tag_r && cur_time - last_r_time < timeout_time) {
00095         cv::Scalar color = CV_RGB(0, 255, 0);
00096         writeTag(ar_tag_r, img_msg, color);
00097     }
00098     
00099     overlay_pub.publish(cv_img->toImageMsg());
00100 }
00101 
00102 int main(int argc, char **argv)
00103 {
00104     ros::init(argc, argv, "head_tracking");
00105     ros::NodeHandle nh;
00106     image_transport::ImageTransport img_trans(nh);
00107     tf_list = boost::shared_ptr<tf::TransformListener>(new tf::TransformListener());
00108 
00109     ar_tag_l_sub = nh.subscribe("/ar_tag_l", 1, &subARTagLCallback);
00110     ar_tag_r_sub = nh.subscribe("/ar_tag_r", 1, &subARTagRCallback);
00111     camera_sub = img_trans.subscribeCamera("/camera", 1, 
00112                                            &doOverlay);
00113     overlay_pub = img_trans.advertise("/confirmation", 1);
00114 
00115 #if 0
00116     ros::Rate r(5);
00117     while(ros::ok()) {
00118         cv::Mat image(480, 640, CV_8UC3);
00119         ros::spinOnce();
00120         r.sleep();
00121     }
00122 #else
00123     ros::spin();
00124 #endif
00125 
00126     return 0;
00127 }


hrl_pr2_ar_servo
Author(s): Kelsey Hawkins, Advisor: Prof. Charlie Kemp, Lab: Healthcare Robotics Lab
autogenerated on Wed Nov 27 2013 11:43:43