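// vision_detect_outlet: actionlib server that detects an outlet in images from
// the camera named in the goal and estimates its pose, seeded by the prior pose
// and wall normal supplied with the goal.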
#include <ros/ros.h>
#include <actionlib/server/simple_action_server.h>
#include <pr2_plugs_msgs/VisionOutletDetectionAction.h>

#include <cv_bridge/CvBridge.h>
#include <image_transport/image_transport.h>
#include <image_geometry/pinhole_camera_model.h>
#include <tf/transform_listener.h>
#include <tf/transform_broadcaster.h>

#include <outlet_pose_estimation/detector.h>
#include <outlet_pose_estimation/estimator.h>

using namespace outlet_pose_estimation;
using namespace visual_pose_estimation;

bool initFromParameters(Detector& detector, PoseEstimator& estimator)
{
  // Read all the parameters
  ros::NodeHandle local_nh("~");
  bool have_all_required = true;

  std::string outlet_template_path;
  if (!local_nh.getParam("outlet_template", outlet_template_path)) {
    ROS_ERROR("Outlet template [~outlet_template] unspecified");
    have_all_required = false;
  }

  if (!have_all_required) return false;

  // Initialize detector
  if (!detector.loadTemplate(outlet_template_path)) {
    ROS_ERROR("Could not load outlet template from file %s", outlet_template_path.c_str());
    return false;
  }

  // Initialize pose estimator with outlet object model
  estimator = createOutletEstimator(detector.getTemplate());

  return true;
}

class DetectOutletAction
{
public:
  DetectOutletAction(const std::string& name)
    : it_(nh_),
      as_(nh_, name),
      action_name_(name),
      update_transformed_prior_(true)
  {
    // Load configuration
    if (!initFromParameters(detector_, pose_estimator_)) {
      ROS_FATAL("%s: Error initializing from parameters, shutting down", action_name_.c_str());
      nh_.shutdown();
      return;
    }

    // Advertise visualization topics
    display_pub_ = it_.advertise("display_image", 1);
    //marker_pub_ = nh_.advertise<visualization_msgs::Marker>("/visualization_marker", 1);

    // Register the goal and preempt callbacks
    as_.registerGoalCallback(boost::bind(&DetectOutletAction::goalCb, this));
    as_.registerPreemptCallback(boost::bind(&DetectOutletAction::preemptCb, this));
  }

  void timeoutCb(const ros::TimerEvent& e)
  {
    if (sub_.getNumPublishers() == 0)
      ROS_INFO("%s: Aborted, there are no publishers on goal topic.", action_name_.c_str());
    else
      ROS_INFO("%s: Aborted, there are publishers on goal topic, but detection took too long.", action_name_.c_str());

    sub_.shutdown();
    as_.setAborted();
  }

  void goalCb()
  {
    ROS_INFO("%s: Received new goal", action_name_.c_str());
    // Accept the new goal
    typedef boost::shared_ptr<const pr2_plugs_msgs::VisionOutletDetectionGoal> GoalPtr;
    GoalPtr goal = as_.acceptNewGoal();
    tf::poseStampedMsgToTF(goal->prior, prior_);
    tf::vector3StampedMsgToTF(goal->wall_normal, wall_normal_);
    update_transformed_prior_ = true;

    // Subscribe to the streams from the requested camera
    ros::Duration(1.0).sleep();
    std::string image_topic = goal->camera_name + "/image_rect_color";
    sub_ = it_.subscribeCamera(image_topic, 1, &DetectOutletAction::detectCb, this);

    // Create a timer for how long we can actually transform images before we have to abort
    pub_timer_ = nh_.createTimer(tf_listener_.getCacheLength() - ros::Duration(1.0),
                                 boost::bind(&DetectOutletAction::timeoutCb, this, _1), true);
  }

  void preemptCb()
  {
    ROS_INFO("%s: Preempted", action_name_.c_str());
    as_.setPreempted();
    pub_timer_.stop();
    sub_.shutdown();
  }

  void detectCb(const sensor_msgs::ImageConstPtr& image_msg, const sensor_msgs::CameraInfoConstPtr& info_msg)
  {
    if (!as_.isActive()) return;

    ROS_INFO("%s: Received image, performing detection", action_name_.c_str());

    // Convert image message
    if (!img_bridge_.fromImage(*image_msg, "bgr8")) {
      ROS_ERROR("%s: Failed to convert image from %s -> bgr8", action_name_.c_str(), image_msg->encoding.c_str());
      return;
    }
    cv::Mat image(img_bridge_.toIpl());

    // Detect the outlet
    std::vector<cv::Point2f> image_points;
    if (!detector_.detect(image, image_points)) {
      ROS_DEBUG("%s: Failed to detect outlet", action_name_.c_str());
      publishDisplayImage(image, image_points, false);
      return;
    }

    // Get the prior in the camera frame. Currently we transform the prior once, on the first
    // image received for a new goal, and assume that the camera frame does not move wrt
    // the original frame of the prior while that goal is active.
    cam_model_.fromCameraInfo(info_msg);
    if (update_transformed_prior_) {
      // Transform the prior and wall normal into the camera coordinate frame.
      tf::Stamped<tf::Vector3> wall_normal_in_camera;
      try {
        tf_listener_.transformPose(cam_model_.tfFrame(), prior_, prior_in_camera_);
        tf_listener_.transformVector(cam_model_.tfFrame(), wall_normal_, wall_normal_in_camera);
      }
      catch (tf::TransformException& ex) {
        ROS_WARN("%s: TF exception\n%s", action_name_.c_str(), ex.what());
        return;
      }

      // Now we tweak the orientation of the original prior to align with the wall normal.
      // First pull out a couple of prior basis vectors of interest.
      btVector3 prior_x_basis = prior_in_camera_.getBasis().getColumn(0);
      btVector3 prior_z_basis = prior_in_camera_.getBasis().getColumn(2);
      // New forward vector is set to the wall normal.
      btVector3 adjusted_x_basis = wall_normal_in_camera.normalized();
      // We assume the normal points into the wall, so positive Z in the camera frame.
      if (adjusted_x_basis.z() < 0)
        adjusted_x_basis = -adjusted_x_basis;
      // The wall normal should not differ wildly from the prior X (forward) basis vector.
      static const double ANGLE_THRESHOLD = 0.175; // ~10 degrees
      double angle = adjusted_x_basis.angle(prior_x_basis);
      if (angle > ANGLE_THRESHOLD) {
        ROS_ERROR("Wall normal differs from prior orientation by %f radians, threshold is %f. Aborting.",
                  angle, ANGLE_THRESHOLD);
        as_.setAborted();
        pub_timer_.stop();
        sub_.shutdown();
        return;
      }
      // New left vector calculated as cross product of the original up vector and the wall normal.
      btVector3 adjusted_y_basis = prior_z_basis.cross(adjusted_x_basis).normalized();
      // New up vector calculated from the other two basis vectors.
      btVector3 adjusted_z_basis = adjusted_x_basis.cross(adjusted_y_basis);
      // Fill in the new basis vectors as columns.
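      // Note: btMatrix3x3::setValue takes its nine arguments in row-major order, so
      // listing the x components, then the y components, then the z components of the
      // adjusted vectors places them as the columns of the rotation matrix.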
      prior_in_camera_.getBasis().setValue(adjusted_x_basis.x(), adjusted_y_basis.x(), adjusted_z_basis.x(),
                                           adjusted_x_basis.y(), adjusted_y_basis.y(), adjusted_z_basis.y(),
                                           adjusted_x_basis.z(), adjusted_y_basis.z(), adjusted_z_basis.z());

      update_transformed_prior_ = false;
    }

    // Estimate the outlet pose
    tf::Pose pose = pose_estimator_.solveWithPrior(image_points, cam_model_, prior_in_camera_);

    // Report success
    pr2_plugs_msgs::VisionOutletDetectionResult result;
    tf::poseTFToMsg(pose, result.outlet_pose.pose);
    result.outlet_pose.header.stamp = image_msg->header.stamp;
    result.outlet_pose.header.frame_id = cam_model_.tfFrame();
    as_.setSucceeded(result);
    pub_timer_.stop();

    // Publish visualization messages
    tf_broadcaster_.sendTransform(tf::StampedTransform(prior_, prior_.stamp_,
                                                       prior_.frame_id_, "outlet_prior_frame"));
    tf_broadcaster_.sendTransform(tf::StampedTransform(prior_in_camera_, prior_in_camera_.stamp_,
                                                       prior_in_camera_.frame_id_,
                                                       "outlet_adjusted_prior_frame"));
    tf_broadcaster_.sendTransform(tf::StampedTransform(pose, image_msg->header.stamp,
                                                       cam_model_.tfFrame(), "outlet_frame"));
    publishDisplayImage(image, image_points, true);

    // Shut down the camera subscription and wait for the next goal
    sub_.shutdown();
  }

  void publishDisplayImage(const cv::Mat& source, const std::vector<cv::Point2f>& image_points, bool success)
  {
    if (display_pub_.getNumSubscribers() == 0) return;
    detector_.getDisplayImage(source, image_points, success, display_img_cv_);
    IplImage ipl = (IplImage)display_img_cv_;
    sensor_msgs::CvBridge::fromIpltoRosImage(&ipl, display_img_);
    display_img_.encoding = "bgr8";
    display_pub_.publish(display_img_);
  }

protected:
  ros::NodeHandle nh_;
  image_transport::ImageTransport it_;
  actionlib::SimpleActionServer<pr2_plugs_msgs::VisionOutletDetectionAction> as_;
  std::string action_name_;

  // Subscriptions
  image_transport::CameraSubscriber sub_;

  ros::Timer pub_timer_;

  // Publications
  image_transport::Publisher display_pub_;
  sensor_msgs::Image display_img_;
  cv::Mat display_img_cv_;
  ros::Publisher marker_pub_;

  // TF
  tf::TransformListener tf_listener_;
  tf::TransformBroadcaster tf_broadcaster_;

  // Message processing
  sensor_msgs::CvBridge img_bridge_;
  image_geometry::PinholeCameraModel cam_model_;
  outlet_pose_estimation::Detector detector_;
  visual_pose_estimation::PoseEstimator pose_estimator_;
  tf::Stamped<tf::Pose> prior_;
  tf::Stamped<tf::Pose> prior_in_camera_;
  tf::Stamped<tf::Vector3> wall_normal_;
  bool update_transformed_prior_;
};

int main(int argc, char** argv)
{
  ros::init(argc, argv, "vision_detect_outlet");
  DetectOutletAction action(ros::this_node::getName());
  ros::spin();

  return 0;
}
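// For reference, a minimal sketch of a client exercising this action server,
// written as a separate node (it is not part of this file). The goal fields
// (camera_name, prior, wall_normal) and the result field (outlet_pose) match the
// usage above; the camera namespace, frame id, and pose values below are
// placeholders and would come from the plugging task in practice.

#include <ros/ros.h>
#include <actionlib/client/simple_action_client.h>
#include <pr2_plugs_msgs/VisionOutletDetectionAction.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "vision_detect_outlet_client");

  // The server advertises the action under its node name, "vision_detect_outlet".
  actionlib::SimpleActionClient<pr2_plugs_msgs::VisionOutletDetectionAction>
      client("vision_detect_outlet", true);
  client.waitForServer();

  pr2_plugs_msgs::VisionOutletDetectionGoal goal;
  goal.camera_name = "/forearm_camera";       // placeholder camera namespace
  goal.prior.header.frame_id = "base_link";   // placeholder frame for the rough prior
  goal.prior.header.stamp = ros::Time::now();
  goal.prior.pose.orientation.w = 1.0;        // identity orientation as a stand-in prior
  goal.wall_normal.header = goal.prior.header;
  goal.wall_normal.vector.x = 1.0;            // placeholder normal pointing into the wall

  client.sendGoal(goal);
  if (client.waitForResult(ros::Duration(30.0)) &&
      client.getState() == actionlib::SimpleClientGoalState::SUCCEEDED)
  {
    ROS_INFO("Outlet pose reported in frame %s",
             client.getResult()->outlet_pose.header.frame_id.c_str());
  }
  else
  {
    ROS_WARN("Outlet detection did not succeed");
  }

  return 0;
}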