#include <pcl/conversions.h>
#include <boost/foreach.hpp>
#include <tf/tfMessage.h>
#include <geometry_msgs/TransformStamped.h>
#include <rosgraph_msgs/Clock.h>
#include <std_msgs/Float64.h>

#if ROS_VERSION_MINIMUM(1, 11, 1) // if the current ROS version is >= 1.11.1 (indigo)

#define LINES_OF_TERMINAL_FT 4

    _image_transport(this->_measurements_node_handle),
    _data_from_bag(false),
    _ci_initialized(false),
    _subscribe_to_pc(false),
    _publishing_depth_edges_img(false),
    _publishing_tracked_feats_img(false),
    _publishing_rgb_img(false),
    _publishing_depth_img(false),
    _publishing_tracked_feats_with_pred_msk_img(false),
    _publishing_full_pc(false),
    _publishing_predicting_msk_img(false),
    _publishing_tracking_msk_img(false),
    _publishing_detecting_msk_img(false),
    _publishing_predicted_and_past_feats_img(false),
    _republishing_predicted_feat_locs(false),
    _advance_frame_min_wait_time(0.0),
    _processing_factor(0),
    _advance_frame_mechanism(0),
    _advance_frame_max_wait_time(60.),
    _advance_sub_returned_true(false),
    _attention_to_motion(false),
    _occlusion_mask_positive(true)
{
    this->_namespace = std::string("feature_tracker");
    const sensor_msgs::ImageConstPtr &depth_image_msg,
    const sensor_msgs::ImageConstPtr &rgb_image_msg)

    this->_full_rgbd_pc = boost::make_shared<const PointCloudPCL>(temp_cloud);
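// --- Illustrative sketch (not part of the original listing) ---
// A minimal, self-contained example of the conversion pattern used above: copy an
// incoming sensor_msgs::PointCloud2 into a PCL cloud and keep it as a shared const
// pointer. PointCloudPCLSketch mirrors the node's PointCloudPCL typedef
// (pcl::PointCloud<pcl::PointXYZRGB>); the function name is hypothetical.
#include <boost/make_shared.hpp>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <sensor_msgs/PointCloud2.h>

typedef pcl::PointCloud<pcl::PointXYZRGB> PointCloudPCLSketch;

PointCloudPCLSketch::ConstPtr convertFullRGBDPC(const sensor_msgs::PointCloud2ConstPtr& pc_msg)
{
    PointCloudPCLSketch temp_cloud;
    pcl::fromROSMsg(*pc_msg, temp_cloud);                              // ROS msg -> PCL cloud
    return boost::make_shared<const PointCloudPCLSketch>(temp_cloud);  // immutable shared copy
}
// --- End of sketch ---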
    const sensor_msgs::ImageConstPtr &rgb_image_msg)

    ros::Time current_measurement_time = depth_image_msg->header.stamp;

    double period_between_frames = 1.0/this->_sensor_fps;
    int frames_between_meas = round((time_between_meas.toSec())/period_between_frames);

    else if(frames_between_meas == this->_processing_factor)

    else if(frames_between_meas > this->_processing_factor)

        ROS_ERROR("%3d frames lost! Consider setting a lower frame rate.", frames_between_meas - this->_processing_factor);
    static int advanced_frame_number = 0;

    std::cout << "Press enter to process the next frame" << std::endl;

    std::cout << "WARNING: no time limit, waiting forever" << std::endl;
    max_time = std::numeric_limits<double>::max();

    std::cout << "Waiting for advance response in FTracker";

    printf(" (%4.1fs)", wait_time);

    if (wait_time >= max_time) {
        std::cout << " Exceeded time limit " << max_time;

    std::cout << " Refinement received in FTracker";

    advanced_frame_number++;
    std::cout << ". Reading frame " << advanced_frame_number << std::endl;
    ROS_WARN_STREAM("Total meas processing time: " << (last-first).toSec()*1000 << " ms");

    this->_re_filter->setDynamicReconfigureValues(config);
    std::vector<std::string> topics;
    topics.push_back("/tf");

    int messages_counter = 0;
    rosgraph_msgs::Clock time_now_msg;

    sensor_msgs::PointCloud2::ConstPtr points_pc = m.instantiate<sensor_msgs::PointCloud2>();
    if (points_pc != NULL)
        time_now = points_pc->header.stamp;

    sensor_msgs::Image::Ptr occlusion_mask = m.instantiate<sensor_msgs::Image>();
    if (occlusion_mask != NULL)
        time_now = occlusion_mask->header.stamp;

    sensor_msgs::Image::Ptr depth_img = m.instantiate<sensor_msgs::Image>();
    if (depth_img != NULL)
        time_now = depth_img->header.stamp;

    sensor_msgs::Image::Ptr rgb_img = m.instantiate<sensor_msgs::Image>();
    if (rgb_img != NULL)
        time_now = rgb_img->header.stamp;

    sensor_msgs::CameraInfo::ConstPtr ci_msg = m.instantiate<sensor_msgs::CameraInfo>();
    if (ci_msg != NULL)
        time_now = ci_msg->header.stamp;

    static bool first_ee_tf = true;

    tf::tfMessage::Ptr tf_msg = m.instantiate<tf::tfMessage>();
    if (tf_msg != NULL)
        time_now = tf_msg->transforms.at(0).header.stamp;
        for(int tf_transforms_idx = 0; tf_transforms_idx < tf_msg->transforms.size(); tf_transforms_idx++)

            geometry_msgs::TransformStamped transform = tf_msg->transforms.at(tf_transforms_idx);
            if(transform.child_frame_id == "/ee")

                cam_link_to_camera_rgb_optical_frame.setOrigin(translation);
                cam_link_to_camera_rgb_optical_frame.setRotation(rotation);

                geometry_msgs::TransformStamped initial_tf;
                initial_tf.header = transform.header;
                initial_tf.child_frame_id = "initial_transform";

    time_now_msg.clock = time_now;
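// --- Illustrative sketch (not part of the original listing) ---
// The ReadRosBag pattern above boils down to: open a bag, build a view over the
// topics of interest, and use MessageInstance::instantiate<T>() to recover each
// typed message (it returns a null pointer if the stored type does not match T).
// Topic names below are placeholders.
#include <string>
#include <vector>
#include <boost/foreach.hpp>
#include <rosbag/bag.h>
#include <rosbag/view.h>
#include <sensor_msgs/PointCloud2.h>
#include <tf/tfMessage.h>

void readBagSketch(const std::string& bag_file_name)
{
    rosbag::Bag bag;
    bag.open(bag_file_name, rosbag::bagmode::Read);

    std::vector<std::string> topics;
    topics.push_back("/camera/depth_registered/points");
    topics.push_back("/tf");

    rosbag::View view(bag, rosbag::TopicQuery(topics));

    BOOST_FOREACH(rosbag::MessageInstance const m, view)
    {
        sensor_msgs::PointCloud2::ConstPtr pc = m.instantiate<sensor_msgs::PointCloud2>();
        if (pc != NULL)
        {
            ros::Time stamp = pc->header.stamp;
            (void)stamp;   // e.g. republish a simulated /clock here
            continue;
        }

        tf::tfMessage::ConstPtr tf_msg = m.instantiate<tf::tfMessage>();
        if (tf_msg != NULL && !tf_msg->transforms.empty())
        {
            ros::Time stamp = tf_msg->transforms.at(0).header.stamp;
            (void)stamp;
        }
    }

    bag.close();
}
// --- End of sketch ---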
    rgb_img->header.frame_id = "camera_rgb_optical_frame";

    depth_img->header.frame_id = "camera_rgb_optical_frame";

    sensor_msgs::PointCloud2 full_rgbd_pc_ros;
    full_rgbd_pc_ros.header.frame_id = "camera_rgb_optical_frame";

    tracked_features_img->header.frame_id = "camera_rgb_optical_frame";

        ->getTrackedFeaturesWithPredictionMaskImg();
    tracked_features_with_pm_img->header.frame_id = "camera_rgb_optical_frame";

        ->getDepthEdgesImg();
    depth_edges_img->header.frame_id = "camera_rgb_optical_frame";

    predicting_mask_img->header.frame_id = "camera_rgb_optical_frame";

    tracking_mask_img->header.frame_id = "camera_rgb_optical_frame";

    detecting_mask_img->header.frame_id = "camera_rgb_optical_frame";

    predicted_and_last_feats_img->header.frame_id = "camera_rgb_optical_frame";

    sensor_msgs::PointCloud2 temp_cloud;
    temp_cloud.header.frame_id = "camera_rgb_optical_frame";

    sensor_msgs::PointCloud2 tracked_features_pc_ros;
    pcl::toROSMsg(*tracked_features_pc, tracked_features_pc_ros);
    tracked_features_pc_ros.header.frame_id = "camera_rgb_optical_frame";
    ROS_ERROR_STREAM_NAMED("FeatureTrackerNode._publishPredictedMeasurement",
                           "FeatureTrackerNode::_publishPredictedMeasurement has not been implemented for the Feature Tracker. It could publish the predicted 2D location of the features using both generative models: the internal model (no motion between frames) and the model that uses the higher RE level (velocity of the features).");
    this->getROSParameter<std::string>(this->_namespace + std::string("/camera_info_topic"), this->_ci_topic);

    this->_advance_frame_min_wait_time *= 1e6;

    this->getROSParameter<double>(std::string("/omip/sensor_fps"), this->_sensor_fps);
    this->getROSParameter<int>(std::string("/omip/processing_factor"), this->_processing_factor);
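// --- Illustrative sketch (not part of the original listing) ---
// getROSParameter<T> is the node's own templated wrapper; with a plain
// ros::NodeHandle the same reads look like this. Parameter names follow the ones
// used above; the default values are illustrative, not taken from the original.
#include <string>
#include <ros/ros.h>

void readParametersSketch()
{
    ros::NodeHandle nh;

    double sensor_fps = 30.0;
    int processing_factor = 1;
    std::string ci_topic;

    if (!nh.getParam("/omip/sensor_fps", sensor_fps))
        ROS_WARN("Parameter /omip/sensor_fps not found, using default %f", sensor_fps);

    nh.getParam("/omip/processing_factor", processing_factor);
    nh.getParam("/feature_tracker/camera_info_topic", ci_topic);
}
// --- End of sketch ---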
    std::map<int, std::string> mechanism_codes;
    mechanism_codes[0] = std::string("Automatically advancing, no waiting");
    mechanism_codes[1] = std::string("Manually advancing");
    mechanism_codes[2] = std::string("Wait for signal from Shape Reconstruction");
    mechanism_codes[3] = std::string("Wait for signal from Shape Tracker");

    "FeatureTrackerNode Parameters: " <<
    "\n\tMaximum time to wait for the signal to advance a frame (only used if advance_frame_mechanism is 2 or 3): " << this->_advance_frame_max_wait_time <<
    "\n\tMinimum time to wait to advance a frame (only used if advance_frame_mechanism is 0): " << this->_advance_frame_min_wait_time/1e6 <<

    "FeatureTrackerNode Parameters: " <<

    "FeatureTrackerNode subscribes to these topics: " <<

    "FeatureTrackerNode subscribes to these topics: " <<
    if (this->_tracker_type == std::string("PointFeatureTracker"))

        "This type of Visual Tracker (" << this->_tracker_type << ") is not defined");

        "[FeatureTrackerNode::getROSParameter] This type of Visual Tracker is not defined"));
688 "FeatureTrackerNode.getROSParameter",
689 "CameraInfo message could not get read. Using default values to initialize the camera info!");
693 for(
int i=0; i<5; i++)
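// --- Illustrative sketch (not part of the original listing) ---
// Falling back to a default camera model when no CameraInfo message arrives, e.g. a
// 640x480 pinhole camera with five zero distortion coefficients. The intrinsics
// below are typical RGB-D defaults, not values taken from the original file.
#include <sensor_msgs/CameraInfo.h>

sensor_msgs::CameraInfo defaultCameraInfoSketch()
{
    sensor_msgs::CameraInfo ci;
    ci.width  = 640;
    ci.height = 480;
    ci.distortion_model = "plumb_bob";
    ci.D.resize(5, 0.0);                      // five zero distortion coefficients
    ci.K[0] = 525.0; ci.K[2] = 319.5;         // fx, cx
    ci.K[4] = 525.0; ci.K[5] = 239.5;         // fy, cy
    ci.K[8] = 1.0;
    ci.P[0] = 525.0; ci.P[2] = 319.5;         // projection matrix mirrors K
    ci.P[5] = 525.0; ci.P[6] = 239.5;
    ci.P[10] = 1.0;
    return ci;
}
// --- End of sketch ---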
    ROS_ERROR_STREAM_NAMED("FeatureTrackerNode._SubscribeAndAdvertiseTopics",
                           "Using the HEAVY synchronization in FT (depth maps, RGB images and point clouds).");

    ROS_ERROR_STREAM_NAMED("FeatureTrackerNode._SubscribeAndAdvertiseTopics",
                           "Using the LIGHT synchronization in FT (depth maps and RGB images).");
    ROS_INFO("Waiting for ShapeReconstruction to advance");

    ROS_INFO("Waiting for ShapeTracker to advance");

    ROS_DEBUG_STREAM("Advance flag from ShapeReconstruction received in FeatureTracker!");

    ROS_DEBUG_STREAM("Advance flag from ShapeTracker received in FeatureTracker!");
int main(int argc, char** argv)

    ros::init(argc, argv, "feature_tracker");

    bool press_enter_to_start = false;
    ft_node.getROSParameter<bool>("/omip/press_enter_to_start", press_enter_to_start);
    if(press_enter_to_start)

        std::cout << "************************************************************************" << std::endl;
        std::cout << "Press enter to start Online Interactive Perception" << std::endl;
        std::cout << "************************************************************************" << std::endl;

    std::cout << "Starting Online Interactive Perception" << std::endl;