#ifndef ADI_3DTOF_IMAGE_STITCH_H
#define ADI_3DTOF_IMAGE_STITCH_H

#ifdef ENABLE_GPU_OPTIMIZATION
#include "stitch_frames_core_gpu.cuh"

#include <boost/make_shared.hpp>

#include <pcl/features/normal_3d.h>
#include <pcl/filters/filter.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/point_cloud.h>
#include <pcl/point_representation.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/icp_nl.h>
#include <pcl/registration/transforms.h>

#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/PointCloud2.h>
#include <std_msgs/Bool.h>

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>

#include <boost/chrono.hpp>
#include <boost/thread/thread.hpp>

#define MAX_QUEUE_SIZE_FOR_TIME_SYNC 10
#define SENSOR_OVERLAP_PERCENT 10.0f
typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::CameraInfo, sensor_msgs::CameraInfo,
                                                        sensor_msgs::CameraInfo>
    Sync_CameraInfo_3sensors;
typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::CameraInfo, sensor_msgs::CameraInfo,
                                                        sensor_msgs::CameraInfo, sensor_msgs::CameraInfo>
    Sync_CameraInfo_4sensors;
typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image,
                                                        sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image>
    sync_depth_ir_3;
typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image,
                                                        sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image,
                                                        sensor_msgs::Image, sensor_msgs::Image>
    sync_depth_ir_4;
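// --- Illustration (not part of the original header) -------------------------
// A minimal, self-contained sketch of how an ApproximateTime policy such as
// sync_depth_ir_2 is typically wired to a callback. Topic names, the callback,
// and the typedef name below are placeholders, not the node's actual wiring.
#include <boost/bind.hpp>
#include <message_filters/subscriber.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/synchronizer.h>
#include <ros/ros.h>
#include <sensor_msgs/Image.h>

typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image,
                                                        sensor_msgs::Image>
    sync_depth_ir_2_sketch;

void onSyncedDepthIrPair(const sensor_msgs::ImageConstPtr& depth_cam1, const sensor_msgs::ImageConstPtr& ir_cam1,
                         const sensor_msgs::ImageConstPtr& depth_cam2, const sensor_msgs::ImageConstPtr& ir_cam2)
{
  ROS_INFO("Received a time-aligned depth/IR pair from both cameras");
}

int main(int argc, char** argv)
{
  ros::init(argc, argv, "sync_policy_sketch");
  ros::NodeHandle nh;
  message_filters::Subscriber<sensor_msgs::Image> depth1(nh, "cam1/depth_image", 1);
  message_filters::Subscriber<sensor_msgs::Image> ir1(nh, "cam1/ir_image", 1);
  message_filters::Subscriber<sensor_msgs::Image> depth2(nh, "cam2/depth_image", 1);
  message_filters::Subscriber<sensor_msgs::Image> ir2(nh, "cam2/ir_image", 1);

  // The policy buffers a bounded number of messages per stream (the node uses
  // MAX_QUEUE_SIZE_FOR_TIME_SYNC) while searching for approximately matching
  // timestamps across all four streams.
  message_filters::Synchronizer<sync_depth_ir_2_sketch> sync(sync_depth_ir_2_sketch(10), depth1, ir1, depth2, ir2);
  sync.registerCallback(boost::bind(&onSyncedDepthIrPair, _1, _2, _3, _4));

  ros::spin();
  return 0;
}
// -----------------------------------------------------------------------------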
  ROS_INFO("adi_3dtof_image_stitching::Inside ADI3DToFImageStitching()");

  std::vector<std::string> cam_prefix;
  nh.param("param_camera_prefixes", cam_prefix_arr, cam_prefix_arr);
  std::vector<std::string> cam_prefix_1;
  for (int i = 0; i < cam_prefix_arr.size(); i++)
  {
    cam_prefix.push_back(cam_prefix_arr[i]);
    std::cerr << "camera_prefixes: " << cam_prefix[i] << std::endl;
  }

  num_sensors_ = std::min(max_devices_allowed, static_cast<int>(cam_prefix.size()));
    printf("\n ERROR: Exception while registering Synchronizers \n");

    printf("\n ERROR: Exception while registering Synchronizers \n");

    printf("\n ERROR: Exception while registering Synchronizers \n");
  stitched_pc_pcl_ = pcl::PointCloud<pcl::PointXYZ>::Ptr(new pcl::PointCloud<pcl::PointXYZ>);

#ifdef ENABLE_GPU_OPTIMIZATION

  int status = system("rosnode kill -a");
  sensor_msgs::PointCloud2::Ptr convert2ROSPointCloudMsg(short* xyz_frame, std_msgs::Header sensor_header,
                                                         int img_width, int img_height)
  {
    sensor_msgs::PointCloud2::Ptr pointcloud_msg(new sensor_msgs::PointCloud2);

    pointcloud_msg->header.stamp = sensor_header.stamp;
    pointcloud_msg->header.frame_id = sensor_header.frame_id;
    pointcloud_msg->width = img_width;
    pointcloud_msg->height = img_height;
    pointcloud_msg->is_dense = false;
    pointcloud_msg->is_bigendian = false;

    short* xyz_sensor_buf;
    xyz_sensor_buf = xyz_frame;

    // Declare the x/y/z fields and walk them with iterators while converting
    // the sensor's millimetre samples to metres.
    sensor_msgs::PointCloud2Modifier pcd_modifier(*pointcloud_msg);
    pcd_modifier.setPointCloud2FieldsByString(1, "xyz");
    sensor_msgs::PointCloud2Iterator<float> iter_x(*pointcloud_msg, "x");
    sensor_msgs::PointCloud2Iterator<float> iter_y(*pointcloud_msg, "y");
    sensor_msgs::PointCloud2Iterator<float> iter_z(*pointcloud_msg, "z");

    for (int i = 0; i < img_height; i++)
    {
      for (int j = 0; j < img_width; j++, ++iter_x, ++iter_y, ++iter_z)
      {
        *iter_x = (float)(*xyz_sensor_buf++) / 1000.0f;
        *iter_y = (float)(*xyz_sensor_buf++) / 1000.0f;
        *iter_z = (float)(*xyz_sensor_buf++) / 1000.0f;
      }
    }

    return pointcloud_msg;
  }
  void sync2CamerasDepthIRImageCallback(const sensor_msgs::ImageConstPtr& depth_image_cam1,
                                        const sensor_msgs::ImageConstPtr& ir_image_cam1,
                                        const sensor_msgs::ImageConstPtr& depth_image_cam2,
                                        const sensor_msgs::ImageConstPtr& ir_image_cam2);

  void sync3CamerasDepthIRImageCallback(const sensor_msgs::ImageConstPtr& depth_image_cam1,
                                        const sensor_msgs::ImageConstPtr& ir_image_cam1,
                                        const sensor_msgs::ImageConstPtr& depth_image_cam2,
                                        const sensor_msgs::ImageConstPtr& ir_image_cam2,
                                        const sensor_msgs::ImageConstPtr& depth_image_cam3,
                                        const sensor_msgs::ImageConstPtr& ir_image_cam3);

  void sync4CamerasDepthIRImageCallback(const sensor_msgs::ImageConstPtr& depth_image_cam1,
                                        const sensor_msgs::ImageConstPtr& ir_image_cam1,
                                        const sensor_msgs::ImageConstPtr& depth_image_cam2,
                                        const sensor_msgs::ImageConstPtr& ir_image_cam2,
                                        const sensor_msgs::ImageConstPtr& depth_image_cam3,
                                        const sensor_msgs::ImageConstPtr& ir_image_cam3,
                                        const sensor_msgs::ImageConstPtr& depth_image_cam4,
                                        const sensor_msgs::ImageConstPtr& ir_image_cam4);

  void sync2CamerasCamInfoCallback(const sensor_msgs::CameraInfoConstPtr& CameraInfo_cam1,
                                   const sensor_msgs::CameraInfoConstPtr& CameraInfo_cam2);

  void sync3CamerasCamInfoCallback(const sensor_msgs::CameraInfoConstPtr& CameraInfo_cam1,
                                   const sensor_msgs::CameraInfoConstPtr& CameraInfo_cam2,
                                   const sensor_msgs::CameraInfoConstPtr& CameraInfo_cam3);

  void sync4CamerasCamInfoCallback(const sensor_msgs::CameraInfoConstPtr& CameraInfo_cam1,
                                   const sensor_msgs::CameraInfoConstPtr& CameraInfo_cam2,
                                   const sensor_msgs::CameraInfoConstPtr& CameraInfo_cam3,
                                   const sensor_msgs::CameraInfoConstPtr& CameraInfo_cam4);
  void camInfoCallback(const sensor_msgs::CameraInfoConstPtr& cam_info, int cam_id);

  void irImageCallback(const sensor_msgs::ImageConstPtr& ir_image, int cam_id,
                       ADI3DToFImageStitchingInputInfo* image_stitch_input_info);

  void generatePointCloud(float* xyz_frame, int* lut_3d_to_2d_mapping,
                          const pcl::PointCloud<pcl::PointXYZ>::Ptr& out_pointcloud);

  template <typename T>
  void addInputNodeToQueue(T* image_stitch_input_info);
#ifdef ENABLE_GPU_OPTIMIZATION
  StitchFramesCoreGPU* stitch_frames_core_GPU_;
    cv_ptr->encoding = encoding_type;
    cv_ptr->header.frame_id = std::move(frame_id);
    cv_ptr->image = std::move(img);
    publisher.publish(cv_ptr->toImageMsg());
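// --- Illustration (not part of the original header) -------------------------
// A minimal, self-contained sketch of the publish helper excerpted above,
// assuming cv_bridge; the timestamp handling here is a placeholder for
// whatever stamp the node actually propagates.
#include <cv_bridge/cv_bridge.h>
#include <opencv2/core.hpp>
#include <ros/ros.h>
#include <string>
#include <utility>

void publishImageAsRosMsgSketch(cv::Mat img, const std::string& encoding_type, std::string frame_id,
                                const ros::Publisher& publisher)
{
  cv_bridge::CvImagePtr cv_ptr(new cv_bridge::CvImage);
  cv_ptr->encoding = encoding_type;         // e.g. "mono16" for 16-bit depth/IR images
  cv_ptr->header.stamp = ros::Time::now();  // placeholder stamp
  cv_ptr->header.frame_id = std::move(frame_id);
  cv_ptr->image = std::move(img);
  publisher.publish(cv_ptr->toImageMsg());
}
// -----------------------------------------------------------------------------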
std::queue< ADI3DToFImageStitchingInputInfo * > input_node_queue_
bool tf_recvd_[MAX_NUM_DEVICES]
void fillAndPublishCameraInfo(std::string frame_id, const ros::Publisher &publisher)
This function fills and publishes the camera information.
void sync4CamerasCamInfoCallback(const sensor_msgs::CameraInfoConstPtr &CameraInfo_cam1, const sensor_msgs::CameraInfoConstPtr &CameraInfo_cam2, const sensor_msgs::CameraInfoConstPtr &CameraInfo_cam3, const sensor_msgs::CameraInfoConstPtr &CameraInfo_cam4)
Callback for synchronised topics (CameraInfo) for the 4-camera configuration.
static IOutputSensor * getOutputSensor(int output_sensor_type)
Get the Output Sensor object.
void AutoscaleStitching(ADI3DToFImageStitchingInputInfo *image_stitch_input_info)
Calculates the yaw correction and updates the output size.
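For illustration, a minimal sketch of how a yaw angle can be read out of a camera-to-map transform using the standard quaternion-to-Euler conversion; this is not the member's actual implementation, and the function name is a placeholder:
#include <cmath>
#include <geometry_msgs/TransformStamped.h>

// Yaw (rotation about Z, in radians) encoded in the transform's quaternion.
double yawFromCameraTransform(const geometry_msgs::TransformStamped& tf_msg)
{
  const geometry_msgs::Quaternion& q = tf_msg.transform.rotation;
  const double siny_cosp = 2.0 * (q.w * q.z + q.x * q.y);
  const double cosy_cosp = 1.0 - 2.0 * (q.y * q.y + q.z * q.z);
  return std::atan2(siny_cosp, cosy_cosp);
}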
~ADI3DToFImageStitching()
Destroy the ADI3DToFImageStitching object.
int enable_pointcloud_generation_
tf2_ros::TransformListener * tf_listener_[MAX_NUM_DEVICES]
boost::shared_ptr< sync_CamInfo_2cam > sync_CamInfo_2cam_
void depthImageCallback(const sensor_msgs::ImageConstPtr &depth_image, int cam_id, ADI3DToFImageStitchingInputInfo *image_stitch_input_info)
Low-level callback for depth image.
boost::mutex output_thread_mtx_
ros::Publisher combo_out_point_cloud_publisher_
void(ADI3DToFImageStitching::* ObjectDetectCallbackFuncPointer)(const std_msgs::BoolConstPtr &)
image_transport::SubscriberFilter depth_image_subscriber_[MAX_NUM_DEVICES]
ros::Publisher stitched_ir_image_publisher_
float horizontal_fov_sensor_in_degrees_
message_filters::Synchronizer< Sync_CameraInfo_3sensors > sync_CamInfo_3cam
message_filters::sync_policies::ApproximateTime< sensor_msgs::CameraInfo, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo > Sync_CameraInfo_4sensors
message_filters::sync_policies::ApproximateTime< sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image > sync_depth_ir_3
void generatePointCloud(float *xyz_frame, int *lut_3d_to_2d_mapping, const pcl::PointCloud< pcl::PointXYZ >::Ptr &out_pointcloud)
Function to convert 3D XYZ frame to PCL point cloud format.
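As an illustration of this kind of conversion, a minimal sketch assuming the buffer is packed X,Y,Z in metres; the real member also applies the 3D-to-2D lookup table, which is omitted here, and the function name is a placeholder:
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

// Copies an interleaved X,Y,Z float buffer into a PCL cloud.
void xyzBufferToPclCloud(const float* xyz_frame, int num_points,
                         const pcl::PointCloud<pcl::PointXYZ>::Ptr& out_pointcloud)
{
  out_pointcloud->clear();
  out_pointcloud->reserve(num_points);
  for (int i = 0; i < num_points; i++)
  {
    pcl::PointXYZ point;
    point.x = *xyz_frame++;
    point.y = *xyz_frame++;
    point.z = *xyz_frame++;
    out_pointcloud->push_back(point);
  }
}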
message_filters::Subscriber< sensor_msgs::CameraInfo > camera_info_subscriber_[MAX_NUM_DEVICES]
ROSCPP_DECL void shutdown()
void subscribe(ImageTransport &it, const std::string &base_topic, uint32_t queue_size, const TransportHints &transport_hints=TransportHints())
This is the main class for this package.
#define MAX_QUEUE_SIZE_FOR_TIME_SYNC
geometry_msgs::TransformStamped camera_map_transform_[MAX_NUM_DEVICES]
float camera_matrix[9]
Camera matrix = [fx, 0, cx, 0, fy, cy, 0, 0, 1] fx, fy : Camera focal lengths (pixels) cx,...
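For illustration, how these row-major entries map a 3D point in the camera's optical frame to pixel coordinates under the standard pinhole model (a sketch, not code from this package):
// Pinhole projection: u = fx * X/Z + cx, v = fy * Y/Z + cy (Z > 0, optical frame).
void projectToPixel(const float camera_matrix[9], float X, float Y, float Z, float& u, float& v)
{
  const float fx = camera_matrix[0], cx = camera_matrix[2];
  const float fy = camera_matrix[4], cy = camera_matrix[5];
  u = fx * (X / Z) + cx;  // column (pixels)
  v = fy * (Y / Z) + cy;  // row (pixels)
}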
sensor_msgs::PointCloud2 stitched_pc_
void publish(const boost::shared_ptr< M > &message) const
IOutputSensor * output_sensor_
std::string output_file_name_
This class has image processing utilities.
message_filters::sync_policies::ApproximateTime< sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image > sync_depth_ir_4
void sync2CamerasCamInfoCallback(const sensor_msgs::CameraInfoConstPtr &CameraInfo_cam1, const sensor_msgs::CameraInfoConstPtr &CameraInfo_cam2)
Callback for synchronised topics (CameraInfo) for the 2-camera configuration.
std::queue< ADI3DToFImageStitchingOutputInfo * > output_node_queue_
message_filters::Synchronizer< sync_depth_ir_2 > sync_depth_ir_2cam
void sync2CamerasDepthIRImageCallback(const sensor_msgs::ImageConstPtr &depth_image_cam1, const sensor_msgs::ImageConstPtr &ir_image_cam1, const sensor_msgs::ImageConstPtr &depth_image_cam2, const sensor_msgs::ImageConstPtr &ir_image_cam2)
Callback for synchronised topics (depth and IR image pair) for the 2-camera configuration.
void camInfoCallback(const sensor_msgs::CameraInfoConstPtr &cam_info, int cam_id)
Low-level callback for CameraInfo; the ID indicates the source camera.
void sync3CamerasCamInfoCallback(const sensor_msgs::CameraInfoConstPtr &CameraInfo_cam1, const sensor_msgs::CameraInfoConstPtr &CameraInfo_cam2, const sensor_msgs::CameraInfoConstPtr &CameraInfo_cam3)
Callback for synchronised topics (CameraInfo) for the 3-camera configuration.
boost::shared_ptr< sync_depth_ir_2cam > sync_depth_ir_2cam_
void processOutput()
The output process function; it runs a loop that reads frames from the output queue, ...
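A minimal sketch of that consumer-loop pattern; OutputNode and the other names below are placeholders for the package's actual output-info type and members, not its implementation:
#include <queue>
#include <boost/chrono.hpp>
#include <boost/thread/thread.hpp>

struct OutputNode { /* stitched depth/IR images and point cloud */ };

// Pops one node at a time under the lock, processes it, and exits once the
// abort flag is raised and the queue has drained.
void processOutputLoop(std::queue<OutputNode*>& output_queue, boost::mutex& mtx, const bool& abort_flag)
{
  while (true)
  {
    OutputNode* node = NULL;
    {
      boost::mutex::scoped_lock lock(mtx);
      if (!output_queue.empty())
      {
        node = output_queue.front();
        output_queue.pop();
      }
    }
    if (node == NULL)
    {
      if (abort_flag)
        break;  // nothing left to publish and the node is shutting down
      boost::this_thread::sleep_for(boost::chrono::milliseconds(2));
      continue;
    }
    // ... publish the stitched outputs carried by 'node' here ...
    delete node;
  }
}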
ImageProcUtils * image_proc_utils_[MAX_NUM_DEVICES]
void processOutputAbort()
This function sets the abort flag for the output thread; it is normally called by the main ...
message_filters::sync_policies::ApproximateTime< sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image > sync_depth_ir_2
ros::Publisher stitched_depth_image_publisher_
tf2_ros::Buffer tf_buffer_[MAX_NUM_DEVICES]
void(ADI3DToFImageStitching::* PointCloudCallbackFuncPointer)(const sensor_msgs::PointCloud2ConstPtr &)
image_transport::ImageTransport * it_
boost::shared_ptr< sync_CamInfo_4cam > sync_CamInfo_4cam_
bool process_output_thread_abort_
bool depth_image_recvd_[MAX_NUM_DEVICES]
#define ROS_INFO_STREAM(args)
void publishImageAsRosMsg(cv::Mat img, const std::string &encoding_type, std::string frame_id, const ros::Publisher &publisher)
This function publishes images as ROS messages.
virtual void open(std::string input_file_name, int image_width, int image_height)=0
message_filters::Synchronizer< sync_depth_ir_3 > sync_depth_ir_3cam
void sync4CamerasDepthIRImageCallback(const sensor_msgs::ImageConstPtr &depth_image_cam1, const sensor_msgs::ImageConstPtr &ir_image_cam1, const sensor_msgs::ImageConstPtr &depth_image_cam2, const sensor_msgs::ImageConstPtr &ir_image_cam2, const sensor_msgs::ImageConstPtr &depth_image_cam3, const sensor_msgs::ImageConstPtr &ir_image_cam3, const sensor_msgs::ImageConstPtr &depth_image_cam4, const sensor_msgs::ImageConstPtr &ir_image_cam4)
Callback for synchronised topics (depth and IR image pair) for the 4-camera configuration.
void shutDownAllNodes()
This function shuts down all the nodes running in a roscore.
StitchFramesCoreCPU * stitch_frames_core_CPU_
void(ADI3DToFImageStitching::* ImageCallbackFuncPointer)(const sensor_msgs::ImageConstPtr &)
static const int MAX_NUM_DEVICES
message_filters::sync_policies::ApproximateTime< sensor_msgs::CameraInfo, sensor_msgs::CameraInfo, sensor_msgs::CameraInfo > Sync_CameraInfo_3sensors
sensor_msgs::PointCloud2::Ptr convert2ROSPointCloudMsg(short *xyz_frame, std_msgs::Header sensor_header, int img_width, int img_height)
Converts an XYZ 3D frame to the ROS point cloud message format.
bool stitchFrames()
Stitch function.
#define SENSOR_OVERLAP_PERCENT
message_filters::Synchronizer< sync_depth_ir_4 > sync_depth_ir_4cam
ADI3DToFImageStitching()
Construct a new ADI3DToFImageStitching object.
This is the base class for output.
boost::mutex input_thread_mtx_
bool camera_parameters_updated_[MAX_NUM_DEVICES]
boost::shared_ptr< sync_CamInfo_3cam > sync_CamInfo_3cam_
Class for the stitch-frames core CPU implementation.
float camera_yaw_correction_
void addInputNodeToQueue(T *image_stitch_input_info)
Intrinsic parameters of the camera.
void sync3CamerasDepthIRImageCallback(const sensor_msgs::ImageConstPtr &depth_image_cam1, const sensor_msgs::ImageConstPtr &ir_image_cam1, const sensor_msgs::ImageConstPtr &depth_image_cam2, const sensor_msgs::ImageConstPtr &ir_image_cam2, const sensor_msgs::ImageConstPtr &depth_image_cam3, const sensor_msgs::ImageConstPtr &ir_image_cam3)
Callback for synchronised topics (depth and IR image pair) for the 3-camera configuration.
float transform_matrix_[MAX_NUM_DEVICES][16]
boost::shared_ptr< sync_depth_ir_4cam > sync_depth_ir_4cam_
bool ir_image_recvd_[MAX_NUM_DEVICES]
float camera_yaw_[MAX_NUM_DEVICES]
T param(const std::string &param_name, const T &default_val) const
boost::shared_ptr< sync_depth_ir_3cam > sync_depth_ir_3cam_
message_filters::Synchronizer< Sync_CameraInfo_4sensors > sync_CamInfo_4cam
pcl::PointCloud< pcl::PointXYZ >::Ptr stitched_pc_pcl_
ros::Time curr_frame_timestamp_
CameraIntrinsics depth_intrinsics_
void pushOutputNode(ADI3DToFImageStitchingOutputInfo *new_output_node)
This function pushes the output node to the output queue. If the queue is full, then the last item in ...
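A minimal sketch of one common bounded-push variant; the overflow handling shown here (dropping the oldest entry) is an assumption, and the names are placeholders rather than the package's actual members:
#include <cstddef>
#include <queue>
#include <boost/thread/thread.hpp>

struct OutputNode { };

// Keeps the queue bounded: if it is already full, drop one stale entry under
// the lock before pushing the new node (assumed policy, see note above).
void pushOutputNodeSketch(std::queue<OutputNode*>& output_queue, boost::mutex& mtx, OutputNode* new_output_node,
                          std::size_t max_queue_length)
{
  boost::mutex::scoped_lock lock(mtx);
  if (output_queue.size() >= max_queue_length)
  {
    OutputNode* stale = output_queue.front();
    output_queue.pop();
    delete stale;
  }
  output_queue.push(new_output_node);
}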
const std::string PLUMB_BOB
message_filters::sync_policies::ApproximateTime< sensor_msgs::CameraInfo, sensor_msgs::CameraInfo > Sync_CameraInfo_2sensors
void irImageCallback(const sensor_msgs::ImageConstPtr &ir_image, int cam_id, ADI3DToFImageStitchingInputInfo *image_stitch_input_info)
Low-level callback for IR image.
float distortion_coeffs[8]
Distortion vector = [k1, k2, p1, p2, k3, k4, k5, k6] k1, k2, k3, k4, k5, k6 : Camera radial distortio...
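For illustration, the standard OpenCV-style rational distortion model that an eight-coefficient vector of this form parameterises, applied to a normalised point (x, y) = (X/Z, Y/Z); this is a sketch, not code from this package:
// Radial term uses the rational polynomial (k1..k6); p1, p2 are the tangential coefficients.
void distortNormalisedPoint(const float d[8], float x, float y, float& x_dist, float& y_dist)
{
  const float k1 = d[0], k2 = d[1], p1 = d[2], p2 = d[3];
  const float k3 = d[4], k4 = d[5], k5 = d[6], k6 = d[7];
  const float r2 = x * x + y * y;
  const float r4 = r2 * r2;
  const float r6 = r4 * r2;
  const float radial = (1.0f + k1 * r2 + k2 * r4 + k3 * r6) / (1.0f + k4 * r2 + k5 * r4 + k6 * r6);
  x_dist = x * radial + 2.0f * p1 * x * y + p2 * (r2 + 2.0f * x * x);
  y_dist = y * radial + p1 * (r2 + 2.0f * y * y) + 2.0f * p2 * x * y;
}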
ros::Publisher stitched_camera_info_publisher_
message_filters::Synchronizer< Sync_CameraInfo_2sensors > sync_CamInfo_2cam
void populateCameraInstrinsics(CameraIntrinsics &camera_intrinsics)
Populate camera intrinsic data.
image_transport::SubscriberFilter ir_image_subscriber_[MAX_NUM_DEVICES]
float vertical_fov_sensor_in_degrees_
void setPointCloud2FieldsByString(int n_fields,...)
sensor_msgs::CameraInfo cam_info_msg_
ADI3DToFImageStitchingInputInfo * getInputNode()
Function to read the next node from the input queue.