hfl110dcu.cpp
// Copyright 2020 Continental AG
// All rights reserved.
//
// Software License Agreement (BSD 2-Clause Simplified License)
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include "hfl110dcu.h"

#include <string>
#include <vector>
#include <cmath>

// Note: the initTransform function in this file was originally written for
// the depth_image_proc ROS package and is reproduced here. Full credit for its
// development goes to the authors of that package; its use in this package was
// granted by one of the authors since it is not a static function:
// https://github.com/ros-perception/image_pipeline/blob/9b6764166096fa1d90706459fb70242f46ac8643/depth_image_proc/src/nodelets/point_cloud_xyzi_radial.cpp#L101

namespace hfl
{
HFL110DCU::HFL110DCU(std::string model, std::string version,
                     std::string frame_id, ros::NodeHandle& node_handler)
  : node_handler_(node_handler)
{
  // Set model and version
  model_ = model;
  version_ = version;

  // Initialize header messages
  frame_header_message_.reset(new std_msgs::Header());
  pdm_header_message_.reset(new std_msgs::Header());
  object_header_message_.reset(new std_msgs::Header());
  tele_header_message_.reset(new std_msgs::Header());
  slice_header_message_.reset(new std_msgs::Header());
  tf_header_message_.reset(new std_msgs::Header());

  ros::NodeHandle image_depth_nh(node_handler_, "depth");
  ros::NodeHandle image_intensity_16b_nh(node_handler_, "intensity");
  ros::NodeHandle image_depth2_nh(node_handler_, "depth2");
  ros::NodeHandle image_intensity2_16b_nh(node_handler_, "intensity2");
  ros::NodeHandle image_intensity_8b_nh(node_handler_, "intensity8");
  ros::NodeHandle objects_nh(node_handler_, "perception");
  ros::NodeHandle flag_nh(node_handler_, "flags");
  ros::NodeHandle ct_nh(flag_nh, "crosstalk");
  ros::NodeHandle ct2_nh(flag_nh, "crosstalk2");
  ros::NodeHandle sat_nh(flag_nh, "saturated");
  ros::NodeHandle sat2_nh(flag_nh, "saturated2");
  ros::NodeHandle si_nh(flag_nh, "si");
  ros::NodeHandle si2_nh(flag_nh, "si2");

  image_transport::ImageTransport it_depth(image_depth_nh);
  image_transport::ImageTransport it_depth2(image_depth2_nh);
  image_transport::ImageTransport it_intensity_16b(image_intensity_16b_nh);
  image_transport::ImageTransport it_intensity2_16b(image_intensity2_16b_nh);
  image_transport::ImageTransport it_intensity_8b(image_intensity_8b_nh);
  image_transport::ImageTransport it_ct(ct_nh);
  image_transport::ImageTransport it_ct2(ct2_nh);
  image_transport::ImageTransport it_sat(sat_nh);
  image_transport::ImageTransport it_sat2(sat2_nh);
  image_transport::ImageTransport it_si(si_nh);
  image_transport::ImageTransport it_si2(si2_nh);

  // Initialize publishers
  pub_depth_ = it_depth.advertiseCamera("image_raw", 100);
  pub_intensity_ = it_intensity_16b.advertiseCamera("image_raw", 100);
  pub_depth2_ = it_depth2.advertiseCamera("image_raw", 100);
  pub_intensity2_ = it_intensity2_16b.advertiseCamera("image_raw", 100);
  pub_ct_ = it_ct.advertiseCamera("image_raw", 100);
  pub_ct2_ = it_ct2.advertiseCamera("image_raw", 100);
  pub_sat_ = it_sat.advertiseCamera("image_raw", 100);
  pub_sat2_ = it_sat2.advertiseCamera("image_raw", 100);
  pub_si_ = it_si.advertiseCamera("image_raw", 100);
  pub_si2_ = it_si2.advertiseCamera("image_raw", 100);
  pub_objects_ = objects_nh.advertise<visualization_msgs::MarkerArray>("objects", 100);
  pub_points_ = node_handler_.advertise<sensor_msgs::PointCloud2>("points", 1000);
  pub_slices_ = node_handler_.advertise<std_msgs::UInt16MultiArray>("slices", 1000);

  std::string default_calib_file = "~/.ros/camera_info/default.yaml";

  // Check camera info manager
  camera_info_manager_ =
    new camera_info_manager::CameraInfoManager(image_intensity_16b_nh, frame_id);

  // Initialize diagnostic device ID; later on this should update with the serial number, if available
  updater_.setHardwareIDf("%s", frame_id.c_str());
  // Add diagnostic updater callback
  updater_.add("HFL110 Updater", this, &HFL110DCU::update_diagnostics);

  // Initialize message headers
  frame_header_message_->frame_id = frame_id;
  frame_header_message_->seq = -1;

  object_header_message_->frame_id = "map";  // TODO(flynneva): make this a ROS parameter
  object_header_message_->seq = -1;
  tf_header_message_->frame_id = "map";
  tf_header_message_->seq = 0;
  global_tf_.child_frame_id = frame_id;
}

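// A minimal usage sketch (hypothetical node code; the node name, model,
// version and frame strings below are illustrative, not fixed by this file):
//   ros::init(argc, argv, "hfl110dcu_node");
//   ros::NodeHandle nh;
//   hfl::HFL110DCU camera("hfl110dcu", "v1", "hfl110dcu_frame", nh);
// The constructor wires up all publishers; sensor payloads are then handed to
// processFrameData(), processObjectData() and processTelemetryData().
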
bool HFL110DCU::parseFrame(int start_byte, const std::vector<uint8_t>& packet)
{
  int byte_offset = 0;

  float range_1, range_2, temp_range = 0;
  uint16_t intensity_1, intensity_2 = 0;
  uint8_t classification, ch = 0;

  // Build up range and intensity images
  for (col_ = 0; col_ < FRAME_COLUMNS; col_ += 1)
  {
    byte_offset = start_byte + (col_ * 4);
    // Populate range images
    range_1 = (global_offset_ + float(
      big_to_native(*reinterpret_cast<const uint16_t*>(&packet[byte_offset])))) / 256.0;
    range_2 = (global_offset_ + float(
      big_to_native(*reinterpret_cast<const uint16_t*>(&packet[byte_offset + 2])))) / 256.0;

    // Byte offset for intensity
    // Intensity data follows a full row of depth data (128 columns * 2 returns * 2 bytes each)
    byte_offset = start_byte + 512 + (col_ * 4);

    // Populate intensity images
    intensity_1 = uint16_t(
      big_to_native(*reinterpret_cast<const uint16_t*>(&packet[byte_offset])));
    intensity_2 = uint16_t(
      big_to_native(*reinterpret_cast<const uint16_t*>(&packet[byte_offset + 2])));

    // If range is larger than 49 m, set it to NAN
    if (range_1 > 49.0)
      range_1 = NAN;

    if (range_2 > 49.0)
      range_2 = NAN;

    p_image_depth_->image.at<float>(cv::Point(col_, row_)) = range_1;
    p_image_depth2_->image.at<float>(cv::Point(col_, row_)) = range_2;

    p_image_intensity_->image.at<uint16_t>(cv::Point(col_, row_)) = intensity_1;
    p_image_intensity2_->image.at<uint16_t>(cv::Point(col_, row_)) = intensity_2;

    // Byte offset for classification flags
    byte_offset = start_byte + 1152 + col_;

    classification = big_to_native(*reinterpret_cast<const uint8_t*>(&packet[byte_offset]));

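    // Observed bit layout of the classification byte (as decoded below):
    // bit 0 = crosstalk, bit 1 = saturated, bit 3 = superimposed for return 1;
    // bits 4, 5 and 7 carry the same flags for return 2 (bits 2 and 6 are not
    // used here). Each flag is scaled to 0/255 to be visible as a mono image.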
    p_image_crosstalk_->image.at<uint8_t>(cv::Point(col_, row_)) = ((classification >> 0) & 1) * 255;
    p_image_saturated_->image.at<uint8_t>(cv::Point(col_, row_)) = ((classification >> 1) & 1) * 255;
    p_image_superimposed_->image.at<uint8_t>(cv::Point(col_, row_)) = ((classification >> 3) & 1) * 255;

    p_image_crosstalk2_->image.at<uint8_t>(cv::Point(col_, row_)) = ((classification >> 4) & 1) * 255;
    p_image_saturated2_->image.at<uint8_t>(cv::Point(col_, row_)) = ((classification >> 5) & 1) * 255;
    p_image_superimposed2_->image.at<uint8_t>(cv::Point(col_, row_)) = ((classification >> 7) & 1) * 255;

    if (ch < 3) {
      ch += 1;
    } else {
      ch = 0;
    }
  }

  return true;
}

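// A sketch of the 8.8 fixed-point range decoding used in parseFrame() above
// (the helper name is illustrative, not part of this driver): ranges arrive
// as big-endian uint16 values in 1/256 m steps, and global_offset_ is applied
// in the same units before scaling to meters.
static float decodeRange88(const uint8_t* be_bytes, float offset_steps)
{
  // Reassemble the big-endian word, then convert 1/256 m steps to meters
  uint16_t raw = (uint16_t(be_bytes[0]) << 8) | uint16_t(be_bytes[1]);
  return (offset_steps + float(raw)) / 256.0f;
}
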
bool HFL110DCU::processFrameData(const std::vector<uint8_t>& frame_data)
{
  if (version_ == "v1")
  {
    int size = frame_data.size();

    // Identify packet by fragmentation offset
    row_ = FRAME_ROWS - 1 - big_to_native(*reinterpret_cast<const uint32_t*>(&frame_data[16]));
    int frame_num = big_to_native(*reinterpret_cast<const uint32_t*>(&frame_data[12]));

    // Check packet offset continuity
    if (row_ != expected_packet_)
    {
      ROS_ERROR("Unexpected packet (dropped packet?) expecting: %i, received: %i",
                expected_packet_, row_);
      return false;
    }

    // First frame packet, reset frame data
    if (row_ == (FRAME_ROWS - 1))
    {
      // Set header message
      frame_header_message_->stamp = ros::Time::now();
      frame_header_message_->seq += 1;

      // Set Pointcloud message
      pointcloud_.reset(new sensor_msgs::PointCloud2());
      pointcloud_->header = *frame_header_message_;
      pointcloud_->height = FRAME_ROWS;
      pointcloud_->width = FRAME_COLUMNS * 2;

      sensor_msgs::PointCloud2Modifier modifier(*pointcloud_);
      modifier.setPointCloud2Fields(8,
        "x", 1, sensor_msgs::PointField::FLOAT32,
        "y", 1, sensor_msgs::PointField::FLOAT32,
        "z", 1, sensor_msgs::PointField::FLOAT32,
        "intensity", 1, sensor_msgs::PointField::FLOAT32,
        "return", 1, sensor_msgs::PointField::UINT8,
        "crosstalk", 1, sensor_msgs::PointField::UINT8,
        "saturated", 1, sensor_msgs::PointField::UINT8,
        "superimposed", 1, sensor_msgs::PointField::UINT8);
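      // Resulting point layout: the cloud holds two points per image pixel
      // (return 1 and return 2, interleaved), hence width = FRAME_COLUMNS * 2;
      // each point carries float32 x/y/z/intensity plus four uint8 flag fields.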

      // Reset image pointers
      p_image_depth_.reset(new cv_bridge::CvImage);
      p_image_depth_->encoding = sensor_msgs::image_encodings::TYPE_32FC1;
      p_image_depth_->image = cv::Mat(FRAME_ROWS, FRAME_COLUMNS, CV_32FC1);
      p_image_intensity_.reset(new cv_bridge::CvImage);
      p_image_intensity_->encoding = sensor_msgs::image_encodings::TYPE_16UC1;
      p_image_intensity_->image = cv::Mat(FRAME_ROWS, FRAME_COLUMNS, CV_16UC1);
      p_image_depth2_.reset(new cv_bridge::CvImage);
      p_image_depth2_->encoding = sensor_msgs::image_encodings::TYPE_32FC1;
      p_image_depth2_->image = cv::Mat(FRAME_ROWS, FRAME_COLUMNS, CV_32FC1);
      p_image_intensity2_.reset(new cv_bridge::CvImage);
      p_image_intensity2_->encoding = sensor_msgs::image_encodings::TYPE_16UC1;
      p_image_intensity2_->image = cv::Mat(FRAME_ROWS, FRAME_COLUMNS, CV_16UC1);

      p_image_crosstalk_.reset(new cv_bridge::CvImage);
      p_image_crosstalk_->encoding = sensor_msgs::image_encodings::TYPE_8UC1;
      p_image_crosstalk_->image = cv::Mat(FRAME_ROWS, FRAME_COLUMNS, CV_8UC1);
      p_image_saturated_.reset(new cv_bridge::CvImage);
      p_image_saturated_->encoding = sensor_msgs::image_encodings::TYPE_8UC1;
      p_image_saturated_->image = cv::Mat(FRAME_ROWS, FRAME_COLUMNS, CV_8UC1);
      p_image_superimposed_.reset(new cv_bridge::CvImage);
      p_image_superimposed_->encoding = sensor_msgs::image_encodings::TYPE_8UC1;
      p_image_superimposed_->image = cv::Mat(FRAME_ROWS, FRAME_COLUMNS, CV_8UC1);
      p_image_crosstalk2_.reset(new cv_bridge::CvImage);
      p_image_crosstalk2_->encoding = sensor_msgs::image_encodings::TYPE_8UC1;
      p_image_crosstalk2_->image = cv::Mat(FRAME_ROWS, FRAME_COLUMNS, CV_8UC1);
      p_image_saturated2_.reset(new cv_bridge::CvImage);
      p_image_saturated2_->encoding = sensor_msgs::image_encodings::TYPE_8UC1;
      p_image_saturated2_->image = cv::Mat(FRAME_ROWS, FRAME_COLUMNS, CV_8UC1);
      p_image_superimposed2_.reset(new cv_bridge::CvImage);
      p_image_superimposed2_->encoding = sensor_msgs::image_encodings::TYPE_8UC1;
      p_image_superimposed2_->image = cv::Mat(FRAME_ROWS, FRAME_COLUMNS, CV_8UC1);

      // Get intrinsic and extrinsic calibration parameters
      // CameraIntrinsics * camera_intrinsics;
      float fx = *reinterpret_cast<const float*>(&frame_data[20]);
      ROS_INFO_ONCE("fx: %.4f", fx);
      float fy = *reinterpret_cast<const float*>(&frame_data[24]);
      ROS_INFO_ONCE("fy: %.4f", fy);
      float ux = *reinterpret_cast<const float*>(&frame_data[28]);
      ROS_INFO_ONCE("ux: %.4f", ux);
      float uy = *reinterpret_cast<const float*>(&frame_data[32]);
      ROS_INFO_ONCE("uy: %.4f", uy);
      float r1 = *reinterpret_cast<const float*>(&frame_data[36]);
      ROS_INFO_ONCE("r1: %.4f", r1);
      float r2 = *reinterpret_cast<const float*>(&frame_data[40]);
      ROS_INFO_ONCE("r2: %.4f", r2);
      float t1 = *reinterpret_cast<const float*>(&frame_data[44]);
      ROS_INFO_ONCE("t1: %.4f", t1);
      float t2 = *reinterpret_cast<const float*>(&frame_data[48]);
      ROS_INFO_ONCE("t2: %.4f", t2);
      float r4 = *reinterpret_cast<const float*>(&frame_data[52]);
      ROS_INFO_ONCE("r4: %.4f", r4);

      float intrinsic_yaw = *reinterpret_cast<const float*>(&frame_data[56]);
      float intrinsic_pitch = *reinterpret_cast<const float*>(&frame_data[60]);
      float extrinsic_yaw = *reinterpret_cast<const float*>(&frame_data[64]);
      float extrinsic_pitch = *reinterpret_cast<const float*>(&frame_data[68]);
      float extrinsic_roll = *reinterpret_cast<const float*>(&frame_data[72]);
      float extrinsic_z = *reinterpret_cast<const float*>(&frame_data[76]);
      float extrinsic_y = *reinterpret_cast<const float*>(&frame_data[80]);
      float extrinsic_x = *reinterpret_cast<const float*>(&frame_data[84]);

      ROS_INFO_ONCE("Extrinsics received from DCU:");
      ROS_INFO_ONCE("  x: %f", extrinsic_x);
      ROS_INFO_ONCE("  y: %f", extrinsic_y);
      ROS_INFO_ONCE("  z: %f", extrinsic_z);
      ROS_INFO_ONCE("  r: %f", extrinsic_roll);
      ROS_INFO_ONCE("  p: %f", extrinsic_pitch);
      ROS_INFO_ONCE("  y: %f", extrinsic_yaw);

      // Set extrinsics to global tf
      tf2::Quaternion q_orig, q_rot, q_final;

      // Output extrinsics are in AUTOSAR format; rotate to match the ROS standard
      double r = -1.5707, p = 0.0, y = -1.5707;
      q_rot.setRPY(r, p, y);
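      // Note: -1.5707 approximates -pi/2; using -M_PI_2 from <cmath> (already
      // included above) would make the intended quarter-turns exact.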
322 
323  global_tf_.transform.translation.x = extrinsic_x;
324  global_tf_.transform.translation.y = extrinsic_y;
325  global_tf_.transform.translation.z = extrinsic_z;
326  q_orig.setRPY(extrinsic_roll, extrinsic_pitch, extrinsic_yaw);
327  q_final = q_orig * q_rot; // Calculate actual orientation
328  q_final.normalize();
329  global_tf_.transform.rotation = tf2::toMsg(q_final);
330 
      // Check camera info manager
      if (camera_info_manager_ != NULL)
      {
        auto ci = camera_info_manager_->getCameraInfo();

        if (ci.K[0] != fx)
        {
          ROS_WARN("Initialized intrinsics do not match those received from sensor");
          ROS_WARN("Setting intrinsics to values received from sensor");
          // Set default values
          ci.distortion_model = "rational_polynomial";
          ci.height = FRAME_ROWS;
          ci.width = FRAME_COLUMNS;
          ci.D.resize(8);
          ci.D[0] = r1;
          ci.D[1] = r2;
          ci.D[2] = t1;
          ci.D[3] = t2;
          ci.D[4] = 0;
          ci.D[5] = r4;
          ci.D[6] = 0;
          ci.D[7] = 0;

          ci.K[0] = fx;
          ci.K[2] = ux;
          ci.K[4] = fy;
          ci.K[5] = uy;
          ci.K[8] = 1;

          // P is a 3x4 row-major projection matrix, so fy and uy sit one slot
          // later than in the 3x3 K, and the trailing 1 belongs at P[10]
          ci.P[0] = fx;
          ci.P[2] = ux;
          ci.P[5] = fy;
          ci.P[6] = uy;
          ci.P[10] = 1;

          camera_info_manager_->setCameraInfo(ci);

          transform_ = initTransform(cv::Mat_<double>(3, 3, &ci.K[0]),
                                     cv::Mat(ci.D), ci.width, ci.height, true);
        }
      }
    }
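
    // A sketch of how a downstream node could consume these intrinsics
    // (image_geometry is an assumed consumer here, not a dependency of this
    // file):
    //   image_geometry::PinholeCameraModel model;
    //   model.fromCameraInfo(camera_info_manager_->getCameraInfo());
    //   cv::Point3d ray = model.projectPixelTo3dRay(cv::Point2d(64, 16));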

    // Parse image data
    parseFrame(92, frame_data);

    // Last frame packet, publish frame data
    if (row_ == 0)
    {
      // Get camera info
      auto ci = camera_info_manager_->getCameraInfo();
      sensor_msgs::CameraInfoPtr flash_cam_info(new sensor_msgs::CameraInfo(ci));

      flash_cam_info->header = *frame_header_message_;

      p_image_depth_->header = *frame_header_message_;
      p_image_intensity_->header = *frame_header_message_;
      pub_depth_.publish(p_image_depth_->toImageMsg(), flash_cam_info);
      pub_intensity_.publish(p_image_intensity_->toImageMsg(), flash_cam_info);

      p_image_depth2_->header = *frame_header_message_;
      p_image_intensity2_->header = *frame_header_message_;
      pub_depth2_.publish(p_image_depth2_->toImageMsg(), flash_cam_info);
      pub_intensity2_.publish(p_image_intensity2_->toImageMsg(), flash_cam_info);

      p_image_crosstalk_->header = *frame_header_message_;
      p_image_crosstalk2_->header = *frame_header_message_;
      p_image_saturated_->header = *frame_header_message_;
      p_image_saturated2_->header = *frame_header_message_;
      p_image_superimposed_->header = *frame_header_message_;
      p_image_superimposed2_->header = *frame_header_message_;

      pub_ct_.publish(p_image_crosstalk_->toImageMsg(), flash_cam_info);
      pub_ct2_.publish(p_image_crosstalk2_->toImageMsg(), flash_cam_info);
      pub_sat_.publish(p_image_saturated_->toImageMsg(), flash_cam_info);
      pub_sat2_.publish(p_image_saturated2_->toImageMsg(), flash_cam_info);
      pub_si_.publish(p_image_superimposed_->toImageMsg(), flash_cam_info);
      pub_si2_.publish(p_image_superimposed2_->toImageMsg(), flash_cam_info);
      // Iterators over the pointcloud fields declared above
      sensor_msgs::PointCloud2Iterator<float> out_x(*pointcloud_, "x");
      sensor_msgs::PointCloud2Iterator<float> out_y(*pointcloud_, "y");
      sensor_msgs::PointCloud2Iterator<float> out_z(*pointcloud_, "z");
      sensor_msgs::PointCloud2Iterator<float> out_i(*pointcloud_, "intensity");
      sensor_msgs::PointCloud2Iterator<uint8_t> out_r(*pointcloud_, "return");
      sensor_msgs::PointCloud2Iterator<uint8_t> out_ct(*pointcloud_, "crosstalk");
      sensor_msgs::PointCloud2Iterator<uint8_t> out_sat(*pointcloud_, "saturated");
      sensor_msgs::PointCloud2Iterator<uint8_t> out_si(*pointcloud_, "superimposed");

      // Loop through rows and cols
      for (row_ = 0; row_ < FRAME_ROWS; row_ += 1)
      {
        for (col_ = 0; col_ < FRAME_COLUMNS; col_ += 1)
        {
          // Return 1
          const cv::Vec3f& cvPoint =
            transform_.at<cv::Vec3f>(col_, row_) *
            p_image_depth_->image.at<float>(cv::Point(col_, row_));

          *out_x = cvPoint(0);
          *out_y = cvPoint(1);
          *out_z = cvPoint(2);
          *out_i = p_image_intensity_->image.at<uint16_t>(cv::Point(col_, row_));
          *out_r = 1;
          *out_ct = p_image_crosstalk_->image.at<uint8_t>(cv::Point(col_, row_));
          *out_sat = p_image_saturated_->image.at<uint8_t>(cv::Point(col_, row_));
          *out_si = p_image_superimposed_->image.at<uint8_t>(cv::Point(col_, row_));

          out_x += 1;
          out_y += 1;
          out_z += 1;
          out_i += 1;
          out_r += 1;
          out_ct += 1;
          out_sat += 1;
          out_si += 1;

          // Return 2
          const cv::Vec3f& cvPoint2 = transform_.at<cv::Vec3f>(col_, row_) *
            p_image_depth2_->image.at<float>(cv::Point(col_, row_));

          *out_x = cvPoint2(0);
          *out_y = cvPoint2(1);
          *out_z = cvPoint2(2);
          *out_i = p_image_intensity2_->image.at<uint16_t>(cv::Point(col_, row_));
          *out_r = 2;
          *out_ct = p_image_crosstalk2_->image.at<uint8_t>(cv::Point(col_, row_));
          *out_sat = p_image_saturated2_->image.at<uint8_t>(cv::Point(col_, row_));
          *out_si = p_image_superimposed2_->image.at<uint8_t>(cv::Point(col_, row_));

          out_x += 1;
          out_y += 1;
          out_z += 1;
          out_i += 1;
          out_r += 1;
          out_ct += 1;
          out_sat += 1;
          out_si += 1;
        }
      }

      // Publish transform
      global_tf_.header = *tf_header_message_;

      // Publish pointcloud
      pub_points_.publish(*pointcloud_);
    }
  }
  return true;
}

bool HFL110DCU::parseObjects(int start_byte, const std::vector<uint8_t>& packet)
{
  int count = objects_.size();
  int last_object = 0;
  if (count == 0) {
    // First packet, stop after 11 objects
    last_object = 11;
  } else if (count == 11) {
    // Second packet, stop after 20 objects
    last_object = 20;
  }

  for (int i = start_byte; i < packet.size(); i += 129)
  {
    if (count == last_object)
    {
      break;
    }
    // Create new object
    objects_.push_back(hflObj());
    // Object geometry attributes
    objects_[count].geometry.x_rear_r = *reinterpret_cast<const float*>(&packet[i + 0]);
    objects_[count].geometry.y_rear_r = *reinterpret_cast<const float*>(&packet[i + 4]);
    objects_[count].geometry.x_rear_l = *reinterpret_cast<const float*>(&packet[i + 8]);
    objects_[count].geometry.y_rear_l = *reinterpret_cast<const float*>(&packet[i + 12]);
    objects_[count].geometry.x_front_l = *reinterpret_cast<const float*>(&packet[i + 16]);
    objects_[count].geometry.y_front_l = *reinterpret_cast<const float*>(&packet[i + 20]);
    objects_[count].geometry.height = *reinterpret_cast<const float*>(&packet[i + 24]);
    objects_[count].geometry.ground_offset = *reinterpret_cast<const float*>(&packet[i + 28]);
    objects_[count].geometry.fDistX = *reinterpret_cast<const float*>(&packet[i + 32]);
    objects_[count].geometry.fDistY = *reinterpret_cast<const float*>(&packet[i + 36]);
    objects_[count].geometry.yaw = *reinterpret_cast<const float*>(&packet[i + 40]);
    // Object kinematic attributes
    objects_[count].kinematics.fVabsX = *reinterpret_cast<const float*>(&packet[i + 44]);
    objects_[count].kinematics.fVabsY = *reinterpret_cast<const float*>(&packet[i + 48]);
    objects_[count].kinematics.fVrelX = *reinterpret_cast<const float*>(&packet[i + 52]);
    objects_[count].kinematics.fVrelY = *reinterpret_cast<const float*>(&packet[i + 56]);
    objects_[count].kinematics.fAabsX = *reinterpret_cast<const float*>(&packet[i + 60]);
    objects_[count].kinematics.fDistXDistY = *reinterpret_cast<const float*>(&packet[i + 64]);
    objects_[count].kinematics.fDistXVx = *reinterpret_cast<const float*>(&packet[i + 68]);
    objects_[count].kinematics.fDistXVy = *reinterpret_cast<const float*>(&packet[i + 72]);
    objects_[count].kinematics.fDistXAx = *reinterpret_cast<const float*>(&packet[i + 76]);
    objects_[count].kinematics.fDistXAy = *reinterpret_cast<const float*>(&packet[i + 80]);
    objects_[count].kinematics.fDistYVx = *reinterpret_cast<const float*>(&packet[i + 84]);
    objects_[count].kinematics.fDistYVy = *reinterpret_cast<const float*>(&packet[i + 88]);
    objects_[count].kinematics.fDistYAx = *reinterpret_cast<const float*>(&packet[i + 92]);
    objects_[count].kinematics.fDistYAy = *reinterpret_cast<const float*>(&packet[i + 96]);
    objects_[count].kinematics.fVxVy = *reinterpret_cast<const float*>(&packet[i + 100]);
    objects_[count].kinematics.fVxAx = *reinterpret_cast<const float*>(&packet[i + 104]);
    objects_[count].kinematics.fVxAy = *reinterpret_cast<const float*>(&packet[i + 108]);
    objects_[count].kinematics.fVyAx = *reinterpret_cast<const float*>(&packet[i + 112]);
    objects_[count].kinematics.fVyAy = *reinterpret_cast<const float*>(&packet[i + 116]);
    objects_[count].kinematics.fAxAy = *reinterpret_cast<const float*>(&packet[i + 120]);
    // Object state
    objects_[count].state = *reinterpret_cast<const hfl::objState*>(&packet[i + 124]);
    objects_[count].dynamic_props = *reinterpret_cast<const hfl::objDyn*>(&packet[i + 125]);
    objects_[count].quality = *reinterpret_cast<const uint8_t*>(&packet[i + 126]);
    // Object classification attributes
    objects_[count].classification = *reinterpret_cast<const uint8_t*>(&packet[i + 127]);
    objects_[count].confidence = *reinterpret_cast<const uint8_t*>(&packet[i + 128]);
    count += 1;
  }

  return true;
}
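
// The object fields above are read with reinterpret_cast, which assumes the
// 129-byte records arrive in host byte order and at safe alignment. A sketch
// of an alignment- and aliasing-safe alternative (hypothetical helper, not
// part of this driver; requires <cstring>):
static float readObjectFloat(const std::vector<uint8_t>& buf, size_t offset)
{
  float value;
  std::memcpy(&value, &buf[offset], sizeof(value));  // well-defined for any alignment
  return value;
}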

bool HFL110DCU::processObjectData(const std::vector<uint8_t>& object_data)
{
  // Grab the time when the packet was received
  object_header_message_->stamp = ros::Time::now();
  object_header_message_->seq += 1;

  // Identify packet by fragmentation offset
  uint32_t obj_packet =
    (big_to_native(*reinterpret_cast<const uint32_t*>(&object_data[10])) >> 0) & 1;

  parseObjects(14, object_data);

  if (obj_packet == 1)
  {
    visualization_msgs::Marker bBox;
    visualization_msgs::MarkerArray marker_array;
    tf2::Quaternion q;

    for (int i = 0; i < objects_.size(); i += 1)
    {
      bBox.pose.position.x = (objects_[i].geometry.x_rear_r + 0.5 *
        (objects_[i].geometry.x_front_l - objects_[i].geometry.x_rear_r)) +
        objects_[i].geometry.fDistX;
      bBox.pose.position.y = (objects_[i].geometry.y_rear_r + 0.5 *
        (objects_[i].geometry.y_front_l - objects_[i].geometry.y_rear_r)) +
        objects_[i].geometry.fDistY;
      bBox.pose.position.z =
        objects_[i].geometry.ground_offset + (objects_[i].geometry.height / 2.0);

      q.setRPY(0, 0, objects_[i].geometry.yaw);
      bBox.pose.orientation = tf2::toMsg(q);

      float length = sqrt((objects_[i].geometry.x_front_l - objects_[i].geometry.x_rear_l) *
        (objects_[i].geometry.x_front_l - objects_[i].geometry.x_rear_l) +
        (objects_[i].geometry.y_front_l - objects_[i].geometry.y_rear_l) *
        (objects_[i].geometry.y_front_l - objects_[i].geometry.y_rear_l));
      float width = sqrt((objects_[i].geometry.x_rear_r - objects_[i].geometry.x_rear_l) *
        (objects_[i].geometry.x_rear_r - objects_[i].geometry.x_rear_l) +
        (objects_[i].geometry.y_rear_r - objects_[i].geometry.y_rear_l) *
        (objects_[i].geometry.y_rear_r - objects_[i].geometry.y_rear_l));
      bBox.scale.x = length;
      bBox.scale.y = width;
      bBox.scale.z = objects_[i].geometry.height + objects_[i].geometry.ground_offset;

      // Set color from classification
      if (objects_[i].classification == 9)
      {
        // TL
        bBox.color.r = 240.0 / 255.0;
        bBox.color.g = 230.0 / 255.0;
        bBox.color.b = 140.0 / 255.0;
        bBox.color.a = objects_[i].confidence / 100.0;
      } else if (objects_[i].classification == 8) {
        // OTHER VEHICLE
        bBox.color.r = 238.0 / 255.0;
        bBox.color.g = 232.0 / 255.0;
        bBox.color.b = 170.0 / 255.0;
        bBox.color.a = objects_[i].confidence / 100.0;
      } else if (objects_[i].classification == 7) {
        // UNCLASSIFIED
        bBox.color.r = 238.0 / 255.0;
        bBox.color.g = 232.0 / 255.0;
        bBox.color.b = 170.0 / 255.0;
        bBox.color.a = objects_[i].confidence / 100.0;
      } else if (objects_[i].classification == 6) {
        // WIDE
        bBox.color.r = 238.0 / 255.0;
        bBox.color.g = 232.0 / 255.0;
        bBox.color.b = 170.0 / 255.0;
        bBox.color.a = objects_[i].confidence / 100.0;
      } else if (objects_[i].classification == 5) {
        // BICYCLE
        bBox.color.r = 255.0 / 255.0;
        bBox.color.g = 140.0 / 255.0;
        bBox.color.b = 0;
        bBox.color.a = objects_[i].confidence / 100.0;
      } else if (objects_[i].classification == 4) {
        // MOTORCYCLE
        bBox.color.r = 230 / 255.0;
        bBox.color.g = 190 / 255.0;
        bBox.color.b = 138 / 255.0;
        bBox.color.a = objects_[i].confidence / 100.0;
      } else if (objects_[i].classification == 3) {
        // PERSON
        bBox.color.r = 215 / 255.0;
        bBox.color.g = 215 / 255.0;
        bBox.color.b = 0;
        bBox.color.a = objects_[i].confidence / 100.0;
      } else if (objects_[i].classification == 2) {
        // TRUCK
        bBox.color.r = 218 / 255.0;
        bBox.color.g = 165 / 255.0;
        bBox.color.b = 32 / 255.0;
        bBox.color.a = objects_[i].confidence / 100.0;
      } else if (objects_[i].classification == 1) {
        // CAR
        bBox.color.r = 139 / 255.0;
        bBox.color.g = 69 / 255.0;
        bBox.color.b = 19 / 255.0;
        bBox.color.a = objects_[i].confidence / 100.0;
      } else if (objects_[i].classification == 0) {
        // POINT
        bBox.color.r = 210 / 255.0;
        bBox.color.g = 105 / 255.0;
        bBox.color.b = 30 / 255.0;
        bBox.color.a = objects_[i].confidence / 100.0;
      }

      bBox.type = 1;  // visualization_msgs::Marker::CUBE
      bBox.id = i;
      bBox.lifetime = ros::Duration();
      bBox.frame_locked = false;
      // bBox.text = ("OBJECT%i", i);
      bBox.action = visualization_msgs::Marker::ADD;
      bBox.header = *object_header_message_;

      marker_array.markers.push_back(bBox);
    }
    pub_objects_.publish(marker_array);
    objects_.clear();
  }

  return true;
}

bool HFL110DCU::processTelemetryData(const std::vector<uint8_t>& tele_data)
{
  // Grab the time when the packet was received
  tele_header_message_->stamp = ros::Time::now();
  tele_header_message_->seq += 1;

  telem_.uiHardwareRevision =
    (big_to_native(*reinterpret_cast<const uint32_t*>(&tele_data[0])));
  telem_.fSensorTemp =
    (*reinterpret_cast<const float*>(&tele_data[4]));
  telem_.fHeaterTemp =
    (-*reinterpret_cast<const float*>(&tele_data[8]));
  telem_.uiFrameCounter =
    (big_to_native(*reinterpret_cast<const uint32_t*>(&tele_data[12])));
  telem_.fADCUbattSW =
    (*reinterpret_cast<const float*>(&tele_data[16]));
  telem_.fADCUbatt =
    (*reinterpret_cast<const float*>(&tele_data[20]));
  telem_.fADCHeaterLens =
    (*reinterpret_cast<const float*>(&tele_data[24]));
  telem_.fADCHeaterLensHigh =
    (*reinterpret_cast<const float*>(&tele_data[28]));
  telem_.fADCTemp0Lens =
    (*reinterpret_cast<const float*>(&tele_data[32]));
  telem_.fAcquisitionPeriod =
    (*reinterpret_cast<const float*>(&tele_data[36]));
  telem_.uiTempSensorFeedback =
    (unsigned(*reinterpret_cast<const uint8_t*>(&tele_data[40])));

  for (int i = 25; i >= 0; i--)
  {
    telem_.au8SerialNumber[25 - i] =
      (*reinterpret_cast<const char*>(&tele_data[41 + i]));
  }
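  // The 26-byte serial number arrives in reverse byte order, so the loop
  // above flips it while copying into au8SerialNumber.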

  // ROS_INFO("sensor temp: %u", *reinterpret_cast<const uint8_t*>(&tele_data[40]));

  // Update diagnostics
  updater_.update();
  return true;
}

bool HFL110DCU::processSliceData(const std::vector<uint8_t>& slice_data)
{
  // INTERNAL
  return true;
}

cv::Mat HFL110DCU::initTransform(cv::Mat cameraMatrix, cv::Mat distCoeffs,
                                 int width, int height, bool radial)
{
  int i, j;
  int totalsize = width * height;
  cv::Mat pixelVectors(1, totalsize, CV_32FC3);
  cv::Mat dst(1, totalsize, CV_32FC3);

  cv::Mat sensorPoints(cv::Size(height, width), CV_32FC2);
  cv::Mat undistortedSensorPoints(1, totalsize, CV_32FC2);

  std::vector<cv::Mat> ch;
  for (j = 0; j < height; j++)
  {
    for (i = 0; i < width; i++)
    {
      cv::Vec2f& p = sensorPoints.at<cv::Vec2f>(i, j);
      p[0] = i;
      p[1] = j;
    }
  }

  sensorPoints = sensorPoints.reshape(2, 1);

  cv::undistortPoints(sensorPoints, undistortedSensorPoints, cameraMatrix, distCoeffs);

  ch.push_back(undistortedSensorPoints);
  ch.push_back(cv::Mat::ones(1, totalsize, CV_32FC1));
  cv::merge(ch, pixelVectors);

  if (radial)
  {
    for (i = 0; i < totalsize; i++)
    {
      normalize(pixelVectors.at<cv::Vec3f>(i),
                dst.at<cv::Vec3f>(i));
    }
    pixelVectors = dst;
  }
  return pixelVectors.reshape(3, width);
}

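// Illustrative back-projection using the ray image built by initTransform()
// (a sketch, not driver code): with radial=true each CV_32FC3 element is a
// unit view ray, so scaling it by a measured range yields a 3D point, as done
// in processFrameData().
static cv::Vec3f backProject(const cv::Mat& rays, int col, int row, float range_m)
{
  // Indexed (col, row) to mirror the transform_ lookup in processFrameData()
  return rays.at<cv::Vec3f>(col, row) * range_m;
}
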
void HFL110DCU::update_diagnostics(diagnostic_updater::DiagnosticStatusWrapper& stat)
{
  // Put telemetry data in the diagnostic msg
  stat.add("uiHardwareRevision", telem_.uiHardwareRevision);
  stat.add("fSensorTemp", telem_.fSensorTemp);
  stat.add("fHeaterTemp", telem_.fHeaterTemp);
  stat.add("uiFrameCounter", telem_.uiFrameCounter);
  stat.add("fADCUbattSW", telem_.fADCUbattSW);
  stat.add("fADCUbatt", telem_.fADCUbatt);
  stat.add("fADCHeaterLens", telem_.fADCHeaterLens);
  stat.add("fADCHeaterLensHigh", telem_.fADCHeaterLensHigh);
  stat.add("fADCTemp0Lens", telem_.fADCTemp0Lens);
  stat.add("fAcquisitionPeriod", telem_.fAcquisitionPeriod);
  stat.add("uiTempSensorFeedback", telem_.uiTempSensorFeedback);
  // TODO(flynneva): should reset HardwareID using this serial number
  stat.add("au8SerialNumber", telem_.au8SerialNumber);

  // TODO(flynneva): add some logic here to check if everything is ok
  stat.level = diagnostic_msgs::DiagnosticStatus::OK;
  stat.message = "OK";
}

}  // namespace hfl