// stereo_camera.cpp

#include "ensenso_camera/stereo_camera.h"

#include <pcl_conversions/pcl_conversions.h>

StereoCamera::StereoCamera(ensenso::ros::NodeHandle& nh, CameraParameters params) : Camera(nh, std::move(params))
{
  leftCameraInfo = ensenso::std::make_shared<sensor_msgs::msg::CameraInfo>();
  rightCameraInfo = ensenso::std::make_shared<sensor_msgs::msg::CameraInfo>();
  leftRectifiedCameraInfo = ensenso::std::make_shared<sensor_msgs::msg::CameraInfo>();
  rightRectifiedCameraInfo = ensenso::std::make_shared<sensor_msgs::msg::CameraInfo>();

  fitPrimitiveServer = MAKE_SERVER(StereoCamera, FitPrimitive, "fit_primitive");
  setParameterServer = MAKE_SERVER(StereoCamera, SetParameter, "set_parameter");
  requestDataServer = MAKE_SERVER(StereoCamera, RequestData, "request_data");
  locatePatternServer = MAKE_SERVER(StereoCamera, LocatePattern, "locate_pattern");
  projectPatternServer = MAKE_SERVER(StereoCamera, ProjectPattern, "project_pattern");
  calibrateHandEyeServer = MAKE_SERVER(StereoCamera, CalibrateHandEye, "calibrate_hand_eye");
  calibrateWorkspaceServer = MAKE_SERVER(StereoCamera, CalibrateWorkspace, "calibrate_workspace");
  telecentricProjectionServer = MAKE_SERVER(StereoCamera, TelecentricProjection, "project_telecentric");
  texturedPointCloudServer = MAKE_SERVER(StereoCamera, TexturedPointCloud, "texture_point_cloud");
}
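
// Each action server created above is wired to the equally named onX callback defined below
// (e.g. "fit_primitive" -> onFitPrimitive); the MAKE_SERVER macro from action_server.h presumably
// performs this binding from the class and action name.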

void StereoCamera::updateCameraTypeSpecifics()
{
  // The S-series has only one sensor, so there is no right camera to provide info for.
  if (!hasRightCamera())
  {
    rightCameraInfo = nullptr;
    rightRectifiedCameraInfo = nullptr;
  }
}

void StereoCamera::initPublishers()
{
  leftRawImagePublisher = ensenso::image_transport::create_camera_publisher(nh, "raw/left/image");
  leftRectifiedImagePublisher = ensenso::image_transport::create_camera_publisher(nh, "rectified/left/image");
  if (hasRightCamera())
  {
    rightRawImagePublisher = ensenso::image_transport::create_camera_publisher(nh, "raw/right/image");
    rightRectifiedImagePublisher = ensenso::image_transport::create_camera_publisher(nh, "rectified/right/image");
  }
  if (hasDisparityMap())
  {
    disparityMapPublisher = ensenso::image_transport::create_camera_publisher(nh, "disparity_map");
  }
  depthImagePublisher = ensenso::image_transport::create_camera_publisher(nh, "depth/image");
  projectedImagePublisher = ensenso::image_transport::create_publisher(nh, "depth/projected_depth_map");

  pointCloudPublisher = ensenso::pcl::create_publisher<ensenso::pcl::PointCloud>(nh, "point_cloud", 1);
  pointCloudNormalsPublisher = ensenso::pcl::create_publisher<ensenso::pcl::PointCloudNormals>(nh, "point_cloud", 1);
  pointCloudColoredPublisher =
      ensenso::pcl::create_publisher<ensenso::pcl::PointCloudColored>(nh, "point_cloud_color", 1);
  pointCloudProjectedPublisher =
      ensenso::pcl::create_publisher<ensenso::pcl::PointCloud>(nh, "projected_point_cloud", 1);
}

void StereoCamera::init()
{
  initPublishers();
  startServers();
  initTfPublishTimer();
  initStatusTimer();
}

void StereoCamera::startServers() const
{
  Camera::startServers();
  requestDataServer->start();
  fitPrimitiveServer->start();
  calibrateHandEyeServer->start();
  calibrateWorkspaceServer->start();
  locatePatternServer->start();
  projectPatternServer->start();
  texturedPointCloudServer->start();
  telecentricProjectionServer->start();
}

void StereoCamera::onFitPrimitive(ensenso::action::FitPrimitiveGoalConstPtr const& goal)
{
  START_NXLIB_ACTION(FitPrimitive, fitPrimitiveServer)

  NxLibCommand fitPrimitives(cmdFitPrimitive, params.serial);
  NxLibItem const& primitives = fitPrimitives.parameters()[itmPrimitive];

  int primitivesCount = 0;
  for (auto const& primitive : goal->primitives)
  {
    if (primitive.type == ensenso::msg::Primitive::SPHERE)
    {
      primitives[primitivesCount][itmRadius][itmMin] = primitive.min_radius * ensenso_conversion::conversionFactor;
      primitives[primitivesCount][itmRadius][itmMax] = primitive.max_radius * ensenso_conversion::conversionFactor;
      primitives[primitivesCount][itmCount] = primitive.count;
    }
    else if (primitive.type == ensenso::msg::Primitive::CYLINDER)
    {
      primitives[primitivesCount][itmRadius][itmMin] = primitive.min_radius * ensenso_conversion::conversionFactor;
      primitives[primitivesCount][itmRadius][itmMax] = primitive.max_radius * ensenso_conversion::conversionFactor;
    }
    else if (primitive.type == ensenso::msg::Primitive::PLANE)
    {
      primitives[primitivesCount][itmCount] = primitive.count;
    }
    primitives[primitivesCount][itmType] = primitive.type;
    primitives[primitivesCount][itmInlierThreshold] = primitive.inlier_threshold * ensenso_conversion::conversionFactor;
    primitives[primitivesCount][itmInlierFraction] = primitive.inlier_fraction_in;

    primitivesCount++;
  }

  NxLibItem const& commandParameters = fitPrimitives.parameters();
  if (goal->normal_radius)
  {
    commandParameters[itmNormal][itmRadius] = goal->normal_radius;
  }

  float const zeroThreshold = 10e-6;
  if (!(std::abs(goal->region_of_interest.lower.x) < zeroThreshold &&
        std::abs(goal->region_of_interest.lower.y) < zeroThreshold &&
        std::abs(goal->region_of_interest.lower.z) < zeroThreshold))
  {
    commandParameters[itmBoundingBox][itmMin] << ensenso_conversion::toEnsensoPoint(goal->region_of_interest.lower);
  }
  if (!(std::abs(goal->region_of_interest.upper.x) < zeroThreshold &&
        std::abs(goal->region_of_interest.upper.y) < zeroThreshold &&
        std::abs(goal->region_of_interest.upper.z) < zeroThreshold))
  {
    commandParameters[itmBoundingBox][itmMax] << ensenso_conversion::toEnsensoPoint(goal->region_of_interest.upper);
  }
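  // A corner whose components are all (close to) zero is treated as "not given", since unset fields
  // of the action goal default to zero.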

  if (goal->failure_probability != 0)
  {
    commandParameters[itmFailureProbability] = goal->failure_probability;
  }

  if (std::abs(goal->inlier_threshold) > zeroThreshold)
  {
    commandParameters[itmInlierThreshold] = goal->inlier_threshold * ensenso_conversion::conversionFactor;
  }
  if (std::abs(goal->inlier_fraction) > zeroThreshold)
  {
    commandParameters[itmInlierFraction] = goal->inlier_fraction;
  }
  if (std::abs(goal->scaling) > zeroThreshold)
  {
    commandParameters[itmScaling] = goal->scaling;
  }
  if (goal->ransac_iterations != 0)
  {
    commandParameters[itmIterations] = goal->ransac_iterations;
  }

  fitPrimitives.execute();

  ensenso::action::FitPrimitiveResult result;

  NxLibItem const primitiveResults = fitPrimitives.result()[itmPrimitive];
  if (primitiveResults.isArray())
  {
    result.primitives.reserve(primitiveResults.count());
    for (int primitiveCount = 0; primitiveCount < primitiveResults.count(); primitiveCount++)
    {
      NxLibItem const& currentPrimitive = primitiveResults[primitiveCount];
      ensenso::msg::Primitive primitive;

      primitive.type = currentPrimitive[itmType].asString();
      primitive.ransac_iterations = currentPrimitive[itmIterations].asInt();
      primitive.inlier_count = currentPrimitive[itmInlierCount].asInt();
      primitive.inlier_fraction_out = currentPrimitive[itmInlierFraction].asDouble();
      primitive.score = currentPrimitive[itmScore].asDouble();
      primitive.center = ensenso_conversion::toRosPoint(currentPrimitive[itmCenter]);

      if (primitive.type == ensenso::msg::Primitive::PLANE)
      {
        primitive.axes.push_back(ensenso_conversion::toRosPoint(currentPrimitive[itmAxis][0]));
        primitive.axes.push_back(ensenso_conversion::toRosPoint(currentPrimitive[itmAxis][1]));
        primitive.normal = ensenso_conversion::toRosPoint(currentPrimitive[itmNormal], false);
      }
      else if (primitive.type == ensenso::msg::Primitive::SPHERE)
      {
        primitive.radius = currentPrimitive[itmRadius].asDouble() / ensenso_conversion::conversionFactor;
      }
      else if (primitive.type == ensenso::msg::Primitive::CYLINDER)
      {
        primitive.axis = ensenso_conversion::toRosPoint(currentPrimitive[itmAxis]);
      }
      result.primitives.emplace_back(primitive);
    }
  }

  fitPrimitiveServer->setSucceeded(std::move(result));

  FINISH_NXLIB_ACTION(FitPrimitive)
}
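
// Usage sketch (not part of the driver): a FitPrimitive goal that searches for a single plane.
// Field names follow the ensenso::msg::Primitive usage in onFitPrimitive above; the threshold
// values are only illustrative.
//
//   ensenso::action::FitPrimitiveGoal goal;
//   ensenso::msg::Primitive plane;
//   plane.type = ensenso::msg::Primitive::PLANE;
//   plane.count = 1;                  // fit a single plane
//   plane.inlier_threshold = 0.005;   // in meters, converted to NxLib units by the server
//   plane.inlier_fraction_in = 0.1;
//   goal.primitives.push_back(plane);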

void StereoCamera::onRequestData(ensenso::action::RequestDataGoalConstPtr const& goal)
{
  START_NXLIB_ACTION(RequestData, requestDataServer)

  ensenso::action::RequestDataResult result;
  ensenso::action::RequestDataFeedback feedback;

  // Automatically enable publishing if neither publish_results nor include_results_in_response is enabled.
  bool publishResults = goal->publish_results;
  if (!goal->publish_results && !goal->include_results_in_response)
  {
    publishResults = true;
  }

  // Automatically disable requesting raw images if the camera does not have any.
  bool requestRawImages = goal->request_raw_images;
  if (isXrSeries() && !hasRawImages() && requestRawImages)
  {
    ENSENSO_WARN(nh, "XR: Capture mode \"Rectified\", skipping raw images request.");
    requestRawImages = false;
  }

  // Automatically disable requesting rectified images if the camera does not have any.
  bool requestRectifiedImages = goal->request_rectified_images;
  if (isXrSeries() && hasRawImages() && requestRectifiedImages)
  {
    ENSENSO_WARN(nh, "XR: Capture mode \"Raw\", skipping rectified images request.");
    requestRectifiedImages = false;
  }

  // Automatically disable requesting raw/rectified images if the camera has not downloaded any.
  if (isXrSeries() && !hasDownloadedImages() && (requestRawImages || requestRectifiedImages))
  {
    ENSENSO_WARN(nh, "XR: Downloading raw/rectified images is disabled, skipping the raw/rectified images request.");
    requestRawImages = false;
    requestRectifiedImages = false;
  }

  bool requestDisparityMap = goal->request_disparity_map;

  bool requestDepthImage = goal->request_depth_image;

  // Automatically request the point cloud if no data set is explicitly selected.
  bool requestPointCloud = goal->request_point_cloud || goal->request_depth_image;
  if (!goal->request_raw_images && !goal->request_rectified_images && !goal->request_disparity_map &&
      !goal->request_point_cloud && !goal->request_depth_image && !goal->request_normals)
  {
    requestPointCloud = true;
  }
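  // Note: requestPointCloud is also set when a depth image is requested, because the depth image is
  // rendered from the point map computed for the point cloud (see the requestDepthImage branch below).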

  bool requestNormals = goal->request_normals;

  // Normals require computing the point cloud, which in turn requires computing the disparity map first.
  bool computePointCloud = requestPointCloud || requestNormals;
  bool computeDisparityMap = requestDisparityMap || computePointCloud;

  // Automatically disable requesting the disparity map if the camera does not have one.
  if (!hasDisparityMap())
  {
    requestDisparityMap = false;
  }

  // Automatically disable requesting 3D data if the camera is an XR and only has raw images.
  if (isXrSeries() && hasRawImages() && (computeDisparityMap || computePointCloud))
  {
    ENSENSO_WARN(nh,
                 "XR: Capture mode \"Raw\", skipping all 3D data requests! Only raw images are captured and they can "
                 "only be used for calibration actions. Rectifying these images afterwards is not possible and they "
                 "cannot be used to compute 3D data. If you want to retrieve 3D data, set capture mode to "
                 "\"Rectified\".");
    requestDisparityMap = false;
    requestDepthImage = false;
    requestPointCloud = false;
    requestNormals = false;
    computePointCloud = false;
    computeDisparityMap = false;
  }

  loadParameterSet(goal->parameter_set, computeDisparityMap ? projectorOn : projectorOff);
  ensenso::ros::Time imageTimestamp = capture();

  feedback.images_acquired = true;
  requestDataServer->publishFeedback(feedback);

  if (requestRawImages)
  {
    auto rawImages = imagePairsFromNxLibNode(cameraNode[itmImages][itmRaw], params.cameraFrame, params.isFileCamera);

    leftCameraInfo->header.stamp = rawImages[0].first->header.stamp;
    if (hasRightCamera())
    {
      rightCameraInfo->header.stamp = leftCameraInfo->header.stamp;
    }

    if (goal->include_results_in_response)
    {
      for (auto const& imagePair : rawImages)
      {
        result.left_raw_images.push_back(*imagePair.first);
        if (hasRightCamera())
        {
          result.right_raw_images.push_back(*imagePair.second);
        }
      }
      result.left_camera_info = *leftCameraInfo;
      if (hasRightCamera())
      {
        result.right_camera_info = *rightCameraInfo;
      }
    }
    if (publishResults)
    {
      // We only publish one of the images on the topic, even if FlexView is enabled.
      leftRawImagePublisher.publish(rawImages[0].first, leftCameraInfo);
      if (hasRightCamera())
      {
        rightRawImagePublisher.publish(rawImages[0].second, rightCameraInfo);
      }
    }
  }

  // Only call cmdRectifyImages if just the rectified images are requested. Otherwise the rectification is implicitly
  // invoked by cmdComputeDisparityMap, which is more efficient when using CUDA.
  if (requestRectifiedImages && !computeDisparityMap)
  {
    NxLibCommand rectify(cmdRectifyImages, params.serial);
    rectify.parameters()[itmCameras] = params.serial;
    rectify.execute();
  }
  else if (computeDisparityMap)
  {
    // Since the S-series currently uses cmdComputeDisparityMap instead of cmdComputePointMap to get the point map, the
    // global link has to be updated here before cmdComputeDisparityMap is executed.
    updateGlobalLink(imageTimestamp, "", goal->use_cached_transformation);

    NxLibCommand disparityMapCommand(cmdComputeDisparityMap, params.serial);
    disparityMapCommand.parameters()[itmCameras] = params.serial;
    disparityMapCommand.execute();
  }

  if (requestRectifiedImages)
  {
    auto rectifiedImages =
        imagePairsFromNxLibNode(cameraNode[itmImages][itmRectified], params.cameraFrame, params.isFileCamera);

    leftRectifiedCameraInfo->header.stamp = rectifiedImages[0].first->header.stamp;
    if (hasRightCamera())
    {
      rightRectifiedCameraInfo->header.stamp = leftRectifiedCameraInfo->header.stamp;
    }

    if (goal->include_results_in_response)
    {
      for (auto const& imagePair : rectifiedImages)
      {
        result.left_rectified_images.push_back(*imagePair.first);
        if (hasRightCamera())
        {
          result.right_rectified_images.push_back(*imagePair.second);
        }
      }
      result.left_rectified_camera_info = *leftRectifiedCameraInfo;
      if (hasRightCamera())
      {
        result.right_rectified_camera_info = *rightRectifiedCameraInfo;
      }
    }
    if (publishResults)
    {
      // We only publish one of the images on the topic, even if FlexView is enabled.
      leftRectifiedImagePublisher.publish(rectifiedImages[0].first, leftRectifiedCameraInfo);
      if (hasRightCamera())
      {
        rightRectifiedImagePublisher.publish(rectifiedImages[0].second, rightRectifiedCameraInfo);
      }
    }
  }

  auto depthImageCameraInfo = ensenso::std::make_shared<sensor_msgs::msg::CameraInfo>(*leftRectifiedCameraInfo);

  addDisparityMapOffset(depthImageCameraInfo);

  if (requestDisparityMap)
  {
    auto disparityMap =
        imageFromNxLibNode(cameraNode[itmImages][itmDisparityMap], params.cameraFrame, params.isFileCamera);

    depthImageCameraInfo->header.stamp = disparityMap->header.stamp;

    if (goal->include_results_in_response)
    {
      result.disparity_map = *disparityMap;
      result.disparity_map_info = *depthImageCameraInfo;
    }
    if (publishResults)
    {
      disparityMapPublisher.publish(disparityMap, depthImageCameraInfo);
    }
  }

  if (computePointCloud)
  {
    NxLibCommand computePointMap(cmdComputePointMap, params.serial);
    computePointMap.parameters()[itmCameras] = params.serial;
    computePointMap.execute();

    PointCloudROI const* pointCloudROI = nullptr;
    if (parameterSets.at(currentParameterSet).useROI)
    {
      pointCloudROI = &parameterSets.at(currentParameterSet).roi;
    }

    if (requestPointCloud && !requestNormals)
    {
      auto pointCloud = pointCloudFromNxLib(cameraNode[itmImages][itmPointMap], params.targetFrame, params.isFileCamera,
                                            pointCloudROI);

      if (goal->include_results_in_response)
      {
        pcl::toROSMsg(*pointCloud, result.point_cloud);
      }
      if (publishResults)
      {
        publishPointCloud(pointCloudPublisher, std::move(pointCloud));
      }
    }
    else
    {
      NxLibCommand computeNormals(cmdComputeNormals, params.serial);
      computeNormals.execute();

      auto pointCloud =
          pointCloudWithNormalsFromNxLib(cameraNode[itmImages][itmPointMap], cameraNode[itmImages][itmNormals],
                                         params.targetFrame, params.isFileCamera, pointCloudROI);

      if (goal->include_results_in_response)
      {
        pcl::toROSMsg(*pointCloud, result.point_cloud);
      }
      if (publishResults)
      {
        publishPointCloud(pointCloudNormalsPublisher, std::move(pointCloud));
      }
    }
  }

  if (requestDepthImage)
  {
    // In case camera and target frame are different, the point cloud is recomputed with the relative (to world) flag,
    // such that the resulting point cloud (and thus the depth image) is transformed by the NxLib with the transform
    // stored in the stereo camera's link node.
    if (params.cameraFrame != params.targetFrame)
    {
      NxLibCommand computePointMap(cmdComputePointMap, params.serial);
      computePointMap.parameters()[itmCameras] = params.serial;
      computePointMap.parameters()[itmRelative] = true;
      computePointMap.execute();
    }

    auto depthImage =
        depthImageFromNxLibNode(cameraNode[itmImages][itmPointMap], params.cameraFrame, params.isFileCamera);

    depthImageCameraInfo->header.stamp = depthImage->header.stamp;

    if (goal->include_results_in_response)
    {
      result.depth_image = *depthImage;
      result.depth_image_info = *depthImageCameraInfo;
    }
    if (publishResults)
    {
      depthImagePublisher.publish(depthImage, depthImageCameraInfo);
    }
  }

  requestDataServer->setSucceeded(std::move(result));

  FINISH_NXLIB_ACTION(RequestData)
}
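
// Usage sketch (assumption, ROS 1 actionlib client shown): requesting only the point cloud. The
// goal fields are the ones handled above; the action is served under the "request_data" name
// registered in the constructor, and the message package name is assumed here.
//
//   actionlib::SimpleActionClient<ensenso_camera_msgs::RequestDataAction> client("request_data", true);
//   client.waitForServer();
//   ensenso_camera_msgs::RequestDataGoal goal;
//   goal.request_point_cloud = true;
//   goal.publish_results = true;
//   client.sendGoal(goal);
//   client.waitForResult();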

void StereoCamera::onSetParameter(ensenso::action::SetParameterGoalConstPtr const& goal)
{
  START_NXLIB_ACTION(SetParameter, setParameterServer)

  ensenso::action::SetParameterResult result;

  loadParameterSet(goal->parameter_set);

  result.parameter_file_applied = false;
  if (!goal->parameter_file.empty())
  {
    result.parameter_file_applied = loadSettings(goal->parameter_file);
    if (!result.parameter_file_applied)
    {
      setParameterServer->setAborted(std::move(result));
      return;
    }
  }

  bool projectorChanged = false;
  for (auto const& parameter : goal->parameters)
  {
    writeParameter(parameter);

    if (parameter.key == parameter.PROJECTOR || parameter.key == parameter.FRONT_LIGHT)
    {
      projectorChanged = true;
    }
  }

  // Synchronize to make sure that we read back the correct values.
  NxLibCommand synchronize(cmdSynchronize, params.serial);
  synchronize.parameters()[itmCameras] = params.serial;
  synchronize.execute();

  saveParameterSet(goal->parameter_set, projectorChanged);

  // The new parameters might change the camera calibration.
  updateCameraInfo();

  // Read back the actual values.
  for (auto const& parameter : goal->parameters)
  {
    result.results.push_back(*readParameter(parameter.key));
  }

  setParameterServer->setSucceeded(std::move(result));

  FINISH_NXLIB_ACTION(SetParameter)
}
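
// Goal sketch (values illustrative): enabling FlexView with 4 images through the generic parameter
// interface. Values <= 1 disable FlexView again (see writeParameter below).
//
//   ensenso::msg::Parameter flexView;
//   flexView.key = ensenso::msg::Parameter::FLEX_VIEW;
//   flexView.float_value = 4;
//   goal.parameters.push_back(flexView);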

void StereoCamera::onLocatePattern(ensenso::action::LocatePatternGoalConstPtr const& goal)
{
  START_NXLIB_ACTION(LocatePattern, locatePatternServer)

  ensenso::action::LocatePatternResult result;
  ensenso::action::LocatePatternFeedback feedback;

  loadParameterSet(goal->parameter_set, projectorOff);

  int numberOfShots = goal->number_of_shots;
  if (numberOfShots < 1)
  {
    numberOfShots = 1;
  }

  std::vector<StereoCalibrationPattern> patterns;
  ensenso::ros::Time imageTimestamp;
  for (int i = 0; i < numberOfShots; i++)
  {
    ensenso::ros::Time timestamp = capture();
    if (i == 0)
    {
      imageTimestamp = timestamp;
    }

    if (i == (numberOfShots - 1))
    {
      // The last image has been captured.
      feedback.images_acquired = true;
      locatePatternServer->publishFeedback(feedback);
    }

    bool clearBuffer = (i == 0);
    patterns = collectPattern(clearBuffer);
    if (patterns.empty())
    {
      result.found_pattern = false;
      locatePatternServer->setSucceeded(std::move(result));
      return;
    }

    if (patterns.size() > 1)
    {
      // We cannot average multiple shots of multiple patterns, so we stop capturing and estimate the pose of each
      // pattern individually.
      break;
    }
  }

  result.found_pattern = true;
  result.patterns.resize(patterns.size());
  for (size_t i = 0; i < patterns.size(); i++)
  {
    patterns[i].writeToMessage(result.patterns[i]);
  }

  std::string patternFrame = params.targetFrame;
  if (!goal->target_frame.empty())
  {
    patternFrame = goal->target_frame;
  }

  result.frame = patternFrame;

  if (patterns.size() > 1)
  {
    // Estimate the pose of all the patterns we found.
    auto patternPoses = estimatePatternPoses(imageTimestamp, patternFrame);

    result.pattern_poses.resize(patternPoses.size());
    for (size_t i = 0; i < patternPoses.size(); i++)
    {
      result.pattern_poses[i] = patternPoses[i];
    }
  }
  else
  {
    // Estimate the pose of a single pattern, averaging over the different shots.
    auto patternPose = estimatePatternPose(imageTimestamp, patternFrame);

    result.pattern_poses.resize(1);
    result.pattern_poses[0] = patternPose;
  }

  if (!goal->tf_frame.empty())
  {
    if (patterns.size() == 1)
    {
      auto transform = transformFromPose(result.pattern_poses[0], goal->tf_frame);
      transformBroadcaster->sendTransform(transform);
    }
    else
    {
      ENSENSO_WARN(nh, "Cannot publish the pattern pose in tf, because there are multiple patterns!");
    }
  }

  // The target frame has changed.
  publishCurrentLinks();
  locatePatternServer->setSucceeded(std::move(result));

  FINISH_NXLIB_ACTION(LocatePattern)
}

void StereoCamera::onProjectPattern(ensenso::action::ProjectPatternGoalConstPtr const& goal)
{
  START_NXLIB_ACTION(ProjectPattern, projectPatternServer)

  ensenso::action::ProjectPatternResult result;

  tf2::Transform targetFrameTransformation = fromMsg(goal->target_frame_transformation);
  if (isValid(targetFrameTransformation))
  {
    updateTransformations(targetFrameTransformation);
  }
  else
  {
    // Here we need to pass the current timestamp.
    updateGlobalLink(ensenso::ros::now(nh));
  }

  tf2::Transform patternTransform = fromMsg(goal->pattern_pose);

  NxLibCommand projectPattern(cmdProjectPattern, params.serial);
  projectPattern.parameters()[itmCameras] = params.serial;
  projectPattern.parameters()[itmGridSpacing] = goal->grid_spacing * 1000;
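  // The factor 1000 above converts the grid spacing from meters (ROS) to millimeters (NxLib).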
  projectPattern.parameters()[itmGridSize][0] = goal->grid_size_x;
  projectPattern.parameters()[itmGridSize][1] = goal->grid_size_y;

  writeTransformToNxLib(patternTransform, projectPattern.parameters()[itmPatternPose]);

  projectPattern.execute();

  int sensorWidth = cameraNode[itmSensor][itmSize][0].asInt();
  int sensorHeight = cameraNode[itmSensor][itmSize][1].asInt();

  NxLibItem projectedPoints = projectPattern.result()[itmStereo][0][itmPoints];
  NxLibItem leftPoints = projectedPoints[0];
  NxLibItem rightPoints = projectedPoints[1];

  result.pattern_is_visible = true;
  result.left_points.resize(leftPoints.count());
  result.right_points.resize(leftPoints.count());
  for (int i = 0; i < leftPoints.count(); i++)
  {
    double leftX = leftPoints[i][0].asDouble();
    double leftY = leftPoints[i][1].asDouble();
    double rightX = rightPoints[i][0].asDouble();
    double rightY = rightPoints[i][1].asDouble();

    result.left_points[i].x = leftX;
    result.left_points[i].y = leftY;
    result.right_points[i].x = rightX;
    result.right_points[i].y = rightY;

    if (leftX < 0 || leftX > sensorWidth || leftY < 0 || leftY > sensorHeight || rightX < 0 || rightX > sensorWidth ||
        rightY < 0 || rightY > sensorHeight)
    {
      result.pattern_is_visible = false;
    }
  }

  projectPatternServer->setSucceeded(std::move(result));

  FINISH_NXLIB_ACTION(ProjectPattern)
}

void StereoCamera::onCalibrateHandEye(ensenso::action::CalibrateHandEyeGoalConstPtr const& goal)
{
  START_NXLIB_ACTION(CalibrateHandEye, calibrateHandEyeServer)

  ensenso::action::CalibrateHandEyeResult result;
  result.command = goal->command;

  if (goal->command == goal->RESET)
  {
    handEyeCalibrationPatternBuffer.clear();
    handEyeCalibrationRobotTransforms.clear();
  }
  else if (goal->command == goal->CAPTURE_PATTERN)
  {
    if (params.robotFrame.empty() || params.wristFrame.empty())
    {
      result.error_message = "You need to specify a robot base and wrist frame to do a hand-eye calibration!";
      ENSENSO_ERROR(nh, "%s", result.error_message.c_str());
      calibrateHandEyeServer->setAborted(std::move(result));
      return;
    }

    loadParameterSet(goal->parameter_set, projectorOff);
    ensenso::ros::Time imageTimestamp = capture();

    // Load the pattern buffer that we remembered from the previous calibration steps.
    if (!handEyeCalibrationPatternBuffer.empty())
    {
      NxLibCommand setPatternBuffer(cmdSetPatternBuffer, params.serial);
      setPatternBuffer.parameters()[itmPatterns] << handEyeCalibrationPatternBuffer;
      setPatternBuffer.execute();
    }
    else
    {
      NxLibCommand(cmdDiscardPatterns, params.serial).execute();
    }

    std::vector<StereoCalibrationPattern> patterns = collectPattern();
    if (patterns.empty())
    {
      result.found_pattern = false;
      calibrateHandEyeServer->setSucceeded(std::move(result));
      return;
    }
    if (patterns.size() > 1)
    {
      result.error_message = "Detected multiple calibration patterns during a hand-eye calibration!";
      ENSENSO_ERROR(nh, "%s", result.error_message.c_str());
      calibrateHandEyeServer->setAborted(std::move(result));
      return;
    }

    result.found_pattern = true;
    patterns[0].writeToMessage(result.pattern);

    auto patternPose = estimatePatternPose(imageTimestamp, params.cameraFrame, true);
    result.pattern_pose = patternPose.pose;

    geometry_msgs::msg::TransformStamped robotPose;
    try
    {
      robotPose = tfBuffer->lookupTransform(params.robotFrame, params.wristFrame, ensenso::ros::Time(0));
    }
    catch (tf2::TransformException& e)
    {
      result.error_message = std::string("Could not look up the robot pose due to the tf error: ") + e.what();
      ENSENSO_ERROR(nh, "%s", result.error_message.c_str());
      calibrateHandEyeServer->setAborted(std::move(result));
      return;
    }

    // Remember the newly collected data for the next step.
    NxLibCommand getPatternBuffer(cmdGetPatternBuffer, params.serial);
    getPatternBuffer.execute();
    handEyeCalibrationPatternBuffer = getPatternBuffer.result()[itmPatterns].asJson();

    handEyeCalibrationRobotTransforms.push_back(fromMsg(robotPose));

    result.robot_pose = poseFromTransform(robotPose).pose;
  }
  else if (goal->command == goal->START_CALIBRATION)
  {
    if (handEyeCalibrationRobotTransforms.size() < 5)
    {
      result.error_message = "You need to collect at least 5 patterns before starting a hand-eye calibration!";
      ENSENSO_ERROR(nh, "%s", result.error_message.c_str());
      calibrateHandEyeServer->setAborted(std::move(result));
      return;
    }

    // Load the pattern observations.
    size_t numberOfPatterns = 0;
    NxLibCommand setPatternBuffer(cmdSetPatternBuffer, params.serial);
    if (!goal->pattern_observations.empty())
    {
      for (size_t i = 0; i < goal->pattern_observations.size(); i++)
      {
        StereoCalibrationPattern pattern(goal->pattern_observations[i]);

        NxLibItem patternNode = setPatternBuffer.parameters()[itmPatterns][i][params.serial];
        pattern.writeToNxLib(patternNode[itmLeft][0]);
        pattern.writeToNxLib(patternNode[itmRight][0], true);
      }
    }
    else
    {
      setPatternBuffer.parameters()[itmPatterns] << handEyeCalibrationPatternBuffer;
    }
    numberOfPatterns = setPatternBuffer.parameters()[itmPatterns].count();
    setPatternBuffer.execute();

    // Load the corresponding robot transforms.
    auto robotTransforms = handEyeCalibrationRobotTransforms;
    if (!goal->robot_poses.empty())
    {
      robotTransforms.clear();
      for (auto const& pose : goal->robot_poses)
      {
        tf2::Transform transform = fromMsg(pose);
        robotTransforms.push_back(transform);
      }
    }

    if (robotTransforms.size() != numberOfPatterns)
    {
      result.error_message = "The number of pattern observations does not match the number of robot poses!";
      ENSENSO_ERROR(nh, "%s", result.error_message.c_str());
      calibrateHandEyeServer->setAborted(std::move(result));
      return;
    }

    // Load the initial guesses from the action goal.
    tf2::Transform linkTransform, patternTransform;
    linkTransform = fromMsg(goal->link);
    patternTransform = fromMsg(goal->pattern_pose);

    NxLibCommand calibrateHandEye(cmdCalibrateHandEye, params.serial);
    calibrateHandEye.parameters()[itmSetup] = params.fixed ? valFixed : valMoving;

    // Check the robot geometry and set the fixed axes accordingly.
    if (goal->robot_geometry == goal->DOF4_FIX_CAMERA_POSE_Z_COMPONENT)
    {
      calibrateHandEye.parameters()[itmFixed][itmLink][itmTranslation][0] = false;
      calibrateHandEye.parameters()[itmFixed][itmLink][itmTranslation][1] = false;
      calibrateHandEye.parameters()[itmFixed][itmLink][itmTranslation][2] = true;
    }
    else if (goal->robot_geometry == goal->DOF4_FIX_PATTERN_POSE_Z_COMPONENT)
    {
      calibrateHandEye.parameters()[itmFixed][itmPatternPose][itmTranslation][0] = false;
      calibrateHandEye.parameters()[itmFixed][itmPatternPose][itmTranslation][1] = false;
      calibrateHandEye.parameters()[itmFixed][itmPatternPose][itmTranslation][2] = true;
    }
    else if (goal->robot_geometry == goal->DOF3_FIX_CAMERA_POSE)
    {
      calibrateHandEye.parameters()[itmFixed][itmLink][itmTranslation][0] = true;
      calibrateHandEye.parameters()[itmFixed][itmLink][itmTranslation][1] = true;
      calibrateHandEye.parameters()[itmFixed][itmLink][itmTranslation][2] = true;
    }
    else if (goal->robot_geometry == goal->DOF3_FIX_PATTERN_POSE)
    {
      calibrateHandEye.parameters()[itmFixed][itmPatternPose][itmTranslation][0] = true;
      calibrateHandEye.parameters()[itmFixed][itmPatternPose][itmTranslation][1] = true;
      calibrateHandEye.parameters()[itmFixed][itmPatternPose][itmTranslation][2] = true;
    }

    // The target node will be reset anyway before we calculate data for the next time.
    calibrateHandEye.parameters()[itmTarget] = getNxLibTargetFrameName();
    if (isValid(linkTransform))
    {
      writeTransformToNxLib(linkTransform.inverse(), calibrateHandEye.parameters()[itmLink]);
    }
    if (isValid(patternTransform))
    {
      writeTransformToNxLib(patternTransform, calibrateHandEye.parameters()[itmPatternPose]);
    }
    for (size_t i = 0; i < robotTransforms.size(); i++)
    {
      writeTransformToNxLib(robotTransforms[i], calibrateHandEye.parameters()[itmTransformations][i]);
    }

    calibrateHandEye.execute(false);

    auto getCalibrationResidual = [](NxLibItem const& node) {  // NOLINT
      if (node[itmResidual].exists())
      {
        return node[itmResidual].asDouble();
      }
      // Compatibility with the SDK 2.0.
      return node[itmReprojectionError].asDouble();
    };

    ensenso::ros::Rate waitingRate(5);
    while (!calibrateHandEye.finished())
    {
      if (calibrateHandEye.result()[itmProgress].exists())
      {
        ensenso::action::CalibrateHandEyeFeedback feedback;
        feedback.number_of_iterations = calibrateHandEye.result()[itmProgress][itmIterations].asInt();
        feedback.residual = getCalibrationResidual(calibrateHandEye.result()[itmProgress]);
        calibrateHandEyeServer->publishFeedback(feedback);
      }

      if (calibrateHandEyeServer->isPreemptRequested())
      {
        NxLibCommand(cmdBreak, params.serial).execute();

        calibrateHandEyeServer->setPreempted();
        return;
      }

      waitingRate.sleep();
    }

    // NxLib time is in milliseconds, ROS expects time to be in seconds.
    result.calibration_time = calibrateHandEye.result()[itmTime].asDouble() / 1000;

    result.number_of_iterations = calibrateHandEye.result()[itmIterations].asInt();
    result.residual = getCalibrationResidual(calibrateHandEye.result());

    result.link = poseFromTransform(transformFromNxLib(cameraNode[itmLink]).inverse());
    result.pattern_pose = poseFromTransform(transformFromNxLib(calibrateHandEye.result()[itmPatternPose]));

    if (goal->write_calibration_to_eeprom)
    {
      // Save the new calibration link to the camera's EEPROM.
      NxLibCommand storeCalibration(cmdStoreCalibration, params.serial);
      storeCalibration.parameters()[itmCameras] = params.serial;
      storeCalibration.parameters()[itmLink] = true;
      storeCalibration.execute();
    }
  }

  // The target frame has changed.
  publishCurrentLinks();
  calibrateHandEyeServer->setSucceeded(std::move(result));

  FINISH_NXLIB_ACTION(CalibrateHandEye)
}
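
// Workflow sketch, derived from the command branches above: a hand-eye calibration consists of
//   1. one RESET call, clearing the pattern buffer and the stored robot poses,
//   2. at least five CAPTURE_PATTERN calls, moving the robot to a new pose before each one,
//   3. one START_CALIBRATION call, optionally with write_calibration_to_eeprom set to persist the
//      resulting link in the camera.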

void StereoCamera::onCalibrateWorkspace(ensenso::action::CalibrateWorkspaceGoalConstPtr const& goal)
{
  START_NXLIB_ACTION(CalibrateWorkspace, calibrateWorkspaceServer)

  if (!params.fixed)
  {
    ENSENSO_WARN(nh, "You are performing a workspace calibration for a moving camera. Are you sure this is what you "
                     "want?");
  }

  ensenso::action::CalibrateWorkspaceResult result;
  result.successful = true;

  loadParameterSet(goal->parameter_set, projectorOff);

  int numberOfShots = goal->number_of_shots;
  if (numberOfShots < 1)
  {
    numberOfShots = 1;
  }

  ensenso::ros::Time imageTimestamp;
  for (int i = 0; i < numberOfShots; i++)
  {
    ensenso::ros::Time timestamp = capture();
    if (i == 0)
    {
      imageTimestamp = timestamp;
    }

    bool clearBuffer = (i == 0);
    std::vector<StereoCalibrationPattern> patterns = collectPattern(clearBuffer);
    if (patterns.size() != 1)
    {
      result.successful = false;
      calibrateWorkspaceServer->setSucceeded(std::move(result));
      return;
    }
  }

  auto patternPose = estimatePatternPose(imageTimestamp);
  tf2::Transform patternTransform = fromMsg(patternPose);

  tf2::Transform definedPatternTransform = fromMsg(goal->defined_pattern_pose);

  NxLibCommand calibrateWorkspace(cmdCalibrateWorkspace, params.serial);
  calibrateWorkspace.parameters()[itmCameras] = params.serial;
  writeTransformToNxLib(patternTransform, calibrateWorkspace.parameters()[itmPatternPose]);
  writeTransformToNxLib(definedPatternTransform, calibrateWorkspace.parameters()[itmDefinedPose]);
  calibrateWorkspace.parameters()[itmTarget] = getNxLibTargetFrameName();
  calibrateWorkspace.execute();

  if (goal->write_calibration_to_eeprom)
  {
    // Save the new calibration link to the camera's EEPROM.
    NxLibCommand storeCalibration(cmdStoreCalibration, params.serial);
    storeCalibration.parameters()[itmCameras] = params.serial;
    storeCalibration.parameters()[itmLink] = true;
    storeCalibration.execute();
  }

  // The target frame has changed.
  publishCurrentLinks();
  calibrateWorkspaceServer->setSucceeded(std::move(result));

  FINISH_NXLIB_ACTION(CalibrateWorkspace)
}

void StereoCamera::onTelecentricProjection(ensenso::action::TelecentricProjectionGoalConstPtr const& goal)
{
  START_NXLIB_ACTION(TelecentricProjection, telecentricProjectionServer)

  ensenso::action::TelecentricProjectionResult result;

  bool useViewPose = isValid(goal->view_pose);
  bool frameGiven = !goal->frame.empty();

  if (!frameGiven)
  {
    result.error.message = "You have to define a valid frame, to which the projection will be published. Aborting.";
    ENSENSO_ERROR(nh, "%s", result.error.message.c_str());
    telecentricProjectionServer->setAborted(std::move(result));
    return;
  }

  tf2::Transform transform;
  std::string publishingFrame = useViewPose ? params.targetFrame : goal->frame;

  if (useViewPose)
  {
    transform = fromMsg(goal->view_pose);
  }
  else
  {
    transform = getLatestTransform(*tfBuffer, params.targetFrame, goal->frame);
  }

  int pixelScale = goal->pixel_scale != 0. ? goal->pixel_scale : 1;
  double scaling = goal->scaling != 0. ? goal->scaling : 1.0;
  int sizeWidth = goal->size_width != 0 ? goal->size_width : 1024;
  int sizeHeight = goal->size_height != 0 ? goal->size_height : 768;
  bool useOpenGL = goal->use_opengl == 1;
  RenderPointMapParamsTelecentric renderParams(useOpenGL, pixelScale, scaling, sizeWidth, sizeHeight,
                                               std::move(transform));

  NxLibCommand renderPointMap(cmdRenderPointMap, params.serial);

  for (int i = 0; i < static_cast<int>(goal->serials.size()); i++)
  {
    renderPointMap.parameters()[itmCameras][i] = goal->serials[i];
  }
  setRenderParams(renderPointMap.parameters(), &renderParams);

  renderPointMap.execute();

  NxLibItem resultPath = renderPointMap.result()[itmImages][itmRenderPointMap];

  if (!resultPath.exists())
  {
    result.error.message = "Rendered point map does not exist in path: " + resultPath.path;
    ENSENSO_ERROR(nh, "%s", result.error.message.c_str());
    telecentricProjectionServer->setAborted(std::move(result));
    return;
  }

  if (goal->publish_results || goal->include_results_in_response)
  {
    if (goal->request_point_cloud || (!goal->request_point_cloud && !goal->request_depth_image))
    {
      auto pointCloud = retrieveRenderedPointCloud(renderPointMap.result(), goal->frame, params.isFileCamera);

      if (goal->include_results_in_response)
      {
        pcl::toROSMsg(*pointCloud, result.projected_point_cloud);
        telecentricProjectionServer->setSucceeded(std::move(result));
      }
      else
      {
        telecentricProjectionServer->setSucceeded();
      }
      if (goal->publish_results)
      {
        publishPointCloud(pointCloudProjectedPublisher, std::move(pointCloud));
      }
    }

    if (goal->request_depth_image)
    {
      auto renderedImage = retrieveRenderedDepthMap(renderPointMap.result(), goal->frame, params.isFileCamera);

      if (goal->publish_results)
      {
        projectedImagePublisher.publish(renderedImage);
      }
      if (goal->include_results_in_response)
      {
        result.projected_depth_map = *renderedImage;
        telecentricProjectionServer->setSucceeded(std::move(result));
      }
      else
      {
        telecentricProjectionServer->setSucceeded();
      }
    }
  }
  else
  {
    telecentricProjectionServer->setSucceeded();
  }

  FINISH_NXLIB_ACTION(TelecentricProjection)
}
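
// Goal sketch (frame name hypothetical, sizes match the defaults applied above): rendering a
// telecentric depth map into a user-defined frame.
//
//   ensenso::action::TelecentricProjectionGoal goal;
//   goal.frame = "workspace";   // required; the action aborts without a frame
//   goal.size_width = 1024;     // 0 falls back to 1024
//   goal.size_height = 768;     // 0 falls back to 768
//   goal.request_depth_image = true;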

void StereoCamera::onTexturedPointCloud(ensenso::action::TexturedPointCloudGoalConstPtr const& goal)
{
  START_NXLIB_ACTION(TexturedPointCloud, texturedPointCloudServer)

  ensenso::action::TexturedPointCloudResult result;

  if (goal->mono_serial.empty())
  {
    result.error.message = "In order to use this action, you have to specify one mono serial.";
    ENSENSO_ERROR(nh, "%s", result.error.message.c_str());
    texturedPointCloudServer->setAborted(std::move(result));
    return;
  }

  double farPlane = goal->far_plane ? goal->far_plane : 10000.;
  double nearPlane = goal->near_plane ? goal->near_plane : -10000.;
  bool useOpenGL = goal->use_opengl == 1;
  bool withTexture = true;

  RenderPointMapParamsProjection renderParams(useOpenGL, farPlane, nearPlane, withTexture);

  NxLibCommand renderPointMap(cmdRenderPointMap, params.serial);
  for (int i = 0; i < static_cast<int>(goal->serials.size()); i++)
  {
    renderPointMap.parameters()[itmCameras][i] = goal->serials[i];
  }
  renderPointMap.parameters()[itmCamera] = goal->mono_serial;
  setRenderParams(renderPointMap.parameters(), &renderParams);

  renderPointMap.execute();

  if (goal->publish_results || goal->include_results_in_response)
  {
    auto pointCloud = retrieveTexturedPointCloud(renderPointMap.result(), params.targetFrame, params.isFileCamera);
    if (goal->include_results_in_response)
    {
      pcl::toROSMsg(*pointCloud, result.point_cloud);
      texturedPointCloudServer->setSucceeded(std::move(result));
    }
    else
    {
      texturedPointCloudServer->setSucceeded();
    }
    if (goal->publish_results)
    {
      publishPointCloud(pointCloudColoredPublisher, std::move(pointCloud));
    }
  }
  else
  {
    texturedPointCloudServer->setSucceeded();
  }

  FINISH_NXLIB_ACTION(TexturedPointCloud)
}

void StereoCamera::saveParameterSet(std::string name, bool projectorWasSet)
{
  if (name.empty())
  {
    name = DEFAULT_PARAMETER_SET;
  }

  Camera::saveParameterSet(name);

  ParameterSet& parameterSet = parameterSets.at(name);
  if (projectorWasSet)
  {
    parameterSet.autoProjector = false;
  }
}

void StereoCamera::loadParameterSet(std::string name, ProjectorState projector)
{
  Camera::loadParameterSet(std::move(name));

  bool changedParameters = false;
  ParameterSet const& parameterSet = parameterSets.at(currentParameterSet);

  if (parameterSet.autoProjector && projector != projectorDontCare)
  {
    try
    {
      if (projector == projectorOn)
      {
        cameraNode[itmParameters][itmCapture][itmProjector] = true;
        cameraNode[itmParameters][itmCapture][itmFrontLight] = false;
      }
      else
      {
        cameraNode[itmParameters][itmCapture][itmProjector] = false;
        cameraNode[itmParameters][itmCapture][itmFrontLight] = true;
      }
      changedParameters = true;
    }
    catch (...)
    {
      // Setting the projector and front light fails on file cameras.
    }
  }

  if (changedParameters)
  {
    NxLibCommand synchronize(cmdSynchronize, params.serial);
    synchronize.parameters()[itmCameras] = params.serial;
    synchronize.execute();
  }
}

ensenso::ros::Time StereoCamera::timestampOfCapturedImage() const
{
  // For file cameras this workaround is needed, because the timestamp of captures from file cameras will not change
  // over time. When looking up the current tf tree, this would result in errors, because the original timestamp is
  // requested, which lies in the past (often further back than the tfBuffer stores transforms).
  if (params.isFileCamera)
  {
    return ensenso::ros::now(nh);
  }
  // For stereo cameras with only one sensor (S-series) the image is stored in the raw node.
  else if (isSSeries())
  {
    return timestampFromNxLibNode(cameraNode[itmImages][itmRaw]);
  }
  // For XR cameras the image node depends on the capture mode. In raw mode they behave like a normal stereo camera.
  else if (isXrSeries() && !hasRawImages())
  {
    ENSENSO_WARN_ONCE("XR: Using timestamp of first left rectified image in capture mode \"Rectified\".");
    return timestampFromNxLibNode(cameraNode[itmImages][itmRectified][itmLeft]);
  }
  // For stereo cameras with a left and a right sensor the timestamp of the left sensor is taken as the reference.
  else
  {
    return timestampFromNxLibNode(cameraNode[itmImages][itmRaw][itmLeft]);
  }
}

ensenso::ros::Time StereoCamera::capture() const
{
  // Update the virtual objects. Ignore failures with a simple warning.
  if (params.virtualObjectHandler)
  {
    try
    {
      params.virtualObjectHandler->updateObjectLinks();
    }
    catch (const std::exception& e)
    {
      ENSENSO_WARN(nh, "Unable to update virtual objects. Error: %s", e.what());
    }
  }

  ENSENSO_DEBUG(nh, "Capturing an image...");

  NxLibCommand capture(cmdCapture, params.serial);
  capture.parameters()[itmCameras] = params.serial;
  if (params.captureTimeout > 0)
  {
    capture.parameters()[itmTimeout] = params.captureTimeout;
  }
  capture.execute();

  return timestampOfCapturedImage();
}

std::vector<StereoCalibrationPattern> StereoCamera::collectPattern(bool clearBuffer) const
{
  if (clearBuffer)
  {
    NxLibCommand(cmdDiscardPatterns, params.serial).execute();
  }
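  // cmdDiscardPatterns above empties the NxLib pattern buffer, so only patterns collected from here
  // on contribute to subsequent pose estimation.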

  NxLibCommand collectPattern(cmdCollectPattern, params.serial);
  collectPattern.parameters()[itmCameras] = params.serial;
  collectPattern.parameters()[itmDecodeData] = true;
  collectPattern.parameters()[itmFilter][itmCameras] = params.serial;
  bool useModel = true;
  collectPattern.parameters()[itmFilter][itmUseModel] = useModel;
  collectPattern.parameters()[itmFilter][itmType] = valStatic;
  collectPattern.parameters()[itmFilter][itmValue] = true;
  try
  {
    collectPattern.execute();
  }
  catch (NxLibException& e)
  {
    if (e.getErrorCode() == NxLibExecutionFailed)
    {
      if (collectPattern.result()[itmErrorSymbol] == errPatternNotFound ||
          collectPattern.result()[itmErrorSymbol] == errPatternNotDecodable)
      {
        return {};
      }
    }
    throw;
  }

  if (!collectPattern.result()[itmStereo].exists())
  {
    // We did find patterns, but only in one of the cameras.
    return {};
  }

  std::vector<StereoCalibrationPattern> result;

  for (int i = 0; i < collectPattern.result()[itmStereo].count(); i++)
  {
    result.emplace_back(collectPattern.result()[itmStereo][i]);
  }

  // Extract the pattern's image points from the result.
  NxLibItem pattern = collectPattern.result()[itmPatterns][0][params.serial];
  if (pattern[itmLeft].count() > 1 || pattern[itmRight].count() > 1)
  {
    // We cannot tell which of the patterns in the two cameras belong together, because that would need a comparison
    // with the stereo model.
    return result;
  }

  for (size_t i = 0; i < result.size(); i++)
  {
    for (int j = 0; j < pattern[itmLeft][i][itmPoints].count(); j++)
    {
      NxLibItem pointNode = pattern[itmLeft][i][itmPoints][j];

      ensenso::msg::ImagePoint point;
      point.x = pointNode[0].asDouble();
      point.y = pointNode[1].asDouble();
      result.at(i).leftPoints.push_back(point);
    }
    for (int j = 0; j < pattern[itmRight][i][itmPoints].count(); j++)
    {
      NxLibItem pointNode = pattern[itmRight][i][itmPoints][j];

      ensenso::msg::ImagePoint point;
      point.x = pointNode[0].asDouble();
      point.y = pointNode[1].asDouble();
      result.at(i).rightPoints.push_back(point);
    }
  }

  return result;
}

geometry_msgs::msg::PoseStamped StereoCamera::estimatePatternPose(ensenso::ros::Time imageTimestamp,
                                                                  std::string const& targetFrame,
                                                                  bool latestPatternOnly) const
{
  updateGlobalLink(imageTimestamp, targetFrame);

  NxLibCommand estimatePatternPose(cmdEstimatePatternPose, params.serial);
  if (latestPatternOnly)
  {
    estimatePatternPose.parameters()[itmAverage] = false;

    int patternCount = NxLibItem()[itmParameters][itmPatternCount].asInt();
    estimatePatternPose.parameters()[itmFilter][itmOr][0][itmAnd][0][itmType] = valIndex;
    estimatePatternPose.parameters()[itmFilter][itmOr][0][itmAnd][0][itmValue] = patternCount - 1;
  }
  else
  {
    estimatePatternPose.parameters()[itmAverage] = true;
  }
  estimatePatternPose.execute();

  auto patterns = estimatePatternPose.result()[itmPatterns];
  ENSENSO_ASSERT(patterns.count() == 1);

  return stampedPoseFromNxLib(patterns[0][itmPatternPose], targetFrame, imageTimestamp);
}

std::vector<geometry_msgs::msg::PoseStamped> StereoCamera::estimatePatternPoses(ensenso::ros::Time imageTimestamp,
                                                                                std::string const& targetFrame) const
{
  updateGlobalLink(imageTimestamp, targetFrame);

  NxLibCommand estimatePatternPose(cmdEstimatePatternPose, params.serial);
  estimatePatternPose.parameters()[itmAverage] = false;
  estimatePatternPose.parameters()[itmFilter][itmCameras] = params.serial;
  estimatePatternPose.parameters()[itmFilter][itmUseModel] = true;
  estimatePatternPose.parameters()[itmFilter][itmType] = valStatic;
  estimatePatternPose.parameters()[itmFilter][itmValue] = true;
  estimatePatternPose.execute();

  auto patterns = estimatePatternPose.result()[itmPatterns];

  std::vector<geometry_msgs::msg::PoseStamped> result;
  result.reserve(patterns.count());

  for (int i = 0; i < patterns.count(); i++)
  {
    result.push_back(stampedPoseFromNxLib(patterns[i][itmPatternPose], targetFrame, imageTimestamp));
  }

  return result;
}

void StereoCamera::fillCameraInfoFromNxLib(sensor_msgs::msg::CameraInfoPtr const& info, bool right,
                                           bool rectified) const
{
  fillBasicCameraInfoFromNxLib(info);

  NxLibItem monoCalibrationNode = cameraNode[itmCalibration][itmMonocular][right ? itmRight : itmLeft];
  NxLibItem stereoCalibrationNode = cameraNode[itmCalibration][itmStereo][right ? itmRight : itmLeft];

  if (rectified)
  {
    // For the rectified images all transformations are the identity (because all of the distortions were already
    // removed), except for the stereo camera matrix.
    GET_D_MATRIX(info).resize(5, 0);

    for (int row = 0; row < 3; row++)
    {
      for (int column = 0; column < 3; column++)
      {
        GET_P_MATRIX(info)[4 * row + column] = stereoCalibrationNode[itmCamera][column][row].asDouble();

        if (row == column)
        {
          GET_K_MATRIX(info)[3 * row + column] = 1;
          GET_R_MATRIX(info)[3 * row + column] = 1;
        }
      }
    }
  }
  else
  {
    fillDistortionParamsFromNxLib(monoCalibrationNode[itmDistortion], info);

    for (int row = 0; row < 3; row++)
    {
      for (int column = 0; column < 3; column++)
      {
        GET_K_MATRIX(info)[3 * row + column] = monoCalibrationNode[itmCamera][column][row].asDouble();
        GET_P_MATRIX(info)[4 * row + column] = stereoCalibrationNode[itmCamera][column][row].asDouble();
        GET_R_MATRIX(info)[3 * row + column] = stereoCalibrationNode[itmRotation][column][row].asDouble();
      }
    }
  }

  if (right)
  {
    // Add the offset of the right camera relative to the left one to the projection matrix.
    double fx = stereoCalibrationNode[itmCamera][0][0].asDouble();
    double baseline = cameraNode[itmCalibration][itmStereo][itmBaseline].asDouble() / 1000.0;
    GET_P_MATRIX(info)[3] = -fx * baseline;
  }
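  // With the ROS CameraInfo convention this sets Tx = P[0][3] = -fx * baseline: e.g. fx = 1000 px
  // and a 100 mm baseline give Tx = -100.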

  int leftTopX = cameraNode[itmParameters][itmDisparityMap][itmAreaOfInterest][itmLeftTop][0].asInt();
  int leftTopY = cameraNode[itmParameters][itmDisparityMap][itmAreaOfInterest][itmLeftTop][1].asInt();
  int rightBottomX = cameraNode[itmParameters][itmDisparityMap][itmAreaOfInterest][itmRightBottom][0].asInt();
  int rightBottomY = cameraNode[itmParameters][itmDisparityMap][itmAreaOfInterest][itmRightBottom][1].asInt();
  info->roi.x_offset = leftTopX;
  info->roi.y_offset = leftTopY;
  info->roi.width = rightBottomX - leftTopX;
  info->roi.height = rightBottomY - leftTopY;
  if (rectified)
  {
    info->roi.do_rectify = true;
  }
}

void StereoCamera::updateCameraInfo()
{
  fillCameraInfoFromNxLib(leftCameraInfo, false, false);
  fillCameraInfoFromNxLib(leftRectifiedCameraInfo, false, true);
  if (hasRightCamera())
  {
    fillCameraInfoFromNxLib(rightCameraInfo, true, false);
    fillCameraInfoFromNxLib(rightRectifiedCameraInfo, true, true);
  }
}

ensenso::msg::ParameterPtr StereoCamera::readParameter(std::string const& key) const
{
  auto message = ensenso::std::make_shared<ensenso::msg::Parameter>();
  message->key = key;

  if (key == ensenso::msg::Parameter::REGION_OF_INTEREST)
  {
    PointCloudROI const& roi = parameterSets.at(currentParameterSet).roi;

    message->region_of_interest_value.lower.x = roi.minX;
    message->region_of_interest_value.lower.y = roi.minY;
    message->region_of_interest_value.lower.z = roi.minZ;
    message->region_of_interest_value.upper.x = roi.maxX;
    message->region_of_interest_value.upper.y = roi.maxY;
    message->region_of_interest_value.upper.z = roi.maxZ;
  }
  else if (key == ensenso::msg::Parameter::FLEX_VIEW)
  {
    NxLibItem flexViewNode = cameraNode[itmParameters][itmCapture][itmFlexView];

    if (!flexViewNode.exists())
    {
      message->float_value = -1;
    }
    else if (flexViewNode.isNumber())
    {
      message->float_value = flexViewNode.asInt();
    }
    else
    {
      message->float_value = 0;
    }
  }
  else
  {
    return Camera::readParameter(key);
  }

  return message;
}

void StereoCamera::writeParameter(ensenso::msg::Parameter const& parameter)
{
  if (parameter.key == ensenso::msg::Parameter::REGION_OF_INTEREST)
  {
    PointCloudROI& roi = parameterSets.at(currentParameterSet).roi;

    roi.minX = parameter.region_of_interest_value.lower.x;
    roi.minY = parameter.region_of_interest_value.lower.y;
    roi.minZ = parameter.region_of_interest_value.lower.z;
    roi.maxX = parameter.region_of_interest_value.upper.x;
    roi.maxY = parameter.region_of_interest_value.upper.y;
    roi.maxZ = parameter.region_of_interest_value.upper.z;

    parameterSets.at(currentParameterSet).useROI = !roi.isEmpty();
  }
  else if (parameter.key == ensenso::msg::Parameter::FLEX_VIEW)
  {
    NxLibItem flexViewNode = cameraNode[itmParameters][itmCapture][itmFlexView];

    if (!flexViewNode.exists())
    {
      ENSENSO_WARN(nh, "Writing the parameter FlexView, but the camera does not support it!");
      return;
    }

    int n = static_cast<int>(std::round(parameter.float_value));
    if (n <= 1)
    {
      flexViewNode = false;
    }
    else
    {
      flexViewNode = n;
    }
  }
  else
  {
    Camera::writeParameter(parameter);
  }
}

bool StereoCamera::isSSeries() const
{
  return cameraNode[itmType].asString() == "StructuredLight";
}

bool StereoCamera::isXrSeries() const
{
  std::string modelName = cameraNode[itmModelName].asString();
  return startswith(modelName, "XR");
}

bool StereoCamera::hasRightCamera() const
{
  return (!isSSeries());
}

bool StereoCamera::hasRawImages() const
{
  std::string captureMode = cameraNode[itmParameters][itmCapture][itmMode].asString();
  return captureMode == valRaw;
}

bool StereoCamera::hasDownloadedImages() const
{
  return isXrSeries() ? cameraNode[itmParameters][itmCapture][itmDownloadImages].asBool() : true;
}

bool StereoCamera::hasDisparityMap() const
{
  return (!isSSeries());
}

void StereoCamera::addDisparityMapOffset(sensor_msgs::msg::CameraInfoPtr const& info) const
{
  double disparityMapOffset = 0.0;
  if (cameraNode[itmCalibration][itmDynamic][itmStereo][itmDisparityMapOffset].exists())
  {
    disparityMapOffset = cameraNode[itmCalibration][itmDynamic][itmStereo][itmDisparityMapOffset].asDouble();
  }

  // Adjust the width, which differs from the image width if there is a non-zero disparity map offset.
  info->width -= disparityMapOffset;
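  // Example: with an offset of 32 pixels, a 1280 pixel wide image is reported with width 1248, and
  // cx below shifts by the same 32 pixels.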

  // Adjust the x-coordinate of the principal point (cx).
  GET_K_MATRIX(info)[2] -= disparityMapOffset;
  GET_P_MATRIX(info)[2] -= disparityMapOffset;
}


ensenso_camera
Author(s): Ensenso
autogenerated on Sat Jun 3 2023 02:17:04