tutorial-franka-coppeliasim-ibvs-apriltag.cpp
/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2021 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *****************************************************************************/
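
// This tutorial implements an image-based visual servoing (IBVS) scheme on a
// Franka robot simulated in CoppeliaSim and controlled through ROS: the four
// corners of an AprilTag detected in the camera image are used as point
// features to drive the camera toward a desired pose in front of the tag.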

#include <iostream>

#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpMeterPixelConversion.h> // For vpMeterPixelConversion::convertPoint()
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpPlot.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/visual_features/vpFeatureBuilder.h>
#include <visp3/visual_features/vpFeaturePoint.h>
#include <visp3/vs/vpServo.h>
#include <visp3/vs/vpServoDisplay.h>

#include <visp_ros/vpROSGrabber.h>
#include <visp_ros/vpROSRobotFrankaCoppeliasim.h> // Interface to the simulated Franka in CoppeliaSim

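// Visualization helper: for each tracked point, append the current image
// position to its stored trajectory (ignoring moves smaller than one pixel)
// and draw the resulting polylines in green.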
void
display_point_trajectory( const vpImage< unsigned char > &I, const std::vector< vpImagePoint > &vip,
                          std::vector< vpImagePoint > *traj_vip )
{
  for ( size_t i = 0; i < vip.size(); i++ )
  {
    if ( traj_vip[i].size() )
    {
      // Add the point only if its distance to the previous one is larger than 1 pixel
      if ( vpImagePoint::distance( vip[i], traj_vip[i].back() ) > 1. )
      {
        traj_vip[i].push_back( vip[i] );
      }
    }
    else
    {
      traj_vip[i].push_back( vip[i] );
    }
  }
  for ( size_t i = 0; i < vip.size(); i++ )
  {
    for ( size_t j = 1; j < traj_vip[i].size(); j++ )
    {
      vpDisplay::displayLine( I, traj_vip[i][j - 1], traj_vip[i][j], vpColor::green, 2 );
    }
  }
}

int
main( int argc, char **argv )
{
  double opt_tagSize             = 0.08;
  bool display_tag               = true;
  int opt_quad_decimate          = 2;
  bool opt_verbose               = false;
  bool opt_plot                  = false;
  bool opt_adaptive_gain         = false;
  bool opt_task_sequencing       = false;
  double convergence_threshold   = 0.0005;
  bool opt_coppeliasim_sync_mode = false;

  for ( int i = 1; i < argc; i++ )
  {
    if ( std::string( argv[i] ) == "--tag_size" && i + 1 < argc )
    {
      opt_tagSize = std::stod( argv[i + 1] );
    }
    else if ( std::string( argv[i] ) == "--verbose" || std::string( argv[i] ) == "-v" )
    {
      opt_verbose = true;
    }
    else if ( std::string( argv[i] ) == "--plot" )
    {
      opt_plot = true;
    }
    else if ( std::string( argv[i] ) == "--adaptive_gain" )
    {
      opt_adaptive_gain = true;
    }
    else if ( std::string( argv[i] ) == "--task_sequencing" )
    {
      opt_task_sequencing = true;
    }
    else if ( std::string( argv[i] ) == "--quad_decimate" && i + 1 < argc )
    {
      opt_quad_decimate = std::stoi( argv[i + 1] );
    }
    else if ( std::string( argv[i] ) == "--no-convergence-threshold" )
    {
      convergence_threshold = 0.;
    }
    else if ( std::string( argv[i] ) == "--enable-coppeliasim-sync-mode" )
    {
      opt_coppeliasim_sync_mode = true;
    }
    else if ( std::string( argv[i] ) == "--help" || std::string( argv[i] ) == "-h" )
    {
      std::cout << argv[0] << " [--tag_size <marker size in meters; default " << opt_tagSize << ">]"
                << " [--quad_decimate <decimation; default " << opt_quad_decimate << ">]"
                << " [--adaptive_gain]"
                << " [--plot]"
                << " [--task_sequencing]"
                << " [--no-convergence-threshold]"
                << " [--enable-coppeliasim-sync-mode]"
                << " [--verbose] [-v]"
                << " [--help] [-h]" << std::endl;
      return EXIT_SUCCESS;
    }
  }
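
  // Illustrative invocation (hypothetical; package and executable names
  // depend on how this tutorial is built and installed):
  //   rosrun visp_ros tutorial-franka-coppeliasim-ibvs-apriltag --adaptive_gain --plot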

  try
  {
    //------------------------------------------------------------------------//
    //------------------------------------------------------------------------//
    // ROS node
    ros::init( argc, argv, "visp_ros" );
    ros::NodeHandlePtr n = boost::make_shared< ros::NodeHandle >();
    ros::Rate loop_rate( 1000 );
    ros::spinOnce();

    vpROSRobotFrankaCoppeliasim robot;
    robot.setVerbose( opt_verbose );
    robot.connect();

    std::cout << "Coppeliasim sync mode enabled: " << ( opt_coppeliasim_sync_mode ? "yes" : "no" ) << std::endl;
    robot.coppeliasimStopSimulation(); // Stop and reset the simulation, moving the robot back to its initial position
    robot.setCoppeliasimSyncMode( false );
    robot.coppeliasimStartSimulation();

    // Disabled by default: example of a joint-space motion that adds 10 degrees
    // to the first axis in position control before servoing starts
    if ( 0 )
    {
      robot.setRobotState( vpRobot::STATE_POSITION_CONTROL );
      vpColVector q;
      robot.getPosition( vpRobot::JOINT_STATE, q );
      std::cout << "Initial joint position: " << q.t() << std::endl;

      q[0] += vpMath::rad( 10 ); // Add 10 deg to axis 1
      std::cout << "Move to joint position: " << q.t() << std::endl;
      robot.setPosition( vpRobot::JOINT_STATE, q );
    }

    vpImage< unsigned char > I;
    vpROSGrabber g;
    g.setImageTopic( "/coppeliasim/franka/camera/image" );
    g.setCameraInfoTopic( "/coppeliasim/franka/camera/camera_info" );
    g.open( argc, argv );

    g.acquire( I );

    std::cout << "Image size: " << I.getWidth() << " x " << I.getHeight() << std::endl;
    vpCameraParameters cam;

    g.getCameraInfo( cam );
    std::cout << cam << std::endl;
    vpDisplayOpenCV dc( I, 10, 10, "Color image" );

    vpDetectorAprilTag::vpAprilTagFamily tagFamily = vpDetectorAprilTag::TAG_36h11;
    vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::HOMOGRAPHY_VIRTUAL_VS;
    vpDetectorAprilTag detector( tagFamily );
    detector.setAprilTagPoseEstimationMethod( poseEstimationMethod );
    detector.setDisplayTag( display_tag );
    detector.setAprilTagQuadDecimate( opt_quad_decimate );

    // Servo
    vpHomogeneousMatrix cMo, oMo;

    // Desired pose used to compute the desired features
    vpHomogeneousMatrix cdMo( vpTranslationVector( 0.0, 0.0, opt_tagSize * 3 ),
                              vpRotationMatrix( { 1, 0, 0, 0, -1, 0, 0, 0, -1 } ) );
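    // The desired pose places the camera centered in front of the tag, at a
    // distance of 3 tag widths. The rotation matrix diag(1, -1, -1) is a
    // rotation of pi about the camera x-axis, so that the tag z-axis points
    // toward the camera and the tag appears upright in the image.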

    // Create visual features
    std::vector< vpFeaturePoint > p( 4 ), pd( 4 ); // We use 4 points

    // Define the 4 3D points corresponding to the CAD model of the AprilTag
    std::vector< vpPoint > point( 4 );
    point[0].setWorldCoordinates( -opt_tagSize / 2., -opt_tagSize / 2., 0 );
    point[1].setWorldCoordinates( opt_tagSize / 2., -opt_tagSize / 2., 0 );
    point[2].setWorldCoordinates( opt_tagSize / 2., opt_tagSize / 2., 0 );
    point[3].setWorldCoordinates( -opt_tagSize / 2., opt_tagSize / 2., 0 );

    vpServo task;
    // Add the 4 visual feature points
    for ( size_t i = 0; i < p.size(); i++ )
    {
      task.addFeature( p[i], pd[i] );
    }
    task.setServo( vpServo::EYEINHAND_CAMERA );
    task.setInteractionMatrixType( vpServo::CURRENT );

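    // With EYEINHAND_CAMERA and the CURRENT interaction matrix type, the task
    // computes the classical IBVS control law
    //   v_c = -lambda * L^+ * (s - s*)
    // where L is the interaction matrix evaluated at the current features and
    // L^+ its pseudo-inverse. As documented for vpAdaptiveGain( l0, linf, l0p ),
    // the adaptive gain below varies with the error norm x as
    //   lambda(x) = (l0 - linf) * exp( -l0p * x / (l0 - linf) ) + linf
    // i.e. a larger gain close to convergence (small error) to speed up the
    // end of the motion, and a smaller one far from it to avoid overshoot.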
    if ( opt_adaptive_gain )
    {
      std::cout << "Enable adaptive gain" << std::endl;
      vpAdaptiveGain lambda( 4, 1.2, 25 ); // lambda(0)=4, lambda(oo)=1.2 and lambda'(0)=25
      task.setLambda( lambda );
    }
    else
    {
      task.setLambda( 1.2 );
    }

    vpPlot *plotter = nullptr;

    if ( opt_plot )
    {
      plotter = new vpPlot( 2, static_cast< int >( 250 * 2 ), 500, static_cast< int >( I.getWidth() ) + 80, 10,
                            "Real time curves plotter" );
      plotter->setTitle( 0, "Visual features error" );
      plotter->setTitle( 1, "Camera velocities" );
      plotter->initGraph( 0, 8 );
      plotter->initGraph( 1, 6 );
      plotter->setLegend( 0, 0, "error_feat_p1_x" );
      plotter->setLegend( 0, 1, "error_feat_p1_y" );
      plotter->setLegend( 0, 2, "error_feat_p2_x" );
      plotter->setLegend( 0, 3, "error_feat_p2_y" );
      plotter->setLegend( 0, 4, "error_feat_p3_x" );
      plotter->setLegend( 0, 5, "error_feat_p3_y" );
      plotter->setLegend( 0, 6, "error_feat_p4_x" );
      plotter->setLegend( 0, 7, "error_feat_p4_y" );
      plotter->setLegend( 1, 0, "vc_x" );
      plotter->setLegend( 1, 1, "vc_y" );
      plotter->setLegend( 1, 2, "vc_z" );
      plotter->setLegend( 1, 3, "wc_x" );
      plotter->setLegend( 1, 4, "wc_y" );
      plotter->setLegend( 1, 5, "wc_z" );
    }

    bool final_quit      = false;
    bool has_converged   = false;
    bool send_velocities = false;
    bool servo_started   = false;
    std::vector< vpImagePoint > *traj_corners = nullptr; // To memorize point trajectory

    double sim_time            = robot.getCoppeliasimSimulationTime();
    double sim_time_prev       = sim_time;
    double sim_time_init_servo = sim_time;
    double sim_time_img        = sim_time;

    // Disabled by default: instead of retrieving eMc from the
    // /coppeliasim/franka/eMc topic, its value can be set manually here, for
    // example to introduce calibration noise
    if ( 0 )
    {
      vpHomogeneousMatrix eMc;
      eMc.buildFrom( 0.05, -0.05, 0, 0, 0, M_PI_4 );
      robot.set_eMc( eMc );
    }
    std::cout << "eMc:\n" << robot.get_eMc() << std::endl;

    robot.setRobotState( vpRobot::STATE_VELOCITY_CONTROL );
    robot.setCoppeliasimSyncMode( opt_coppeliasim_sync_mode );
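    // When synchronous mode is enabled, CoppeliaSim advances one simulation
    // step at a time in lockstep with this control loop, which makes the
    // timing deterministic; otherwise the simulator runs freely and the loop
    // timing depends on the host machine.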

    while ( !final_quit )
    {
      sim_time = robot.getCoppeliasimSimulationTime();

      g.acquire( I, sim_time_img );
      vpDisplay::display( I );

      std::vector< vpHomogeneousMatrix > cMo_vec;
      detector.detect( I, opt_tagSize, cam, cMo_vec );

      {
        std::stringstream ss;
        ss << "Left click to " << ( send_velocities ? "stop the robot" : "servo the robot" )
           << ", right click to quit.";
        vpDisplay::displayText( I, 20, 20, ss.str(), vpColor::red );
      }

      vpColVector v_c( 6 );

      // Proceed only when exactly one tag is detected
      if ( cMo_vec.size() == 1 )
      {
        cMo = cMo_vec[0];

        static bool first_time = true;
        if ( first_time )
        {
          // Introduce a security check on the tag positioning in order to avoid a PI rotation of the camera
          std::vector< vpHomogeneousMatrix > v_oMo( 2 ), v_cdMc( 2 );
          v_oMo[1].buildFrom( 0, 0, 0, 0, 0, M_PI );
          for ( size_t i = 0; i < 2; i++ )
          {
            v_cdMc[i] = cdMo * v_oMo[i] * cMo.inverse();
          }
          if ( std::fabs( v_cdMc[0].getThetaUVector().getTheta() ) <
               std::fabs( v_cdMc[1].getThetaUVector().getTheta() ) )
          {
            oMo = v_oMo[0];
          }
          else
          {
            std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl;
            oMo = v_oMo[1]; // Introduce PI rotation
          }
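          // Rationale: the square tag outline leaves the desired pose defined
          // only up to a rotation of pi about its z-axis. Both candidates
          //   cdMc = cdMo * oMo * cMo^-1,  oMo in { I, Rz(pi) }
          // are evaluated and the one with the smaller theta-u rotation angle
          // is kept, so the camera takes the shorter rotational path.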

          // Compute the desired position of the features from the desired pose
          std::cout << "Initializing desired pose" << std::endl;
          for ( size_t i = 0; i < point.size(); i++ )
          {
            vpColVector cP, p_;
            point[i].changeFrame( cdMo * oMo, cP );
            point[i].projection( cP, p_ );

            pd[i].set_x( p_[0] );
            pd[i].set_y( p_[1] );
            pd[i].set_Z( cP[2] );
          }
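          // projection() implements the normalized perspective projection
          //   x = X/Z, y = Y/Z
          // of each 3D point expressed in the desired camera frame; the depth
          // Z is also stored since the point interaction matrix depends on it.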
        } // end first_time

        // Get tag corners
        std::vector< vpImagePoint > corners = detector.getPolygon( 0 );

        // Update visual features
        for ( size_t i = 0; i < corners.size(); i++ )
        {
          // Update the point feature from the tag corners location
          vpFeatureBuilder::create( p[i], cam, corners[i] );
          // Set the feature Z coordinate from the pose
          vpColVector cP;
          point[i].changeFrame( cMo, cP );

          p[i].set_Z( cP[2] );
        }
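        // For reference, each point feature (x, y) with depth Z contributes
        // two rows of the classical point interaction matrix used by the task:
        //   L = [ -1/Z    0    x/Z    x*y   -(1+x*x)    y ]
        //       [   0   -1/Z   y/Z   1+y*y    -x*y     -x ]
        // which is why the current depth Z is updated from the pose cMo above.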

        if ( opt_task_sequencing )
        {
          if ( !servo_started )
          {
            if ( send_velocities )
            {
              servo_started = true;
            }
            sim_time_init_servo = robot.getCoppeliasimSimulationTime();
          }
          v_c = task.computeControlLaw( robot.getCoppeliasimSimulationTime() - sim_time_init_servo );
        }
        else
        {
          v_c = task.computeControlLaw();
        }
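        // When task sequencing is enabled, computeControlLaw( t ) receives the
        // time elapsed since the servo was actually started; ViSP then removes
        // an exponentially decaying term based on the initial error (of the
        // form e(0) * exp( -mu * t )) so that the commanded velocity ramps up
        // smoothly from zero instead of jumping at start-up.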

        // Display the current and desired feature points in the image display
        vpServoDisplay::display( task, cam, I );
        for ( size_t i = 0; i < corners.size(); i++ )
        {
          std::stringstream ss;
          ss << i;
          // Display current point indexes
          vpDisplay::displayText( I, corners[i] + vpImagePoint( 15, 15 ), ss.str(), vpColor::red );
          // Display desired point indexes
          vpImagePoint ip;
          vpMeterPixelConversion::convertPoint( cam, pd[i].get_x(), pd[i].get_y(), ip );
          vpDisplay::displayText( I, ip + vpImagePoint( 15, 15 ), ss.str(), vpColor::red );
        }
        if ( first_time )
        {
          traj_corners = new std::vector< vpImagePoint >[corners.size()];
        }
        // Display the trajectory of the points used as features
        display_point_trajectory( I, corners, traj_corners );

        if ( opt_plot )
        {
          plotter->plot( 0, static_cast< double >( sim_time ), task.getError() );
          plotter->plot( 1, static_cast< double >( sim_time ), v_c );
        }

        if ( opt_verbose )
        {
          std::cout << "v_c: " << v_c.t() << std::endl;
        }

        double error = task.getError().sumSquare(); // Squared norm of the feature error
        std::stringstream ss;
        ss << "||error||: " << error;
        vpDisplay::displayText( I, 20, static_cast< int >( I.getWidth() ) - 150, ss.str(), vpColor::red );

        if ( opt_verbose )
          std::cout << ss.str() << std::endl;

        if ( !has_converged && error < convergence_threshold )
        {
          has_converged = true;
          std::cout << "Servo task has converged" << std::endl;
          vpDisplay::displayText( I, 100, 20, "Servo task has converged", vpColor::red );
        }

        if ( first_time )
        {
          first_time = false;
        }
      } // end if ( cMo_vec.size() == 1 )
      else
      {
        v_c = 0; // Stop the robot when no tag is detected
      }

      if ( !send_velocities )
      {
        v_c = 0; // Stop the robot while servoing is disabled
      }

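      // v_c is a 6-dof velocity twist expressed in the camera frame; the
      // robot interface is expected to map it to joint velocities internally
      // using the hand-eye transform eMc and the robot Jacobian.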
      robot.setVelocity( vpRobot::CAMERA_FRAME, v_c );

      std::stringstream ss;
      ss << "Loop time [s]: " << std::round( ( sim_time - sim_time_prev ) * 1000. ) / 1000.;
      ss << " Simulation time [s]: " << sim_time;
      sim_time_prev = sim_time;
      vpDisplay::displayText( I, 40, 20, ss.str(), vpColor::red );

      vpMouseButton::vpMouseButtonType button;
      if ( vpDisplay::getClick( I, button, false ) )
      {
        switch ( button )
        {
        case vpMouseButton::button1:
          send_velocities = !send_velocities;
          break;

        case vpMouseButton::button3:
          final_quit = true;
          v_c        = 0;
          break;

        default:
          break;
        }
      }

      vpDisplay::flush( I );
      robot.wait( sim_time, 0.020 ); // Slow down the loop to simulate a camera at 50 Hz
    } // end while

    if ( opt_plot && plotter != nullptr )
    {
      delete plotter;
      plotter = nullptr;
    }
    robot.coppeliasimStopSimulation(); // Stop the simulation before leaving

    if ( !final_quit )
    {
      while ( !final_quit )
      {
        g.acquire( I );
        vpDisplay::display( I );

        vpDisplay::displayText( I, 20, 20, "Click to quit the program.", vpColor::red );
        vpDisplay::displayText( I, 40, 20, "Visual servo converged.", vpColor::red );

        if ( vpDisplay::getClick( I, false ) )
        {
          final_quit = true;
        }

        vpDisplay::flush( I );
      }
    }
    if ( traj_corners )
    {
      delete[] traj_corners;
    }
  }
  catch ( const vpException &e )
  {
    std::cout << "ViSP exception: " << e.what() << std::endl;
    std::cout << "Stop the robot" << std::endl;
    return EXIT_FAILURE;
  }

  return EXIT_SUCCESS;
}