tutorial_ros_node_pioneer_visual_servo.cpp
#include <iostream>

#include <visp/vpCameraParameters.h>
#include <visp/vpDisplayX.h>
#include <visp/vpDot2.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpFeatureDepth.h>
#include <visp/vpFeaturePoint.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpImage.h>
#include <visp/vpImageConvert.h>
#include <visp/vpServo.h>
#include <visp/vpVelocityTwistMatrix.h>

#include <visp_ros/vpROSGrabber.h>
#include <visp_ros/vpROSRobotPioneer.h>

#if defined( VISP_HAVE_DC1394_2 ) && defined( VISP_HAVE_X11 )
#define TEST_COULD_BE_ACHIEVED
#endif

#ifdef TEST_COULD_BE_ACHIEVED
int
main( int argc, char **argv )
{
  try
  {
    vpImage< unsigned char > I; // Create a gray level image container
    double depth  = 1.;
    double lambda = 0.6;
    double coef   = 1. / 6.77; // Scale parameter used to estimate the depth Z of the blob from its surface

    vpROSRobotPioneer robot;
    robot.setCmdVelTopic( "/RosAria/cmd_vel" );
    robot.init( argc, argv );

    // Wait 3 sec to be sure that the low level Aria thread used to control
    // the robot is started. Without this pause we experienced a delay (around 2.2 sec)
    // between the velocity sent to the robot and the velocity really applied
    // to the wheels.
    vpTime::sleepMs( 3000 );

    std::cout << "Robot connected" << std::endl;

    // Camera parameters. In this experiment we don't need a precise calibration of the camera
    vpCameraParameters cam;

    // Create a grabber that gets the images and the camera parameters from ROS topics
    vpROSGrabber g;
    g.setCameraInfoTopic( "/camera/camera_info" );
    g.setImageTopic( "/camera/image_raw" );
    g.setRectify( true );
    g.open( argc, argv );

    // Acquire a first image so that its size is known
    g.acquire( I );

    // Get the camera parameters from the /camera/camera_info topic. If they are not
    // available, fall back to default parameters with the principal point at the image center
    if ( g.getCameraInfo( cam ) == false )
      cam.initPersProjWithoutDistortion( 600, 600, I.getWidth() / 2, I.getHeight() / 2 );

    // Create an image viewer
    vpDisplayX d( I, 10, 10, "Current frame" );
    vpDisplay::display( I );
    vpDisplay::flush( I );

    // Create a blob tracker
    vpDot2 dot;
    dot.setGraphics( true );
    dot.setComputeMoments( true );
    dot.setEllipsoidShapePrecision( 0. );       // to track a blob without any constraint on its shape
    dot.setGrayLevelPrecision( 0.9 );           // to set the blob gray level bounds used for binarisation
    dot.setEllipsoidBadPointsPercentage( 0.5 ); // to accept up to 50% of inner and outer points with a bad gray level
    dot.initTracking( I ); // wait for a user click on the blob to start tracking it
    vpDisplay::flush( I );

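    // Visual servoing task. With vpServo::EYEINHAND_L_cVe_eJe the computed control is the
    // mobile base velocity v = -lambda * (L * cVe * eJe)^+ * (s - s*), where L is the
    // interaction matrix evaluated at the desired features (vpServo::DESIRED), cVe the
    // velocity twist from the camera frame to the base frame and eJe the base Jacobian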
    vpServo task;
    task.setServo( vpServo::EYEINHAND_L_cVe_eJe );
    task.setInteractionMatrixType( vpServo::DESIRED, vpServo::PSEUDO_INVERSE );
    task.setLambda( lambda );
    vpVelocityTwistMatrix cVe;
    cVe = robot.get_cVe();
    task.set_cVe( cVe );

    std::cout << "cVe: \n" << cVe << std::endl;

    vpMatrix eJe;
    robot.get_eJe( eJe );
    task.set_eJe( eJe );
    std::cout << "eJe: \n" << eJe << std::endl;

    // Current and desired visual feature associated to the x coordinate of the point
    vpFeaturePoint s_x, s_xd;

    // Create the current x visual feature
    vpFeatureBuilder::create( s_x, cam, dot );

    // Create the desired x* visual feature
    s_xd.buildFrom( 0, 0, depth );

    // Add the feature
    task.addFeature( s_x, s_xd );

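    // Regulating x to x* = 0 mainly drives the rotational degree of freedom of the base:
    // the robot turns so that the blob stays horizontally centred in the image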
    // Create the current log(Z/Z*) visual feature
    vpFeatureDepth s_Z, s_Zd;
    // Surface of the blob estimated from the image moment m00 and converted to meters
    double surface = 1. / sqrt( dot.m00 / ( cam.get_px() * cam.get_py() ) );
    double Z, Zd;
    // Initial depth of the blob in front of the camera
    Z = coef * surface;
    // Desired depth Z* of the blob. This depth is learned and equal to the initial depth
    Zd = Z;

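    // Note on the depth estimate: dot.m00 is the blob area in pixel^2; dividing by px*py
    // converts it to an area in the normalized image plane, and the depth of a blob of
    // fixed physical size is proportional to 1/sqrt(area). The scale factor coef (1/6.77
    // here) is assumed to have been identified beforehand for the blob used in this experiment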
    std::cout << "Z " << Z << std::endl;
    s_Z.buildFrom( s_x.get_x(), s_x.get_y(), Z, 0 );   // log(Z/Z*) = 0 that's why the last parameter is 0
    s_Zd.buildFrom( s_x.get_x(), s_x.get_y(), Zd, 0 ); // log(Z/Z*) = 0 that's why the last parameter is 0

    // Add the feature
    task.addFeature( s_Z, s_Zd );

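    // The log(Z/Z*) feature mainly drives the translational degree of freedom: the robot
    // moves forward or backward to keep the blob at the learned depth Zd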
    vpColVector v; // v[0]: translational velocity (m/s), v[1]: rotational velocity (rad/s)

    while ( 1 )
    {
      // Acquire a new image
      g.acquire( I );

      // Set the image as background of the viewer
      vpDisplay::display( I );

      // Track the blob
      dot.track( I );
      // Update the current x feature
      vpFeatureBuilder::create( s_x, cam, dot );

      // Update the log(Z/Z*) feature. Since the depth Z changes, we need to update the interaction matrix
      surface = 1. / sqrt( dot.m00 / ( cam.get_px() * cam.get_py() ) );
      Z       = coef * surface;
      s_Z.buildFrom( s_x.get_x(), s_x.get_y(), Z, log( Z / Zd ) );

      robot.get_cVe( cVe );
      task.set_cVe( cVe );

      robot.get_eJe( eJe );
      task.set_eJe( eJe );

      // Compute the control law. Velocities are computed in the mobile robot reference frame
      v = task.computeControlLaw();

      std::cout << "Send velocity to the Pioneer: " << v[0] << " m/s " << vpMath::deg( v[1] ) << " deg/s" << std::endl;

      // Send the velocity to the robot
      robot.setVelocity( vpRobot::REFERENCE_FRAME, v );

      // Draw a vertical line that corresponds to the desired x coordinate of the dot cog
      vpDisplay::displayLine( I, 0, (int)cam.get_u0(), (int)I.getHeight() - 1, (int)cam.get_u0(), vpColor::red );
      vpDisplay::flush( I );

      // A click in the viewer to exit
      if ( vpDisplay::getClick( I, false ) )
        break;
    }

182 
183  std::cout << "Ending robot thread..." << std::endl;
184 
185  // Kill the servo task
186  task.print();
187  task.kill();
188  }
189  catch ( vpException e )
190  {
191  std::cout << "Catch an exception: " << e << std::endl;
192  return 1;
193  }
194 }
#else
int
main()
{
  std::cout << "You don't have the right 3rd party libraries to run this example..." << std::endl;
}
#endif