tutorial_ros_node_pioneer_visual_servo.cpp
Go to the documentation of this file.
1 
#include <iostream>

#include <visp/vpCameraParameters.h>
#include <visp/vpDisplayX.h>
#include <visp/vpDot2.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpFeatureDepth.h>
#include <visp/vpFeaturePoint.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpImage.h>
#include <visp/vpImageConvert.h>
#include <visp/vpServo.h>
#include <visp/vpVelocityTwistMatrix.h>

#include <visp_ros/vpROSGrabber.h>
#include <visp_ros/vpROSRobotPioneer.h> // declares vpROSRobotPioneer, instantiated in main()
18 
19 #if defined(VISP_HAVE_DC1394_2) && defined(VISP_HAVE_X11)
20 # define TEST_COULD_BE_ACHIEVED
21 #endif
22 
23 
#ifdef TEST_COULD_BE_ACHIEVED
/*!
  Visual servoing on a Pioneer mobile robot through ROS.

  Tracks a single blob with vpDot2 and drives the robot so that the blob is
  centered horizontally (x feature) and kept at its initial depth
  (log(Z/Z*) feature). Velocities are computed with an eye-in-hand
  vpServo task and published on the /RosAria/cmd_vel topic.

  \param argc, argv : forwarded to the robot node and grabber initialization.
  \return 0 on normal termination (user click), 1 if a vpException is caught.
*/
int main(int argc, char **argv)
{
  try {
    vpImage<unsigned char> I; // Create a gray level image container
    double depth = 1.;        // Desired depth Z* used for the desired x* feature
    double lambda = 0.6;      // Visual servo gain
    double coef = 1./6.77;    // Scale parameter used to estimate the depth Z of the blob from its surface

    vpROSRobotPioneer robot;
    robot.setCmdVelTopic("/RosAria/cmd_vel");
    robot.init(argc, argv);

    // Wait 3 sec to be sure that the low level Aria thread used to control
    // the robot is started. Without this delay we experienced a delay (around 2.2 sec)
    // between the velocity sent to the robot and the velocity that is really applied
    // to the wheels.
    vpTime::sleepMs(3000);

    std::cout << "Robot connected" << std::endl;

    // Camera parameters. In this experiment we don't need a precise calibration of the camera
    vpCameraParameters cam;

    // Create a grabber that subscribes to the ROS image/camera_info topics
    vpROSGrabber g;
    g.setCameraInfoTopic("/camera/camera_info");
    g.setImageTopic("/camera/image_raw");
    g.setRectify(true);
    g.open(argc, argv);
    // Get camera parameters from /camera/camera_info topic; fall back to a
    // rough default model if the topic doesn't provide them
    if (g.getCameraInfo(cam) == false)
      cam.initPersProjWithoutDistortion(600, 600, I.getWidth()/2, I.getHeight()/2);

    g.acquire(I);

    // Create an image viewer
    vpDisplayX d(I, 10, 10, "Current frame");
    vpDisplay::display(I);
    vpDisplay::flush(I);

    // Create a blob tracker
    vpDot2 dot;
    dot.setGraphics(true);
    dot.setComputeMoments(true);                // m00 is needed below to estimate the depth
    dot.setEllipsoidShapePrecision(0.);         // to track a blob without any constraint on the shape
    dot.setGrayLevelPrecision(0.9);             // to set the blob gray level bounds for binarisation
    dot.setEllipsoidBadPointsPercentage(0.5);   // to accept 50% of bad inner and outside points with bad gray level
    dot.initTracking(I);                        // blocking: waits for a user click on the blob
    vpDisplay::flush(I);

    // Eye-in-hand servo task with the robot Jacobian expressed in the end-effector frame
    vpServo task;
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe);
    task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE);
    task.setLambda(lambda);
    vpVelocityTwistMatrix cVe;
    cVe = robot.get_cVe(); // camera-to-end-effector twist transformation
    task.set_cVe(cVe);

    std::cout << "cVe: \n" << cVe << std::endl;

    vpMatrix eJe;
    robot.get_eJe(eJe); // robot Jacobian in the end-effector frame
    task.set_eJe(eJe);
    std::cout << "eJe: \n" << eJe << std::endl;

    // Current and desired visual feature associated to the x coordinate of the point
    vpFeaturePoint s_x, s_xd;

    // Create the current x visual feature
    vpFeatureBuilder::create(s_x, cam, dot);

    // Create the desired x* visual feature: blob centered (x* = y* = 0) at depth Z*
    s_xd.buildFrom(0, 0, depth);

    // Add the feature
    task.addFeature(s_x, s_xd);

    // Create the current log(Z/Z*) visual feature
    vpFeatureDepth s_Z, s_Zd;
    // Surface of the blob estimated from the image moment m00 and converted in meters
    double surface = 1./sqrt(dot.m00/(cam.get_px()*cam.get_py()));
    double Z, Zd;
    // Initial depth of the blob in front of the camera
    Z = coef * surface;
    // Desired depth Z* of the blob. This depth is learned and equal to the initial depth
    Zd = Z;

    std::cout << "Z " << Z << std::endl;
    s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z, 0);   // log(Z/Z*) = 0 that's why the last parameter is 0
    s_Zd.buildFrom(s_x.get_x(), s_x.get_y(), Zd, 0); // log(Z/Z*) = 0 that's why the last parameter is 0

    // Add the feature
    task.addFeature(s_Z, s_Zd);

    vpColVector v; // Computed velocities: vz, wx

    while(1)
    {
      // Acquire a new image
      g.acquire(I);

      // Set the image as background of the viewer
      vpDisplay::display(I);

      // Does the blob tracking
      dot.track(I);
      // Update the current x feature
      vpFeatureBuilder::create(s_x, cam, dot);

      // Update log(Z/Z*) feature. Since the depth Z changes, we need to update the interaction matrix
      surface = 1./sqrt(dot.m00/(cam.get_px()*cam.get_py()));
      Z = coef * surface;
      s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z, log(Z/Zd));

      robot.get_cVe(cVe);
      task.set_cVe(cVe);

      robot.get_eJe(eJe);
      task.set_eJe(eJe);

      // Compute the control law. Velocities are computed in the mobile robot reference frame
      v = task.computeControlLaw();

      std::cout << "Send velocity to the pioneer: " << v[0] << " m/s "
                << vpMath::deg(v[1]) << " deg/s" << std::endl;

      // Send the velocity to the robot
      robot.setVelocity(vpRobot::REFERENCE_FRAME, v);

      // Draw a vertical line which corresponds to the desired x coordinate of the dot cog
      vpDisplay::displayLine(I, 0, 320, 479, 320, vpColor::red);
      vpDisplay::flush(I);

      // A click in the viewer to exit
      if (vpDisplay::getClick(I, false))
        break;
    }

    std::cout << "Ending robot thread..." << std::endl;

    // Kill the servo task
    task.print();
    task.kill();
  }
  // Catch by const reference: catching vpException by value copies (and could
  // slice a derived exception type)
  catch(const vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
    return 1;
  }
  return 0; // explicit success exit code
}
#else
int main()
{
  std::cout << "You don't have the right 3rd party libraries to run this example..." << std::endl;
}
#endif
void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel)
d
Class for camera video capture from ROS cameras.
Definition: vpROSGrabber.h:103
void setCmdVelTopic(std::string topic_name)
Definition: vpROSRobot.cpp:285
void setRectify(bool rectify)
void init()
basic initialization
void setCameraInfoTopic(std::string topic_name)
void open(int argc, char **argv)
bool getCameraInfo(vpCameraParameters &cam)
int main(int argc, char **argv)
TFSIMD_FORCE_INLINE tfScalar dot(const Quaternion &q1, const Quaternion &q2)
Class for camera video capture using the ROS middleware.
void get_eJe(vpMatrix &eJe)
Interface for Pioneer mobile robots based on ROS.
void setImageTopic(std::string topic_name)
void acquire(vpImage< unsigned char > &I)


visp_ros
Author(s): Francois Pasteau, Fabien Spindler, Gatien Gaumerais
autogenerated on Tue Feb 9 2021 03:40:20