tutorial-visp-pioneer-visual-servo.cpp
#include <iostream>

#include <visp/vpRobotPioneer.h>
#include <visp/vpCameraParameters.h>
#include <visp/vpDisplayX.h>
#include <visp/vpDot2.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpFeatureDepth.h>
#include <visp/vpFeaturePoint.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpImage.h>
#include <visp/vpImageConvert.h>
#include <visp/vp1394TwoGrabber.h>
#include <visp/vpServo.h>
#include <visp/vpVelocityTwistMatrix.h>

// This example needs the firewire grabber (libdc1394-2.x), an X11 display and
// the Pioneer/Aria robot support, hence all three macros must be defined.
#if defined(VISP_HAVE_DC1394_2) && defined(VISP_HAVE_X11) && defined(VISP_HAVE_PIONEER)
#  define TEST_COULD_BE_ACHIEVED
#endif


#ifdef TEST_COULD_BE_ACHIEVED
int main(int argc, char **argv)
{
  try {
    vpImage<unsigned char> I; // Create a gray level image container
    double depth = 1.;
    double lambda = 0.6;
    double coef = 1./6.77; // Scale parameter used to estimate the depth Z of the blob from its surface
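    // Explanatory note: the tracked blob is assumed to have a constant physical
    // size, so its apparent area in the image scales as 1/Z^2 and the depth can
    // be recovered as Z = coef / sqrt(area). The value 1/6.77 was presumably
    // calibrated for the particular target used in the original experiment.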

    vpRobotPioneer robot;
    ArArgumentParser parser(&argc, argv);
    parser.loadDefaultArguments();

    // ArRobotConnector connects to the robot, gets some initial data from it
    // such as type and name, and then loads parameter files for this robot.
    ArRobotConnector robotConnector(&parser, &robot);
    if (!robotConnector.connectRobot())
    {
      ArLog::log(ArLog::Terse, "Could not connect to the robot.");
      if (parser.checkHelpAndWarnUnparsed())
      {
        Aria::logOptions();
        Aria::exit(1);
      }
    }
    if (!Aria::parseArgs())
    {
      Aria::logOptions();
      Aria::shutdown();
      return 1; // argument parsing failed, exit with an error code
    }

    // Wait 3 seconds to be sure that the low level Aria thread used to control
    // the robot is started. Without this delay we experienced a latency (around
    // 2.2 s) between the velocity sent to the robot and the velocity that is
    // really applied to the wheels.
    vpTime::sleepMs(3000);

    std::cout << "Robot connected" << std::endl;

    // Camera parameters. In this experiment we don't need a precise calibration of the camera
    vpCameraParameters cam;

    // Create a grabber based on libdc1394-2.x third party lib (for firewire cameras under Linux)
    vp1394TwoGrabber g(false);
    g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8);
    g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_30);
    // AVT Pike 032C parameters
    cam.initPersProjWithoutDistortion(800, 795, 320, 216);
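    // initPersProjWithoutDistortion(px, py, u0, v0) sets the pixel focal
    // lengths (px = 800, py = 795) and the principal point (u0 = 320, v0 = 216).
    // Note that u0 = 320 is the image column materialized by the red line drawn
    // in the control loop below.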

    g.acquire(I);

    // Create an image viewer
    vpDisplayX d(I, 10, 10, "Current frame");
    vpDisplay::display(I);
    vpDisplay::flush(I);

    // Create a blob tracker
    vpDot2 dot;
    dot.setGraphics(true);
    dot.setComputeMoments(true);
    dot.setEllipsoidShapePrecision(0.);  // to track a blob without any constraint on its shape
    dot.setGrayLevelPrecision(0.9);  // to set the blob gray level bounds used for binarisation
    dot.setEllipsoidBadPointsPercentage(0.5); // to accept up to 50% of inner and outer points with a bad gray level
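    // Note: called without an initial position, initTracking() waits for a user
    // click on the blob to initialize the tracker.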
    dot.initTracking(I);
    vpDisplay::flush(I);

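    // Visual servo scheme (explanatory note): the Pioneer is a unicycle robot
    // with two degrees of freedom, a translational velocity and a rotational
    // velocity. Two visual features are used: the x coordinate of the blob
    // center of gravity, which mainly acts on the heading, and log(Z/Z*),
    // which mainly acts on the forward motion. With EYEINHAND_L_cVe_eJe, the
    // task uses the velocity twist matrix cVe (mapping velocities from the
    // mobile base frame to the camera frame) and the robot Jacobian eJe, so
    // that computeControlLaw() returns, roughly,
    // v = -lambda * (L * cVe * eJe)^+ * (s - s*), expressed in the robot frame.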
    vpServo task;
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe);
    task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE);
    task.setLambda(lambda);
    vpVelocityTwistMatrix cVe;
    cVe = robot.get_cVe();
    task.set_cVe(cVe);

    std::cout << "cVe: \n" << cVe << std::endl;

    vpMatrix eJe;
    robot.get_eJe(eJe);
    task.set_eJe(eJe);
    std::cout << "eJe: \n" << eJe << std::endl;

    // Current and desired visual features associated with the x coordinate of the point
    vpFeaturePoint s_x, s_xd;

    // Create the current x visual feature
    vpFeatureBuilder::create(s_x, cam, dot);

    // Create the desired x* visual feature
    s_xd.buildFrom(0, 0, depth);
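    // The desired feature x* = 0 corresponds to a blob center of gravity lying
    // on the optical axis, i.e. on the principal point column in the image.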

    // Add the feature
    task.addFeature(s_x, s_xd);

    // Create the current log(Z/Z*) visual feature
    vpFeatureDepth s_Z, s_Zd;
    // Surface of the blob estimated from the image moment m00 and converted to meters
    double surface = 1./sqrt(dot.m00/(cam.get_px()*cam.get_py()));
    double Z, Zd;
    // Initial depth of the blob in front of the camera
    Z = coef * surface;
    // Desired depth Z* of the blob. This depth is learned and set equal to the initial depth
    Zd = Z;

    std::cout << "Z " << Z << std::endl;
    s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z, 0); // log(Z/Z*) = 0, hence the last parameter is 0
    s_Zd.buildFrom(s_x.get_x(), s_x.get_y(), Zd, 0); // log(Z/Z*) = 0, hence the last parameter is 0

    // Add the feature
    task.addFeature(s_Z, s_Zd);
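    // The task now holds two features (x and log(Z/Z*)) for the two robot
    // degrees of freedom, so the control problem is square.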

    vpColVector v; // computed velocities: v[0] translational (m/s), v[1] rotational (rad/s)

    while(1)
    {
      // Acquire a new image
      g.acquire(I);

      // Set the image as background of the viewer
      vpDisplay::display(I);

      // Track the blob
      dot.track(I);
      // Update the current x feature
      vpFeatureBuilder::create(s_x, cam, dot);

      // Update the log(Z/Z*) feature. Since the depth Z changes, we also need to update the interaction matrix
      surface = 1./sqrt(dot.m00/(cam.get_px()*cam.get_py()));
      Z = coef * surface;
      s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z, log(Z/Zd));

      robot.get_cVe(cVe);
      task.set_cVe(cVe);

      robot.get_eJe(eJe);
      task.set_eJe(eJe);

      // Compute the control law. Velocities are computed in the mobile robot reference frame
      v = task.computeControlLaw();

      std::cout << "Send velocity to the Pioneer: " << v[0] << " m/s "
                << vpMath::deg(v[1]) << " deg/s" << std::endl;

      // Send the velocity to the robot
      robot.setVelocity(vpRobot::REFERENCE_FRAME, v);
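      // Note: with vpRobot::REFERENCE_FRAME, the velocity vector sent to the
      // Pioneer is expected to hold the translational velocity v[0] in m/s and
      // the rotational velocity v[1] in rad/s, consistent with the values
      // printed above.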

      // Draw a vertical line which corresponds to the desired x coordinate of the dot center of gravity (cog)
      vpDisplay::displayLine(I, 0, 320, 479, 320, vpColor::red);
      vpDisplay::flush(I);

      // A click in the viewer to exit
      if (vpDisplay::getClick(I, false))
        break;
    }

    std::cout << "Ending robot thread..." << std::endl;
    robot.stopRunning();

    // Wait for the thread to stop
    robot.waitForRunExit();

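    // Note: in the ViSP version targeted by this tutorial, kill() has to be
    // called explicitly to free the memory allocated for the task features;
    // more recent ViSP releases handle this in the vpServo destructor.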
    // Kill the servo task
    task.print();
    task.kill();
  }
  catch (const vpException &e) {
    std::cout << "Caught an exception: " << e << std::endl;
    return 1;
  }
  return 0;
}
#else
int main()
{
  std::cout << "You don't have the required third-party libraries (libdc1394-2.x, X11, Aria) to run this example..." << std::endl;
}
#endif

