SampleMarkerHide.cpp
#include "CvTestbed.h"
#include "MarkerDetector.h"
#include "GlutViewer.h"
#include "Shared.h"
using namespace alvar;
using namespace std;

#define GLUT_DISABLE_ATEXIT_HACK // Needed to compile with Mingw?
#include <GL/gl.h>

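// Half-extent of the textured quad in the drawable's model coordinates (the quad spans [-margin, margin]).
// calibrationFilename is filled in by main() once the capture device is known.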
const double margin = 1.0;
std::stringstream calibrationFilename;

// Own drawable for showing hide-texture in OpenGL
struct OwnDrawable : public Drawable {
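    // 64x64 RGBA texture, filled from the current camera frame in videocallback()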
    unsigned char hidingtex[64*64*4];
    virtual void Draw() {
        glPushMatrix();
        glMultMatrixd(gl_mat);

        glPushAttrib(GL_ALL_ATTRIB_BITS);
        glEnable(GL_TEXTURE_2D);
        int tex=0;
        glBindTexture(GL_TEXTURE_2D, tex);
        glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
        glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,64,64,0,GL_RGBA,GL_UNSIGNED_BYTE,hidingtex);
        glDisable(GL_CULL_FACE);
        glDisable(GL_LIGHTING);
        glDisable(GL_DEPTH_TEST);
        glEnable(GL_ALPHA_TEST);
        glEnable(GL_BLEND);
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
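        // Draw a textured quad covering the marker area (z = 0 in model coordinates)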
        glBegin(GL_QUADS);
            glTexCoord2d(0.0,0.0);
            glVertex3d(-margin,-margin,0);
            glTexCoord2d(0.0,1.0);
            glVertex3d(-margin,margin,0);
            glTexCoord2d(1.0,1.0);
            glVertex3d(margin,margin,0);
            glTexCoord2d(1.0,0.0);
            glVertex3d(margin,-margin,0);
        glEnd();
        glPopAttrib();
        glPopMatrix();
    }
};

void videocallback(IplImage *image)
{
    static bool init=true;
    static const int marker_size=15;
    static Camera cam;
    static OwnDrawable d[32];
    static IplImage *hide_texture;

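    // IplImage::origin != 0 means the image is stored bottom-up; flip it so that
    // detection and texture building see a top-down image, and flip back at the end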
    bool flip_image = (image->origin?true:false);
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }

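    // Downscale 3-channel frames to 512x512 and hand them to GlutViewer as the video background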
    static IplImage* bg_image = 0;
    if(!bg_image) bg_image = cvCreateImage(cvSize(512, 512), 8, 3);
    if(image->nChannels == 3)
    {
        bg_image->origin = 0;
        cvResize(image, bg_image);
        GlutViewer::SetVideo(bg_image);
    }

    if (init) {
        init = false;
        cout<<"Loading calibration: "<<calibrationFilename.str();
        if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) {
            cout<<" [Ok]"<<endl;
        } else {
            cam.SetRes(image->width, image->height);
            cout<<" [Fail]"<<endl;
        }
        double p[16];
        cam.GetOpenglProjectionMatrix(p,image->width,image->height);
        GlutViewer::SetGlProjectionMatrix(p);
        hide_texture = CvTestbed::Instance().CreateImage("hide_texture", cvSize(64, 64), 8, 4);
    }
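    // Run the marker detector on the current frame; the results are available via marker_detector.markers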
    static MarkerDetector<MarkerData> marker_detector;
    marker_detector.Detect(image, &cam, false, false);

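    // Register one drawable per detected marker (at most 32) with the viewer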
    GlutViewer::DrawableClear();
    for (size_t i=0; i<marker_detector.markers->size(); i++) {
        if (i >= 32) break;
        GlutViewer::DrawableAdd(&(d[i]));
    }
    for (size_t i=0; i<marker_detector.markers->size(); i++) {
        if (i >= 32) break;

        // Note that we need to mirror both the y- and z-axis because:
        // - In OpenCV we have coordinates: x-right, y-down, z-ahead
        // - In OpenGL we have coordinates: x-right, y-up, z-backwards
        // TODO: A better option might be to use an OpenGL projection matrix that matches our OpenCV approach
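        // (Equivalently, the OpenGL pose is diag(1,-1,-1,1) times the OpenCV pose.)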
        Pose p = (*(marker_detector.markers))[i].pose;
        BuildHideTexture(image, hide_texture, &cam, d[i].gl_mat, PointDouble(-margin, -margin), PointDouble(margin, margin));
        //DrawTexture(image, hide_texture, &cam, d[i].gl_mat, PointDouble(-0.7, -0.7), PointDouble(0.7, 0.7));

        p.GetMatrixGL(d[i].gl_mat);
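        // hide_texture uses OpenCV's BGRA byte order; swap the channels into RGBA for OpenGL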
        for (int ii=0; ii<64*64; ii++) {
            d[i].hidingtex[ii*4+0] = hide_texture->imageData[ii*4+2];
            d[i].hidingtex[ii*4+1] = hide_texture->imageData[ii*4+1];
            d[i].hidingtex[ii*4+2] = hide_texture->imageData[ii*4+0];
            d[i].hidingtex[ii*4+3] = hide_texture->imageData[ii*4+3];
        }
    }
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }
}

int main(int argc, char *argv[])
{
    try {
        // Output usage message
        std::string filename(argv[0]);
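        // Strip the directory part from argv[0] (Windows-style '\\' separators) for the usage text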
        filename = filename.substr(filename.find_last_of('\\') + 1);
        std::cout << "SampleMarkerHide" << std::endl;
        std::cout << "================" << std::endl;
        std::cout << std::endl;
        std::cout << "Description:" << std::endl;
        std::cout << "  This is an example of how to detect 'MarkerData' markers, similarly" << std::endl;
        std::cout << "  to 'SampleMarkerDetector', and hide them using the 'BuildHideTexture'" << std::endl;
        std::cout << "  and 'DrawTexture' functions." << std::endl;
        std::cout << std::endl;
        std::cout << "Usage:" << std::endl;
        std::cout << "  " << filename << " [device]" << std::endl;
        std::cout << std::endl;
        std::cout << "    device    integer selecting device from enumeration list (default 0)" << std::endl;
        std::cout << "              highgui capture devices are preferred" << std::endl;
        std::cout << std::endl;
        std::cout << "Keyboard Shortcuts:" << std::endl;
        std::cout << "  q: quit" << std::endl;
        std::cout << std::endl;

        // Initialise GlutViewer and CvTestbed
        GlutViewer::Start(argc, argv, 640, 480, 15);
        CvTestbed::Instance().SetVideoCallback(videocallback);

        // Enumerate possible capture plugins
        CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins();
        if (plugins.size() < 1) {
            std::cout << "Could not find any capture plugins." << std::endl;
            return 0;
        }

        // Display capture plugins
        std::cout << "Available Plugins: ";
        outputEnumeratedPlugins(plugins);
        std::cout << std::endl;

        // Enumerate possible capture devices
        CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices();
        if (devices.size() < 1) {
            std::cout << "Could not find any capture devices." << std::endl;
            return 0;
        }

        // Check command line argument for which device to use
        int selectedDevice = defaultDevice(devices);
        if (argc > 1) {
            selectedDevice = atoi(argv[1]);
        }
        if (selectedDevice >= (int)devices.size()) {
            selectedDevice = defaultDevice(devices);
        }

        // Display capture devices
        std::cout << "Enumerated Capture Devices:" << std::endl;
        outputEnumeratedDevices(devices, selectedDevice);
        std::cout << std::endl;

        // Create capture object from camera
        Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]);
        std::string uniqueName = devices[selectedDevice].uniqueName();

        // Handle capture lifecycle and start video capture
        // Note that loadSettings/saveSettings are not supported by all plugins
        if (cap) {
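            // Settings and calibration file names are derived from the device's unique name,
            // so each camera gets its own persistent files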
            std::stringstream settingsFilename;
            settingsFilename << "camera_settings_" << uniqueName << ".xml";
            calibrationFilename << "camera_calibration_" << uniqueName << ".xml";

            cap->start();
            cap->setResolution(640, 480);

            if (cap->loadSettings(settingsFilename.str())) {
                std::cout << "Loading settings: " << settingsFilename.str() << std::endl;
            }

            std::stringstream title;
            title << "SampleMarkerHide (" << cap->captureDevice().captureType() << ")";

            CvTestbed::Instance().StartVideo(cap, title.str().c_str());

            if (cap->saveSettings(settingsFilename.str())) {
                std::cout << "Saving settings: " << settingsFilename.str() << std::endl;
            }

            cap->stop();
            delete cap;
        }
        else if (CvTestbed::Instance().StartVideo(0, argv[0])) {
        }
        else {
            std::cout << "Could not initialize the selected capture backend." << std::endl;
        }

        return 0;
    }
    catch (const std::exception &e) {
        std::cout << "Exception: " << e.what() << std::endl;
    }
    catch (...) {
        std::cout << "Exception: unknown" << std::endl;
    }
}


ar_track_alvar
Author(s): Scott Niekum