Draw.cpp
/*
 * This file is part of ALVAR, A Library for Virtual and Augmented Reality.
 *
 * Copyright 2007-2012 VTT Technical Research Centre of Finland
 *
 * Contact: VTT Augmented Reality Team <alvar.info@vtt.fi>
 *          <http://www.vtt.fi/multimedia/alvar.html>
 *
 * ALVAR is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2.1 of the License, or (at your option)
 * any later version.
 *
 * This library is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
 * for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with ALVAR; if not, see
 * <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html>.
 */

#include "ar_track_alvar/Draw.h"
#include <cassert>
#include <cmath>

using namespace std;

namespace alvar {

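// Draws each point as a single-pixel dot (a zero-length line segment) in the given color.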
void DrawPoints(IplImage *image, const vector<CvPoint>& points, CvScalar color)
{
        for(unsigned i = 0; i < points.size(); ++i)
                cvLine(image, cvPoint(points[i].x,points[i].y), cvPoint(points[i].x,points[i].y), color);
}

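// Draws the line as a segment extending 100 pixels from its center point c in both
// directions along its direction vector s.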
void DrawLine(IplImage* image, const Line line, CvScalar color)
{
        double len = 100;
        CvPoint p1, p2;
        p1.x = int(line.c.x); p1.y = int(line.c.y);
        p2.x = int(line.c.x+line.s.x*len); p2.y = int(line.c.y+line.s.y*len);
        cvLine(image, p1, p2, color);

        p1.x = int(line.c.x); p1.y = int(line.c.y);
        p2.x = int(line.c.x-line.s.x*len); p2.y = int(line.c.y-line.s.y*len);
        cvLine(image, p1, p2, color);
}

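// Draws every point of the contour sequence as a single-pixel dot.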
void DrawPoints(IplImage* image, const CvSeq* contour, CvScalar color)
{
        for(int i = 0; i < contour->total; ++i)
        {
                CvPoint* pt = (CvPoint*)cvGetSeqElem( contour, i);
                cvLine(image, cvPoint(pt->x, pt->y), cvPoint(pt->x, pt->y), color);
        }
}

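// Draws a circle of the given radius around every point of the contour sequence.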
void DrawCircles(IplImage* image, const CvSeq* contour, int radius, CvScalar color)
{
        for(int i = 0; i < contour->total; ++i)
        {
                CvPoint* pt = (CvPoint*)cvGetSeqElem( contour, i);
                cvCircle(image, cvPoint(pt->x, pt->y), radius, color);
        }
}

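// Draws the contour as a closed polyline: consecutive points are connected and the
// last point wraps around to the first.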
void DrawLines(IplImage* image, const CvSeq* contour, CvScalar color)
{
        if(contour->total >= 2)
        {
                for(int i = 0; i < contour->total; ++i)
                {
                        CvPoint* pt1 = (CvPoint*)cvGetSeqElem( contour, i);
                        CvPoint* pt2 = (CvPoint*)cvGetSeqElem( contour, (i+1)%(contour->total));
                        cvLine(image, cvPoint(pt1->x, pt1->y), cvPoint(pt2->x, pt2->y), color);
                }
        }
}

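// Draws the ellipse described by a CvBox2D, optionally filled; 'par' enlarges both
// semi-axes by a fixed margin in pixels.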
void DrawCVEllipse(IplImage* image, CvBox2D& ellipse, CvScalar color, bool fill/*=false*/, double par)
{
        CvPoint center;
        center.x = static_cast<int>(ellipse.center.x);
        center.y = static_cast<int>(ellipse.center.y);
        int type = 1;
        if(fill)
                type = CV_FILLED;

        //cout<<center.x<<" "<<center.y<<" "<<ellipse.size.width/2<<" "<<ellipse.size.height/2<<" "<<ellipse.angle<<endl;
        cvEllipse(image, center, cvSize(static_cast<int>(par+ellipse.size.width/2), static_cast<int>(par+ellipse.size.height/2)), -ellipse.angle, 0, 360, color, type);
}

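// Fills 'hide_texture' with colors that can be drawn over a detected marker to hide it.
// For every texel, four 3D points lying on the marker plane around the marker area are
// projected into the camera image with the given model-view matrix, and the image colors
// found there are blended together. The texel alpha is reduced towards the texture
// borders so the patch fades smoothly into the surrounding image.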
void BuildHideTexture(IplImage *image, IplImage *hide_texture,
        Camera *cam, double gl_modelview[16],
        PointDouble topleft, PointDouble botright)
{
        assert(image->origin == 0); // Currently only top-left origin supported
        double kx=1.0;
        double ky=1.0;

        double width = fabs(botright.x - topleft.x);
        double height = fabs(botright.y - topleft.y);

        //GLint vp[4]; //viewport
        //GLdouble winx[8];     // point's coordinates in window coordinates
        //GLdouble winy[8];
        //GLdouble winz[8];
        double objx;
        double objy;
        //GLdouble objz;
        unsigned char pixels[8][3];
        unsigned char color[3]={0,0,0};

        int i=0,j=0,t=0;
        double ox,oy,ya,yb,xc,xd,offset;
        double sizex = width/4, size2x=width/2;
        double sizey = height/4, size2y=height/2;

        // Calculate extended coordinates of detected marker (+ border)
        objx = width/2*kx;
        objy = height/2*ky;

        //cout<<hide_texture->width<<","<<hide_texture->height<<endl;

        double l2r=2*width*kx;
        double l2s=2*height*ky;
        double lr=width*kx;
        double ls=height*ky;
        double r,s;
        double xstep=2*objx/hide_texture->width,ystep=2*objy/hide_texture->height;
        for(i=0;i<hide_texture->width;i++){
                ox = -objx+i*xstep;
                offset = fmod((objx-ox), size2x);
                if(offset < sizex)
                        xc = objx + offset;
                else
                        xc = objx+size2x-offset;
                offset = fmod((objx+ox), size2x);
                if(offset < sizex)
                        xd = -objx - offset;
                else
                        xd = -objx-size2x+offset;
                r=(ox+objx);
                for(j=0;j<hide_texture->height;j++){
                        oy = -objy+j*ystep;
                        offset = fmod((objy-oy), size2y);
                        if(offset < sizey)
                                ya = objy + offset;
                        else
                                ya = objy+size2y-offset;
                        offset = fmod((oy+objy), size2y);
                        if(offset < sizey)
                                yb = -objy - offset;
                        else
                                yb = -objy-size2y+offset;
                        s=(oy+objy);

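                        // Project four 3D sample points on the marker plane into the camera image;
                        // the colors at those image locations are blended for this texel.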
                        double points3d[4][3] = {
                                {ox, ya, 0},
                                {ox, yb, 0},
                                {xc, oy, 0},
                                {xd, oy, 0},
                        };
                        double points2d[4][2];
                        CvMat points3d_mat, points2d_mat;
                        cvInitMatHeader(&points3d_mat, 4, 3, CV_64F, points3d);
                        cvInitMatHeader(&points2d_mat, 4, 2, CV_64F, points2d);
                        cam->ProjectPoints(&points3d_mat, gl_modelview, &points2d_mat);
                        int kuvanx4 = (int)Limit(points2d[0][0], 0, image->width-1); int kuvany4 = (int)Limit(points2d[0][1], 0, image->height-1);
                        int kuvanx5 = (int)Limit(points2d[1][0], 0, image->width-1); int kuvany5 = (int)Limit(points2d[1][1], 0, image->height-1);
                        int kuvanx6 = (int)Limit(points2d[2][0], 0, image->width-1); int kuvany6 = (int)Limit(points2d[2][1], 0, image->height-1);
                        int kuvanx7 = (int)Limit(points2d[3][0], 0, image->width-1); int kuvany7 = (int)Limit(points2d[3][1], 0, image->height-1);

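                        // Read the image color at each of the four projected (and clamped) points.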
                        pixels[4][0] = (unsigned char)cvGet2D(image, kuvany4, kuvanx4).val[0];
                        pixels[4][1] = (unsigned char)cvGet2D(image, kuvany4, kuvanx4).val[1];
                        pixels[4][2] = (unsigned char)cvGet2D(image, kuvany4, kuvanx4).val[2];
                        pixels[5][0] = (unsigned char)cvGet2D(image, kuvany5, kuvanx5).val[0];
                        pixels[5][1] = (unsigned char)cvGet2D(image, kuvany5, kuvanx5).val[1];
                        pixels[5][2] = (unsigned char)cvGet2D(image, kuvany5, kuvanx5).val[2];
                        pixels[6][0] = (unsigned char)cvGet2D(image, kuvany6, kuvanx6).val[0];
                        pixels[6][1] = (unsigned char)cvGet2D(image, kuvany6, kuvanx6).val[1];
                        pixels[6][2] = (unsigned char)cvGet2D(image, kuvany6, kuvanx6).val[2];
                        pixels[7][0] = (unsigned char)cvGet2D(image, kuvany7, kuvanx7).val[0];
                        pixels[7][1] = (unsigned char)cvGet2D(image, kuvany7, kuvanx7).val[1];
                        pixels[7][2] = (unsigned char)cvGet2D(image, kuvany7, kuvanx7).val[2];

                        // make the borders of the texture partly transparent
                        int opaque;
                        const int w=1;
                        if((i<w)||(j<w)||(i>hide_texture->width-w)||(j>hide_texture->height-w))
                                opaque=60;
                        else if ((i<2*w)||(j<2*w)||(i>hide_texture->width-2*w)||(j>hide_texture->height-2*w))
                                opaque=100;
                        else if ((i<3*w)||(j<3*w)||(i>hide_texture->width-3*w)||(j>hide_texture->height-3*w))
                                opaque=140;
                        else if ((i<4*w)||(j<4*w)||(i>hide_texture->width-4*w)||(j>hide_texture->height-4*w))
                                opaque=200;
                        else
                                opaque=255;

                        cvSet2D(hide_texture, j, i, cvScalar(
                                        (((lr-r)*pixels[7][0] + r*pixels[6][0]+ s* pixels[4][0] + (ls-s)* pixels[5][0])/l2r),
                                        (((lr-r)*pixels[7][1] + r*pixels[6][1]+ s* pixels[4][1] + (ls-s)* pixels[5][1])/l2r),
                                        (((lr-r)*pixels[7][2] + r*pixels[6][2]+ s* pixels[4][2] + (ls-s)* pixels[5][2])/l2r),
                                        opaque
                                ));
                }
        }
}

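// Draws 'texture' over the detected marker area: the four corners of the marker
// rectangle are projected into the camera image and the texture is warped onto the
// resulting quadrilateral with a perspective transform.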
void DrawTexture(IplImage *image, IplImage *texture,
        Camera *cam, double gl_modelview[16],
        PointDouble topleft, PointDouble botright)
{
        assert(image->origin == 0); // Currently only top-left origin supported
        double width = fabs(botright.x - topleft.x);
        double height = fabs(botright.y - topleft.y);
        double objx = width/2;
        double objy = height/2;

        // Project corners
        double points3d[4][3] = {
                {-objx, -objy, 0},
                {-objx,  objy, 0},
                { objx,  objy, 0},
                { objx, -objy, 0},
        };
        double points2d[4][2];
        CvMat points3d_mat, points2d_mat;
        cvInitMatHeader(&points3d_mat, 4, 3, CV_64F, points3d);
        cvInitMatHeader(&points2d_mat, 4, 2, CV_64F, points2d);
        cam->ProjectPoints(&points3d_mat, gl_modelview, &points2d_mat);

        // Warp texture and mask using the perspective that is based on the corners
        double map[9];
        CvMat map_mat = cvMat(3, 3, CV_64F, map);
        CvPoint2D32f src[4] = {
                { 0, 0 },
                { 0, float(texture->height-1) },
                { float(texture->width-1), float(texture->height-1) },
                { float(texture->width-1), 0 },
        };
        CvPoint2D32f dst[4] = {
                { float(points2d[0][0]), float(points2d[0][1]) },
                { float(points2d[1][0]), float(points2d[1][1]) },
                { float(points2d[2][0]), float(points2d[2][1]) },
                { float(points2d[3][0]), float(points2d[3][1]) },
        };
        cvGetPerspectiveTransform(src, dst, &map_mat);
        IplImage *img = cvCloneImage(image);
        IplImage *img2 = cvCloneImage(image);
        IplImage *mask = cvCreateImage(cvSize(image->width, image->height), 8, 1);
        IplImage *mask2 = cvCreateImage(cvSize(image->width, image->height), 8, 1);
        cvZero(img);
        cvZero(img2);
        cvZero(mask);
        cvZero(mask2);
        for (int j=0; j<texture->height; j++) { //ttesis: why must we copy the texture first?
                for (int i=0; i<texture->width; i++) {
                        CvScalar s = cvGet2D(texture, j, i);
                        cvSet2D(img, j, i, s);
                        if ((i>0) && (j>0) && (i<(texture->width-1)) && (j<(texture->height-1)))
                                cvSet2D(mask, j, i, cvScalar(1)); //ttesis: why are edges not included?
                }
        }
        cvWarpPerspective(img, img2, &map_mat);
        cvWarpPerspective(mask, mask2, &map_mat, 0);

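        // Composite the warped texture onto the output image only where the warped mask is set.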
        cvCopy(img2, image, mask2);

        cvReleaseImage(&img);
        cvReleaseImage(&img2);
        cvReleaseImage(&mask);
        cvReleaseImage(&mask2);
}

} // namespace alvar

