face_normalizer.cpp
00001 #include<cob_people_detection/face_normalizer.h>
00002 #include<pcl/common/common.h>
00003 #include<pcl/common/eigen.h>
00004 #if !defined(PCL_VERSION_COMPARE)
00005         #include<pcl/common/transform.h>
00006 #else
00007         #if PCL_VERSION_COMPARE(<,1,2,0)
00008                 #include<pcl/common/transform.h>
00009         #endif
00010 #endif
00011 
00012 #include<fstream>
00013 
00014 #include<limits>
00015 
00016 using namespace cv;
00017 
00018 void FaceNormalizer::init(FNConfig& i_config)
00019 {
00020   std::string def_classifier_directory=     "/opt/ros/groovy/share/OpenCV/";
00021   this->init(def_classifier_directory,"",i_config,0,false,false);
00022 }
00023 
00024 void FaceNormalizer::init(std::string i_classifier_directory,FNConfig& i_config)
00025 {
00026   this->init(i_classifier_directory,"",i_config,0,false,false);
00027 }
00028 
00029 // call the initialization function with default values:
00030 // -> no debug output
00031 // -> no storage of results
00032 // -> epoch counter set to zero
00033 // -> classifier directory set to the standard OpenCV installation dir
00034 void FaceNormalizer::init()
00035 {
00036 
00037   // set default values for face normalization
00038   FNConfig def_config;
00039   def_config.eq_ill=  true;
00040   def_config.align=   false;
00041   def_config.resize=  true;
00042   def_config.cvt2gray=true;
00043   def_config.extreme_illumination_condtions=false;
00044 
00045   std::string def_classifier_directory=   "/opt/ros/groovy/share/OpenCV/"  ;
00046 
00047   this->init(def_classifier_directory,"",def_config,0,false,false);
00048 }
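// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for documentation; not part of the original
// source): how a caller might configure the normalizer explicitly instead of
// relying on the defaults above and then normalize a plain 2D image. The
// classifier directory is an assumption and has to match the local OpenCV
// installation; the 100x100 output size is arbitrary.
static void example_normalize_2d(FaceNormalizer& fn, cv::Mat& face_img)
{
  FNConfig cfg;
  cfg.eq_ill   = true;   // radiometric (illumination) normalization
  cfg.align    = false;  // geometric alignment needs 3D data, keep it off here
  cfg.resize   = true;   // scale the result to norm_size
  cfg.cvt2gray = true;   // convert to a single-channel image
  cfg.extreme_illumination_condtions = false;  // use the DCT-based correction

  fn.init("/opt/ros/groovy/share/OpenCV/", cfg);

  cv::Size norm_size(100, 100);
  fn.normalizeFace(face_img, norm_size);  // 2D-only overload defined below
}
// ---------------------------------------------------------------------------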
00049 
00050 
00051 
00052 void FaceNormalizer::init(std::string i_classifier_directory,std::string i_storage_directory,FNConfig& i_config,int i_epoch_ctr,bool i_debug,bool i_record_scene)
00053 {
00054   classifier_directory_=i_classifier_directory;
00055   storage_directory_=   i_storage_directory;
00056   config_=              i_config;
00057   epoch_ctr_=            i_epoch_ctr;
00058   debug_=               i_debug;
00059   record_scene_=        i_record_scene;
00060 
00061   if (config_.align)
00062   {
00063     std::string eye_r_path,eye_path,eye_l_path,nose_path,mouth_path;
00064     eye_r_path=    classifier_directory_+ "haarcascades/haarcascade_mcs_righteye.xml";
00065     eye_l_path=    classifier_directory_+ "haarcascades/haarcascade_mcs_lefteye.xml";
00066     nose_path=     classifier_directory_+ "haarcascades/haarcascade_mcs_nose.xml";
00067 
00068     eye_r_cascade_=(CvHaarClassifierCascade*) cvLoad(eye_r_path.c_str(),0,0,0);
00069     eye_r_storage_=cvCreateMemStorage(0);
00070 
00071     eye_l_cascade_=(CvHaarClassifierCascade*) cvLoad(eye_l_path.c_str(),0,0,0);
00072     eye_l_storage_=cvCreateMemStorage(0);
00073 
00074     nose_cascade_=(CvHaarClassifierCascade*) cvLoad(nose_path.c_str(),0,0,0);
00075     nose_storage_=cvCreateMemStorage(0);
00076 
00077     //intrinsics
00078     cv::Vec2f focal_length;
00079     cv::Point2f pp;
00080     focal_length[0]=524.90160178307269 ;
00081     focal_length[1]=525.85226379335393;
00082     pp.x=312.13543361773458;
00083     pp.y=254.73474482242005;
00084     cam_mat_=(cv::Mat_<double>(3,3) << focal_length[0] , 0.0 , pp.x  , 0.0 , focal_length[1] , pp.y  , 0.0 , 0.0 , 1);
00085     dist_coeffs_=(cv::Mat_<double>(1,5) << 0.25852454045259377, -0.88621162461930914, 0.0012346117737001144, 0.00036377459304633028, 1.0422813597203011);
00086   }
00087 
00088   initialized_=true;
00089 }
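// Note (added for clarity): cam_mat_ above is the pinhole camera matrix
//   K = [ fx  0  cx ;  0  fy  cy ;  0  0  1 ],
// so a 3D point (X,Y,Z) in the camera frame projects to pixel coordinates
//   u = fx*X/Z + cx ,  v = fy*Y/Z + cy   (before lens distortion),
// and dist_coeffs_ holds OpenCV's (k1, k2, p1, p2, k3) radial/tangential
// distortion terms. The hard-coded values are the calibration of one specific
// RGB-D sensor and would normally be replaced by the intrinsics of the camera
// actually in use.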
00090 
00091 
00092 FaceNormalizer::~FaceNormalizer()
00093 {
00094   if(config_.align)
00095   {
00096         cvReleaseHaarClassifierCascade(&nose_cascade_);
00097         cvReleaseMemStorage(&nose_storage_);
00098         cvReleaseHaarClassifierCascade(&eye_l_cascade_);
00099         cvReleaseMemStorage(&eye_l_storage_);
00100         cvReleaseHaarClassifierCascade(&eye_r_cascade_);
00101         cvReleaseMemStorage(&eye_r_storage_);
00102   }
00103 };
00104 
00105 bool FaceNormalizer::isolateFace(cv::Mat& RGB,cv::Mat& XYZ)
00106 {
00107   cv::Vec3f middle_pt=XYZ.at<cv::Vec3f>(round(XYZ.rows/2),round(XYZ.cols/2));
00108 
00109   float background_threshold=middle_pt[2]+0.25;
00110 
00111   eliminate_background(RGB,XYZ,background_threshold);
00112   interpolate_head(RGB, XYZ);
00113   return true;
00114 }
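// Note (added for clarity): the threshold above keeps only points that are at
// most 0.25 m farther from the camera than the depth sampled at the image
// centre, which is assumed to lie on the face; everything beyond that is
// treated as background and blanked by eliminate_background().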
00115 
00116 bool FaceNormalizer::recordFace(cv::Mat&RGB,cv::Mat& XYZ)
00117 {
00118   
00119   
00120   std::string filepath=storage_directory_;
00121   filepath.append("scene");
00122   return save_scene(RGB,XYZ,filepath);
00123 
00124 }
00125 
00126 bool FaceNormalizer::synthFace(cv::Mat &RGB,cv::Mat& XYZ, cv::Size& norm_size,std::vector<cv::Mat>& synth_images)
00127 {
00128   //TODO
00129   // maybe develop a measure for the angles that are now being represented
00130   norm_size_=norm_size;
00131   input_size_=cv::Size(RGB.cols,RGB.rows);
00132 
00133   bool valid = true; // remains true only if all steps complete successfully
00134 
00135   epoch_ctr_++;
00136 
00137   //geometric normalization
00138   if(config_.align)
00139   {
00140     cv::Mat GRAY;
00141     cv::cvtColor(RGB,GRAY,CV_RGB2GRAY);
00142     //rotate_head(RGB,XYZ);
00143     if(!synth_head_poses(GRAY,XYZ,synth_images))
00144     {
00145       valid=false;
00146       // if head synthesis fails push back color image
00147       synth_images.push_back(RGB);
00148     }
00149 
00150   }
00151 
00152   // process remaining normalization steps with all synthetic images
00153   for(int n=0;n<synth_images.size();n++)
00154   {
00155     if(config_.cvt2gray)
00156     {
00157       if(synth_images[n].channels()==3) cv::cvtColor(synth_images[n],synth_images[n],CV_RGB2GRAY);
00158     }
00159 
00160     if(config_.eq_ill)
00161     {
00162       // radiometric normalization
00163       if(!normalize_radiometry(synth_images[n])) valid=false;
00164       if(debug_) dump_img(synth_images[n],"radiometry");
00165     }
00166 
00167     if(debug_ && valid) dump_img(synth_images[n],"geometry");
00168 
00169     if(config_.resize)
00170     {
00171       // resizing
00172       normalize_img_type(synth_images[n],synth_images[n]);
00173       cv::resize(synth_images[n],synth_images[n],norm_size_,0,0);
00174     }
00175 
00176     if(debug_) dump_img(synth_images[n],"size");
00177   }
00178 
00179   epoch_ctr_++;
00180   return valid;
00181 
00182 }
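// Illustrative sketch (added for documentation; not part of the original
// source): generating synthetic views of a detected face from registered
// color and point-cloud data. RGB is assumed to be CV_8UC3 and XYZ a CV_32FC3
// Cartesian coordinate image of the same size; config_.align must be enabled
// for the pose synthesis to run.
static void example_synth_views(FaceNormalizer& fn, cv::Mat& RGB, cv::Mat& XYZ)
{
  cv::Size norm_size(100, 100);
  std::vector<cv::Mat> views;
  if (fn.synthFace(RGB, XYZ, norm_size, views))
  {
    // 'views' now holds the frontal view plus artificially rotated poses,
    // each radiometrically normalized and resized to norm_size.
  }
}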
00183 
00184 bool FaceNormalizer::normalizeFace( cv::Mat& RGB, cv::Mat& XYZ, cv::Size& norm_size, cv::Mat& DM)
00185 {
00186   bool valid=true;
00187   valid =normalizeFace(RGB,XYZ,norm_size);
00188   XYZ.copyTo(DM);
00189   // reduce to depth (z coordinate) and rebase/denoise the depth values in create_DM
00190   create_DM(DM,DM);
00191   return valid;
00192 }
00193 
00194 
00195 bool FaceNormalizer::normalizeFace( cv::Mat& img,cv::Mat& depth,cv::Size& norm_size)
00196 {
00197 
00198   norm_size_=norm_size;
00199   input_size_=cv::Size(img.cols,img.rows);
00200 
00201   bool valid = true; // remains true only if all steps complete successfully
00202 
00203   epoch_ctr_++;
00204 
00205   //geometric normalization
00206   if(config_.align)
00207   {
00208     if(!normalize_geometry_depth(img,depth)) valid=false;
00209   }
00210 
00211   if(config_.cvt2gray)
00212   {
00213     if(img.channels()==3)cv::cvtColor(img,img,CV_RGB2GRAY);
00214   }
00215 
00216   if(config_.eq_ill)
00217   {
00218     // radiometric normalization
00219     cv::Mat img_before_ill = img.clone();
00220     if(!normalize_radiometry(img))
00221       valid=false;
00222     else
00223     {
00224       if(img_before_ill.channels()==3)
00225       {
00226         cv::Vec3b white(255, 255, 255);
00227         for (int v=0; v<img.rows; ++v)
00228           for (int u=0; u<img.cols; ++u)
00229             if (img_before_ill.at<cv::Vec3b>(v,u) == white)
00230               img.at<cv::Vec3b>(v,u) = white;
00231       }
00232       else
00233       {
00234         int white = 255;
00235         for (int v=0; v<img.rows; ++v)
00236           for (int u=0; u<img.cols; ++u)
00237             if ((int)img_before_ill.at<uchar>(v,u) == white)
00238               img.at<uchar>(v,u) = (uchar)white;
00239       }
00240     }
00241     if(debug_)dump_img(img,"radiometry");
00242   }
00243 
00244   if(debug_ && valid)dump_img(img,"geometry");
00245 
00246   if(config_.resize)
00247   {
00248   //resizing
00249   cv::resize(img,img,norm_size_,0,0);
00250   cv::resize(depth,depth,norm_size_,0,0);
00251   }
00252 
00253   if(debug_)dump_img(img,"size");
00254 
00255   epoch_ctr_++;
00256   normalize_img_type(img,img);
00257   return valid;
00258 }
00259 
00260 
00261 bool FaceNormalizer::normalizeFace( cv::Mat& img,cv::Size& norm_size)
00262 {
00263 
00264   if(!initialized_)
00265   {
00266     std::cout<<"[FaceNormalizer] not initialized - use init() first"<<std::endl;
00267   }
00268   // set members to current values
00269   norm_size_=norm_size;
00270   input_size_=cv::Size(img.cols,img.rows);
00271 
00272   bool valid = true; // remains true only if all steps complete successfully
00273 
00274   if(config_.cvt2gray)
00275   {
00276   if(img.channels()==3)cv::cvtColor(img,img,CV_RGB2GRAY);
00277   }
00278 
00279   if(config_.align)
00280   {
00281     std::cout<<"[FaceNormalizer] geometrical normalization only with 3D data"<<std::endl;
00282   //geometric normalization for rgb image only disabled
00283   }
00284 
00285   if(config_.eq_ill)
00286   {
00287   // radiometric normalization
00288   if(!normalize_radiometry(img)) valid=false;
00289   if(debug_)dump_img(img,"1_radiometry");
00290   }
00291 
00292   if(config_.resize)
00293   {
00294   //resizing
00295   cv::resize(img,img,norm_size_,0,0);
00296   if(debug_)dump_img(img,"2_resized");
00297   }
00298 
00299   epoch_ctr_++;
00300   normalize_img_type(img,img);
00301   return valid;
00302 }
00303 
00304 bool FaceNormalizer::normalize_radiometry(cv::Mat& img)
00305 {
00306   
00307   if(config_.extreme_illumination_condtions==true)GammaDoG(img);
00308   else  GammaDCT(img);
00309   return true;
00310 }
00311 
00312 void FaceNormalizer::extractVChannel( cv::Mat& img,cv::Mat& V)
00313 {
00314   if(debug_)cv::cvtColor(img,img,CV_RGB2HSV);
00315   else cv::cvtColor(img,img,CV_BGR2HSV);
00316 
00317   std::vector<cv::Mat> channels;
00318   cv::split(img,channels);
00319   channels[2].copyTo(V);
00320 
00321   if(debug_)cv::cvtColor(img,img,CV_HSV2RGB);
00322   else cv::cvtColor(img,img,CV_HSV2BGR);
00323 
00324   return;
00325 
00326 }
00327 
00328 void FaceNormalizer::subVChannel(cv::Mat& img,cv::Mat& V)
00329 {
00330 
00331    if(debug_)cv::cvtColor(img,img,CV_RGB2HSV);
00332   else cv::cvtColor(img,img,CV_BGR2HSV);
00333 
00334   std::vector<cv::Mat> channels;
00335   cv::split(img,channels);
00336   channels[2]=V;
00337   cv::merge(channels,img);
00338 
00339   if(debug_)cv::cvtColor(img,img,CV_HSV2RGB);
00340   else cv::cvtColor(img,img,CV_HSV2BGR);
00341 
00342   return;
00343 }
00344 
00345 
00346 
00347 void FaceNormalizer::GammaDoG(cv::Mat& input_img)
00348 {
00349   cv::Mat img=cv::Mat(input_img.rows,input_img.cols,CV_8UC1);
00350   if(input_img.channels()==3)
00351   {
00352     extractVChannel(input_img,img);
00353     std::cout<<"extracting"<<std::endl;
00354   }
00355   else
00356   {
00357     img=input_img;
00358   }
00359 
00360 
00361   img.convertTo(img,CV_32FC1);
00362 
00363   // gamma correction
00364   cv::pow(img,0.2,img);
00365   //dog
00366   cv::Mat g2,g1;
00367   cv::GaussianBlur(img,g1,cv::Size(9,9),1);
00368   cv::GaussianBlur(img,g2,cv::Size(9,9),2);
00369   cv::subtract(g2,g1,img);
00370   //cv::normalize(img,img,0,255,cv::NORM_MINMAX);
00371   img.convertTo(img,CV_8UC1,255);
00372   cv::equalizeHist(img,img);
00373 
00374   input_img=img;
00375 }
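// Note (added for clarity): GammaDoG combines gamma compression (I^0.2), a
// difference of Gaussians (sigma 2 minus sigma 1) that suppresses
// low-frequency illumination gradients, and a final histogram equalization;
// this is a common preprocessing chain for faces under difficult lighting and
// is selected when config_.extreme_illumination_condtions is set.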
00376 
00377 void FaceNormalizer::GammaDCT(cv::Mat& input_img)
00378 {
00379   cv::Mat img=cv::Mat(input_img.rows,input_img.cols,CV_8UC1);
00380   if(input_img.channels()==3)
00381   {
00382     extractVChannel(input_img,img);
00383   }
00384   else
00385   {
00386     img=input_img;
00387   }
00388 
00389   // DCT-based correction; cv::dct needs even image dimensions, so crop an odd row/column if necessary
00390   if( img.rows%2!=0 )
00391   {
00392     img=img(cv::Rect(0,0,img.cols,img.rows-1));
00393     input_img=input_img(cv::Rect(0,0,input_img.cols,input_img.rows-1));
00394   }
00395   if( img.cols%2!=0 )
00396   {
00397     img=img(cv::Rect(0,0,img.cols-1,img.rows));
00398     input_img=input_img(cv::Rect(0,0,input_img.cols-1,input_img.rows));
00399   }
00400 
00401   cv::equalizeHist(img,img);
00402   img.convertTo(img,CV_32FC1);
00403   cv::Scalar mu,sigma;
00404   cv::meanStdDev(img,mu,sigma);
00405 
00406   double C_00=log(mu.val[0])*sqrt(img.cols*img.rows);
00407 
00408 //----------------------------
00409   // gamma correction
00410   cv::pow(img,0.2,img);
00411 
00412   cv::Mat imgdummy;
00413   img.convertTo(imgdummy,CV_8UC1);
00414   cv::dct(img,img);
00415 
00416   //---------------------------------------
00417   img.at<float>(0,0)=C_00;
00418   img.at<float>(0,1)/=10;
00419   img.at<float>(0,2)/=10;
00420 
00421   img.at<float>(1,0)/=10;
00422   img.at<float>(1,1)/=10;
00423 
00424   //--------------------------------------
00425 
00426   cv::idct(img,img);
00427   cv::normalize(img,img,0,255,cv::NORM_MINMAX);
00428 
00429   img.convertTo(img,CV_8UC1);
00430   //cv::blur(img,img,cv::Size(3,3));
00431 
00432   if(input_img.channels()==3)
00433   {
00434     subVChannel(input_img,img);
00435   }
00436   else
00437   {
00438     input_img=img;
00439   }
00440 }
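// Note on the math (added for clarity): for OpenCV's orthonormal DCT the DC
// coefficient satisfies  DCT(0,0) = mean(img) * sqrt(rows*cols).  Overwriting
// it with C_00 = log(mu) * sqrt(cols*rows) therefore resets the reconstructed
// image's mean brightness to log(mu), and dividing the neighbouring
// low-frequency coefficients by 10 damps slowly varying illumination before
// the inverse DCT restores the (V channel of the) image.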
00441 
00442 
00443 bool FaceNormalizer::synth_head_poses_relative(cv::Mat& img,cv::Mat& depth,std::vector<cv::Mat>& synth_images)
00444 {
00445 
00446   // detect features
00447   if(!features_from_color(img))return false;
00448   if(debug_)dump_features(img);
00449 
00450 
00451    if(!features_from_depth(depth)) return false;
00452 
00453    Eigen::Vector3f temp,x_new,y_new,z_new,lefteye,nose,righteye,eye_middle;
00454 
00455    nose<<f_det_xyz_.nose.x,f_det_xyz_.nose.y,f_det_xyz_.nose.z;
00456    lefteye<<f_det_xyz_.lefteye.x,f_det_xyz_.lefteye.y,f_det_xyz_.lefteye.z;
00457    righteye<<f_det_xyz_.righteye.x,f_det_xyz_.righteye.y,f_det_xyz_.righteye.z;
00458    eye_middle=lefteye+((righteye-lefteye)*0.5);
00459 
00460    x_new<<f_det_xyz_.righteye.x-f_det_xyz_.lefteye.x,f_det_xyz_.righteye.y-f_det_xyz_.lefteye.y,f_det_xyz_.righteye.z-f_det_xyz_.lefteye.z;
00461    temp<<f_det_xyz_.nose.x-eye_middle[0],f_det_xyz_.nose.y-eye_middle[1],(f_det_xyz_.nose.z-eye_middle[2]);
00462    z_new=x_new.cross(temp);
00463    x_new.normalize();
00464    z_new.normalize();
00465    y_new=x_new.cross(z_new);
00466 
00467    if(y_new[1]<0) y_new*=-1;
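   // Note (added for clarity): the three vectors form a face-local frame:
   // x_new runs from the left to the right eye, z_new is the normal of the
   // plane spanned by the eye axis and the eye-midpoint-to-nose vector, and
   // y_new completes the frame; the sign flip keeps y_new pointing along the
   // positive camera y axis so the synthesized views share a consistent
   // up/down orientation. The same construction is reused in the functions
   // below.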
00468 
00469 
00470    Eigen::Vector3f origin;
00471    origin=nose;
00472    //origin=nose+(0.1*z_new);
00473 
00474    Eigen::Affine3f T_norm;
00475 
00476    pcl::getTransformationFromTwoUnitVectorsAndOrigin(y_new,z_new,origin,T_norm);
00477 
00478 
00479    // viewing offset of normalized perspective
00480     double view_offset=0.6;
00481     Eigen::Translation<float,3> translation=Eigen::Translation<float,3>(0, 0, view_offset);
00482 
00483     // modify T_norm by angle for nose incline compensation
00484     Eigen::AngleAxis<float> roll(-0.78, x_new);
00485     //Eigen::AngleAxis<float> roll(-0.78, Eigen::Vector3f(1,0,0));
00486     T_norm=roll*T_norm;
00487 
00488 
00489   Eigen::Affine3f T_rot;
00490   cv::Mat workmat=cv::Mat(depth.rows,depth.cols,CV_32FC3);
00491   Eigen::AngleAxis<float> alpha;
00492 
00493   cv::Mat dmres;  cv::Mat imgres;
00494   cv::Rect roi;
00495 
00496 
00497 
00498   //number of poses
00499   int N=7;
00500   std::cout<<"Synthetic POSES"<<std::endl;
00501 
00502   for(int i=0;i<N;i++)
00503   {
00504     switch(i)
00505     {
00506       case 0:
00507         {
00508           alpha=Eigen::AngleAxis<float>((float)0, x_new);
00509           break;
00510         }
00511       case 1:
00512         {
00513           alpha=Eigen::AngleAxis<float>((float) 0.1*M_PI, x_new);
00514           break;
00515         }
00516       case 2:
00517         {
00518           alpha=Eigen::AngleAxis<float>((float)-0.1*M_PI, x_new);
00519           break;
00520         }
00521       case 3:
00522         {
00523           alpha=Eigen::AngleAxis<float>((float) 0.1*M_PI, y_new);
00524           break;
00525         }
00526       case 4:
00527         {
00528           alpha=Eigen::AngleAxis<float>((float)-0.1*M_PI, y_new);
00529           break;
00530         }
00531       case 5:
00532         {
00533           alpha=Eigen::AngleAxis<float>((float) 0.1*M_PI, z_new);
00534           break;
00535         }
00536       case 6:
00537         {
00538           alpha=Eigen::AngleAxis<float>((float)-0.1*M_PI, z_new);
00539           break;
00540         }
00541     }
00542   // ----- artificial head pose rotation
00543 
00544   T_rot.setIdentity();
00545   T_rot=alpha*T_rot;
00546   // ----- artificial head pose rotation
00547 
00548   dmres=cv::Mat::zeros(480,640,CV_32FC3);
00549   if(img.channels()==3)imgres=cv::Mat::zeros(480,640,CV_8UC3);
00550   if(img.channels()==1)imgres=cv::Mat::zeros(480,640,CV_8UC1);
00551 
00552 
00553   depth.copyTo(workmat);
00554    cv::Vec3f* ptr=workmat.ptr<cv::Vec3f>(0,0);
00555    Eigen::Vector3f pt;
00556    for(int i=0;i<img.total();i++)
00557    {
00558      pt<<(*ptr)[0],(*ptr)[1],(*ptr)[2];
00559      pt=T_rot*pt;
00560      pt=translation*pt;
00561 
00562     (*ptr)[0]=pt[0];
00563     (*ptr)[1]=pt[1];
00564     (*ptr)[2]=pt[2];
00565      ptr++;
00566    }
00567 
00568    nose<<f_det_xyz_.nose.x,f_det_xyz_.nose.y,f_det_xyz_.nose.z;
00569    lefteye<<f_det_xyz_.lefteye.x,f_det_xyz_.lefteye.y,f_det_xyz_.lefteye.z;
00570    righteye<<f_det_xyz_.righteye.x,f_det_xyz_.righteye.y,f_det_xyz_.righteye.z;
00571 
00572    lefteye=translation*T_rot*T_norm*lefteye;
00573    righteye=translation*T_rot*T_norm*righteye;
00574    nose=translation*T_rot*T_norm*nose;
00575 
00576    // transform the normalized feature coordinates separately to determine the ROI
00577    cv::Point2f lefteye_uv,righteye_uv,nose_uv;
00578    cv::Point3f lefteye_xyz,righteye_xyz,nose_xyz;
00579 
00580 
00581    lefteye_xyz = cv::Point3f(lefteye[0],lefteye[1],lefteye[2]);
00582    righteye_xyz = cv::Point3f(righteye[0],righteye[1],righteye[2]);
00583    nose_xyz = cv::Point3f(nose[0],nose[1],nose[2]);
00584 
00585    projectPoint(lefteye_xyz,lefteye_uv);
00586    projectPoint(righteye_xyz,righteye_uv);
00587    projectPoint(nose_xyz,nose_uv);
00588 
00589    //determine bounding box
00590 
00591    float s=2;
00592    int dim_x=(righteye_uv.x-lefteye_uv.x)*s;
00593    int off_x=((righteye_uv.x-lefteye_uv.x)*s -(righteye_uv.x-lefteye_uv.x))/2;
00594    int off_y=off_x;
00595    int dim_y=dim_x;
00596 
00597    roi=cv::Rect(round(nose_uv.x-dim_x*0.5),round(nose_uv.y-dim_y*0.5),dim_x,dim_y);
00598    //roi=cv::Rect(round(lefteye_uv.x-dim_x*0.25),round(lefteye_uv.y-dim_y*0.25),dim_x,dim_y);
00599 
00600    if(img.channels()==3)cv::cvtColor(img,img,CV_RGB2GRAY);
00601 
00602 
00603 
00604   projectPointCloud(img,workmat,imgres,dmres);
00605 
00606 
00607   if(debug_)dump_img(imgres,"uncropped");
00608 
00609   if(roi.height<=1 ||roi.width<=0 || roi.x<0 || roi.y<0 ||roi.x+roi.width >imgres.cols || roi.y+roi.height>imgres.rows)
00610   {
00611     std::cout<<"[FaceNormalizer]image ROI out of limits"<<std::endl;
00612     return false;
00613   }
00614   imgres=imgres(roi);
00615   imgres=imgres(cv::Rect(2,2,imgres.cols-4,imgres.rows-4));
00616 
00617   synth_images.push_back(imgres);
00618   }
00619 
00620   return true;
00621 }
00622 bool FaceNormalizer::synth_head_poses(cv::Mat& img,cv::Mat& depth,std::vector<cv::Mat>& synth_images)
00623 {
00624 
00625   // detect features
00626   if(!features_from_color(img))return false;
00627   if(debug_)dump_features(img);
00628 
00629 
00630    if(!features_from_depth(depth)) return false;
00631 
00632    Eigen::Vector3f temp,x_new,y_new,z_new,lefteye,nose,righteye,eye_middle;
00633 
00634    nose<<f_det_xyz_.nose.x,f_det_xyz_.nose.y,f_det_xyz_.nose.z;
00635    lefteye<<f_det_xyz_.lefteye.x,f_det_xyz_.lefteye.y,f_det_xyz_.lefteye.z;
00636    righteye<<f_det_xyz_.righteye.x,f_det_xyz_.righteye.y,f_det_xyz_.righteye.z;
00637    eye_middle=lefteye+((righteye-lefteye)*0.5);
00638 
00639    x_new<<f_det_xyz_.righteye.x-f_det_xyz_.lefteye.x,f_det_xyz_.righteye.y-f_det_xyz_.lefteye.y,f_det_xyz_.righteye.z-f_det_xyz_.lefteye.z;
00640    temp<<f_det_xyz_.nose.x-eye_middle[0],f_det_xyz_.nose.y-eye_middle[1],(f_det_xyz_.nose.z-eye_middle[2]);
00641    z_new=x_new.cross(temp);
00642    x_new.normalize();
00643    z_new.normalize();
00644    y_new=x_new.cross(z_new);
00645 
00646    if(y_new[1]<0) y_new*=-1;
00647 
00648 
00649    Eigen::Vector3f origin;
00650    origin=nose;
00651    //origin=nose+(0.1*z_new);
00652 
00653    Eigen::Affine3f T_norm;
00654 
00655    pcl::getTransformationFromTwoUnitVectorsAndOrigin(y_new,z_new,origin,T_norm);
00656 
00657 
00658    // viewing offset of normalized perspective
00659     double view_offset=0.8;
00660     Eigen::Translation<float,3> translation=Eigen::Translation<float,3>(0, 0, view_offset);
00661 
00662     // modify T_norm by angle for nose incline compensation
00663     //Eigen::AngleAxis<float> roll(-0.78, x_new);
00664     Eigen::AngleAxis<float> roll(0.00, x_new);
00665     //Eigen::AngleAxis<float> roll(-0.78, Eigen::Vector3f(1,0,0));
00666     T_norm=roll*T_norm;
00667 
00668 
00669   Eigen::Affine3f T_rot;
00670   Eigen::AngleAxis<float> alpha;
00671 
00672   cv::Mat dmres;
00673   cv::Mat imgres;
00674   cv::Rect roi;
00675 
00676 
00677   float  background_thresh=view_offset+0.3;
00678   //eliminate_background(img,depth,background_thresh);
00679 
00680   cv::Mat workmat=cv::Mat(depth.rows,depth.cols,CV_32FC3);
00681 //  // eliminate background
00682 //  cv::Vec3f* xyz_ptr=depth.ptr<cv::Vec3f>(0,0);
00683 //  for(int r=0;r<depth.total();r++)
00684 //  {
00685 //    if((*xyz_ptr)[2]>background_thresh ||(*xyz_ptr)[2]<0)
00686 //    {
00687 //      // set this to invalid value
00688 //      *xyz_ptr=cv::Vec3f(-1000,-1000,-1000);
00689 //    }
00690 //    xyz_ptr++;
00691 //  }
00692 
00693   //number of poses
00694   int N=5;
00695   std::cout<<"Synthetic POSES"<<std::endl;
00696 
00697   for(int i=0;i<N;i++)
00698   {
00699     switch(i)
00700     {
00701       case 0:
00702         {
00703           alpha=Eigen::AngleAxis<float>((float)0, Eigen::Vector3f(1,0,0));
00704           break;
00705         }
00706       case 1:
00707         {
00708           alpha=Eigen::AngleAxis<float>((float) 0.1*M_PI, Eigen::Vector3f(1,0,0));
00709           break;
00710         }
00711       case 2:
00712         {
00713           alpha=Eigen::AngleAxis<float>((float)-0.1*M_PI, Eigen::Vector3f(1,0,0));
00714           break;
00715         }
00716       case 3:
00717         {
00718           alpha=Eigen::AngleAxis<float>((float) 0.1*M_PI, Eigen::Vector3f(0,1,0));
00719           break;
00720         }
00721       case 4:
00722         {
00723           alpha=Eigen::AngleAxis<float>((float)-0.1*M_PI, Eigen::Vector3f(0,1,0));
00724           break;
00725         }
00726       case 5:
00727         {
00728           alpha=Eigen::AngleAxis<float>((float) 0.1*M_PI, Eigen::Vector3f(0,0,1));
00729           break;
00730         }
00731       case 6:
00732         {
00733           alpha=Eigen::AngleAxis<float>((float)-0.1*M_PI, Eigen::Vector3f(0,0,1));
00734           break;
00735         }
00736     }
00737   // ----- artificial head pose rotation
00738 
00739   T_rot.setIdentity();
00740   T_rot=alpha*T_rot;
00741   // ----- artificial head pose rotation
00742 
00743   dmres=cv::Mat::zeros(480,640,CV_32FC3);
00744   if(img.channels()==3)imgres=cv::Mat::zeros(480,640,CV_8UC3);
00745   if(img.channels()==1)imgres=cv::Mat::zeros(480,640,CV_8UC1);
00746 
00747 
00748   depth.copyTo(workmat);
00749    cv::Vec3f* ptr=workmat.ptr<cv::Vec3f>(0,0);
00750    Eigen::Vector3f pt;
00751    for(int i=0;i<img.total();i++)
00752    {
00753      pt<<(*ptr)[0],(*ptr)[1],(*ptr)[2];
00754      pt=T_norm*pt;
00755      pt=T_rot*pt;
00756      pt=translation*pt;
00757 
00758     (*ptr)[0]=pt[0];
00759     (*ptr)[1]=pt[1];
00760     (*ptr)[2]=pt[2];
00761      ptr++;
00762    }
00763 
00764    nose<<f_det_xyz_.nose.x,f_det_xyz_.nose.y,f_det_xyz_.nose.z;
00765    lefteye<<f_det_xyz_.lefteye.x,f_det_xyz_.lefteye.y,f_det_xyz_.lefteye.z;
00766    righteye<<f_det_xyz_.righteye.x,f_det_xyz_.righteye.y,f_det_xyz_.righteye.z;
00767 
00768    lefteye=translation*T_rot*T_norm*lefteye;
00769    righteye=translation*T_rot*T_norm*righteye;
00770    nose=translation*T_rot*T_norm*nose;
00771 
00772    // transform the normalized feature coordinates separately to determine the ROI
00773    cv::Point2f lefteye_uv,righteye_uv,nose_uv;
00774    cv::Point3f lefteye_xyz,righteye_xyz,nose_xyz;
00775 
00776 
00777    lefteye_xyz = cv::Point3f(lefteye[0],lefteye[1],lefteye[2]);
00778    righteye_xyz = cv::Point3f(righteye[0],righteye[1],righteye[2]);
00779    nose_xyz = cv::Point3f(nose[0],nose[1],nose[2]);
00780 
00781    projectPoint(lefteye_xyz,lefteye_uv);
00782    projectPoint(righteye_xyz,righteye_uv);
00783    projectPoint(nose_xyz,nose_uv);
00784 
00785    //determine bounding box
00786 
00787    float s=2;
00788    int dim_x=(righteye_uv.x-lefteye_uv.x)*s;
00789    int off_x=((righteye_uv.x-lefteye_uv.x)*s -(righteye_uv.x-lefteye_uv.x))/2;
00790    int off_y=off_x;
00791    int dim_y=dim_x;
00792 
00793    roi=cv::Rect(round(nose_uv.x-dim_x*0.5),round(nose_uv.y-dim_y*0.5),dim_x,dim_y);
00794    //roi=cv::Rect(round(lefteye_uv.x-dim_x*0.25),round(lefteye_uv.y-dim_y*0.25),dim_x,dim_y);
00795 
00796    if(img.channels()==3)cv::cvtColor(img,img,CV_RGB2GRAY);
00797 
00798 
00799 
00800   projectPointCloud(img,workmat,imgres,dmres);
00801 
00802 
00803 
00804   if(debug_)dump_img(imgres,"uncropped");
00805 
00806   if(roi.height<=1 ||roi.width<=0 || roi.x<0 || roi.y<0 ||roi.x+roi.width >imgres.cols || roi.y+roi.height>imgres.rows)
00807   {
00808     std::cout<<"[FaceNormalizer]image ROI out of limits"<<std::endl;
00809     return false;
00810   }
00811   imgres=imgres(roi);
00812   imgres=imgres(cv::Rect(2,2,imgres.cols-4,imgres.rows-4));
00813 
00814   synth_images.push_back(imgres);
00815   }
00816 
00817   return true;
00818 }
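// Note (added for clarity): with N=5 the loop above synthesizes the frontal
// view plus rotations of +/-0.1*pi rad (about 18 degrees) around the x and y
// axes of the normalized view; the z-axis cases 5 and 6 are defined but not
// reached.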
00819 bool FaceNormalizer::rotate_head(cv::Mat& img,cv::Mat& depth)
00820 {
00821 
00822   // detect features
00823   if(!features_from_color(img))return false;
00824   if(debug_)dump_features(img);
00825 
00826 
00827    if(!features_from_depth(depth)) return false;
00828 
00829    Eigen::Vector3f temp,x_new,y_new,z_new,lefteye,nose,righteye,eye_middle;
00830 
00831    nose<<f_det_xyz_.nose.x,f_det_xyz_.nose.y,f_det_xyz_.nose.z;
00832    lefteye<<f_det_xyz_.lefteye.x,f_det_xyz_.lefteye.y,f_det_xyz_.lefteye.z;
00833    righteye<<f_det_xyz_.righteye.x,f_det_xyz_.righteye.y,f_det_xyz_.righteye.z;
00834    eye_middle=lefteye+((righteye-lefteye)*0.5);
00835 
00836    x_new<<f_det_xyz_.righteye.x-f_det_xyz_.lefteye.x,f_det_xyz_.righteye.y-f_det_xyz_.lefteye.y,f_det_xyz_.righteye.z-f_det_xyz_.lefteye.z;
00837    temp<<f_det_xyz_.nose.x-eye_middle[0],f_det_xyz_.nose.y-eye_middle[1],(f_det_xyz_.nose.z-eye_middle[2]);
00838    z_new=x_new.cross(temp);
00839    x_new.normalize();
00840    z_new.normalize();
00841    y_new=x_new.cross(z_new);
00842 
00843    if(y_new[1]<0) y_new*=-1;
00844 
00845 
00846    Eigen::Vector3f origin;
00847    origin=nose;
00848    //origin=nose+(0.1*z_new);
00849 
00850    Eigen::Affine3f T_norm;
00851    std::cout<<"X-Axis:\n "<<x_new<<std::endl;
00852    std::cout<<"Y-Axis:\n "<<y_new<<std::endl;
00853    std::cout<<"Z-Axis:\n "<<z_new<<std::endl;
00854 
00855    pcl::getTransformationFromTwoUnitVectorsAndOrigin(y_new,z_new,origin,T_norm);
00856 
00857 
00858    // viewing offset of normalized perspective
00859     double view_offset=0.6;
00860     Eigen::Translation<float,3> translation=Eigen::Translation<float,3>(0, 0, view_offset);
00861 
00862     // modify T_norm by angle for nose incline compensation
00863     Eigen::AngleAxis<float> roll(0.0, x_new);
00864     //Eigen::AngleAxis<float> roll(-0.78, x_new);
00865     T_norm=roll*T_norm;
00866 
00867 
00868   Eigen::Affine3f T_rot;
00869   cv::Mat workmat=cv::Mat(depth.rows,depth.cols,CV_32FC3);
00870   Eigen::AngleAxis<float> alpha;
00871 
00872   cv::Mat dmres;  cv::Mat imgres;
00873   cv::Rect roi;
00874   for(int i=0;i<60;i++)
00875   {
00876   // ----- artificial head pose rotation
00877   if(i<20)alpha=Eigen::AngleAxis<float>((float)i*0.1*M_PI, Eigen::Vector3f(1,0,0));
00878   else if(i>=20&&i<40)alpha=Eigen::AngleAxis<float>(((float)i-20)*0.1*M_PI, Eigen::Vector3f(0,1,0));
00879   else if(i>=40&&i<=60)alpha=Eigen::AngleAxis<float> (((float)i-40)*0.1*M_PI, Eigen::Vector3f(0,0,1));
00880 
00881     T_rot.setIdentity();
00882   T_rot=alpha*T_rot;
00883   // ----- artificial head pose rotation
00884 
00885   dmres=cv::Mat::zeros(480,640,CV_32FC3);
00886   if(img.channels()==3)imgres=cv::Mat::zeros(480,640,CV_8UC3);
00887   if(img.channels()==1)imgres=cv::Mat::zeros(480,640,CV_8UC1);
00888 
00889 
00890   depth.copyTo(workmat);
00891    cv::Vec3f* ptr=workmat.ptr<cv::Vec3f>(0,0);
00892    Eigen::Vector3f pt;
00893    for(int i=0;i<img.total();i++)
00894    {
00895 
00896      pt<<(*ptr)[0],(*ptr)[1],(*ptr)[2];
00897      pt=T_norm*pt;
00898      pt=T_rot*pt;
00899      pt=translation*pt;
00900 
00901     (*ptr)[0]=pt[0];
00902     (*ptr)[1]=pt[1];
00903     (*ptr)[2]=pt[2];
00904      ptr++;
00905    }
00906 
00907    nose<<f_det_xyz_.nose.x,f_det_xyz_.nose.y,f_det_xyz_.nose.z;
00908    lefteye<<f_det_xyz_.lefteye.x,f_det_xyz_.lefteye.y,f_det_xyz_.lefteye.z;
00909    righteye<<f_det_xyz_.righteye.x,f_det_xyz_.righteye.y,f_det_xyz_.righteye.z;
00910 
00911    lefteye=translation*T_rot*T_norm*lefteye;
00912    righteye=translation*T_rot*T_norm*righteye;
00913    nose=translation*T_rot*T_norm*nose;
00914 
00915    // transform the normalized feature coordinates separately to determine the ROI
00916    cv::Point2f lefteye_uv,righteye_uv,nose_uv;
00917    cv::Point3f lefteye_xyz,righteye_xyz,nose_xyz;
00918 
00919 
00920    lefteye_xyz = cv::Point3f(lefteye[0],lefteye[1],lefteye[2]);
00921    righteye_xyz = cv::Point3f(righteye[0],righteye[1],righteye[2]);
00922    nose_xyz = cv::Point3f(nose[0],nose[1],nose[2]);
00923 
00924    projectPoint(lefteye_xyz,lefteye_uv);
00925    projectPoint(righteye_xyz,righteye_uv);
00926    projectPoint(nose_xyz,nose_uv);
00927 
00928    //determine bounding box
00929 
00930    float s=2;
00931    int dim_x=(righteye_uv.x-lefteye_uv.x)*s;
00932    int off_x=((righteye_uv.x-lefteye_uv.x)*s -(righteye_uv.x-lefteye_uv.x))/2;
00933    int off_y=off_x;
00934    int dim_y=dim_x;
00935 
00936    //roi=cv::Rect(round(nose_uv.x-dim_x*0.5),round(nose_uv.y-dim_y*0.5),dim_x,dim_y);
00937    roi=cv::Rect(round(lefteye_uv.x-dim_x*0.25),round(lefteye_uv.y-dim_y*0.25),dim_x,dim_y);
00938 
00939    if(img.channels()==3)cv::cvtColor(img,img,CV_RGB2GRAY);
00940 
00941 
00942    //filter out background
00943 
00944   
00945 
00946   projectPointCloud(img,workmat,imgres,dmres);
00947   // TODO: temporary
00948   // add debug points
00949   cv::circle(imgres,nose_uv,5,cv::Scalar(255,255,255),-1,8);
00950   cv::circle(imgres,lefteye_uv,5,cv::Scalar(255,255,255),-1,8);
00951   cv::circle(imgres,righteye_uv,5,cv::Scalar(255,255,255),-1,8);
00952   cv::imshow("FULL",imgres);
00953 
00954   int key;
00955   key=cv::waitKey(200);
00956   if(key ==1048608) // space bar pauses
00957   {
00958     std::cout<<"PAUSED--confirm usage with ENTER - continue with other key"<<std::endl;
00959     int enter;
00960     enter=cv::waitKey(0);
00961     if(enter==1048586) // enter confirms
00962     {
00963       std::cout<<"ENTER-- using current view for normalization"<<std::endl;
00964       break;
00965     }
00966     else
00967     {
00968       std::cout<<"CONTINUE"<<std::endl;
00969     }
00970   }
00971   }
00972 
00973 
00974 
00975   if(debug_)dump_img(imgres,"uncropped");
00976 
00977   if(roi.height<=1 ||roi.width<=0 || roi.x<0 || roi.y<0 ||roi.x+roi.width >imgres.cols || roi.y+roi.height>imgres.rows)
00978   {
00979     std::cout<<"[FaceNormalizer]image ROI out of limits"<<std::endl;
00980     return false;
00981   }
00982   imgres(roi).copyTo(img);
00983   // TODO: temporary switch off
00984   dmres(roi).copyTo(depth);
00985   //despeckle<unsigned char>(img,img);
00986   //only take central region
00987   img=img(cv::Rect(2,2,img.cols-4,img.rows-4));
00988 
00989   return true;
00990 }
00991 bool FaceNormalizer::normalize_geometry_depth(cv::Mat& img,cv::Mat& depth)
00992 {
00993 
00994   // detect features
00995   if(!features_from_color(img))return false;
00996   if(debug_)dump_features(img);
00997 
00998 
00999    if(!features_from_depth(depth)) return false;
01000 
01001    Eigen::Vector3f temp,x_new,y_new,z_new,lefteye,nose,righteye,eye_middle;
01002 
01003    nose<<f_det_xyz_.nose.x,f_det_xyz_.nose.y,f_det_xyz_.nose.z;
01004    lefteye<<f_det_xyz_.lefteye.x,f_det_xyz_.lefteye.y,f_det_xyz_.lefteye.z;
01005    righteye<<f_det_xyz_.righteye.x,f_det_xyz_.righteye.y,f_det_xyz_.righteye.z;
01006    eye_middle=lefteye+((righteye-lefteye)*0.5);
01007 
01008    x_new<<f_det_xyz_.righteye.x-f_det_xyz_.lefteye.x,f_det_xyz_.righteye.y-f_det_xyz_.lefteye.y,f_det_xyz_.righteye.z-f_det_xyz_.lefteye.z;
01009    temp<<f_det_xyz_.nose.x-eye_middle[0],f_det_xyz_.nose.y-eye_middle[1],(f_det_xyz_.nose.z-eye_middle[2]);
01010    z_new=x_new.cross(temp);
01011    x_new.normalize();
01012    z_new.normalize();
01013    y_new=x_new.cross(z_new);
01014 
01015    if(y_new[1]<0) y_new*=-1;
01016 
01017    Eigen::Vector3f nose_flat=nose;
01018    nose_flat[2]+=0.03;
01019 
01020 
01021    Eigen::Vector3f origin;
01022    origin=nose;
01023 
01024    Eigen::Affine3f T_norm;
01025 
01026    pcl::getTransformationFromTwoUnitVectorsAndOrigin(y_new,z_new,origin,T_norm);
01027 
01028 
01029    // viewing offset of normalized perspective
01030     double view_offset=0.8;
01031     Eigen::Translation<float,3> translation=Eigen::Translation<float,3>(0, 0, view_offset);
01032 
01033     // modify T_norm by angle for nose incline compensation
01034     Eigen::AngleAxis<float> roll(-0.78, x_new);
01035     //Eigen::AngleAxis<float> roll(0.0, x_new);
01036     T_norm=translation*roll*T_norm;
01037 
01038    cv::Vec3f* ptr=depth.ptr<cv::Vec3f>(0,0);
01039    Eigen::Vector3f pt;
01040    for(int i=0;i<img.total();i++)
01041    {
01042 
01043      pt<<(*ptr)[0],(*ptr)[1],(*ptr)[2];
01044      pt=T_norm*pt;
01045 
01046     (*ptr)[0]=pt[0];
01047     (*ptr)[1]=pt[1];
01048     (*ptr)[2]=pt[2];
01049      ptr++;
01050    }
01051 
01052    lefteye=T_norm*lefteye;
01053    righteye=T_norm*righteye;
01054    nose=T_norm*nose;
01055 
01056    // transform the normalized feature coordinates separately to determine the ROI
01057    cv::Point2f lefteye_uv,righteye_uv,nose_uv;
01058    cv::Point3f lefteye_xyz,righteye_xyz,nose_xyz;
01059 
01060 
01061    lefteye_xyz = cv::Point3f(lefteye[0],lefteye[1],lefteye[2]);
01062    righteye_xyz = cv::Point3f(righteye[0],righteye[1],righteye[2]);
01063    nose_xyz = cv::Point3f(nose[0],nose[1],nose[2]);
01064 
01065    projectPoint(lefteye_xyz,lefteye_uv);
01066    projectPoint(righteye_xyz,righteye_uv);
01067    projectPoint(nose_xyz,nose_uv);
01068 
01069    //determine bounding box
01070    float s=3.3;
01071    int dim_x=(righteye_uv.x-lefteye_uv.x)*s;
01072    int off_x=((righteye_uv.x-lefteye_uv.x)*s -(righteye_uv.x-lefteye_uv.x))/2;
01073    int off_y=off_x;
01074    int dim_y=dim_x;
01075 
01076    cv::Rect roi=cv::Rect(round(nose_uv.x-dim_x*0.5),round(nose_uv.y-dim_y*0.5),dim_x,dim_y);
01077 
01078    if(img.channels()==3)cv::cvtColor(img,img,CV_RGB2GRAY);
01079 
01080   cv::Mat imgres;
01081   if(img.channels()==3)imgres=cv::Mat::zeros(480,640,CV_8UC3);
01082   if(img.channels()==1)imgres=cv::Mat::zeros(480,640,CV_8UC1);
01083   cv::Mat dmres=cv::Mat::zeros(480,640,CV_32FC3);
01084 
01085   projectPointCloud(img,depth,imgres,dmres);
01086   if(debug_)dump_img(imgres,"uncropped");
01087 
01088   if(roi.height<=1 ||roi.width<=0 || roi.x<0 || roi.y<0 ||roi.x+roi.width >imgres.cols || roi.y+roi.height>imgres.rows)
01089   {
01090     std::cout<<"[FaceNormalizer]image ROI out of limits"<<std::endl;
01091     return false;
01092   }
01093   imgres(roi).copyTo(img);
01094   dmres(roi).copyTo(depth);
01095   despeckle<unsigned char>(img,img);
01096   //only take central region
01097   img=img(cv::Rect(2,2,img.cols-4,img.rows-4));
01098 
01099   return true;
01100 }
01101 
01102 
01103 
01104 bool FaceNormalizer::features_from_color(cv::Mat& img_color)
01105 {
01106   if(!detect_feature(img_color,f_det_img_.nose,FACE::NOSE))
01107   {
01108     std::cout<<"[FaceNormalizer] detected no nose"<<std::endl;
01109     f_det_img_.nose.x=round(img_color.cols*0.5);
01110     f_det_img_.nose.y=round(img_color.rows*0.5);
01111     return false;
01112   }
01113   if(!detect_feature(img_color,f_det_img_.lefteye,FACE::LEFTEYE))
01114   {
01115     std::cout<<"[FaceNormalizer] detected no eye_l"<<std::endl;
01116      return false;
01117   }
01118   if(!detect_feature(img_color,f_det_img_.righteye,FACE::RIGHTEYE))
01119   {
01120     std::cout<<"[FaceNormalizer] detected no eye_r"<<std::endl;
01121      return false;
01122   }
01123 
01124   if(debug_)
01125   {
01126     std::cout<<"[FaceNormalizer] detected image features:\n";
01127     std::cout<<"detected lefteye "<<f_det_img_.lefteye.x<<" - " << f_det_img_.lefteye.y<<std::endl;
01128     std::cout<<"detected righteye "<<f_det_img_.righteye.x<<" - " << f_det_img_.righteye.y<<std::endl;
01129     std::cout<<"detected nose "<<f_det_img_.nose.x<<" - " << f_det_img_.nose.y<<std::endl;
01130     //std::cout<<"detected mouth "<<f_det_img_.mouth.x<<" - " << f_det_img_.mouth.y<<std::endl;
01131   }
01132   return true;
01133 }
01134 
01135 bool FaceNormalizer::features_from_depth(cv::Mat& depth)
01136 {
01137   //pick 3D points from pointcloud
01138   f_det_xyz_.nose=      depth.at<cv::Vec3f>(f_det_img_.nose.y,f_det_img_.nose.x)               ;
01139   //f_det_xyz_.mouth=     depth.at<cv::Vec3f>(f_det_img_.mouth.y,f_det_img_.mouth.x)            ;
01140   f_det_xyz_.lefteye=   depth.at<cv::Vec3f>(f_det_img_.lefteye.y,f_det_img_.lefteye.x)      ;
01141   f_det_xyz_.righteye=  depth.at<cv::Vec3f>(f_det_img_.righteye.y,f_det_img_.righteye.x)   ;
01142 
01143   if(debug_)
01144   {
01145     std::cout<<"[FaceNormalizer] detected Coordinates of features in pointcloud:"<<std::endl;
01146     std::cout<<"LEFTEYE: "<<f_det_xyz_.lefteye.x<<" "<<f_det_xyz_.lefteye.y<<" "<<f_det_xyz_.lefteye.z<<std::endl;
01147     std::cout<<"RIGHTEYE: "<<f_det_xyz_.righteye.x<<" "<<f_det_xyz_.righteye.y<<" "<<f_det_xyz_.righteye.z<<std::endl;
01148     std::cout<<"NOSE: "<<f_det_xyz_.nose.x<<" "<<f_det_xyz_.nose.y<<" "<<f_det_xyz_.nose.z<<std::endl;
01149     //std::cout<<"MOUTH: "<<f_det_xyz_.mouth.x<<" "<<f_det_xyz_.mouth.y<<" "<<f_det_xyz_.mouth.z<<std::endl;
01150   }
01151   if(!f_det_xyz_.valid()) return false;
01152 
01153   return true;
01154 }
01155 
01156 bool FaceNormalizer::detect_feature(cv::Mat& img,cv::Point2f& coords,FACE::FEATURE_TYPE type)
01157 {
01158 
01159   //  determine scale of search pattern
01160   double scale=img.cols/160.0;
01161 
01162   CvSeq* seq=0;
01163   cv::Vec2f offset;
01164 
01165   switch(type)
01166   {
01167 
01168     case FACE::NOSE:
01169   {
01170     offset =cv::Vec2f(0,0);
01171     IplImage ipl_img=(IplImage)img;
01172      seq=cvHaarDetectObjects(&ipl_img,nose_cascade_,nose_storage_,1.1,1,0,cv::Size(20*scale,20*scale));
01173      //seq=cvHaarDetectObjects(&ipl_img,nose_cascade_,nose_storage_,1.3,2,CV_HAAR_DO_CANNY_PRUNING,cv::Size(15*scale,15*scale));
01174      break;
01175   }
01176 
01177     case FACE::LEFTEYE:
01178   {
01179     offset[0]=0;
01180     offset[1]=0;
01181     cv::Mat sub_img=img.clone();
01182     sub_img=sub_img(cvRect(0,0,f_det_img_.nose.x,f_det_img_.nose.y));
01183     IplImage ipl_img=(IplImage)sub_img;
01184      seq=cvHaarDetectObjects(&ipl_img,eye_l_cascade_,eye_l_storage_,1.1,1,0,cvSize(20*scale,10*scale));
01185      break;
01186   }
01187 
01188     case FACE::RIGHTEYE:
01189   {
01190     offset[0]=((int)f_det_img_.nose.x);
01191     offset[1]=0;
01192     cv::Mat sub_img=img.clone();
01193     sub_img=sub_img(cvRect(f_det_img_.nose.x,0,img.cols-f_det_img_.nose.x-1,f_det_img_.nose.y));
01194     IplImage ipl_img=(IplImage)sub_img;
01195      seq=cvHaarDetectObjects(&ipl_img,eye_r_cascade_,eye_r_storage_,1.1,1,0,cvSize(20*scale,10*scale));
01196      break;
01197   }
01198 
01199     case FACE::MOUTH:
01200   {
01201   //  offset[0]=0;
01202   //  offset[1]=(int)f_det_img_.nose.y;
01203   //  cv::Mat sub_img=img.clone();
01204   //  sub_img=sub_img(cvRect(0,f_det_img_.nose.y,img.cols,img.rows-f_det_img_.nose.y-1));
01205   //  IplImage ipl_img=(IplImage)sub_img;
01206   //   seq=cvHaarDetectObjects(&ipl_img,mouth_cascade_,mouth_storage_,1.3,4,CV_HAAR_DO_CANNY_PRUNING,cvSize(30*scale,15*scale));
01207   //   break;
01208   }
01209   }
01210 
01211     if(seq==0 || seq->total ==0) return false;
01212     Rect* seq_det=(Rect*)cvGetSeqElem(seq,0);
01213     coords.x=(float)seq_det->x+seq_det->width/2+offset[0];
01214     coords.y=(float)seq_det->y+seq_det->height/2+offset[1];
01215 
01216     return true;
01217 }
01218 
01219 
01220 void FaceNormalizer::dump_img(cv::Mat& data,std::string name)
01221 {
01222   if(!debug_)
01223   {
01224 
01225     std::cout<<"[FaceNomalizer] dump_img() only with set debug flag and path."<<std::endl;
01226   }
01227   std::string filename =storage_directory_;
01228   filename.append(boost::lexical_cast<std::string>(epoch_ctr_));
01229   filename.append("_");
01230   filename.append(name);
01231   filename.append(".jpg");
01232 
01233   cv::imwrite(filename,data);
01234   return;
01235 }
01236 
01237 void FaceNormalizer::dump_features(cv::Mat& img)
01238 {
01239 
01240   cv::Mat img2;
01241   img.copyTo(img2);
01242   IplImage ipl_img=(IplImage)img2;
01243    cv::circle(img2,cv::Point(f_det_img_.nose.x, f_det_img_.nose.y),5,CV_RGB(255,0,0));
01244    //cv::circle(img2,cv::Point(f_det_img_.mouth.x,f_det_img_.mouth.y),5,CV_RGB(0,255,0));
01245    cv::circle(img2,cv::Point(f_det_img_.lefteye.x,f_det_img_.lefteye.y),5,CV_RGB(255,255,0));
01246    cv::circle(img2,cv::Point(f_det_img_.righteye.x,f_det_img_.righteye.y),5,CV_RGB(255,0,255));
01247    if(debug_)dump_img(img2,"features");
01248 }
01249 
01250 bool FaceNormalizer::save_scene(cv::Mat& RGB,cv::Mat& XYZ,std::string path)
01251 {
01252   path.append(boost::lexical_cast<std::string>(epoch_ctr_));
01253   std::cout<<"[FaceNormalizer]Saving to "<<path<<std::endl;
01254   std::string depth_path,color_path;
01255   color_path=path;
01256   color_path.append(".jpg");
01257   depth_path=path;
01258   depth_path.append(".xml");
01259   cv::FileStorage fs(depth_path,FileStorage::WRITE);
01260   fs << "depth"<<XYZ;
01261   fs << "color"<<RGB;
01262   fs.release();
01263   imwrite(color_path,RGB);
01264 
01265   return true;
01266 }
01267 
01268 bool FaceNormalizer::read_scene(cv::Mat& depth, cv::Mat& color,std::string path)
01269 {
01270   std::cout<<"[FaceNormalizer]Reading from "<<path<<std::endl;
01271   cv::FileStorage fs(path,FileStorage::READ);
01272   fs["depth"] >> depth;
01273   fs["color"] >> color;
01274   fs.release();
01275   return true;
01276 }
01277 
01278 
01279 void FaceNormalizer::create_DM(cv::Mat& XYZ,cv::Mat& DM)
01280 {
01281   //reducing to depth ( z - coordinate only)
01282   if(XYZ.channels()==3)
01283   {
01284   std::vector<cv::Mat> cls;
01285   cv::split(XYZ,cls);
01286   DM=cls[2];
01287   }
01288   else if (XYZ.channels()==1)
01289   {
01290     DM=XYZ;
01291   }
01292 
01293   //reduce z values
01294   float minval=std::numeric_limits<float>::max();
01295   for(int r=0;r<DM.rows;r++)
01296   {
01297     for(int c=0;c<DM.cols;c++)
01298     {
01299       if(DM.at<float>(r,c) < minval &&DM.at<float>(r,c)!=0) minval =DM.at<float>(r,c);
01300     }
01301   }
01302 
01303   for(int r=0;r<DM.rows;r++)
01304   {
01305     for(int c=0;c<DM.cols;c++)
01306     {
01307       if(DM.at<float>(r,c)!=0)DM.at<float>(r,c)-=minval;
01308       //if(DM.at<float>(r,c)==0)DM.at<float>(r,c)=255;
01309     }
01310   }
01311   //despeckle<float>(DM,DM);
01312   cv::medianBlur(DM,DM,5);
01313   //cv::blur(DM,DM,cv::Size(3,3));
01314 }
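// Note (added for clarity): create_DM reduces the XYZ image to a depth map
// whose values are offsets from the closest valid depth (so the nose tip ends
// up near 0, while zero-valued invalid pixels are not shifted) and then
// median-filters the result to remove depth speckle.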
01315 
01316 bool FaceNormalizer::projectPoint(cv::Point3f& xyz,cv::Point2f& uv)
01317 {
01318     std::vector<cv::Point3f> m_xyz;
01319     std::vector<cv::Point2f> m_uv;
01320     m_xyz.push_back(xyz);
01321 
01322     cv::Vec3f  rot=cv::Vec3f(0.0,0.0,0.0);
01323     cv::Vec3f  trans=cv::Vec3f(0.0,0.0,0.0);
01324     cv::projectPoints(m_xyz,rot,trans,cam_mat_,dist_coeffs_,m_uv);
01325     uv=m_uv[0];
          return true;
01326 }
01327 bool FaceNormalizer::projectPointCloud(cv::Mat& img, cv::Mat& depth, cv::Mat& img_res, cv::Mat& depth_res)
01328 {
01329   int channels=img.channels();
01330 
01331   cv::Mat pc_xyz,pc_rgb;
01332   depth.copyTo(pc_xyz);
01333   img.copyTo(pc_rgb);
01334 
01335   //make point_list
01336   if(pc_xyz.rows>1 && pc_xyz.cols >1)
01337   {
01338     pc_xyz=pc_xyz.reshape(3,1);
01339   }
01340 
01341 
01342 
01343 
01344 
01345    //project 3d points to virtual camera
01346    //TODO temporary triy
01347    //cv::Mat pc_proj(pc_xyz.rows*pc_xyz.cols,1,CV_32FC2);
01348    cv::Mat pc_proj(pc_xyz.cols,1,CV_32FC2);
01349 
01350    cv::Vec3f rot=cv::Vec3f(0.0,0.0,0.0);
01351    cv::Vec3f trans=cv::Vec3f(0.0,0.0,0.0);
01352    cv::Size sensor_size=cv::Size(640,480);
01353    cv::projectPoints(pc_xyz,rot,trans,cam_mat_,dist_coeffs_,pc_proj);
01354 
01355    cv::Vec3f* pc_ptr=pc_xyz.ptr<cv::Vec3f>(0,0);
01356    cv::Vec2f* pc_proj_ptr=pc_proj.ptr<cv::Vec2f>(0,0);
01357    int ty,tx;
01358 
01359    if(channels==3)
01360    {
01361     cv::add(img_res,0,img_res);
01362     cv::add(depth_res,0,depth_res);
01363    // assign color values to calculated image coordinates
01364    cv::Vec3b* pc_rgb_ptr=pc_rgb.ptr<cv::Vec3b>(0,0);
01365 
01366 
01367    cv::Mat occ_grid=cv::Mat::ones(sensor_size,CV_32FC3);
01368    cv::Mat img_cum=cv::Mat::zeros(sensor_size,CV_32FC3);
01369    cv::Vec3f occ_inc=cv::Vec3f(1,1,1);
01370    for(int i=0;i<pc_proj.rows;++i)
01371      {
01372        cv::Vec2f txty=*pc_proj_ptr;
01373        tx=(int)round(txty[0]);
01374        ty=(int)round(txty[1]);
01375 
01376 
01377        if (ty>1 && tx>1 && ty<sensor_size.height-1 && tx<sensor_size.width-1 && !isnan(ty) && !isnan(tx) )
01378        {
01379             img_cum.at<cv::Vec3f>(ty,tx)+=(*pc_rgb_ptr);
01380             img_cum.at<cv::Vec3f>(ty+1,tx)+=(*pc_rgb_ptr);
01381             img_cum.at<cv::Vec3f>(ty-1,tx)+=(*pc_rgb_ptr);
01382             img_cum.at<cv::Vec3f>(ty,tx-1)+=(*pc_rgb_ptr);
01383             img_cum.at<cv::Vec3f>(ty,tx+1)+=(*pc_rgb_ptr);
01384 
01385             occ_grid.at<cv::Vec3f>(ty,tx)+=  occ_inc;
01386             occ_grid.at<cv::Vec3f>(ty+1,tx)+=occ_inc;
01387             occ_grid.at<cv::Vec3f>(ty-1,tx)+=occ_inc;
01388             occ_grid.at<cv::Vec3f>(ty,tx+1)+=occ_inc;
01389             occ_grid.at<cv::Vec3f>(ty,tx-1)+=occ_inc;
01390 
01391             depth_res.at<cv::Vec3f>(ty,tx)=((*pc_ptr));
01392        }
01393        pc_rgb_ptr++;
01394        pc_proj_ptr++;
01395        pc_ptr++;
01396       }
01397    img_cum=img_cum / occ_grid;
01398    img_cum.convertTo(img_res,CV_8UC3);
01399    }
01400 
01401 
01402    if(channels==1)
01403    {
01404    // assign color values to calculated image coordinates
01405     cv::add(img_res,0,img_res);
01406     cv::add(depth_res,0,depth_res);
01407    unsigned char* pc_rgb_ptr=pc_rgb.ptr<unsigned char>(0,0);
01408 
01409    cv::Mat occ_grid=cv::Mat::ones(sensor_size,CV_32FC1);
01410    cv::Mat img_cum=cv::Mat::zeros(sensor_size,CV_32FC1);
01411    cv::Mat occ_grid2=cv::Mat::ones(sensor_size,CV_32FC1);
01412    for(int i=0;i<pc_proj.rows;++i)
01413      {
01414        cv::Vec2f txty=*pc_proj_ptr;
01415        tx=(int)round(txty[0]);
01416        ty=(int)round(txty[1]);
01417 
01418 
01419        if (ty>1 && tx>1 && ty<sensor_size.height-1 && tx<sensor_size.width-1 && !isnan(ty) && !isnan(tx) )
01420        {
01421             //if((depth_map.at<cv::Vec3f>(ty,tx)[2]==0) || (depth_map.at<cv::Vec3f>(ty,tx)[2]>(*pc_ptr)[2]))
01422             //{
01423             img_res.at<unsigned char>(ty,tx)=(*pc_rgb_ptr);
01424             occ_grid2.at<float>(ty,tx)=0.0;
01425             img_cum.at<float>(ty+1,tx)+=(*pc_rgb_ptr);
01426             img_cum.at<float>(ty-1,tx)+=(*pc_rgb_ptr);
01427             img_cum.at<float>(ty,tx-1)+=(*pc_rgb_ptr);
01428             img_cum.at<float>(ty,tx+1)+=(*pc_rgb_ptr);
01429             img_cum.at<float>(ty+1,tx+1)+=(*pc_rgb_ptr);
01430             img_cum.at<float>(ty-1,tx-1)+=(*pc_rgb_ptr);
01431             img_cum.at<float>(ty-1,tx+1)+=(*pc_rgb_ptr);
01432             img_cum.at<float>(ty+1,tx-1)+=(*pc_rgb_ptr);
01433 
01434             occ_grid.at<float>(ty,tx)+=  1;
01435             occ_grid.at<float>(ty+1,tx)+=1;
01436             occ_grid.at<float>(ty-1,tx)+=1;
01437             occ_grid.at<float>(ty,tx+1)+=1;
01438             occ_grid.at<float>(ty,tx-1)+=1;
01439             occ_grid.at<float>(ty+1,tx+1)+=1;
01440             occ_grid.at<float>(ty-1,tx-1)+=1;
01441             occ_grid.at<float>(ty-1,tx+1)+=1;
01442             occ_grid.at<float>(ty+1,tx-1)+=1;
01443 
01444             depth_res.at<cv::Vec3f>(ty,tx)=((*pc_ptr));
01445        }
01446        pc_rgb_ptr++;
01447        pc_proj_ptr++;
01448        pc_ptr++;
01449       }
01450 
01451    occ_grid=occ_grid;
01452    img_cum=img_cum / (occ_grid.mul(occ_grid2)-1);
01453    img_cum.convertTo(img_cum,CV_8UC1);
01454    cv::add(img_res,img_cum,img_res);
01455    }
01456 
01457 
01458 
01459 //   // refinement
01460 //
01461 //   //unsigned char* img_res_ptr=img_res.ptr<unsigned char>(0,0);
01462 //   cv::Mat img_res2=cv::Mat::zeros(img_res.rows,img_res.cols,img_res.type());
01463 //   for(int i=1;i<img_res.total();i++)
01464 //   {
01465 //     if(img_res.at<unsigned char>(i)==0 && img_res.at<unsigned char>(i-1)!=0)
01466 //     {
01467 //       //calc position
01468 //       cv::Vec2f pos= pc_proj.at<cv::Vec2f>(i-1);
01469 //       std::cout<<"POSITION"<<pos<<std::endl;
01470 //
01471 //       unsigned char val=pc_rgb.at<unsigned char>(round(pos[0]),round(pos[1]+1));
01472 //       img_res2.at<unsigned char>(i)=val;
01473 //     }
01474 //   }
01475 //
01476 //   cv::imshow("IMG_RES",img_res2);
01477 //   cv::waitKey(0);
01478 //
01479    return true;
01480 }
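// Note (added for clarity): projectPointCloud re-renders the transformed point
// cloud through the virtual camera: every 3D point is projected with
// cv::projectPoints, its color/intensity is splatted onto the target pixel and
// its immediate neighbours, and the accumulated values are divided by a
// per-pixel hit counter so that small gaps between projected points are filled
// instead of remaining black.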
01481 
01482 bool FaceNormalizer::eliminate_background(cv::Mat& RGB,cv::Mat& XYZ,float background_thresh)
01483 {
01484   // eliminate background
01485   cv::Vec3f* xyz_ptr=XYZ.ptr<cv::Vec3f>(0,0);
01486 
01487   if(RGB.channels()==3)
01488   {
01489   cv::Vec3b* rgb_ptr=RGB.ptr<cv::Vec3b>(0,0);
01490 
01491   for(int r=0;r<XYZ.total();r++)
01492   {
01493     if((*xyz_ptr)[2]>background_thresh ||(*xyz_ptr)[2]<0)
01494     {
01495       // set this to invalid value
01496       *xyz_ptr=cv::Vec3f(-1000,-1000,-1000);
01497       *rgb_ptr=cv::Vec3b(0,0,0);
01498     }
01499     xyz_ptr++;
01500     rgb_ptr++;
01501   }
01502   }
01503 
01504   else if(RGB.channels()==1)
01505   {
01506   unsigned char* rgb_ptr=RGB.ptr<unsigned char>(0,0);
01507   for(int r=0;r<XYZ.total();r++)
01508   {
01509     if((*xyz_ptr)[2]>background_thresh ||(*xyz_ptr)[2]<0)
01510     {
01511       // set this to invalid value
01512       *xyz_ptr=cv::Vec3f(-1000,-1000,-1000);
01513       *rgb_ptr=0;
01514     }
01515     xyz_ptr++;
01516     rgb_ptr++;
01517   }
01518   }
          return true;
01519 }
01520 
01521 bool FaceNormalizer::interpolate_head(cv::Mat& RGB, cv::Mat& XYZ)
01522 {
01523   std::vector<cv::Mat> xyz_vec;
01524   cv::split(XYZ,xyz_vec);
01525 
01526   cv::imshow("Z",xyz_vec[2]);
01527   cv::imshow("RGB",RGB);
01528 
01529   cv::waitKey(0);
01530 
          return true;
01531 }
01532 bool FaceNormalizer::normalize_img_type(cv::Mat& in,cv::Mat& out)
01533 {
01534   in.convertTo(out,CV_64FC1);
01535   return true;
01536 }
01537 


cob_people_detection
Author(s): Richard Bormann, Thomas Zwölfer
autogenerated on Fri Aug 28 2015 10:24:12