gmm.c
/*
Copyright (C) 2013 David Novotny and Andrea Vedaldi.
All rights reserved.

This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/

#include "gmm.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#ifndef VL_DISABLE_SSE2
#include "mathop_sse2.h"
#endif

#ifndef VL_DISABLE_AVX
#include "mathop_avx.h"
#endif

/* ---------------------------------------------------------------- */
#ifndef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */

#define VL_GMM_MIN_VARIANCE 1e-6
#define VL_GMM_MIN_POSTERIOR 1e-2
#define VL_GMM_MIN_PRIOR 1e-6

struct _VlGMM
{
  vl_type dataType ;                  /**< Data type. */
  vl_size dimension ;                 /**< Data dimensionality. */
  vl_size numClusters ;               /**< Number of clusters (Gaussian modes). */
  vl_size numData ;                   /**< Number of last clustered data points. */
  vl_size maxNumIterations ;          /**< Maximum number of refinement iterations. */
  vl_size numRepetitions   ;          /**< Number of clustering repetitions. */
  int     verbosity ;                 /**< Verbosity level. */
  void *  means;                      /**< Means of the Gaussian modes. */
  void *  covariances;                /**< Diagonals of the covariance matrices. */
  void *  priors;                     /**< Prior weights of the Gaussian modes. */
  void *  posteriors;                 /**< Data-to-cluster posterior probabilities. */
  double * sigmaLowBound ;            /**< Lower bound on the diagonal covariances. */
  VlGMMInitialization initialization; /**< Initialization option. */
  VlKMeans * kmeansInit;              /**< KMeans object used for initialization. */
  double LL ;                         /**< Log-likelihood of the current solution. */
  vl_bool kmeansInitIsOwner;          /**< Whether the KMeans object is owned (and must be destroyed). */
} ;
00331 
00332 /* ---------------------------------------------------------------- */
00333 /*                                                       Life-cycle */
00334 /* ---------------------------------------------------------------- */
00335 
00336 static void
00337 _vl_gmm_prepare_for_data (VlGMM* self, vl_size numData)
00338 {
00339   if (self->numData < numData) {
00340     vl_free(self->posteriors) ;
00341     self->posteriors = vl_malloc(vl_get_type_size(self->dataType) * numData * self->numClusters) ;
00342   }
00343   self->numData = numData ;
00344 }
00345 
00353 VlGMM *
00354 vl_gmm_new (vl_type dataType, vl_size dimension, vl_size numComponents)
00355 {
00356   vl_index i ;
00357   vl_size size = vl_get_type_size(dataType) ;
00358   VlGMM * self = vl_calloc(1, sizeof(VlGMM)) ;
00359   self->dataType = dataType;
00360   self->numClusters = numComponents ;
00361   self->numData = 0;
00362   self->dimension = dimension ;
00363   self->initialization = VlGMMRand;
00364   self->verbosity = 0 ;
00365   self->maxNumIterations = 50;
00366   self->numRepetitions = 1;
00367   self->sigmaLowBound =  NULL ;
00368   self->priors = NULL ;
00369   self->covariances = NULL ;
00370   self->means = NULL ;
00371   self->posteriors = NULL ;
00372   self->kmeansInit = NULL ;
00373   self->kmeansInitIsOwner = VL_FALSE;
00374 
00375   self->priors = vl_calloc (numComponents, size) ;
00376   self->means = vl_calloc (numComponents * dimension, size) ;
00377   self->covariances = vl_calloc (numComponents * dimension, size) ;
00378   self->sigmaLowBound = vl_calloc (dimension, sizeof(double)) ;
00379 
00380   for (i = 0 ; i < (unsigned)self->dimension ; ++i)  { self->sigmaLowBound[i] = 1e-4 ; }
00381   return self ;
00382 }

void
vl_gmm_reset (VlGMM * self)
{
  if (self->posteriors) {
    vl_free(self->posteriors) ;
    self->posteriors = NULL ;
    self->numData = 0 ;
  }
  if (self->kmeansInit && self->kmeansInitIsOwner) {
    vl_kmeans_delete(self->kmeansInit) ;
    self->kmeansInit = NULL ;
    self->kmeansInitIsOwner = VL_FALSE ;
  }
}

void
vl_gmm_delete (VlGMM * self)
{
  if(self->means) vl_free(self->means);
  if(self->covariances) vl_free(self->covariances);
  if(self->priors) vl_free(self->priors);
  if(self->posteriors) vl_free(self->posteriors);
  if(self->kmeansInit && self->kmeansInitIsOwner) {
    vl_kmeans_delete(self->kmeansInit);
  }
  vl_free(self);
}
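
/* ------------------------------------------------------------------
 * Usage sketch (added note, not part of the original source): the
 * life-cycle functions above are typically combined as follows. The
 * `data', `dimension', `numComponents' and `numData' variables are
 * hypothetical placeholders.
 *
 * @code
 * VlGMM * gmm = vl_gmm_new (VL_TYPE_FLOAT, dimension, numComponents) ;
 * vl_gmm_cluster (gmm, data, numData) ;  // fit the model (defined below)
 * vl_gmm_reset (gmm) ;                   // drop posteriors, keep settings
 * vl_gmm_delete (gmm) ;                  // release all memory
 * @endcode
 */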

/* ---------------------------------------------------------------- */
/*                                              Getters and setters */
/* ---------------------------------------------------------------- */

vl_type
vl_gmm_get_data_type (VlGMM const * self)
{
  return self->dataType ;
}

vl_size
vl_gmm_get_num_clusters (VlGMM const * self)
{
  return self->numClusters ;
}

vl_size
vl_gmm_get_num_data (VlGMM const * self)
{
  return self->numData ;
}

double
vl_gmm_get_loglikelihood (VlGMM const * self)
{
  return self->LL ;
}

int
vl_gmm_get_verbosity (VlGMM const * self)
{
  return self->verbosity ;
}

void
vl_gmm_set_verbosity (VlGMM * self, int verbosity)
{
  self->verbosity = verbosity ;
}

void const *
vl_gmm_get_means (VlGMM const * self)
{
  return self->means ;
}

void const *
vl_gmm_get_covariances (VlGMM const * self)
{
  return self->covariances ;
}

void const *
vl_gmm_get_priors (VlGMM const * self)
{
  return self->priors ;
}

void const *
vl_gmm_get_posteriors (VlGMM const * self)
{
  return self->posteriors ;
}

vl_size
vl_gmm_get_max_num_iterations (VlGMM const * self)
{
  return self->maxNumIterations ;
}

void
vl_gmm_set_max_num_iterations (VlGMM * self, vl_size maxNumIterations)
{
  self->maxNumIterations = maxNumIterations ;
}

vl_size
vl_gmm_get_num_repetitions (VlGMM const * self)
{
  return self->numRepetitions ;
}

void
vl_gmm_set_num_repetitions (VlGMM * self, vl_size numRepetitions)
{
  assert (numRepetitions >= 1) ;
  self->numRepetitions = numRepetitions ;
}

vl_size
vl_gmm_get_dimension (VlGMM const * self)
{
  return self->dimension ;
}

VlGMMInitialization
vl_gmm_get_initialization (VlGMM const * self)
{
  return self->initialization ;
}

void
vl_gmm_set_initialization (VlGMM * self, VlGMMInitialization init)
{
  self->initialization = init;
}

VlKMeans * vl_gmm_get_kmeans_init_object (VlGMM const * self)
{
  return self->kmeansInit;
}

void vl_gmm_set_kmeans_init_object (VlGMM * self, VlKMeans * kmeans)
{
  if (self->kmeansInit && self->kmeansInitIsOwner) {
    vl_kmeans_delete(self->kmeansInit) ;
  }
  self->kmeansInit = kmeans;
  self->kmeansInitIsOwner = VL_FALSE;
}

double const * vl_gmm_get_covariance_lower_bounds (VlGMM const * self)
{
  return self->sigmaLowBound;
}

void vl_gmm_set_covariance_lower_bounds (VlGMM * self, double const * bounds)
{
  memcpy(self->sigmaLowBound, bounds, sizeof(double) * self->dimension) ;
}

void vl_gmm_set_covariance_lower_bound (VlGMM * self, double bound)
{
  int i ;
  for (i = 0 ; i < (signed)self->dimension ; ++i) {
    self->sigmaLowBound[i] = bound ;
  }
}
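
/* ------------------------------------------------------------------
 * Example (added sketch): the covariance lower bounds guard against
 * Gaussians collapsing onto single points. A scalar bound applies to
 * all dimensions, while the vector form sets one bound per dimension;
 * the values below are illustrative only.
 *
 * @code
 * double bounds [2] = {1e-4, 1e-2} ;                  // hypothetical, dimension = 2
 * vl_gmm_set_covariance_lower_bound (gmm, 1e-4) ;     // scalar form
 * vl_gmm_set_covariance_lower_bounds (gmm, bounds) ;  // per-dimension form
 * @endcode
 */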

/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */

#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_gmm
#include "shuffle-def.h"

/* #ifndef VL_GMM_INSTANTIATING */
#endif

/* ---------------------------------------------------------------- */
#ifdef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */

/* ---------------------------------------------------------------- */
/*                                            Posterior assignments */
/* ---------------------------------------------------------------- */

double
VL_XCAT(vl_get_gmm_data_posteriors_, SFX)
(TYPE * posteriors,
 vl_size numClusters,
 vl_size numData,
 TYPE const * priors,
 TYPE const * means,
 vl_size dimension,
 TYPE const * covariances,
 TYPE const * data)
{
  vl_index i_d, i_cl;
  vl_size dim;
  double LL = 0;

  TYPE halfDimLog2Pi = (dimension / 2.0) * log(2.0*VL_PI);
  TYPE * logCovariances ;
  TYPE * logWeights ;
  TYPE * invCovariances ;

#if (FLT == VL_TYPE_FLOAT)
  VlFloatVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_f(VlDistanceMahalanobis) ;
#else
  VlDoubleVector3ComparisonFunction distFn = vl_get_vector_3_comparison_function_d(VlDistanceMahalanobis) ;
#endif

  logCovariances = vl_malloc(sizeof(TYPE) * numClusters) ;
  invCovariances = vl_malloc(sizeof(TYPE) * numClusters * dimension) ;
  logWeights = vl_malloc(sizeof(TYPE) * numClusters) ;

#if defined(_OPENMP)
#pragma omp parallel for private(i_cl,dim) num_threads(vl_get_max_threads())
#endif
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
    TYPE logSigma = 0 ;
    if (priors[i_cl] < VL_GMM_MIN_PRIOR) {
      logWeights[i_cl] = - (TYPE) VL_INFINITY_D ;
    } else {
      logWeights[i_cl] = log(priors[i_cl]);
    }
    for(dim = 0 ; dim < dimension ; ++ dim) {
      logSigma += log(covariances[i_cl*dimension + dim]);
      invCovariances [i_cl*dimension + dim] = (TYPE) 1.0 / covariances[i_cl*dimension + dim];
    }
    logCovariances[i_cl] = logSigma;
  } /* end of parallel region */

#if defined(_OPENMP)
#pragma omp parallel for private(i_cl,i_d) reduction(+:LL) \
num_threads(vl_get_max_threads())
#endif
  for (i_d = 0 ; i_d < (signed)numData ; ++ i_d) {
    TYPE clusterPosteriorsSum = 0;
    TYPE maxPosterior = (TYPE)(-VL_INFINITY_D) ;

    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
      TYPE p =
      logWeights[i_cl]
      - halfDimLog2Pi
      - 0.5 * logCovariances[i_cl]
      - 0.5 * distFn (dimension,
                      data + i_d * dimension,
                      means + i_cl * dimension,
                      invCovariances + i_cl * dimension) ;
      posteriors[i_cl + i_d * numClusters] = p ;
      if (p > maxPosterior) { maxPosterior = p ; }
    }

    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
      TYPE p = posteriors[i_cl + i_d * numClusters] ;
      p = exp(p - maxPosterior) ;
      posteriors[i_cl + i_d * numClusters] = p ;
      clusterPosteriorsSum += p ;
    }

    LL += log(clusterPosteriorsSum) + (double) maxPosterior ;

    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
      posteriors[i_cl + i_d * numClusters] /= clusterPosteriorsSum ;
    }
  } /* end of parallel region */

  vl_free(logCovariances);
  vl_free(logWeights);
  vl_free(invCovariances);

  return LL;
}
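
/* ------------------------------------------------------------------
 * Note (added): the expectation step above normalizes posteriors in
 * the log domain using the standard log-sum-exp trick. Given
 * log-scores l_k, the posteriors q_k = exp(l_k) / sum_j exp(l_j) are
 * computed as q_k = exp(l_k - m) / sum_j exp(l_j - m) with
 * m = max_j l_j, so that the largest exponent is zero and no overflow
 * occurs. A minimal standalone sketch (K, scores, q and logLikelihood
 * are hypothetical):
 *
 * @code
 * double m = scores[0], Z = 0 ;
 * for (k = 1 ; k < K ; ++k) { m = VL_MAX(m, scores[k]) ; }
 * for (k = 0 ; k < K ; ++k) { q[k] = exp(scores[k] - m) ; Z += q[k] ; }
 * for (k = 0 ; k < K ; ++k) { q[k] /= Z ; }
 * logLikelihood += log(Z) + m ;  // equals log sum_j exp(l_j)
 * @endcode
 */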

/* ---------------------------------------------------------------- */
/*                                 Restarts zero-weighted Gaussians */
/* ---------------------------------------------------------------- */

static void
VL_XCAT(_vl_gmm_maximization_, SFX)
(VlGMM * self,
 TYPE * posteriors,
 TYPE * priors,
 TYPE * covariances,
 TYPE * means,
 TYPE const * data,
 vl_size numData) ;

static vl_size
VL_XCAT(_vl_gmm_restart_empty_modes_, SFX) (VlGMM * self, TYPE const * data)
{
  vl_size dimension = self->dimension;
  vl_size numClusters = self->numClusters;
  vl_index i_cl, j_cl, i_d, d;
  vl_size zeroWNum = 0;
  TYPE * priors = (TYPE*)self->priors ;
  TYPE * means = (TYPE*)self->means ;
  TYPE * covariances = (TYPE*)self->covariances ;
  TYPE * posteriors = (TYPE*)self->posteriors ;

  /* VlRand * rand = vl_get_rand() ; */

  TYPE * mass = vl_calloc(self->numClusters, sizeof(TYPE)) ;

  if (numClusters <= 1) { return 0 ; }

  /* compute statistics */
  {
    vl_uindex i, k ;
    vl_size numNullAssignments = 0 ;
    for (i = 0 ; i < self->numData ; ++i) {
      for (k = 0 ; k < self->numClusters ; ++k) {
        TYPE p = ((TYPE*)self->posteriors)[k + i * self->numClusters] ;
        mass[k] += p ;
        if (p < VL_GMM_MIN_POSTERIOR) {
          numNullAssignments ++ ;
        }
      }
    }
    if (self->verbosity) {
      VL_PRINTF("gmm: sparsity of data posterior: %.1f%%\n", (double)numNullAssignments / (self->numData * self->numClusters) * 100) ;
    }
  }

#if 0
  /* search for clusters with negligible weight and reassign them to fat clusters */
  for (i_cl = 0 ; i_cl < numClusters ; ++i_cl) {
    if (priors[i_cl] < 0.00001/numClusters) {
      double mass = priors[0] ;
      vl_index best = 0 ;

      for (j_cl = 1 ; j_cl < numClusters ; ++j_cl) {
        if (priors[j_cl] > mass) { mass = priors[j_cl] ; best = j_cl ; }
      }

      if (j_cl == i_cl) {
        /* this should never happen */
        continue ;
      }

      j_cl = best ;
      zeroWNum ++ ;

      VL_PRINTF("gmm: restarting mode %d by splitting mode %d (with prior %f)\n", i_cl,j_cl,mass) ;

      priors[i_cl] = mass/2 ;
      priors[j_cl] = mass/2 ;
      for (d = 0 ; d < dimension ; ++d) {
        TYPE sigma2 = covariances[j_cl*dimension + d] ;
        TYPE sigma = VL_XCAT(vl_sqrt_,SFX)(sigma2) ;
        means[i_cl*dimension + d] = means[j_cl*dimension + d] + 0.001 * (vl_rand_real1(rand) - 0.5) * sigma ;
        covariances[i_cl*dimension + d] = sigma2 ;
      }
    }
  }
#endif

  /* search for clusters with negligible weight and reassign them to fat clusters */
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
    double size = - VL_INFINITY_D ;
    vl_index best = -1 ;

    if (mass[i_cl] >= VL_GMM_MIN_POSTERIOR *
        VL_MAX(1.0, (double) self->numData / self->numClusters))
    {
      continue ;
    }

    if (self->verbosity) {
      VL_PRINTF("gmm: mode %d is nearly empty (mass %f)\n", i_cl, mass[i_cl]) ;
    }

    /*
     Search for the Gaussian components that (approximately) maximally
     contribute to making the negative log-likelihood of the data large.
     Then split the worst offender.

     To do so, we approximate the expected log-likelihood of the GMM:

     E[-log f(x)] = H(f) = - \int f(x) log f(x) dx

     where the density f(x) = sum_k pk gk(x) is a GMM. This is intractable,
     but it is easy to approximate if we suppose that supp gk is disjoint from
     supp gq for all components k ~= q. In this case

     H(f) ~= sum_k [ - pk log(pk) + pk H(gk) ]

     where H(gk) is the entropy of component k taken alone. The entropy of
     the latter is given by:

     H(gk) = D/2 (1 + log(2*pi)) + 1/2 sum_{i=1}^{D} log sigma_i^2
     */

    for (j_cl = 0 ; j_cl < (signed)numClusters ; ++j_cl) {
      double size_ ;
      if (priors[j_cl] < VL_GMM_MIN_PRIOR) { continue ; }
      size_ = + 0.5 * dimension * (1.0 + log(2*VL_PI)) ;
      for(d = 0 ; d < (signed)dimension ; d++) {
        double sigma2 = covariances[j_cl * dimension + d] ;
        size_ += 0.5 * log(sigma2) ;
      }
      size_ = priors[j_cl] * (size_ - log(priors[j_cl])) ;

      if (self->verbosity > 1) {
        VL_PRINTF("gmm: mode %d: prior %f, mass %f, entropy contribution %f\n",
                  j_cl, priors[j_cl], mass[j_cl], size_) ;
      }

      if (size_ > size) {
        size = size_ ;
        best = j_cl ;
      }
    }

    j_cl = best ;

    if (j_cl == i_cl || j_cl < 0) {
      if (self->verbosity) {
        VL_PRINTF("gmm: mode %d is empty, "
                  "but no other mode to split could be found\n", i_cl) ;
      }
      continue ;
    }

    if (self->verbosity) {
      VL_PRINTF("gmm: reinitializing empty mode %d with mode %d (prior %f, mass %f, score %f)\n",
                i_cl, j_cl, priors[j_cl], mass[j_cl], size) ;
    }

    /*
     Search for the dimension with maximum variance.
     */

    size = - VL_INFINITY_D ;
    best = - 1 ;

    for(d = 0; d < (signed)dimension; d++) {
      double sigma2 = covariances[j_cl * dimension + d] ;
      if (sigma2 > size) {
        size = sigma2 ;
        best = d ;
      }
    }

    /*
     Reassign the points of j_cl (the mode to split) between j_cl and
     i_cl (the empty mode), splitting at the mean along that dimension.
     */
    {
      TYPE mu = means[best + j_cl * self->dimension] ;
      for(i_d = 0 ; i_d < (signed)self->numData ; ++ i_d) {
        TYPE p = posteriors[j_cl + self->numClusters * i_d] ;
        TYPE q = posteriors[i_cl + self->numClusters * i_d] ; /* ~= 0 */
        if (data[best + i_d * self->dimension] < mu) {
          /* assign this point to i_cl */
          posteriors[i_cl + self->numClusters * i_d] = p + q ;
          posteriors[j_cl + self->numClusters * i_d] = 0 ;
        } else {
          /* assign this point to j_cl */
          posteriors[i_cl + self->numClusters * i_d] = 0 ;
          posteriors[j_cl + self->numClusters * i_d] = p + q ;
        }
      }
    }

    /*
     Re-estimate.
     */
    VL_XCAT(_vl_gmm_maximization_, SFX)
    (self,posteriors,priors,covariances,means,data,self->numData) ;
  }

  return zeroWNum;
}

/* ---------------------------------------------------------------- */
/*                                                          Helpers */
/* ---------------------------------------------------------------- */

static void
VL_XCAT(_vl_gmm_apply_bounds_, SFX)(VlGMM * self)
{
  vl_uindex dim ;
  vl_uindex k ;
  vl_size numAdjusted = 0 ;
  TYPE * cov = (TYPE*)self->covariances ;
  double const * lbs = self->sigmaLowBound ;

  for (k = 0 ; k < self->numClusters ; ++k) {
    vl_bool adjusted = VL_FALSE ;
    for (dim = 0 ; dim < self->dimension ; ++dim) {
      if (cov[k * self->dimension + dim] < lbs[dim]) {
        cov[k * self->dimension + dim] = lbs[dim] ;
        adjusted = VL_TRUE ;
      }
    }
    if (adjusted) { numAdjusted ++ ; }
  }

  if (numAdjusted > 0 && self->verbosity > 0) {
    VL_PRINT("gmm: detected %d of %d modes with at least one dimension "
             "with covariance too small (set to lower bound)\n",
             numAdjusted, self->numClusters) ;
  }
}

/* ---------------------------------------------------------------- */
/*                                           EM - Maximization step */
/* ---------------------------------------------------------------- */

static void
VL_XCAT(_vl_gmm_maximization_, SFX)
(VlGMM * self,
 TYPE * posteriors,
 TYPE * priors,
 TYPE * covariances,
 TYPE * means,
 TYPE const * data,
 vl_size numData)
{
  vl_size numClusters = self->numClusters;
  vl_index i_d, i_cl;
  vl_size dim ;
  TYPE * oldMeans ;
  double time = 0 ;

  if (self->verbosity > 1) {
    VL_PRINTF("gmm: em: entering maximization step\n") ;
    time = vl_get_cpu_time() ;
  }

  oldMeans = vl_malloc(sizeof(TYPE) * self->dimension * numClusters) ;
  memcpy(oldMeans, means, sizeof(TYPE) * self->dimension * numClusters) ;

  memset(priors, 0, sizeof(TYPE) * numClusters) ;
  memset(means, 0, sizeof(TYPE) * self->dimension * numClusters) ;
  memset(covariances, 0, sizeof(TYPE) * self->dimension * numClusters) ;

#if defined(_OPENMP)
#pragma omp parallel default(shared) private(i_d, i_cl, dim) \
                     num_threads(vl_get_max_threads())
#endif
  {
    TYPE * clusterPosteriorSum_, * means_, * covariances_ ;

#if defined(_OPENMP)
#pragma omp critical
#endif
    {
      clusterPosteriorSum_ = vl_calloc(numClusters, sizeof(TYPE)) ;
      means_ = vl_calloc(self->dimension * numClusters, sizeof(TYPE)) ;
      covariances_ = vl_calloc(self->dimension * numClusters, sizeof(TYPE)) ;
    }

    /*
      Accumulate weighted sums and sums of squared differences. Once
      normalized, these become the means and covariances of each Gaussian
      mode.

      The squared differences are taken w.r.t. the old means, however; in
      this manner one avoids doing two passes across the data. They are
      corrected later to properly account for the new means. In principle,
      one could set the old means to zero, but this may cause numerical
      instabilities (by accumulating large squares).
    */

#if defined(_OPENMP)
#pragma omp for
#endif
    for (i_d = 0 ; i_d < (signed)numData ; ++i_d) {
      for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
        TYPE p = posteriors[i_cl + i_d * self->numClusters] ;
        vl_bool calculated = VL_FALSE ;

        /* skip very small associations for speed */
        if (p < VL_GMM_MIN_POSTERIOR / numClusters) { continue ; }

        clusterPosteriorSum_ [i_cl] += p ;

        #ifndef VL_DISABLE_AVX
        if (vl_get_simd_enabled() && vl_cpu_has_avx()) {
          VL_XCAT(_vl_weighted_mean_avx_, SFX)
          (self->dimension,
           means_ + i_cl * self->dimension,
           data + i_d * self->dimension,
           p) ;

          VL_XCAT(_vl_weighted_sigma_avx_, SFX)
          (self->dimension,
           covariances_ + i_cl * self->dimension,
           data + i_d * self->dimension,
           oldMeans + i_cl * self->dimension,
           p) ;

          calculated = VL_TRUE;
        }
        #endif
        #ifndef VL_DISABLE_SSE2
        if (vl_get_simd_enabled() && vl_cpu_has_sse2() && !calculated) {
          VL_XCAT(_vl_weighted_mean_sse2_, SFX)
          (self->dimension,
           means_ + i_cl * self->dimension,
           data + i_d * self->dimension,
           p) ;

          VL_XCAT(_vl_weighted_sigma_sse2_, SFX)
          (self->dimension,
           covariances_ + i_cl * self->dimension,
           data + i_d * self->dimension,
           oldMeans + i_cl * self->dimension,
           p) ;

          calculated = VL_TRUE;
        }
        #endif
        if(!calculated) {
          for (dim = 0 ; dim < self->dimension ; ++dim) {
            TYPE x = data[i_d * self->dimension + dim] ;
            TYPE mu = oldMeans[i_cl * self->dimension + dim] ;
            TYPE diff = x - mu ;
            means_ [i_cl * self->dimension + dim] += p * x ;
            covariances_ [i_cl * self->dimension + dim] += p * (diff*diff) ;
          }
        }
      }
    }

    /* accumulate */
#if defined(_OPENMP)
#pragma omp critical
#endif
    {
      for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
        priors [i_cl] += clusterPosteriorSum_ [i_cl];
        for (dim = 0 ; dim < self->dimension ; ++dim) {
          means [i_cl * self->dimension + dim] += means_ [i_cl * self->dimension + dim] ;
          covariances [i_cl * self->dimension + dim] += covariances_ [i_cl * self->dimension + dim] ;
        }
      }
      vl_free(means_);
      vl_free(covariances_);
      vl_free(clusterPosteriorSum_);
    }
  } /* parallel section */

  /* at this stage priors[] contains the total mass of each cluster */
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
    TYPE mass = priors[i_cl] ;
    /* do not update modes that do not receive mass */
    if (mass >= 1e-6 / numClusters) {
      for (dim = 0 ; dim < self->dimension ; ++dim) {
        means[i_cl * self->dimension + dim] /= mass ;
        covariances[i_cl * self->dimension + dim] /= mass ;
      }
    }
  }

  /* apply old to new means correction */
  for (i_cl = 0 ; i_cl < (signed)numClusters ; ++ i_cl) {
    TYPE mass = priors[i_cl] ;
    if (mass >= 1e-6 / numClusters) {
      for (dim = 0 ; dim < self->dimension ; ++dim) {
        TYPE mu = means[i_cl * self->dimension + dim] ;
        TYPE oldMu = oldMeans[i_cl * self->dimension + dim] ;
        TYPE diff = mu - oldMu ;
        covariances[i_cl * self->dimension + dim] -= diff * diff ;
      }
    }
  }

  VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ;

  {
    TYPE sum = 0;
    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
      sum += priors[i_cl] ;
    }
    sum = VL_MAX(sum, 1e-12) ;
    for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
      priors[i_cl] /= sum ;
    }
  }

  if (self->verbosity > 1) {
    VL_PRINTF("gmm: em: maximization step completed in %.2f s\n",
              vl_get_cpu_time() - time) ;
  }

  vl_free(oldMeans);
}
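
/* ------------------------------------------------------------------
 * Note (added): the old-to-new means correction above is exact. For
 * posterior weights p_i with total mass m = sum_i p_i, new mean
 * mu = sum_i p_i x_i / m and old mean mu0, one has the identity
 *
 *   sum_i p_i (x_i - mu)^2 / m
 *     = sum_i p_i (x_i - mu0)^2 / m - (mu - mu0)^2 ,
 *
 * so accumulating squared differences w.r.t. mu0 and subtracting
 * (mu - mu0)^2 after normalization yields the variance about the new
 * mean in a single pass over the data.
 */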

/* ---------------------------------------------------------------- */
/*                                                    EM iterations */
/* ---------------------------------------------------------------- */

static double
VL_XCAT(_vl_gmm_em_, SFX)
(VlGMM * self,
 TYPE const * data,
 vl_size numData)
{
  vl_size iteration, restarted ;
  double previousLL = (TYPE)(-VL_INFINITY_D) ;
  double LL = (TYPE)(-VL_INFINITY_D) ;
  double time = 0 ;

  _vl_gmm_prepare_for_data (self, numData) ;

  VL_XCAT(_vl_gmm_apply_bounds_,SFX)(self) ;

  for (iteration = 0 ; 1 ; ++ iteration) {
    double eps ;

    /*
     Expectation: assign data to Gaussian modes
     and compute log-likelihood.
     */

    if (self->verbosity > 1) {
      VL_PRINTF("gmm: em: entering expectation step\n") ;
      time = vl_get_cpu_time() ;
    }

    LL = VL_XCAT(vl_get_gmm_data_posteriors_,SFX)
    (self->posteriors,
     self->numClusters,
     numData,
     self->priors,
     self->means,
     self->dimension,
     self->covariances,
     data) ;

    if (self->verbosity > 1) {
      VL_PRINTF("gmm: em: expectation step completed in %.2f s\n",
                vl_get_cpu_time() - time) ;
    }

    /*
     Check the termination conditions.
     */
    if (self->verbosity) {
      VL_PRINTF("gmm: em: iteration %d: loglikelihood = %f (variation = %f)\n",
                iteration, LL, LL - previousLL) ;
    }
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("gmm: em: terminating because "
                  "the maximum number of iterations "
                  "(%d) has been reached.\n", self->maxNumIterations) ;
      }
      break ;
    }

    eps = vl_abs_d ((LL - previousLL) / (LL));
    if ((iteration > 0) && (eps < 0.00001)) {
      if (self->verbosity) {
        VL_PRINTF("gmm: em: terminating because the algorithm "
                  "fully converged (log-likelihood variation = %f).\n", eps) ;
      }
      break ;
    }
    previousLL = LL ;

    /*
     Restart empty modes.
     */
    if (iteration > 1) {
      restarted = VL_XCAT(_vl_gmm_restart_empty_modes_, SFX)
        (self, data);
      if ((restarted > 0) && (self->verbosity > 0)) {
        VL_PRINTF("gmm: em: %d Gaussian modes restarted because "
                  "they had become empty.\n", restarted);
      }
    }

    /*
      Maximization: re-estimate the GMM parameters.
    */
    VL_XCAT(_vl_gmm_maximization_, SFX)
      (self,self->posteriors,self->priors,self->covariances,self->means,data,numData) ;
  }
  return LL;
}
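
/* ------------------------------------------------------------------
 * Note (added): the loop above stops either when the hard iteration
 * cap set by vl_gmm_set_max_num_iterations() is reached, or when the
 * relative log-likelihood improvement |LL - LL_prev| / |LL| drops
 * below 1e-5 after at least one complete EM update.
 */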

/* ---------------------------------------------------------------- */
/*                                Kmeans initialization of mixtures */
/* ---------------------------------------------------------------- */

static void
VL_XCAT(_vl_gmm_init_with_kmeans_, SFX)
(VlGMM * self,
 TYPE const * data,
 vl_size numData,
 VlKMeans * kmeansInit)
{
  vl_size i_d ;
  vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData);

  _vl_gmm_prepare_for_data (self, numData) ;

  memset(self->means,0,sizeof(TYPE) * self->numClusters * self->dimension) ;
  memset(self->priors,0,sizeof(TYPE) * self->numClusters) ;
  memset(self->covariances,0,sizeof(TYPE) * self->numClusters * self->dimension) ;
  memset(self->posteriors,0,sizeof(TYPE) * self->numClusters * numData) ;

  /* set up the specified KMeans initialization object, if any */
  if (kmeansInit) { vl_gmm_set_kmeans_init_object (self, kmeansInit) ; }

  /* if a KMeans initialization object is still unavailable, create one */
  if(self->kmeansInit == NULL) {
    vl_size ncomparisons = VL_MAX(numData / 4, 10) ;
    vl_size niter = 5 ;
    vl_size ntrees = 1 ;
    vl_size nrepetitions = 1 ;
    VlKMeansAlgorithm algorithm = VlKMeansANN ;
    VlKMeansInitialization initialization = VlKMeansRandomSelection ;

    VlKMeans * kmeansInitDefault = vl_kmeans_new(self->dataType,VlDistanceL2) ;
    vl_kmeans_set_initialization(kmeansInitDefault, initialization);
    vl_kmeans_set_max_num_iterations (kmeansInitDefault, niter) ;
    vl_kmeans_set_max_num_comparisons (kmeansInitDefault, ncomparisons) ;
    vl_kmeans_set_num_trees (kmeansInitDefault, ntrees);
    vl_kmeans_set_algorithm (kmeansInitDefault, algorithm);
    vl_kmeans_set_num_repetitions(kmeansInitDefault, nrepetitions);
    vl_kmeans_set_verbosity (kmeansInitDefault, self->verbosity);

    self->kmeansInit = kmeansInitDefault;
    self->kmeansInitIsOwner = VL_TRUE ;
  }

  /* use k-means to assign data to clusters */
  vl_kmeans_cluster (self->kmeansInit, data, self->dimension, numData, self->numClusters);
  vl_kmeans_quantize (self->kmeansInit, assignments, NULL, data, numData) ;

  /* transform the k-means assignments into posteriors and estimate the mode parameters */
  for(i_d = 0; i_d < numData; i_d++) {
    ((TYPE*)self->posteriors)[assignments[i_d] + i_d * self->numClusters] = (TYPE) 1.0 ;
  }

  /* update cluster parameters */
  VL_XCAT(_vl_gmm_maximization_, SFX)
    (self,self->posteriors,self->priors,self->covariances,self->means,data,numData);
  vl_free(assignments) ;
}

/* ---------------------------------------------------------------- */
/*                                Random initialization of mixtures */
/* ---------------------------------------------------------------- */

static void
VL_XCAT(_vl_gmm_compute_init_sigma_, SFX)
(VlGMM * self,
 TYPE const * data,
 TYPE * initSigma,
 vl_size dimension,
 vl_size numData)
{
  vl_size dim;
  vl_uindex i;

  TYPE * dataMean ;

  memset(initSigma,0,sizeof(TYPE)*dimension) ;
  if (numData <= 1) return ;

  dataMean = vl_malloc(sizeof(TYPE)*dimension);
  memset(dataMean,0,sizeof(TYPE)*dimension) ;

  /* find the mean of the whole dataset */
  for(dim = 0 ; dim < dimension ; dim++) {
    for(i = 0 ; i < numData ; i++) {
      dataMean[dim] += data[i*dimension + dim];
    }
    dataMean[dim] /= numData;
  }

  /* compute the variance of the whole dataset */
  for(dim = 0; dim < dimension; dim++) {
    for(i = 0; i < numData; i++) {
      TYPE diff = (data[i*dimension + dim] - dataMean[dim]) ;
      initSigma[dim] += diff*diff ;
    }
    initSigma[dim] /= numData - 1 ;
  }

  vl_free(dataMean) ;
}

static void
VL_XCAT(_vl_gmm_init_with_rand_data_, SFX)
(VlGMM * self,
 TYPE const * data,
 vl_size numData)
{
  vl_uindex i, k, dim ;
  VlKMeans * kmeans ;

  _vl_gmm_prepare_for_data(self, numData) ;

  /* initialize the priors of the Gaussians so they are equal and sum to one */
  for (i = 0 ; i < self->numClusters ; ++i) { ((TYPE*)self->priors)[i] = (TYPE) (1.0 / self->numClusters) ; }

  /* initialize the diagonals of the covariance matrices to the data covariance */
  VL_XCAT(_vl_gmm_compute_init_sigma_, SFX) (self, data, self->covariances, self->dimension, numData);
  for (k = 1 ; k < self->numClusters ; ++ k) {
    for(dim = 0; dim < self->dimension; dim++) {
      *((TYPE*)self->covariances + k * self->dimension + dim) =
      *((TYPE*)self->covariances + dim) ;
    }
  }

  /* use kmeans++ initialization to pick points at random as the initial means */
  kmeans = vl_kmeans_new(self->dataType,VlDistanceL2) ;
  vl_kmeans_init_centers_plus_plus(kmeans, data, self->dimension, numData, self->numClusters) ;
  memcpy(self->means, vl_kmeans_get_centers(kmeans), sizeof(TYPE) * self->dimension * self->numClusters) ;
  vl_kmeans_delete(kmeans) ;
}

/* ---------------------------------------------------------------- */
#else /* VL_GMM_INSTANTIATING */
/* ---------------------------------------------------------------- */

#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_GMM_INSTANTIATING
#include "gmm.c"

#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_GMM_INSTANTIATING
#include "gmm.c"
#endif

/* VL_GMM_INSTANTIATING */
#endif

/* ---------------------------------------------------------------- */
#ifndef VL_GMM_INSTANTIATING
/* ---------------------------------------------------------------- */

VlGMM *
vl_gmm_new_copy (VlGMM const * self)
{
  vl_size size = vl_get_type_size(self->dataType) ;
  VlGMM * gmm = vl_gmm_new(self->dataType, self->dimension, self->numClusters);
  gmm->initialization = self->initialization;
  gmm->maxNumIterations = self->maxNumIterations;
  gmm->numRepetitions = self->numRepetitions;
  gmm->verbosity = self->verbosity;
  gmm->LL = self->LL;

  memcpy(gmm->means, self->means, size*self->numClusters*self->dimension);
  memcpy(gmm->covariances, self->covariances, size*self->numClusters*self->dimension);
  memcpy(gmm->priors, self->priors, size*self->numClusters);
  return gmm ;
}

void
vl_gmm_init_with_rand_data
(VlGMM * self,
 void const * data,
 vl_size numData)
{
  vl_gmm_reset (self) ;
  switch (self->dataType) {
    case VL_TYPE_FLOAT : _vl_gmm_init_with_rand_data_f (self, (float const *)data, numData) ; break ;
    case VL_TYPE_DOUBLE : _vl_gmm_init_with_rand_data_d (self, (double const *)data, numData) ; break ;
    default:
      abort() ;
  }
}

void
vl_gmm_init_with_kmeans
(VlGMM * self,
 void const * data,
 vl_size numData,
 VlKMeans * kmeansInit)
{
  vl_gmm_reset (self) ;
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_gmm_init_with_kmeans_f
      (self, (float const *)data, numData, kmeansInit) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_gmm_init_with_kmeans_d
      (self, (double const *)data, numData, kmeansInit) ;
      break ;
    default:
      abort() ;
  }
}
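
/* ------------------------------------------------------------------
 * Example (added sketch): a custom KMeans object can be configured
 * and passed to the initialization. The settings below are
 * illustrative, not prescriptive; the caller retains ownership of the
 * object (see vl_gmm_set_kmeans_init_object above).
 *
 * @code
 * VlKMeans * kmeans = vl_kmeans_new (VL_TYPE_FLOAT, VlDistanceL2) ;
 * vl_kmeans_set_algorithm (kmeans, VlKMeansElkan) ;
 * vl_kmeans_set_max_num_iterations (kmeans, 20) ;
 * vl_gmm_init_with_kmeans (gmm, data, numData, kmeans) ;
 * vl_gmm_em (gmm, data, numData) ;
 * vl_kmeans_delete (kmeans) ;  // safe once the GMM is done with it
 * @endcode
 */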

#if 0
#include <fenv.h>
#endif

double vl_gmm_cluster (VlGMM * self,
                       void const * data,
                       vl_size numData)
{
  void * bestPriors = NULL ;
  void * bestMeans = NULL;
  void * bestCovariances = NULL;
  void * bestPosteriors = NULL;
  vl_size size = vl_get_type_size(self->dataType) ;
  double bestLL = -VL_INFINITY_D;
  vl_uindex repetition;

  assert(self->numRepetitions >= 1) ;

  bestPriors = vl_malloc(size * self->numClusters) ;
  bestMeans = vl_malloc(size * self->dimension * self->numClusters) ;
  bestCovariances = vl_malloc(size * self->dimension * self->numClusters) ;
  bestPosteriors = vl_malloc(size * self->numClusters * numData) ;

#if 0
  feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
#endif

  for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) {
    double LL ;
    double timeRef ;

    if (self->verbosity) {
      VL_PRINTF("gmm: clustering: starting repetition %d of %d\n", repetition + 1, self->numRepetitions) ;
    }

    /* initialize a new mixture model */
    timeRef = vl_get_cpu_time() ;
    switch (self->initialization) {
      case VlGMMKMeans : vl_gmm_init_with_kmeans (self, data, numData, NULL) ; break ;
      case VlGMMRand : vl_gmm_init_with_rand_data (self, data, numData) ; break ;
      case VlGMMCustom : break ;
      default: abort() ;
    }
    if (self->verbosity) {
      VL_PRINTF("gmm: model initialized in %.2f s\n",
                vl_get_cpu_time() - timeRef) ;
    }

    /* fit the model to data by running EM */
    timeRef = vl_get_cpu_time () ;
    LL = vl_gmm_em (self, data, numData) ;
    if (self->verbosity) {
      VL_PRINTF("gmm: optimization terminated in %.2f s with loglikelihood %f\n",
                vl_get_cpu_time() - timeRef, LL) ;
    }

    if (LL > bestLL || repetition == 0) {
      void * temp ;

      temp = bestPriors ;
      bestPriors = self->priors ;
      self->priors = temp ;

      temp = bestMeans ;
      bestMeans = self->means ;
      self->means = temp ;

      temp = bestCovariances ;
      bestCovariances = self->covariances ;
      self->covariances = temp ;

      temp = bestPosteriors ;
      bestPosteriors = self->posteriors ;
      self->posteriors = temp ;

      bestLL = LL;
    }
  }

  vl_free (self->priors) ;
  vl_free (self->means) ;
  vl_free (self->covariances) ;
  vl_free (self->posteriors) ;

  self->priors = bestPriors ;
  self->means = bestMeans ;
  self->covariances = bestCovariances ;
  self->posteriors = bestPosteriors ;
  self->LL = bestLL;

  if (self->verbosity) {
    VL_PRINTF("gmm: all repetitions terminated with final loglikelihood %f\n", self->LL) ;
  }

  return bestLL ;
}
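
/* ------------------------------------------------------------------
 * Example (added sketch): a complete clustering run using the float
 * variant. The data-filling step is a placeholder; in practice the
 * array would hold real descriptors.
 *
 * @code
 * vl_size dimension = 2, numData = 1000, numClusters = 10 ;
 * float * data = vl_malloc (sizeof(float) * dimension * numData) ;
 * // ... fill data with dimension * numData values ...
 *
 * VlGMM * gmm = vl_gmm_new (VL_TYPE_FLOAT, dimension, numClusters) ;
 * vl_gmm_set_max_num_iterations (gmm, 100) ;
 * vl_gmm_set_num_repetitions (gmm, 2) ;
 * vl_gmm_set_initialization (gmm, VlGMMKMeans) ;
 *
 * double LL = vl_gmm_cluster (gmm, data, numData) ;
 * float const * means = (float const *) vl_gmm_get_means (gmm) ;
 * float const * covariances = (float const *) vl_gmm_get_covariances (gmm) ;
 * float const * priors = (float const *) vl_gmm_get_priors (gmm) ;
 *
 * vl_gmm_delete (gmm) ;
 * vl_free (data) ;
 * @endcode
 */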

double vl_gmm_em (VlGMM * self, void const * data, vl_size numData)
{
  switch (self->dataType) {
    case VL_TYPE_FLOAT:
      return _vl_gmm_em_f (self, (float const *)data, numData) ; break ;
    case VL_TYPE_DOUBLE:
      return _vl_gmm_em_d (self, (double const *)data, numData) ; break ;
    default:
      abort() ;
  }
  return 0 ;
}

void
vl_gmm_set_means (VlGMM * self, void const * means)
{
  memcpy(self->means,means,
         self->dimension * self->numClusters * vl_get_type_size(self->dataType));
}

void vl_gmm_set_covariances (VlGMM * self, void const * covariances)
{
  memcpy(self->covariances,covariances,
         self->dimension * self->numClusters * vl_get_type_size(self->dataType));
}

void vl_gmm_set_priors (VlGMM * self, void const * priors)
{
  memcpy(self->priors,priors,
         self->numClusters * vl_get_type_size(self->dataType));
}
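
/* ------------------------------------------------------------------
 * Example (added sketch): the setters above support the VlGMMCustom
 * initialization mode, in which the caller supplies a starting model
 * before running EM. The initPriors, initMeans and initCovariances
 * arrays are hypothetical and must match the model's data type and
 * sizes.
 *
 * @code
 * vl_gmm_set_initialization (gmm, VlGMMCustom) ;
 * vl_gmm_set_priors (gmm, initPriors) ;            // numClusters entries
 * vl_gmm_set_means (gmm, initMeans) ;              // numClusters * dimension
 * vl_gmm_set_covariances (gmm, initCovariances) ;  // numClusters * dimension
 * double LL = vl_gmm_cluster (gmm, data, numData) ;
 * @endcode
 */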

/* VL_GMM_INSTANTIATING */
#endif

#undef SFX
#undef TYPE
#undef FLT
#undef VL_GMM_INSTANTIATING

