pf.c
/*
 *  Player - One Hell of a Robot Server
 *  Copyright (C) 2000  Brian Gerkey   &  Kasper Stoy
 *                      gerkey@usc.edu    kaspers@robotics.usc.edu
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
/**************************************************************************
 * Desc: Simple particle filter for localization.
 * Author: Andrew Howard
 * Date: 10 Dec 2002
 * CVS: $Id: pf.c 6345 2008-04-17 01:36:39Z gerkey $
 *************************************************************************/

#include <assert.h>
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>

#include "amcl/pf/pf.h"
#include "amcl/pf/pf_pdf.h"
#include "amcl/pf/pf_kdtree.h"


// Compute the required number of samples, given that there are k bins
// with samples in them.
static int pf_resample_limit(pf_t *pf, int k);

// Re-compute the cluster statistics for a sample set
static void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set);

// Create a new filter
pf_t *pf_alloc(int min_samples, int max_samples,
               double alpha_slow, double alpha_fast,
               pf_init_model_fn_t random_pose_fn, void *random_pose_data)
{
  int i, j;
  pf_t *pf;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  srand48(time(NULL));

  pf = calloc(1, sizeof(pf_t));

  pf->random_pose_fn = random_pose_fn;
  pf->random_pose_data = random_pose_data;

  pf->min_samples = min_samples;
  pf->max_samples = max_samples;

  // Control parameters for the population size calculation.  [err] is
  // the max error between the true distribution and the estimated
  // distribution.  [z] is the upper standard normal quantile for (1 -
  // p), where p is the probability that the error on the estimated
  // distribution will be less than [err].
  pf->pop_err = 0.01;
  pf->pop_z = 3;
  pf->dist_threshold = 0.5;

  pf->current_set = 0;
  for (j = 0; j < 2; j++)
  {
    set = pf->sets + j;

    set->sample_count = max_samples;
    set->samples = calloc(max_samples, sizeof(pf_sample_t));

    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      sample->pose.v[0] = 0.0;
      sample->pose.v[1] = 0.0;
      sample->pose.v[2] = 0.0;
      sample->weight = 1.0 / max_samples;
    }

    // HACK: is 3 times max_samples enough?
    set->kdtree = pf_kdtree_alloc(3 * max_samples);

    set->cluster_count = 0;
    set->cluster_max_count = max_samples;
    set->clusters = calloc(set->cluster_max_count, sizeof(pf_cluster_t));

    set->mean = pf_vector_zero();
    set->cov = pf_matrix_zero();
  }

  pf->w_slow = 0.0;
  pf->w_fast = 0.0;

  pf->alpha_slow = alpha_slow;
  pf->alpha_fast = alpha_fast;

  // Set converged to 0
  pf_init_converged(pf);

  return pf;
}
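
// Example usage (a sketch, not part of the original file): allocating a
// filter with a hypothetical uniform-pose callback.  The callback name
// uniform_pose() and all numeric values below are illustrative assumptions.
//
//   static pf_vector_t uniform_pose(void *data)
//   {
//     pf_vector_t p;
//     p.v[0] = drand48() * 10.0 - 5.0;       // x uniform in [-5, 5] m
//     p.v[1] = drand48() * 10.0 - 5.0;       // y uniform in [-5, 5] m
//     p.v[2] = drand48() * 2 * M_PI - M_PI;  // theta uniform in [-pi, pi]
//     return p;
//   }
//
//   pf_t *pf = pf_alloc(100, 5000, 0.001, 0.1, uniform_pose, NULL);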

// Free an existing filter
void pf_free(pf_t *pf)
{
  int i;

  for (i = 0; i < 2; i++)
  {
    free(pf->sets[i].clusters);
    pf_kdtree_free(pf->sets[i].kdtree);
    free(pf->sets[i].samples);
  }
  free(pf);

  return;
}

// Initialize the filter using a Gaussian distribution
void pf_init(pf_t *pf, pf_vector_t mean, pf_matrix_t cov)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;
  pf_pdf_gaussian_t *pdf;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  pdf = pf_pdf_gaussian_alloc(mean, cov);

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = pf_pdf_gaussian_sample(pdf);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  pf_pdf_gaussian_free(pdf);

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  // Set converged to 0
  pf_init_converged(pf);

  return;
}
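
// Example usage (a sketch, not part of the original file): initializing
// around a known pose with a diagonal covariance.  The numbers are
// illustrative assumptions only.
//
//   pf_vector_t mean = pf_vector_zero();
//   pf_matrix_t cov = pf_matrix_zero();
//   mean.v[0] = 1.0;                          // x (m)
//   mean.v[1] = 2.0;                          // y (m)
//   mean.v[2] = 0.0;                          // theta (rad)
//   cov.m[0][0] = 0.5 * 0.5;                  // var(x)
//   cov.m[1][1] = 0.5 * 0.5;                  // var(y)
//   cov.m[2][2] = (M_PI / 12) * (M_PI / 12);  // var(theta)
//   pf_init(pf, mean, cov);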


// Initialize the filter using some model
void pf_init_model(pf_t *pf, pf_init_model_fn_t init_fn, void *init_data)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = (*init_fn) (init_data);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  // Set converged to 0
  pf_init_converged(pf);

  return;
}

void pf_init_converged(pf_t *pf)
{
  pf_sample_set_t *set;
  set = pf->sets + pf->current_set;
  set->converged = 0;
  pf->converged = 0;
}

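// Check for convergence: the filter is declared converged when every sample
// lies within dist_threshold of the sample mean along both the x and y axes
// (an axis-aligned box test; orientation is ignored).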
int pf_update_converged(pf_t *pf)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;
  double mean_x = 0, mean_y = 0;

  set = pf->sets + pf->current_set;

  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;

    mean_x += sample->pose.v[0];
    mean_y += sample->pose.v[1];
  }
  mean_x /= set->sample_count;
  mean_y /= set->sample_count;

  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;
    if (fabs(sample->pose.v[0] - mean_x) > pf->dist_threshold ||
        fabs(sample->pose.v[1] - mean_y) > pf->dist_threshold)
    {
      set->converged = 0;
      pf->converged = 0;
      return 0;
    }
  }
  set->converged = 1;
  pf->converged = 1;
  return 1;
}

// Update the filter with some new action
void pf_update_action(pf_t *pf, pf_action_model_fn_t action_fn, void *action_data)
{
  pf_sample_set_t *set;

  set = pf->sets + pf->current_set;

  (*action_fn) (action_data, set);

  return;
}


// Update the filter with some new sensor observation
void pf_update_sensor(pf_t *pf, pf_sensor_model_fn_t sensor_fn, void *sensor_data)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;
  double total;

  set = pf->sets + pf->current_set;

  // Compute the sample weights
  total = (*sensor_fn) (sensor_data, set);

  if (total > 0.0)
  {
    // Normalize weights
    double w_avg = 0.0;
    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      w_avg += sample->weight;
      sample->weight /= total;
    }
    // Update running averages of likelihood of samples (Prob Rob p258)
    w_avg /= set->sample_count;
    if (pf->w_slow == 0.0)
      pf->w_slow = w_avg;
    else
      pf->w_slow += pf->alpha_slow * (w_avg - pf->w_slow);
    if (pf->w_fast == 0.0)
      pf->w_fast = w_avg;
    else
      pf->w_fast += pf->alpha_fast * (w_avg - pf->w_fast);
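    // w_slow and w_fast are exponential low-pass filters over w_avg, with
    // alpha_slow << alpha_fast.  pf_update_resample() later injects random
    // samples with probability max(0, 1 - w_fast / w_slow), as in the
    // Augmented MCL algorithm (Probabilistic Robotics).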
    //printf("w_avg: %e slow: %e fast: %e\n",
    //       w_avg, pf->w_slow, pf->w_fast);
  }
  else
  {
    // Handle zero total
    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      sample->weight = 1.0 / set->sample_count;
    }
  }

  return;
}


// Resample the distribution
void pf_update_resample(pf_t *pf)
{
  int i;
  double total;
  pf_sample_set_t *set_a, *set_b;
  pf_sample_t *sample_a, *sample_b;

  //double r,c,U;
  //int m;
  //double count_inv;
  double* c;

  double w_diff;

  set_a = pf->sets + pf->current_set;
  set_b = pf->sets + (pf->current_set + 1) % 2;

  // Build up cumulative probability table for resampling.
  // TODO: Replace this with a more efficient procedure
  // (e.g., http://www.network-theory.co.uk/docs/gslref/GeneralDiscreteDistributions.html)
  c = (double*)malloc(sizeof(double)*(set_a->sample_count+1));
  c[0] = 0.0;
  for (i = 0; i < set_a->sample_count; i++)
    c[i+1] = c[i] + set_a->samples[i].weight;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set_b->kdtree);

  // Draw samples from set a to create set b.
  total = 0;
  set_b->sample_count = 0;

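  // Probability of drawing a random (recovery) sample instead of resampling
  // from set a; it is non-zero only when the short-term likelihood average
  // w_fast has fallen below the long-term average w_slow (Augmented MCL).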
  w_diff = 1.0 - pf->w_fast / pf->w_slow;
  if (w_diff < 0.0)
    w_diff = 0.0;
  //printf("w_diff: %9.6f\n", w_diff);

  // Can't (easily) combine low-variance sampler with KLD adaptive
  // sampling, so we'll take the more traditional route.
  /*
  // Low-variance resampler, taken from Probabilistic Robotics, p110
  count_inv = 1.0/set_a->sample_count;
  r = drand48() * count_inv;
  c = set_a->samples[0].weight;
  i = 0;
  m = 0;
  */
  while (set_b->sample_count < pf->max_samples)
  {
    sample_b = set_b->samples + set_b->sample_count++;

    if (drand48() < w_diff)
      sample_b->pose = (pf->random_pose_fn)(pf->random_pose_data);
    else
    {
      // Can't (easily) combine low-variance sampler with KLD adaptive
      // sampling, so we'll take the more traditional route.
      /*
      // Low-variance resampler, taken from Probabilistic Robotics, p110
      U = r + m * count_inv;
      while(U>c)
      {
        i++;
        // Handle wrap-around by resetting counters and picking a new random
        // number
        if(i >= set_a->sample_count)
        {
          r = drand48() * count_inv;
          c = set_a->samples[0].weight;
          i = 0;
          m = 0;
          U = r + m * count_inv;
          continue;
        }
        c += set_a->samples[i].weight;
      }
      m++;
      */

      // Naive discrete event sampler
      double r;
      r = drand48();
      for (i = 0; i < set_a->sample_count; i++)
      {
        if ((c[i] <= r) && (r < c[i+1]))
          break;
      }
      assert(i < set_a->sample_count);

      sample_a = set_a->samples + i;

      assert(sample_a->weight > 0);

      // Add sample to list
      sample_b->pose = sample_a->pose;
    }

    sample_b->weight = 1.0;
    total += sample_b->weight;

    // Add sample to histogram
    pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);

    // See if we have enough samples yet
    if (set_b->sample_count > pf_resample_limit(pf, set_b->kdtree->leaf_count))
      break;
  }

  // Reset averages, to avoid spiraling off into complete randomness.
  if (w_diff > 0.0)
    pf->w_slow = pf->w_fast = 0.0;

  //fprintf(stderr, "\n\n");

  // Normalize weights
  for (i = 0; i < set_b->sample_count; i++)
  {
    sample_b = set_b->samples + i;
    sample_b->weight /= total;
  }

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set_b);

  // Use the newly created sample set
  pf->current_set = (pf->current_set + 1) % 2;

  pf_update_converged(pf);

  free(c);
  return;
}
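
// Since c[] is sorted and cumulative, the linear scan in the sampler above
// could be replaced by a binary search, reducing each draw from O(n) to
// O(log n).  A minimal sketch (pf_sample_index() is hypothetical, not part
// of this file); it would be called as i = pf_sample_index(c, set_a->sample_count, r):
//
//   static int pf_sample_index(const double *c, int n, double r)
//   {
//     // Invariant: c[lo] <= r < c[hi], with c[0] = 0 and c[n] ~= 1.
//     int lo = 0, hi = n;
//     while (hi - lo > 1)
//     {
//       int mid = (lo + hi) / 2;
//       if (c[mid] <= r)
//         lo = mid;
//       else
//         hi = mid;
//     }
//     return lo;  // index i such that c[i] <= r < c[i+1]
//   }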


// Compute the required number of samples, given that there are k bins
// with samples in them.  This is taken directly from Fox et al.
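// The bound follows Fox's KLD-sampling: with k occupied bins, choose n so
// that with probability (1 - p) the K-L divergence between the sample-based
// estimate and the true distribution stays below pop_err:
//
//   n = (k - 1) / (2 * pop_err) *
//       (1 - 2 / (9 (k - 1)) + sqrt(2 / (9 (k - 1))) * pop_z)^3
//
// where pop_z is the upper (1 - p) quantile of the standard normal
// distribution.  The cubed term is the Wilson-Hilferty approximation to the
// chi-square distribution with k - 1 degrees of freedom.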
int pf_resample_limit(pf_t *pf, int k)
{
  double a, b, c, x;
  int n;

  if (k <= 1)
    return pf->max_samples;

  a = 1;
  b = 2 / (9 * ((double) k - 1));
  c = sqrt(2 / (9 * ((double) k - 1))) * pf->pop_z;
  x = a - b + c;

  n = (int) ceil((k - 1) / (2 * pf->pop_err) * x * x * x);

  if (n < pf->min_samples)
    return pf->min_samples;
  if (n > pf->max_samples)
    return pf->max_samples;

  return n;
}


// Re-compute the cluster statistics for a sample set
void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set)
{
  int i, j, k, cidx;
  pf_sample_t *sample;
  pf_cluster_t *cluster;

  // Workspace
  double m[4], c[2][2];
  size_t count;
  double weight;

  // Cluster the samples
  pf_kdtree_cluster(set->kdtree);

  // Initialize cluster stats
  set->cluster_count = 0;

  for (i = 0; i < set->cluster_max_count; i++)
  {
    cluster = set->clusters + i;
    cluster->count = 0;
    cluster->weight = 0;
    cluster->mean = pf_vector_zero();
    cluster->cov = pf_matrix_zero();

    for (j = 0; j < 4; j++)
      cluster->m[j] = 0.0;
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->c[j][k] = 0.0;
  }

  // Initialize overall filter stats
  count = 0;
  weight = 0.0;
  set->mean = pf_vector_zero();
  set->cov = pf_matrix_zero();
  for (j = 0; j < 4; j++)
    m[j] = 0.0;
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      c[j][k] = 0.0;

  // Compute cluster stats
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;

    //printf("%d %f %f %f\n", i, sample->pose.v[0], sample->pose.v[1], sample->pose.v[2]);

    // Get the cluster label for this sample
    cidx = pf_kdtree_get_cluster(set->kdtree, sample->pose);
    assert(cidx >= 0);
    if (cidx >= set->cluster_max_count)
      continue;
    if (cidx + 1 > set->cluster_count)
      set->cluster_count = cidx + 1;

    cluster = set->clusters + cidx;

    cluster->count += 1;
    cluster->weight += sample->weight;

    count += 1;
    weight += sample->weight;

    // Compute mean
    cluster->m[0] += sample->weight * sample->pose.v[0];
    cluster->m[1] += sample->weight * sample->pose.v[1];
    cluster->m[2] += sample->weight * cos(sample->pose.v[2]);
    cluster->m[3] += sample->weight * sin(sample->pose.v[2]);

    m[0] += sample->weight * sample->pose.v[0];
    m[1] += sample->weight * sample->pose.v[1];
    m[2] += sample->weight * cos(sample->pose.v[2]);
    m[3] += sample->weight * sin(sample->pose.v[2]);

    // Compute covariance in linear components
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
      {
        cluster->c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
        c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
      }
  }

  // Normalize
  for (i = 0; i < set->cluster_count; i++)
  {
    cluster = set->clusters + i;

    cluster->mean.v[0] = cluster->m[0] / cluster->weight;
    cluster->mean.v[1] = cluster->m[1] / cluster->weight;
    cluster->mean.v[2] = atan2(cluster->m[3], cluster->m[2]);

    cluster->cov = pf_matrix_zero();

    // Covariance in linear components
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->cov.m[j][k] = cluster->c[j][k] / cluster->weight -
          cluster->mean.v[j] * cluster->mean.v[k];

    // Covariance in angular components; I think this is the correct
    // formula for circular statistics.
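    // The circular variance of a weighted sample is -2 ln(R), where R is
    // the mean resultant length |sum w e^(i theta)| / sum w.  Here m[2] and
    // m[3] are raw weight-sums, so the expression below is exact only when
    // the cluster's weights sum to 1.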
    cluster->cov.m[2][2] = -2 * log(sqrt(cluster->m[2] * cluster->m[2] +
                                         cluster->m[3] * cluster->m[3]));

    //printf("cluster %d %d %f (%f %f %f)\n", i, cluster->count, cluster->weight,
    //       cluster->mean.v[0], cluster->mean.v[1], cluster->mean.v[2]);
    //pf_matrix_fprintf(cluster->cov, stdout, "%e");
  }

  // Compute overall filter stats
  set->mean.v[0] = m[0] / weight;
  set->mean.v[1] = m[1] / weight;
  set->mean.v[2] = atan2(m[3], m[2]);

  // Covariance in linear components
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      set->cov.m[j][k] = c[j][k] / weight - set->mean.v[j] * set->mean.v[k];

  // Covariance in angular components; I think this is the correct
  // formula for circular statistics.
  set->cov.m[2][2] = -2 * log(sqrt(m[2] * m[2] + m[3] * m[3]));

  return;
}


// Compute the CEP statistics (mean and variance).
void pf_get_cep_stats(pf_t *pf, pf_vector_t *mean, double *var)
{
  int i;
  double mn, mx, my, mrr;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  set = pf->sets + pf->current_set;

  mn = 0.0;
  mx = 0.0;
  my = 0.0;
  mrr = 0.0;

  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;

    mn += sample->weight;
    mx += sample->weight * sample->pose.v[0];
    my += sample->weight * sample->pose.v[1];
    mrr += sample->weight * sample->pose.v[0] * sample->pose.v[0];
    mrr += sample->weight * sample->pose.v[1] * sample->pose.v[1];
  }

  mean->v[0] = mx / mn;
  mean->v[1] = my / mn;
  mean->v[2] = 0.0;

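  // var = E[x^2 + y^2] - (E[x]^2 + E[y]^2): the weighted mean squared
  // distance of the samples from their weighted mean position.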
  *var = mrr / mn - (mx * mx / (mn * mn) + my * my / (mn * mn));

  return;
}


// Get the statistics for a particular cluster.
int pf_get_cluster_stats(pf_t *pf, int clabel, double *weight,
                         pf_vector_t *mean, pf_matrix_t *cov)
{
  pf_sample_set_t *set;
  pf_cluster_t *cluster;

  set = pf->sets + pf->current_set;

  if (clabel >= set->cluster_count)
    return 0;
  cluster = set->clusters + clabel;

  *weight = cluster->weight;
  *mean = cluster->mean;
  *cov = cluster->cov;

  return 1;
}