/*
 *  Player - One Hell of a Robot Server
 *  Copyright (C) 2000  Brian Gerkey & Kasper Stoy
 *                      gerkey@usc.edu    kaspers@robotics.usc.edu
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
/**************************************************************************
 * Desc: Simple particle filter for localization.
 * Author: Andrew Howard
 * Date: 10 Dec 2002
 * CVS: $Id: pf.c 6345 2008-04-17 01:36:39Z gerkey $
 *************************************************************************/

#include <assert.h>
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>

#include "pf.h"
#include "pf_pdf.h"
#include "pf_kdtree.h"


// Compute the required number of samples, given that there are k bins
// with samples in them.
static int pf_resample_limit(pf_t *pf, int k);

// Re-compute the cluster statistics for a sample set
static void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set);


// Create a new filter
pf_t *pf_alloc(int min_samples, int max_samples,
               double alpha_slow, double alpha_fast,
               pf_init_model_fn_t random_pose_fn, void *random_pose_data)
{
  int i, j;
  pf_t *pf;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  srand48(time(NULL));

  pf = calloc(1, sizeof(pf_t));

  pf->random_pose_fn = random_pose_fn;
  pf->random_pose_data = random_pose_data;

  pf->min_samples = min_samples;
  pf->max_samples = max_samples;

  // Control parameters for the population size calculation.  [err] is
  // the max error between the true distribution and the estimated
  // distribution.  [z] is the upper standard normal quantile for (1 -
  // p), where p is the probability that the error on the estimated
  // distribution will be less than [err].
  pf->pop_err = 0.01;
  pf->pop_z = 3;

  pf->current_set = 0;
  for (j = 0; j < 2; j++)
  {
    set = pf->sets + j;

    set->sample_count = max_samples;
    set->samples = calloc(max_samples, sizeof(pf_sample_t));

    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      sample->pose.v[0] = 0.0;
      sample->pose.v[1] = 0.0;
      sample->pose.v[2] = 0.0;
      sample->weight = 1.0 / max_samples;
    }

    // HACK: is 3 times max_samples enough?
    set->kdtree = pf_kdtree_alloc(3 * max_samples);

    set->cluster_count = 0;
    set->cluster_max_count = max_samples;
    set->clusters = calloc(set->cluster_max_count, sizeof(pf_cluster_t));

    set->mean = pf_vector_zero();
    set->cov = pf_matrix_zero();
  }

  pf->w_slow = 0.0;
  pf->w_fast = 0.0;

  pf->alpha_slow = alpha_slow;
  pf->alpha_fast = alpha_fast;

  return pf;
}


// Free an existing filter
void pf_free(pf_t *pf)
{
  int i;

  for (i = 0; i < 2; i++)
  {
    free(pf->sets[i].clusters);
    pf_kdtree_free(pf->sets[i].kdtree);
    free(pf->sets[i].samples);
  }
  free(pf);

  return;
}


// Initialize the filter using a Gaussian
void pf_init(pf_t *pf, pf_vector_t mean, pf_matrix_t cov)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;
  pf_pdf_gaussian_t *pdf;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  pdf = pf_pdf_gaussian_alloc(mean, cov);

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = pf_pdf_gaussian_sample(pdf);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  pf_pdf_gaussian_free(pdf);

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  return;
}


// Initialize the filter using some model
void pf_init_model(pf_t *pf, pf_init_model_fn_t init_fn, void *init_data)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = (*init_fn) (init_data);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  return;
}


// Update the filter with some new action
void pf_update_action(pf_t *pf, pf_action_model_fn_t action_fn, void *action_data)
{
  pf_sample_set_t *set;

  set = pf->sets + pf->current_set;

  (*action_fn) (action_data, set);

  return;
}


// Update the filter with some new sensor observation
void pf_update_sensor(pf_t *pf, pf_sensor_model_fn_t sensor_fn, void *sensor_data)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;
  double total;

  set = pf->sets + pf->current_set;

  // Compute the sample weights
  total = (*sensor_fn) (sensor_data, set);

  if (total > 0.0)
  {
    // Normalize weights
    double w_avg = 0.0;
    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      w_avg += sample->weight;
      sample->weight /= total;
    }
    // Update running averages of the sample likelihood: w_fast tracks
    // short-term, w_slow long-term, as in augmented MCL (Prob Rob p258)
    w_avg /= set->sample_count;
    if (pf->w_slow == 0.0)
      pf->w_slow = w_avg;
    else
      pf->w_slow += pf->alpha_slow * (w_avg - pf->w_slow);
    if (pf->w_fast == 0.0)
      pf->w_fast = w_avg;
    else
      pf->w_fast += pf->alpha_fast * (w_avg - pf->w_fast);
    //printf("w_avg: %e slow: %e fast: %e\n",
    //w_avg, pf->w_slow, pf->w_fast);
  }
  else
  {
    //PLAYER_WARN("pdf has zero probability");

    // Handle zero total
    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      sample->weight = 1.0 / set->sample_count;
    }
  }

  return;
}


// Resample the distribution
void pf_update_resample(pf_t *pf)
{
  int i;
  double total;
  pf_sample_set_t *set_a, *set_b;
  pf_sample_t *sample_a, *sample_b;

  //double r,c,U;
  //int m;
  //double count_inv;
  double *c;

  double w_diff;

  set_a = pf->sets + pf->current_set;
  set_b = pf->sets + (pf->current_set + 1) % 2;

  // Build up cumulative probability table for resampling.
  // TODO: Replace this with a more efficient procedure
  // (e.g., http://www.network-theory.co.uk/docs/gslref/GeneralDiscreteDistributions.html)
  c = (double*) malloc(sizeof(double) * (set_a->sample_count + 1));
  c[0] = 0.0;
  for (i = 0; i < set_a->sample_count; i++)
    c[i+1] = c[i] + set_a->samples[i].weight;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set_b->kdtree);

  // Draw samples from set a to create set b.
  total = 0;
  set_b->sample_count = 0;

  // Fraction of samples to draw from the uniform (recovery) distribution;
  // guard against dividing by zero when the averages have been reset.
  if (pf->w_slow > 0.0)
    w_diff = 1.0 - pf->w_fast / pf->w_slow;
  else
    w_diff = 0.0;
  if (w_diff < 0.0)
    w_diff = 0.0;
  //printf("w_diff: %9.6f\n", w_diff);

  // Can't (easily) combine low-variance sampler with KLD adaptive
  // sampling, so we'll take the more traditional route.
  /*
  // Low-variance resampler, taken from Probabilistic Robotics, p110
  count_inv = 1.0/set_a->sample_count;
  r = drand48() * count_inv;
  c = set_a->samples[0].weight;
  i = 0;
  m = 0;
  */
  while (set_b->sample_count < pf->max_samples)
  {
    sample_b = set_b->samples + set_b->sample_count++;

    if (drand48() < w_diff)
      sample_b->pose = (pf->random_pose_fn)(pf->random_pose_data);
    else
    {
      // Can't (easily) combine low-variance sampler with KLD adaptive
      // sampling, so we'll take the more traditional route.
      /*
      // Low-variance resampler, taken from Probabilistic Robotics, p110
      U = r + m * count_inv;
      while(U>c)
      {
        i++;
        // Handle wrap-around by resetting counters and picking a new random
        // number
        if(i >= set_a->sample_count)
        {
          r = drand48() * count_inv;
          c = set_a->samples[0].weight;
          i = 0;
          m = 0;
          U = r + m * count_inv;
          continue;
        }
        c += set_a->samples[i].weight;
      }
      m++;
      */

      // Naive discrete event sampler: draw r uniformly in [0,1) and find
      // the sample whose cumulative-weight interval [c[i], c[i+1]) contains it.
      double r;
      r = drand48();
      for (i = 0; i < set_a->sample_count; i++)
      {
        if ((c[i] <= r) && (r < c[i+1]))
          break;
      }
      assert(i < set_a->sample_count);

      sample_a = set_a->samples + i;

      assert(sample_a->weight > 0);

      // Add sample to list
      sample_b->pose = sample_a->pose;
    }

    sample_b->weight = 1.0;
    total += sample_b->weight;

    // Add sample to histogram
    pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);

    // See if we have enough samples yet
    if (set_b->sample_count > pf_resample_limit(pf, set_b->kdtree->leaf_count))
      break;
  }

  // Reset averages, to avoid spiraling off into complete randomness.
  if (w_diff > 0.0)
    pf->w_slow = pf->w_fast = 0.0;

  //fprintf(stderr, "\n\n");

  // Normalize weights
  for (i = 0; i < set_b->sample_count; i++)
  {
    sample_b = set_b->samples + i;
    sample_b->weight /= total;
  }

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set_b);

  // Use the newly created sample set
  pf->current_set = (pf->current_set + 1) % 2;

  free(c);
  return;
}


// Compute the required number of samples, given that there are k bins
// with samples in them.  This is taken directly from Fox et al.
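// The formula is the KLD-sampling population bound (Fox, "KLD-Sampling:
// Adaptive Particle Filters", NIPS 2001):
//   n = (k-1)/(2*err) * [1 - 2/(9(k-1)) + z * sqrt(2/(9(k-1)))]^3,
// i.e., the Wilson-Hilferty approximation to the chi-square quantile,
// with [err] = pf->pop_err and [z] = pf->pop_z as set in pf_alloc().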
int pf_resample_limit(pf_t *pf, int k)
{
  double a, b, c, x;
  int n;

  if (k <= 1)
    return pf->max_samples;

  a = 1;
  b = 2 / (9 * ((double) k - 1));
  c = sqrt(2 / (9 * ((double) k - 1))) * pf->pop_z;
  x = a - b + c;

  n = (int) ceil((k - 1) / (2 * pf->pop_err) * x * x * x);

  if (n < pf->min_samples)
    return pf->min_samples;
  if (n > pf->max_samples)
    return pf->max_samples;

  return n;
}


// Re-compute the cluster statistics for a sample set
void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set)
{
  int i, j, k, cidx;
  pf_sample_t *sample;
  pf_cluster_t *cluster;

  // Workspace
  double m[4], c[2][2];
  size_t count;
  double weight;

  // Cluster the samples
  pf_kdtree_cluster(set->kdtree);

  // Initialize cluster stats
  set->cluster_count = 0;

  for (i = 0; i < set->cluster_max_count; i++)
  {
    cluster = set->clusters + i;
    cluster->count = 0;
    cluster->weight = 0;
    cluster->mean = pf_vector_zero();
    cluster->cov = pf_matrix_zero();

    for (j = 0; j < 4; j++)
      cluster->m[j] = 0.0;
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->c[j][k] = 0.0;
  }

  // Initialize overall filter stats
  count = 0;
  weight = 0.0;
  set->mean = pf_vector_zero();
  set->cov = pf_matrix_zero();
  for (j = 0; j < 4; j++)
    m[j] = 0.0;
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      c[j][k] = 0.0;

  // Compute cluster stats
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;

    //printf("%d %f %f %f\n", i, sample->pose.v[0], sample->pose.v[1], sample->pose.v[2]);

    // Get the cluster label for this sample
    cidx = pf_kdtree_get_cluster(set->kdtree, sample->pose);
    assert(cidx >= 0);
    if (cidx >= set->cluster_max_count)
      continue;
    if (cidx + 1 > set->cluster_count)
      set->cluster_count = cidx + 1;

    cluster = set->clusters + cidx;

    cluster->count += 1;
    cluster->weight += sample->weight;

    count += 1;
    weight += sample->weight;

    // Compute mean; the heading is accumulated as (cos, sin) so it
    // averages correctly on the circle.
    cluster->m[0] += sample->weight * sample->pose.v[0];
    cluster->m[1] += sample->weight * sample->pose.v[1];
    cluster->m[2] += sample->weight * cos(sample->pose.v[2]);
    cluster->m[3] += sample->weight * sin(sample->pose.v[2]);

    m[0] += sample->weight * sample->pose.v[0];
    m[1] += sample->weight * sample->pose.v[1];
    m[2] += sample->weight * cos(sample->pose.v[2]);
    m[3] += sample->weight * sin(sample->pose.v[2]);

    // Compute covariance in linear components
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
      {
        cluster->c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
        c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
      }
  }

  // Normalize
  for (i = 0; i < set->cluster_count; i++)
  {
    cluster = set->clusters + i;

    cluster->mean.v[0] = cluster->m[0] / cluster->weight;
    cluster->mean.v[1] = cluster->m[1] / cluster->weight;
    cluster->mean.v[2] = atan2(cluster->m[3], cluster->m[2]);

    cluster->cov = pf_matrix_zero();

    // Covariance in linear components
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->cov.m[j][k] = cluster->c[j][k] / cluster->weight -
          cluster->mean.v[j] * cluster->mean.v[k];

    // Covariance in angular components; I think this is the correct
    // formula for circular statistics.
    cluster->cov.m[2][2] = -2 * log(sqrt(cluster->m[2] * cluster->m[2] +
                                         cluster->m[3] * cluster->m[3]));

    //printf("cluster %d %d %f (%f %f %f)\n", i, cluster->count, cluster->weight,
    //cluster->mean.v[0], cluster->mean.v[1], cluster->mean.v[2]);
    //pf_matrix_fprintf(cluster->cov, stdout, "%e");
  }

  // Compute overall filter stats
  set->mean.v[0] = m[0] / weight;
  set->mean.v[1] = m[1] / weight;
  set->mean.v[2] = atan2(m[3], m[2]);

  // Covariance in linear components
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      set->cov.m[j][k] = c[j][k] / weight - set->mean.v[j] * set->mean.v[k];

  // Covariance in angular components; I think this is the correct
  // formula for circular statistics.
  set->cov.m[2][2] = -2 * log(sqrt(m[2] * m[2] + m[3] * m[3]));

  return;
}


// Compute the CEP statistics (mean and variance).
void pf_get_cep_stats(pf_t *pf, pf_vector_t *mean, double *var)
{
  int i;
  double mn, mx, my, mrr;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  set = pf->sets + pf->current_set;

  mn = 0.0;
  mx = 0.0;
  my = 0.0;
  mrr = 0.0;

  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;

    mn += sample->weight;
    mx += sample->weight * sample->pose.v[0];
    my += sample->weight * sample->pose.v[1];
    mrr += sample->weight * sample->pose.v[0] * sample->pose.v[0];
    mrr += sample->weight * sample->pose.v[1] * sample->pose.v[1];
  }

  mean->v[0] = mx / mn;
  mean->v[1] = my / mn;
  mean->v[2] = 0.0;

  *var = mrr / mn - (mx * mx / (mn * mn) + my * my / (mn * mn));

  return;
}


// Get the statistics for a particular cluster.
int pf_get_cluster_stats(pf_t *pf, int clabel, double *weight,
                         pf_vector_t *mean, pf_matrix_t *cov)
{
  pf_sample_set_t *set;
  pf_cluster_t *cluster;

  set = pf->sets + pf->current_set;

  if (clabel < 0 || clabel >= set->cluster_count)
    return 0;
  cluster = set->clusters + clabel;

  *weight = cluster->weight;
  *mean = cluster->mean;
  *cov = cluster->cov;

  return 1;
}
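Usage sketch (not part of pf.c): a minimal, hypothetical update loop showing how the API above fits together. The callback names (my_random_pose, my_action_model, my_sensor_model), the odometry/scan data pointers, and the parameter values (100-10000 samples, alpha_slow = 0.001, alpha_fast = 0.1) are illustrative assumptions, not values taken from this file; the callback signatures are inferred from how pf.c invokes them.

#include <stdio.h>
#include "pf.h"

/* Hypothetical callbacks; a real driver would implement these against
 * its map, odometry, and range data. */
extern pf_vector_t my_random_pose(void *data);
extern void my_action_model(void *data, pf_sample_set_t *set);
extern double my_sensor_model(void *data, pf_sample_set_t *set);

void example_update(void *odom_data, void *scan_data)
{
  pf_t *pf;
  pf_vector_t mean, pose;
  pf_matrix_t cov, pose_cov;
  double weight;

  /* Sample-count bounds and decay rates here are illustrative guesses. */
  pf = pf_alloc(100, 10000, 0.001, 0.1, my_random_pose, NULL);

  /* Start from a Gaussian blob around the origin. */
  mean = pf_vector_zero();
  cov = pf_matrix_zero();
  cov.m[0][0] = cov.m[1][1] = 0.5;  /* x, y variance (m^2) */
  cov.m[2][2] = 0.1;                /* heading variance (rad^2) */
  pf_init(pf, mean, cov);

  /* One predict / correct / resample cycle per odometry+scan pair. */
  pf_update_action(pf, my_action_model, odom_data);
  pf_update_sensor(pf, my_sensor_model, scan_data);
  pf_update_resample(pf);

  /* Cluster 0's mean is one candidate pose estimate. */
  if (pf_get_cluster_stats(pf, 0, &weight, &pose, &pose_cov))
    printf("pose: %f %f %f\n", pose.v[0], pose.v[1], pose.v[2]);

  pf_free(pf);
}

In Player's amcl driver the sensor callback weights each sample against the current scan, and pf_update_resample() then grows or shrinks the population according to pf_resample_limit().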