/*
 * Player - One Hell of a Robot Server
 * Copyright (C) 2000 Brian Gerkey & Kasper Stoy
 * gerkey@usc.edu kaspers@robotics.usc.edu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
/**************************************************************************
 * Desc: Simple particle filter for localization.
 * Author: Andrew Howard
 * Date: 10 Dec 2002
 * CVS: $Id: pf.c 6345 2008-04-17 01:36:39Z gerkey $
 *************************************************************************/

#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>

#include "amcl/pf/pf.h"
#include "amcl/pf/pf_pdf.h"
#include "amcl/pf/pf_kdtree.h"


// Compute the required number of samples, given that there are k bins
// with samples in them.
static int pf_resample_limit(pf_t *pf, int k);



// Create a new filter
pf_t *pf_alloc(int min_samples, int max_samples,
               double alpha_slow, double alpha_fast,
               pf_init_model_fn_t random_pose_fn, void *random_pose_data)
{
  int i, j;
  pf_t *pf;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  srand48(time(NULL));

  pf = calloc(1, sizeof(pf_t));

  pf->random_pose_fn = random_pose_fn;
  pf->random_pose_data = random_pose_data;

  pf->min_samples = min_samples;
  pf->max_samples = max_samples;

  // Control parameters for the population size calculation.  [err] is
  // the max error between the true distribution and the estimated
  // distribution.  [z] is the upper standard normal quantile for (1 - p),
  // where p is the probability that the error on the estimated
  // distribution will be less than [err].
  pf->pop_err = 0.01;
  pf->pop_z = 3;
  pf->dist_threshold = 0.5;
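  // Note: with the defaults above, pf_resample_limit() (below) chooses the
  // population size so that, with confidence ~0.999 (pop_z = 3 being the upper
  // standard normal quantile), the KL-divergence between the sampled and the
  // true distribution stays below pop_err = 0.01.  dist_threshold is the
  // largest per-axis deviation from the population mean (presumably in
  // meters) that pf_update_converged() still accepts as "converged".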

  // Number of leaf nodes is never higher than the max number of samples
  pf->limit_cache = calloc(max_samples, sizeof(int));

  pf->current_set = 0;
  for (j = 0; j < 2; j++)
  {
    set = pf->sets + j;

    set->sample_count = max_samples;
    set->samples = calloc(max_samples, sizeof(pf_sample_t));

    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      sample->pose.v[0] = 0.0;
      sample->pose.v[1] = 0.0;
      sample->pose.v[2] = 0.0;
      sample->weight = 1.0 / max_samples;
    }

    // HACK: is 3 times max_samples enough?
    set->kdtree = pf_kdtree_alloc(3 * max_samples);

    set->cluster_count = 0;
    set->cluster_max_count = max_samples;
    set->clusters = calloc(set->cluster_max_count, sizeof(pf_cluster_t));

    set->mean = pf_vector_zero();
    set->cov = pf_matrix_zero();
  }

  pf->w_slow = 0.0;
  pf->w_fast = 0.0;

  pf->alpha_slow = alpha_slow;
  pf->alpha_fast = alpha_fast;

  // Set converged to 0
  pf_init_converged(pf);

  return pf;
}

// Free an existing filter
void pf_free(pf_t *pf)
{
  int i;

  free(pf->limit_cache);

  for (i = 0; i < 2; i++)
  {
    free(pf->sets[i].clusters);
    pf_kdtree_free(pf->sets[i].kdtree);
    free(pf->sets[i].samples);
  }
  free(pf);

  return;
}

// Initialize the filter using a Gaussian
void pf_init(pf_t *pf, pf_vector_t mean, pf_matrix_t cov)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;
  pf_pdf_gaussian_t *pdf;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  pdf = pf_pdf_gaussian_alloc(mean, cov);

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = pf_pdf_gaussian_sample(pdf);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  pf_pdf_gaussian_free(pdf);

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  // Set converged to 0
  pf_init_converged(pf);

  return;
}


// Initialize the filter using some model
void pf_init_model(pf_t *pf, pf_init_model_fn_t init_fn, void *init_data)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = (*init_fn) (init_data);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  // Set converged to 0
  pf_init_converged(pf);

  return;
}

// Mark the current sample set (and the filter) as not converged
void pf_init_converged(pf_t *pf)
{
  pf_sample_set_t *set;
  set = pf->sets + pf->current_set;
  set->converged = 0;
  pf->converged = 0;
}

// Check whether the sample set has converged: every sample must lie within
// dist_threshold of the mean in both x and y
int pf_update_converged(pf_t *pf)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;
  double total;

  set = pf->sets + pf->current_set;
  double mean_x = 0, mean_y = 0;

  for (i = 0; i < set->sample_count; i++){
    sample = set->samples + i;

    mean_x += sample->pose.v[0];
    mean_y += sample->pose.v[1];
  }
  mean_x /= set->sample_count;
  mean_y /= set->sample_count;

  for (i = 0; i < set->sample_count; i++){
    sample = set->samples + i;
    if(fabs(sample->pose.v[0] - mean_x) > pf->dist_threshold ||
       fabs(sample->pose.v[1] - mean_y) > pf->dist_threshold){
      set->converged = 0;
      pf->converged = 0;
      return 0;
    }
  }
  set->converged = 1;
  pf->converged = 1;
  return 1;
}

// Update the filter with some new action
void pf_update_action(pf_t *pf, pf_action_model_fn_t action_fn, void *action_data)
{
  pf_sample_set_t *set;

  set = pf->sets + pf->current_set;

  (*action_fn) (action_data, set);

  return;
}


#include <float.h>
// Update the filter with some new sensor observation
void pf_update_sensor(pf_t *pf, pf_sensor_model_fn_t sensor_fn, void *sensor_data)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;
  double total;

  set = pf->sets + pf->current_set;

  // Compute the sample weights
  total = (*sensor_fn) (sensor_data, set);

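  // n_effective tracks the effective sample size, N_eff = 1 / sum_i(w_i^2)
  // for normalized weights: it is accumulated as sum(w_i^2) below and
  // inverted at the end of this function.  A small N_eff indicates that the
  // weight has concentrated on a few particles, which is what the selective
  // resampling test in pf_update_resample() looks for.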
  set->n_effective = 0;

  if (total > 0.0)
  {
    // Normalize weights
    double w_avg = 0.0;
    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      w_avg += sample->weight;
      sample->weight /= total;
      set->n_effective += sample->weight*sample->weight;
    }
    // Update running averages of likelihood of samples (Prob Rob p258)
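    // (w_slow and w_fast are exponential filters on w_avg with
    // alpha_slow << alpha_fast; the book suggests values on the order of
    // alpha_slow = 0.001 and alpha_fast = 0.1.)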
    w_avg /= set->sample_count;
    if(pf->w_slow == 0.0)
      pf->w_slow = w_avg;
    else
      pf->w_slow += pf->alpha_slow * (w_avg - pf->w_slow);
    if(pf->w_fast == 0.0)
      pf->w_fast = w_avg;
    else
      pf->w_fast += pf->alpha_fast * (w_avg - pf->w_fast);
    //printf("w_avg: %e slow: %e fast: %e\n",
    //       w_avg, pf->w_slow, pf->w_fast);
  }
  else
  {
    // Handle zero total
    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      sample->weight = 1.0 / set->sample_count;
    }
  }

  set->n_effective = 1.0/set->n_effective;
  return;
}

// Copy set a to set b
void copy_set(pf_sample_set_t* set_a, pf_sample_set_t* set_b)
{
  int i;
  double total;
  pf_sample_t *sample_a, *sample_b;

  // Clean set b's kdtree
  pf_kdtree_clear(set_b->kdtree);

  // Copy samples from set a to create set b
  total = 0;
  set_b->sample_count = 0;

  for(i = 0; i < set_a->sample_count; i++)
  {
    sample_b = set_b->samples + set_b->sample_count++;

    sample_a = set_a->samples + i;

    assert(sample_a->weight > 0);

    // Copy sample a to sample b
    sample_b->pose = sample_a->pose;
    sample_b->weight = sample_a->weight;

    total += sample_b->weight;

    // Add sample to histogram
    pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);
  }

  // Normalize weights
  for (i = 0; i < set_b->sample_count; i++)
  {
    sample_b = set_b->samples + i;
    sample_b->weight /= total;
  }

  set_b->converged = set_a->converged;
}

// Resample the distribution
void pf_update_resample(pf_t *pf)
{
  int i;
  double total;
  pf_sample_set_t *set_a, *set_b;
  pf_sample_t *sample_a, *sample_b;

  //double r,c,U;
  //int m;
  //double count_inv;
  double* c;

  double w_diff;

  set_a = pf->sets + pf->current_set;
  set_b = pf->sets + (pf->current_set + 1) % 2;

  if (pf->selective_resampling != 0)
  {
    if (set_a->n_effective > 0.5*(set_a->sample_count))
    {
      // copy set a to b
      copy_set(set_a,set_b);

      // Re-compute cluster statistics
      pf_cluster_stats(pf, set_b);

      // Use the newly created sample set
      pf->current_set = (pf->current_set + 1) % 2;
      return;
    }
  }

  // Build up cumulative probability table for resampling.
  // TODO: Replace this with a more efficient procedure
  // (e.g., http://www.network-theory.co.uk/docs/gslref/GeneralDiscreteDistributions.html)
  c = (double*)malloc(sizeof(double)*(set_a->sample_count+1));
  c[0] = 0.0;
  for(i=0;i<set_a->sample_count;i++)
    c[i+1] = c[i]+set_a->samples[i].weight;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set_b->kdtree);

  // Draw samples from set a to create set b.
  total = 0;
  set_b->sample_count = 0;

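  // Augmented MCL (see the "Prob Rob" reference above): w_diff, computed
  // below as max(0, 1 - w_fast / w_slow), is the probability of injecting a
  // random pose instead of drawing from set a.  It grows when the short-term
  // average likelihood w_fast falls well below the long-term average w_slow,
  // i.e. when the filter has probably diverged.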
  w_diff = 1.0 - pf->w_fast / pf->w_slow;
  if(w_diff < 0.0)
    w_diff = 0.0;
  //printf("w_diff: %9.6f\n", w_diff);

  // Can't (easily) combine low-variance sampler with KLD adaptive
  // sampling, so we'll take the more traditional route.
  /*
  // Low-variance resampler, taken from Probabilistic Robotics, p110
  count_inv = 1.0/set_a->sample_count;
  r = drand48() * count_inv;
  c = set_a->samples[0].weight;
  i = 0;
  m = 0;
  */
  while(set_b->sample_count < pf->max_samples)
  {
    sample_b = set_b->samples + set_b->sample_count++;

    if(drand48() < w_diff)
      sample_b->pose = (pf->random_pose_fn)(pf->random_pose_data);
    else
    {
      // Can't (easily) combine low-variance sampler with KLD adaptive
      // sampling, so we'll take the more traditional route.
      /*
      // Low-variance resampler, taken from Probabilistic Robotics, p110
      U = r + m * count_inv;
      while(U>c)
      {
        i++;
        // Handle wrap-around by resetting counters and picking a new random
        // number
        if(i >= set_a->sample_count)
        {
          r = drand48() * count_inv;
          c = set_a->samples[0].weight;
          i = 0;
          m = 0;
          U = r + m * count_inv;
          continue;
        }
        c += set_a->samples[i].weight;
      }
      m++;
      */

      // Naive discrete event sampler
      double r;
      r = drand48();
      for(i=0;i<set_a->sample_count;i++)
      {
        if((c[i] <= r) && (r < c[i+1]))
          break;
      }
      assert(i<set_a->sample_count);

      sample_a = set_a->samples + i;

      assert(sample_a->weight > 0);

      // Add sample to list
      sample_b->pose = sample_a->pose;
    }

    sample_b->weight = 1.0;
    total += sample_b->weight;

    // Add sample to histogram
    pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);

    // See if we have enough samples yet
    if (set_b->sample_count > pf_resample_limit(pf, set_b->kdtree->leaf_count))
      break;
  }

  // Reset averages, to avoid spiraling off into complete randomness.
  if(w_diff > 0.0)
    pf->w_slow = pf->w_fast = 0.0;

  //fprintf(stderr, "\n\n");

  // Normalize weights
  for (i = 0; i < set_b->sample_count; i++)
  {
    sample_b = set_b->samples + i;
    sample_b->weight /= total;
  }

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set_b);

  // Use the newly created sample set
  pf->current_set = (pf->current_set + 1) % 2;

  pf_update_converged(pf);

  free(c);
  return;
}


// Compute the required number of samples, given that there are k bins
// with samples in them.  This is taken directly from Fox et al.
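// The bound used: with probability p (pop_z is the upper standard normal
// quantile for 1 - p), the K-L divergence between the sampled distribution
// and the true distribution stays below pop_err when
//
//   n = (k - 1) / (2 * pop_err) * (1 - 2/(9(k-1)) + sqrt(2/(9(k-1))) * pop_z)^3
//
// i.e. the Wilson-Hilferty approximation of the chi-square quantile from the
// KLD-sampling derivation; the code below evaluates this expression and
// clamps the result to [min_samples, max_samples].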
int pf_resample_limit(pf_t *pf, int k)
{
  double a, b, c, x;
  int n;

  // Return max_samples in case k is outside the expected range; this
  // shouldn't happen, but is added to prevent any runtime errors
  if (k < 1 || k > pf->max_samples)
    return pf->max_samples;

  // Return value if cache is valid, which means value is non-zero positive
  if (pf->limit_cache[k-1] > 0)
    return pf->limit_cache[k-1];

  if (k == 1)
  {
    pf->limit_cache[k-1] = pf->max_samples;
    return pf->max_samples;
  }

  a = 1;
  b = 2 / (9 * ((double) k - 1));
  c = sqrt(2 / (9 * ((double) k - 1))) * pf->pop_z;
  x = a - b + c;

  n = (int) ceil((k - 1) / (2 * pf->pop_err) * x * x * x);

  if (n < pf->min_samples)
  {
    pf->limit_cache[k-1] = pf->min_samples;
    return pf->min_samples;
  }
  if (n > pf->max_samples)
  {
    pf->limit_cache[k-1] = pf->max_samples;
    return pf->max_samples;
  }

  pf->limit_cache[k-1] = n;
  return n;
}


// Re-compute the cluster statistics for a sample set
void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set)
{
  int i, j, k, cidx;
  pf_sample_t *sample;
  pf_cluster_t *cluster;

  // Workspace
  double m[4], c[2][2];
  size_t count;
  double weight;

  // Cluster the samples
  pf_kdtree_cluster(set->kdtree);

  // Initialize cluster stats
  set->cluster_count = 0;

  for (i = 0; i < set->cluster_max_count; i++)
  {
    cluster = set->clusters + i;
    cluster->count = 0;
    cluster->weight = 0;
    cluster->mean = pf_vector_zero();
    cluster->cov = pf_matrix_zero();

    for (j = 0; j < 4; j++)
      cluster->m[j] = 0.0;
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->c[j][k] = 0.0;
  }

  // Initialize overall filter stats
  count = 0;
  weight = 0.0;
  set->mean = pf_vector_zero();
  set->cov = pf_matrix_zero();
  for (j = 0; j < 4; j++)
    m[j] = 0.0;
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      c[j][k] = 0.0;

  // Compute cluster stats
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;

    //printf("%d %f %f %f\n", i, sample->pose.v[0], sample->pose.v[1], sample->pose.v[2]);

    // Get the cluster label for this sample
    cidx = pf_kdtree_get_cluster(set->kdtree, sample->pose);
    assert(cidx >= 0);
    if (cidx >= set->cluster_max_count)
      continue;
    if (cidx + 1 > set->cluster_count)
      set->cluster_count = cidx + 1;

    cluster = set->clusters + cidx;

    cluster->count += 1;
    cluster->weight += sample->weight;

    count += 1;
    weight += sample->weight;

    // Compute mean
    cluster->m[0] += sample->weight * sample->pose.v[0];
    cluster->m[1] += sample->weight * sample->pose.v[1];
    cluster->m[2] += sample->weight * cos(sample->pose.v[2]);
    cluster->m[3] += sample->weight * sin(sample->pose.v[2]);

    m[0] += sample->weight * sample->pose.v[0];
    m[1] += sample->weight * sample->pose.v[1];
    m[2] += sample->weight * cos(sample->pose.v[2]);
    m[3] += sample->weight * sin(sample->pose.v[2]);

    // Compute covariance in linear components
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
      {
        cluster->c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
        c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
      }
  }

  // Normalize
  for (i = 0; i < set->cluster_count; i++)
  {
    cluster = set->clusters + i;

    cluster->mean.v[0] = cluster->m[0] / cluster->weight;
    cluster->mean.v[1] = cluster->m[1] / cluster->weight;
    cluster->mean.v[2] = atan2(cluster->m[3], cluster->m[2]);

    cluster->cov = pf_matrix_zero();

    // Covariance in linear components
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->cov.m[j][k] = cluster->c[j][k] / cluster->weight -
          cluster->mean.v[j] * cluster->mean.v[k];

    // Covariance in angular components; I think this is the correct
    // formula for circular statistics.
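    // With R = sqrt(m[2]^2 + m[3]^2) / weight the mean resultant length of
    // the weighted heading vectors, circular statistics estimate the angular
    // variance as sigma^2 = -2 ln(R); the statement below applies this
    // formula to the accumulated sums m[2] and m[3].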
    cluster->cov.m[2][2] = -2 * log(sqrt(cluster->m[2] * cluster->m[2] +
                                         cluster->m[3] * cluster->m[3]));

    //printf("cluster %d %d %f (%f %f %f)\n", i, cluster->count, cluster->weight,
    //       cluster->mean.v[0], cluster->mean.v[1], cluster->mean.v[2]);
    //pf_matrix_fprintf(cluster->cov, stdout, "%e");
  }

  // Compute overall filter stats
  set->mean.v[0] = m[0] / weight;
  set->mean.v[1] = m[1] / weight;
  set->mean.v[2] = atan2(m[3], m[2]);

  // Covariance in linear components
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      set->cov.m[j][k] = c[j][k] / weight - set->mean.v[j] * set->mean.v[k];

  // Covariance in angular components; I think this is the correct
  // formula for circular statistics.
  set->cov.m[2][2] = -2 * log(sqrt(m[2] * m[2] + m[3] * m[3]));

  return;
}

void pf_set_selective_resampling(pf_t *pf, int selective_resampling)
{
  pf->selective_resampling = selective_resampling;
}

// Compute the CEP statistics (mean and variance).
void pf_get_cep_stats(pf_t *pf, pf_vector_t *mean, double *var)
{
  int i;
  double mn, mx, my, mrr;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  set = pf->sets + pf->current_set;

  mn = 0.0;
  mx = 0.0;
  my = 0.0;
  mrr = 0.0;

  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;

    mn += sample->weight;
    mx += sample->weight * sample->pose.v[0];
    my += sample->weight * sample->pose.v[1];
    mrr += sample->weight * sample->pose.v[0] * sample->pose.v[0];
    mrr += sample->weight * sample->pose.v[1] * sample->pose.v[1];
  }

  mean->v[0] = mx / mn;
  mean->v[1] = my / mn;
  mean->v[2] = 0.0;

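  // CEP-style spread: *var = E[x^2 + y^2] - (E[x]^2 + E[y]^2), i.e. the sum
  // of the weighted variances of x and y about the weighted mean.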
  *var = mrr / mn - (mx * mx / (mn * mn) + my * my / (mn * mn));

  return;
}


// Get the statistics for a particular cluster.
int pf_get_cluster_stats(pf_t *pf, int clabel, double *weight,
                         pf_vector_t *mean, pf_matrix_t *cov)
{
  pf_sample_set_t *set;
  pf_cluster_t *cluster;

  set = pf->sets + pf->current_set;

  if (clabel >= set->cluster_count)
    return 0;
  cluster = set->clusters + clabel;

  *weight = cluster->weight;
  *mean = cluster->mean;
  *cov = cluster->cov;

  return 1;
}
