SRIleastSquares.cpp
//==============================================================================
//
//  This file is part of GNSSTk, the ARL:UT GNSS Toolkit.
//
//  The GNSSTk is free software; you can redistribute it and/or modify
//  it under the terms of the GNU Lesser General Public License as published
//  by the Free Software Foundation; either version 3.0 of the License, or
//  any later version.
//
//  The GNSSTk is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//  GNU Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with GNSSTk; if not, write to the Free Software Foundation,
//  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
//
//  This software was developed by Applied Research Laboratories at the
//  University of Texas at Austin.
//  Copyright 2004-2022, The Board of Regents of The University of Texas System
//
//==============================================================================

//==============================================================================
//
//  This software was developed by Applied Research Laboratories at the
//  University of Texas at Austin, under contract to an agency or agencies
//  within the U.S. Department of Defense. The U.S. Government retains all
//  rights to use, duplicate, distribute, disclose, or release this software.
//
//  Pursuant to DoD Directive 523024
//
//  DISTRIBUTION STATEMENT A: This software has been approved for public
//  release, distribution is unlimited.
//
//==============================================================================

//------------------------------------------------------------------------------------
// GNSSTk includes
#include "SRIleastSquares.hpp"
#include "RobustStats.hpp"
#include "StringUtils.hpp"

//------------------------------------------------------------------------------------
using namespace std;

namespace gnsstk
{
   using namespace StringUtils;

   //---------------------------------------------------------------------------------
   // empty constructor
   SRIleastSquares::SRIleastSquares() { defaults(); }

   //---------------------------------------------------------------------------------
   // constructor given the dimension N.
   SRIleastSquares::SRIleastSquares(const unsigned int N)
   {
      defaults();
      R = Matrix<double>(N, N, 0.0);
      Z = Vector<double>(N, 0.0);
      names = Namelist(N);
   }

   //---------------------------------------------------------------------------------
   // constructor given a Namelist, its dimension determines the SRI dimension.
   SRIleastSquares::SRIleastSquares(const Namelist& NL)
   {
      defaults();
      if (NL.size() <= 0)
      {
         return;
      }
      R = Matrix<double>(NL.size(), NL.size(), 0.0);
      Z = Vector<double>(NL.size(), 0.0);
      names = NL;
   }

   //---------------------------------------------------------------------------------
   // explicit constructor - throw if the dimensions are inconsistent.
   SRIleastSquares::SRIleastSquares(const Matrix<double>& Rin,
                                    const Vector<double>& Zin,
                                    const Namelist& NLin)
   {
      defaults();
      if (Rin.rows() != Rin.cols() || Rin.rows() != Zin.size() ||
          Rin.rows() != NLin.size())
      {
         MatrixException me("Invalid input dimensions: R is " +
                            asString<int>(Rin.rows()) + "x" +
                            asString<int>(Rin.cols()) + ", Z has length " +
                            asString<int>(Zin.size()) + ", and NL has length " +
                            asString<int>(NLin.size()));
         GNSSTK_THROW(me);
      }
      R = Rin;
      Z = Zin;
      names = NLin;
   }

   //---------------------------------------------------------------------------------
   // operator=
   SRIleastSquares& SRIleastSquares::operator=(const SRIleastSquares& right)
   {
      R = right.R;
      Z = right.Z;
      names = right.names;
      iterationsLimit = right.iterationsLimit;
      convergenceLimit = right.convergenceLimit;
      divergenceLimit = right.divergenceLimit;
      doWeight = right.doWeight;
      doRobust = right.doRobust;
      doLinearize = right.doLinearize;
      doSequential = right.doSequential;
      doVerbose = right.doVerbose;
      valid = right.valid;
      numberIterations = right.numberIterations;
      numberBatches = right.numberBatches;
      rmsConvergence = right.rmsConvergence;
      conditionNum = right.conditionNum;
      Xsave = right.Xsave;
      return *this;
   }

   //---------------------------------------------------------------------------------
   /* SRI least squares update (not the Kalman measurement update).
      Given data and measurement covariance, compute a solution and
      covariance using the appropriate least squares algorithm.
      @param D   Data vector, length M.
                 Input:  raw data
                 Output: post-fit residuals
      @param X   Solution vector, length N.
                 Input:  nominal solution X0 (zero when doLinearize is false)
                 Output: final solution
      @param Cov Covariance matrix, dimension (N,N).
                 Input:  (if doWeight is true) inverse measurement covariance
                         or weight matrix (M,M)
                 Output: solution covariance matrix (N,N)
      @param LSF Pointer to a function which is used to define the equation to
                 be solved. LSF arguments are:
                    X  nominal solution (input)
                    f  values of the equation f(X), length M (output)
                    P  partials matrix df/dX evaluated at X, dimension (M,N)
                       (output)
                 When doLinearize is false, LSF should ignore X and return the
                 (constant) partials matrix in P and zero in f.
      @return  0  ok
              -1  problem is underdetermined (M < N)  // TD -- naturalized sol?
              -2  problem is singular
              -3  algorithm failed to converge
              -4  algorithm diverged

      Reference for robust least squares: Mason, Gunst and Hess,
      "Statistical Design and Analysis of Experiments," Wiley, New York, 1989,
      pg 593.

      Notes on the algorithm:
      Least squares, including linearized (iterative) and sequential processing.
      This class will solve the equation f(X) = D, a vector equation in which
      the solution vector X is of length N and the data vector D is of length
      M. The function f(X) may be linear, in which case it is of the form
      P*X = D where P is a constant matrix, or non-linear, in which case it
      will be linearized by expanding about a given nominal solution X0:

         df |
         -- |      * dX = D - f(X0),
         dX |X=X0

      where dX is defined as (X-X0), the new solution is X, and the partials
      matrix is P = (df/dX)|X=X0. Dimensions are P(M,N)*dX(N) = D(M) - f(X0)(M).
      Linearized problems are iterated until the solution converges (stops
      changing).

      The solution may be weighted by a measurement covariance matrix MCov,
      or weight matrix W (in which case MCov = inverse(W)). MCov must be
      non-singular.

      Options are to make the algorithm linearized (via the boolean input
      variable doLinearize) and/or sequential (doSequential).

      - Linearized. When doLinearize is true, the algorithm solves the
      linearized version of the measurement equation (see above), rather than
      the simple linear version P*X = D. Also when doLinearize is true, the
      code will iterate (repeat until convergence) the linearized algorithm;
      if you don't want to iterate, set the limit on the number of iterations
      to zero. NB in this case a solution must be found for each nominal
      solution (i.e. the information matrix must be non-singular); otherwise
      there can be no iteration.

      - Sequential. When doSequential is true, the class will save the
      accumulated information from all the calls to this routine since the
      last reset() within the class. This means the resulting solution is
      determined by ALL the data fed to the class since the last reset(). In
      this case the data is fed to the algorithm in 'batches', which may be
      of any size.

      NB When doLinearize is true, the information stored in the class has a
      different interpretation than it does in the linear case. Calling
      Solve(X,Cov) will NOT give the solution vector X, but rather the latest
      update (X-X0) = (X-Xsave).

      NB In the linear case, the result you get from sequentially processing
      a large dataset in many small batches is identical to what you would
      get by processing all the data in one big batch. This is NOT true in
      the linearized case, because the information at each batch is dependent
      on the nominal state. See the next comment.

      NB Sequential, linearized LS really makes sense only when the state is
      changing. It is difficult to get a good solution in this case with
      small batches, because the stored information is dependent on the
      (final) state solution at each batch. Start with a good nominal state,
      or with a large batch of data that will produce one.

      The general least squares algorithm is:
         0.  Set i = 0.
         1.  If non-sequential, or if this is the first call, set R = z = 0
             (however, doing this prevents you from adding apriori/constraint
             information).
         2.  Let X = X0 (X0 = initial nominal solution - input); if linear, X0 == 0.
         3.  Save SRIsave = SRI and X0save = X0 (SRI is the pair R,z).
         4.  Start iteration i here.
         5.  Increment the number of iterations i.
         6.  Compute partials matrix P and f(X0) by calling LSF(X0,f,P);
             if linear, LSF returns the constant P and f(X0) = 0.
         7.  Set R = SRIsave.R + P(T)*inverse(MCov)*P        (T means transpose).
         8.  Set z = SRIsave.z + P(T)*inverse(MCov)*(D-f(X0)).
         9.  [The measurement equation is now P*DX = D - f(X0), where
             DX = (X-X0save); in the linear case it is P*X = D and DX = X.]
         10. Solve z = R*DX to get Cov = inverse(R) and DX = inverse(R)*z.
         11. Set X = X0save + DX [or, in the linear case, X = DX].
         12. Compute the RMS change in X: rms = ||X-X0||/N (not X-X0save).
         13. If linear, goto quit [else linearized].
         14. If rms > divergence limit, goto quit (failure).
         15. If i > 1 and rms < convergence limit, goto quit (success).
         16. If i (number of iterations) >= iteration limit, goto quit (failure).
         17. Set X0 = X.
         18. Return to step 5.
         19. quit: if (sequential and failed) set SRI = SRIsave.

      From the code:
         1a.  Save SRI (i.e. R,Z) in Rapriori, Zapriori.
         2a.  If non-sequential, or if this is the first call, set R=z=0 -- DON'T.
         3a.  If sequential and not the first call, X = Xsave.
         4a.  If linear, X0 = 0; else X0 is input. Let NominalX = X0.
         5a.  Set numberIterations = 0.
         6a.  Start iteration.
         7a.  Increment numberIterations.
         8a.  Get partials and f from LSF using NominalX.
         9a.  If robust, compute the weight matrix.
         10a. If numberIterations > 1, restore (R,Z) = (Rapriori,Zapriori).
         11a. MU: R, Z, Partials, D - f(NominalX), MeasCov (if weighted).
         12a. Invert to get Xsol [Xsol = X - NominalX or, if linear, = X].
         13a. If linearized, add NominalX to Xsol; Xsol now == X = new estimate.
         14a. If linear and not robust, quit here.
         15a. If linearized, compute rmsConvergence = RMS(Xsol - NominalX).
         16a. If robust, recompute weights and define
              rmsConvergence = RMS(old weights - new weights).
         17a. Failed? If so, and sequential, restore (R,Z) = (Rapriori,Zapriori);
              quit.
         18a. Success? Quit.
         19a. If linearized, NominalX = Xsol; if robust, NominalX = X.
         20a. Iterate - return to 6a.
         21a. Set X = Xsol for the return value.
         22a. Save X for next time: Xsave = X. */
   int SRIleastSquares::dataUpdate(
      Vector<double>& D, Vector<double>& X, Matrix<double>& Cov,
      void(LSF)(Vector<double>& X, Vector<double>& f, Matrix<double>& P))
   {
      const int M = D.size();
      const int N = R.rows();
      if (doVerbose)
      {
         cout << "\nSRIleastSquares::leastSquaresUpdate : M,N are " << M << ","
              << N << endl;
      }

      // errors
      if (N == 0)
      {
         MatrixException me("Called with zero-sized SRIleastSquares");
         GNSSTK_THROW(me);
      }
      if (doLinearize && M < N)
      {
         MatrixException me(
            string("When linearizing, problem must not be underdetermined:\n") +
            string(" data dimension is ") + asString(M) +
            string(" while state dimension is ") + asString(N));
         GNSSTK_THROW(me);
      }
      if (doSequential && R.rows() != X.size())
      {
         MatrixException me(
            "Sequential problem has inconsistent dimensions:\n SRI is " +
            asString<int>(R.rows()) + "x" + asString<int>(R.cols()) +
            " while X has length " + asString<int>(X.size()));
         GNSSTK_THROW(me);
      }
      if (doWeight && doRobust)
      {
         MatrixException me("Cannot have doWeight and doRobust both true.");
         GNSSTK_THROW(me);
      }
      // TD disallow Robust and Linearized ? why?
      // TD disallow Robust and Sequential ? why?
      try
      {
         int i, iret;
         double big, small;
         Vector<double> f(M), Xsol(N), NominalX, Res(M), Wts(M, 1.0),
            OldWts(M, 1.0);
         Matrix<double> Partials(M, N), MeasCov(M, M);
         const Matrix<double> Rapriori(R);
         const Vector<double> Zapriori(Z);

         // save measurement covariance matrix
         if (doWeight)
         {
            MeasCov = Cov;
         }

         /* NO ... this prevents you from giving it apriori information...
            if the first time, clear the stored information
            if(!doSequential || numberBatches==0)
               zeroAll(); */

         /* if sequential and not the first call, NominalX must be the last
            solution */
         if (doSequential && numberBatches != 0)
         {
            X = Xsave;
         }

         // nominal solution
         if (!doLinearize)
         {
            if ((int)X.size() != N)
            {
               X = Vector<double>(N);
            }
            X = 0.0;
         }
         NominalX = X;

         valid = false;
         conditionNum = 0.0;
         rmsConvergence = 0.0;
         numberIterations = 0;
         iret = 0;

         // iteration loop
         do
         {
            numberIterations++;

            // call LSF to get f(NominalX) and Partials(NominalX)
            LSF(NominalX, f, Partials);

            // Res will be both pre- and post-fit data residuals
            Res = D - f;
            if (doVerbose)
            {
               cout << "\nSRIleastSquares::leastSquaresUpdate :";
               if (doLinearize || doRobust)
               {
                  cout << " Iteration " << numberIterations;
               }
               cout << endl;
               LabeledVector LNX(names, NominalX);
               LNX.message(" Nominal X:");
               cout << LNX << endl;
               cout << " Pre-fit data residuals: " << fixed << setprecision(6)
                    << Res << endl;
            }

            // build measurement covariance matrix for robust LS
            if (doRobust)
            {
               MeasCov = 0.0;
               for (i = 0; i < M; i++)
                  MeasCov(i, i) = 1.0 / (Wts(i) * Wts(i));
            }

            // restore apriori information
            if (numberIterations > 1)
            {
               R = Rapriori;
               Z = Zapriori;
            }

            // update information with simple MU
            if (doVerbose)
            {
               cout << " Meas Cov:";
               for (i = 0; i < M; i++)
                  cout << " " << MeasCov(i, i);
               cout << endl;
               cout << " Partials:\n" << Partials << endl;
            }
            /* if(doRobust || doWeight)
                  measurementUpdate(Partials,Res,MeasCov);
               else
                  measurementUpdate(Partials,Res); */
            {
               Matrix<double> P(Partials);
               Matrix<double> CHL;
               if (doRobust || doWeight)
               {
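                  // Whiten the problem: factor MeasCov = CHL * transpose(CHL)
                  // (lower Cholesky); multiplying the partials and residuals by
                  // inverse(CHL) gives transformed measurements with unit
                  // covariance, as required by the SRIF measurement update below.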
                  CHL = lowerCholesky(MeasCov);
                  Matrix<double> L = inverseLT(CHL);
                  P = L * P;
                  Res = L * Res;
               }

               // update with whitened information
               SrifMU(R, Z, P, Res);

               // un-whiten the residuals
               if (doRobust || doWeight) // NB same if above creates CHL
               {
                  Res = CHL * Res;
               }
            }

            if (doVerbose)
            {
               cout << " Updated information matrix\n"
                    << LabeledMatrix(names, R) << endl;
               cout << " Updated information vector\n"
                    << LabeledVector(names, Z) << endl;
            }

            // invert
            try
            {
               getStateAndCovariance(Xsol, Cov, &small, &big);
            }
            catch (SingularMatrixException& sme)
            {
               iret = -2;
               break;
            }
            conditionNum = big / small;
            if (doVerbose)
            {
               cout << " Condition number: " << scientific << conditionNum
                    << fixed << endl;
               cout << " Post-fit data residuals: " << fixed << setprecision(6)
                    << Res << endl;
            }

            // update X: when linearized, solution = dX
            if (doLinearize)
            {
               Xsol += NominalX;
            }
            if (doVerbose)
            {
               LabeledVector LXsol(names, Xsol);
               LXsol.message(" Updated X:");
               cout << LXsol << endl;
            }

            // linear non-robust is done..
            if (!doLinearize && !doRobust)
            {
               break;
            }

            // test for convergence of linearization
            if (doLinearize)
            {
               rmsConvergence = RMS(Xsol - NominalX);
               if (doVerbose)
               {
                  cout << " RMS convergence : " << scientific << rmsConvergence
                       << fixed << endl;
               }
            }

            // test for convergence of robust weighting, and compute new weights
            if (doRobust)
            {
               // must de-weight post-fit residuals
               LSF(Xsol, f, Partials);
               Res = D - f;

               // compute a new set of weights
               double mad, median;
               // for(mad=0.0,i=0; i<M; i++)
               //    mad += Wts(i)*Res(i)*Res(i);
               // mad = sqrt(mad)/sqrt(Robust::TuningA*(M-1));
               mad = Robust::MedianAbsoluteDeviation(&(Res[0]), Res.size(),
                                                     median);

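               // Huber-type reweighting: residuals within +/- RobustTuningT*mad
               // keep unit weight; larger residuals are down-weighted so their
               // effective (weighted) magnitude is capped at RobustTuningT*mad.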
               OldWts = Wts;
               for (i = 0; i < M; i++)
               {
                  if (Res(i) < -RobustTuningT * mad)
                  {
                     Wts(i) = -RobustTuningT * mad / Res(i);
                  }
                  else if (Res(i) > RobustTuningT * mad)
                  {
                     Wts(i) = RobustTuningT * mad / Res(i);
                  }
                  else
                  {
                     Wts(i) = 1.0;
                  }
               }

               // test for convergence
               rmsConvergence = RMS(OldWts - Wts);
               if (doVerbose)
               {
                  cout << " Convergence: " << scientific << setprecision(3)
                       << rmsConvergence << endl;
               }
            }

            // failures
            if (rmsConvergence > divergenceLimit)
            {
               iret = -4;
            }
            if (numberIterations >= iterationsLimit)
            {
               iret = -3;
            }
            if (iret)
            {
               if (doSequential)
               {
                  R = Rapriori;
                  Z = Zapriori;
               }
               break;
            }

            // success
            if (numberIterations > 1 && rmsConvergence < convergenceLimit)
            {
               break;
            }

            // prepare for another iteration
            if (doLinearize)
            {
               NominalX = Xsol;
            }
            if (doRobust)
            {
               NominalX = X;
            }

         } while (1); // end iteration loop

         numberBatches++;
         if (doVerbose)
         {
            cout << "Return from SRIleastSquares::leastSquaresUpdate\n\n";
         }

         if (iret)
         {
            return iret;
         }
         valid = true;

         // output the solution
         Xsave = X = Xsol;

         // put residuals of fit into data vector, or weights if Robust
         if (doRobust)
         {
            D = OldWts;
         }
         else
         {
            D = Res;
         }

         return iret;
      }
      catch (Exception& e)
      {
         GNSSTK_RETHROW(e);
      }
   }

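   //---------------------------------------------------------------------------------
   /* Illustrative sketch (not part of the library): a minimal linear,
      non-sequential use of dataUpdate(). It assumes the dataUpdate signature
      documented above and that the doLinearize and doSequential switches are
      public members; the names exampleLSF and exampleLinearFit are
      hypothetical and exist only to show the calling pattern.

         // Linear model D = P*X: fit a line y = a + b*t to three data points.
         void exampleLSF(Vector<double>& X, Vector<double>& f, Matrix<double>& P)
         {
            P = Matrix<double>(3, 2, 0.0);
            P(0, 0) = 1.0; P(0, 1) = 1.0;   // t = 1
            P(1, 0) = 1.0; P(1, 1) = 2.0;   // t = 2
            P(2, 0) = 1.0; P(2, 1) = 3.0;   // t = 3
            f = Vector<double>(3, 0.0);     // zero, since the model is linear
         }

         int exampleLinearFit()
         {
            SRIleastSquares est(2);         // two-element state (a, b)
            est.doLinearize = false;        // simple linear solve P*X = D
            est.doSequential = false;

            Vector<double> D(3), X(2, 0.0); // data; X is ignored when linear
            Matrix<double> Cov(2, 2);
            D(0) = 2.1; D(1) = 3.0; D(2) = 4.2;

            int iret = est.dataUpdate(D, X, Cov, exampleLSF);
            // On success (iret == 0): X ~ (1.0, 1.05), Cov holds the solution
            // covariance, and D now holds the post-fit residuals.
            return iret;
         }
   */
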
   //---------------------------------------------------------------------------------
   // output operator
   ostream& operator<<(ostream& os, const SRIleastSquares& srif)
   {
      Namelist NL(srif.names);
      NL += string("State");
      Matrix<double> A;
      A = srif.R || srif.Z;
      LabeledMatrix LM(NL, A);
      LM.setw(os.width());
      LM.setprecision(os.precision());
      os << LM;
      return os;
   }

   //---------------------------------------------------------------------------------
   // reset the computation, i.e. remove all stored information
   void SRIleastSquares::zeroAll()
   {
      SRI::zeroAll();
      Xsave = 0.0;
      numberBatches = 0;
   }

   //---------------------------------------------------------------------------------
   /* reset the computation, i.e. remove all stored information, and
      optionally change the dimension. If N is not input, the dimension
      is not changed.
      @param N new SRIleastSquares dimension (optional). */
   void SRIleastSquares::reset(const int N)
   {
      try
      {
         if (N > 0 && N != (int)R.rows())
         {
            R.resize(N, N, 0.0);
            Z.resize(N, 0.0);
         }
         else
         {
            SRI::zeroAll(N);
         }
         if (N > 0)
         {
            Xsave.resize(N);
         }
         Xsave = 0.0;
         numberBatches = 0;
      }
      catch (Exception& e)
      {
         GNSSTK_RETHROW(e);
      }
   }

   //---------------------------------------------------------------------------------
} // end namespace gnsstk