svm.cpp
1 #include <math.h>
2 #include <stdio.h>
3 #include <stdlib.h>
4 #include <ctype.h>
5 #include <float.h>
6 #include <string.h>
7 #include <stdarg.h>
8 #include <limits.h>
9 #include <locale.h>
10 #include "svm.h"
11 int libsvm_version = LIBSVM_VERSION;
12 typedef float Qfloat;
13 typedef signed char schar;
14 #ifndef min
15 template <class T> static inline T min(T x, T y)
16 {
17  return (x < y) ? x : y;
18 }
19 #endif
20 #ifndef max
21 template <class T> static inline T max(T x, T y)
22 {
23  return (x > y) ? x : y;
24 }
25 #endif
26 template <class T> static inline void swap(T& x, T& y)
27 {
28  T t = x;
29  x = y;
30  y = t;
31 }
32 template <class S, class T> static inline void clone(T*& dst, S* src, int n)
33 {
34  dst = new T[n];
35  memcpy((void *)dst, (void *)src, sizeof(T)*n);
36 }
37 static inline double powi(double base, int times)
38 {
39  double tmp = base, ret = 1.0;
40 
41  for (int t = times; t > 0; t /= 2)
42  {
43  if (t % 2 == 1) ret *= tmp;
44  tmp = tmp * tmp;
45  }
46  return ret;
47 }
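// Editor's note (illustrative, not part of the original source): powi() is
// exponentiation by squaring, so powi(base, times) costs O(log times) multiplications.
// Tracing powi(2.0, 5), with t halving each step:
//   t = 5 (odd)  -> ret = 1 * 2  = 2,  tmp = 4
//   t = 2 (even) -> ret unchanged,     tmp = 16
//   t = 1 (odd)  -> ret = 2 * 16 = 32, tmp = 256
// i.e. 2^5 = 32. kernel_poly() below relies on this for the integer 'degree' exponent.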
48 #define INF HUGE_VAL
49 #define TAU 1e-12
50 #define Malloc(type,n) (type *)malloc((n)*sizeof(type))
51 
52 static void print_string_stdout(const char *s)
53 {
54  fputs(s, stdout);
55  fflush(stdout);
56 }
57 static void (*svm_print_string)(const char *) = &print_string_stdout;
58 #if 1
59 static void info(const char *fmt, ...)
60 {
61  char buf[BUFSIZ];
62  va_list ap;
63  va_start(ap, fmt);
64  vsprintf(buf, fmt, ap);
65  va_end(ap);
66  (*svm_print_string)(buf);
67 }
68 #else
69 static void info(const char *fmt, ...) {}
70 #endif
71 
72 //
73 // Kernel Cache
74 //
75 // l is the number of total data items
76 // size is the cache size limit in bytes
77 //
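// Editor's sketch (illustrative, not part of the original source): a caller asks for
// one kernel column and only recomputes the part the cache could not supply, exactly
// as the get_Q() implementations further below do:
//
//   Qfloat *data;
//   int start = cache->get_data(i, &data, len);       // 'cache', 'i', 'len' assumed
//   for (int j = start; j < len; j++)                 // [start,len) must be (re)filled
//       data[j] = (Qfloat)(this->*kernel_function)(i, j);
//   // [0,start) is reused from the cache; if start >= len nothing needs filling.
//
// Cached columns are evicted in least-recently-used order when 'size' runs out.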
78 class Cache
79 {
80 public:
81  Cache(int l, long int size);
82  ~Cache();
83 
84  // request data [0,len)
85  // return some position p where [p,len) need to be filled
86  // (p >= len if nothing needs to be filled)
87  int get_data(const int index, Qfloat **data, int len);
88  void swap_index(int i, int j);
89 private:
90  int l;
91  long int size;
92  struct head_t
93  {
94  head_t *prev, *next; // a circular list
95  Qfloat *data;
96  int len; // data[0,len) is cached in this entry
97  };
98 
99  head_t *head;
100  head_t lru_head;
101  void lru_delete(head_t *h);
102  void lru_insert(head_t *h);
103 };
104 
105 Cache::Cache(int l_, long int size_): l(l_), size(size_)
106 {
107  head = (head_t *)calloc(l, sizeof(head_t)); // initialized to 0
108  size /= sizeof(Qfloat);
109  size -= l * sizeof(head_t) / sizeof(Qfloat);
110  size = max(size, 2 * (long int) l); // cache must be large enough for two columns
111  lru_head.next = lru_head.prev = &lru_head;
112 }
113 
114 Cache::~Cache()
115 {
116  for (head_t *h = lru_head.next; h != &lru_head; h = h->next)
117  free(h->data);
118  free(head);
119 }
120 
121 void Cache::lru_delete(head_t *h)
122 {
123  // delete from current location
124  h->prev->next = h->next;
125  h->next->prev = h->prev;
126 }
127 
128 void Cache::lru_insert(head_t *h)
129 {
130  // insert to last position
131  h->next = &lru_head;
132  h->prev = lru_head.prev;
133  h->prev->next = h;
134  h->next->prev = h;
135 }
136 
137 int Cache::get_data(const int index, Qfloat **data, int len)
138 {
139  head_t *h = &head[index];
140  if (h->len) lru_delete(h);
141  int more = len - h->len;
142 
143  if (more > 0)
144  {
145  // free old space
146  while (size < more)
147  {
148  head_t *old = lru_head.next;
149  lru_delete(old);
150  free(old->data);
151  size += old->len;
152  old->data = 0;
153  old->len = 0;
154  }
155 
156  // allocate new space
157  h->data = (Qfloat *)realloc(h->data, sizeof(Qfloat) * len);
158  size -= more;
159  swap(h->len, len);
160  }
161 
162  lru_insert(h);
163  *data = h->data;
164  return len;
165 }
166 
167 void Cache::swap_index(int i, int j)
168 {
169  if (i == j) return;
170 
171  if (head[i].len) lru_delete(&head[i]);
172  if (head[j].len) lru_delete(&head[j]);
173  swap(head[i].data, head[j].data);
174  swap(head[i].len, head[j].len);
175  if (head[i].len) lru_insert(&head[i]);
176  if (head[j].len) lru_insert(&head[j]);
177 
178  if (i > j) swap(i, j);
179  for (head_t *h = lru_head.next; h != &lru_head; h = h->next)
180  {
181  if (h->len > i)
182  {
183  if (h->len > j)
184  swap(h->data[i], h->data[j]);
185  else
186  {
187  // give up
188  lru_delete(h);
189  free(h->data);
190  size += h->len;
191  h->data = 0;
192  h->len = 0;
193  }
194  }
195  }
196 }
197 
198 //
199 // Kernel evaluation
200 //
201 // the static method k_function is for doing single kernel evaluation
202 // the constructor of Kernel prepares to calculate the l*l kernel matrix
203 // the member function get_Q is for getting one column from the Q Matrix
204 //
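// Editor's sketch (illustrative, not part of the original source): k_function()
// evaluates a single kernel value on sparse svm_node arrays terminated by index = -1.
// Assuming RBF parameters, one evaluation of K(x,y) = exp(-gamma*||x-y||^2) looks like:
//
//   svm_node x[] = { {1, 0.5}, {3, 1.0}, {-1, 0.0} };  // non-zeros at indices 1 and 3
//   svm_node y[] = { {1, 0.4}, {2, 2.0}, {-1, 0.0} };
//   svm_parameter param;                               // hypothetical setup
//   param.kernel_type = RBF;
//   param.gamma = 0.5;
//   double k = Kernel::k_function(x, y, param);        // exp(-0.5*(0.1^2 + 2^2 + 1^2))
//
// Features present in only one of the two vectors still contribute to ||x-y||^2,
// which is why the RBF branch walks both arrays to their terminators.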
205 class QMatrix
206 {
207 public:
208  virtual Qfloat *get_Q(int column, int len) const = 0;
209  virtual double *get_QD() const = 0;
210  virtual void swap_index(int i, int j) const = 0;
211  virtual ~QMatrix() {}
212 };
213 
214 class Kernel: public QMatrix
215 {
216 public:
217  Kernel(int l, svm_node * const * x, const svm_parameter& param);
218  virtual ~Kernel();
219 
220  static double k_function(const svm_node *x, const svm_node *y,
221  const svm_parameter& param);
222  virtual Qfloat *get_Q(int column, int len) const = 0;
223  virtual double *get_QD() const = 0;
224  virtual void swap_index(int i, int j) const // not so const...
225  {
226  swap(x[i], x[j]);
227  if (x_square) swap(x_square[i], x_square[j]);
228  }
229 protected:
230 
231  double (Kernel::*kernel_function)(int i, int j) const;
232 
233 private:
234  const svm_node **x;
235  double *x_square;
236 
237  // svm_parameter
238  const int kernel_type;
239  const int degree;
240  const double gamma;
241  const double coef0;
242 
243  static double dot(const svm_node *px, const svm_node *py);
244  double kernel_linear(int i, int j) const
245  {
246  return dot(x[i], x[j]);
247  }
248  double kernel_poly(int i, int j) const
249  {
250  return powi(gamma * dot(x[i], x[j]) + coef0, degree);
251  }
252  double kernel_rbf(int i, int j) const
253  {
254  return exp(-gamma * (x_square[i] + x_square[j] - 2 * dot(x[i], x[j])));
255  }
256  double kernel_sigmoid(int i, int j) const
257  {
258  return tanh(gamma * dot(x[i], x[j]) + coef0);
259  }
260  double kernel_precomputed(int i, int j) const
261  {
262  return x[i][(int)(x[j][0].value)].value;
263  }
264 };
265 
266 Kernel::Kernel(int l, svm_node * const * x_, const svm_parameter& param)
267  : kernel_type(param.kernel_type), degree(param.degree),
268  gamma(param.gamma), coef0(param.coef0)
269 {
270  switch (kernel_type)
271  {
272  case LINEAR:
273  kernel_function = &Kernel::kernel_linear;
274  break;
275  case POLY:
276  kernel_function = &Kernel::kernel_poly;
277  break;
278  case RBF:
279  kernel_function = &Kernel::kernel_rbf;
280  break;
281  case SIGMOID:
282  kernel_function = &Kernel::kernel_sigmoid;
283  break;
284  case PRECOMPUTED:
285  kernel_function = &Kernel::kernel_precomputed;
286  break;
287  }
288 
289  clone(x, x_, l);
290 
291  if (kernel_type == RBF)
292  {
293  x_square = new double[l];
294  for (int i = 0; i < l; i++)
295  x_square[i] = dot(x[i], x[i]);
296  }
297  else
298  x_square = 0;
299 }
300 
301 Kernel::~Kernel()
302 {
303  delete[] x;
304  delete[] x_square;
305 }
306 
307 double Kernel::dot(const svm_node *px, const svm_node *py)
308 {
309  double sum = 0;
310  while (px->index != -1 && py->index != -1)
311  {
312  if (px->index == py->index)
313  {
314  sum += px->value * py->value;
315  ++px;
316  ++py;
317  }
318  else
319  {
320  if (px->index > py->index)
321  ++py;
322  else
323  ++px;
324  }
325  }
326  return sum;
327 }
328 
329 double Kernel::k_function(const svm_node *x, const svm_node *y,
330  const svm_parameter& param)
331 {
332  switch (param.kernel_type)
333  {
334  case LINEAR:
335  return dot(x, y);
336  case POLY:
337  return powi(param.gamma * dot(x, y) + param.coef0, param.degree);
338  case RBF:
339  {
340  double sum = 0;
341  while (x->index != -1 && y->index != -1)
342  {
343  if (x->index == y->index)
344  {
345  double d = x->value - y->value;
346  sum += d * d;
347  ++x;
348  ++y;
349  }
350  else
351  {
352  if (x->index > y->index)
353  {
354  sum += y->value * y->value;
355  ++y;
356  }
357  else
358  {
359  sum += x->value * x->value;
360  ++x;
361  }
362  }
363  }
364 
365  while (x->index != -1)
366  {
367  sum += x->value * x->value;
368  ++x;
369  }
370 
371  while (y->index != -1)
372  {
373  sum += y->value * y->value;
374  ++y;
375  }
376 
377  return exp(-param.gamma * sum);
378  }
379  case SIGMOID:
380  return tanh(param.gamma * dot(x, y) + param.coef0);
381  case PRECOMPUTED: //x: test (validation), y: SV
382  return x[(int)(y->value)].value;
383  default:
384  return 0; // Unreachable
385  }
386 }
387 
388 // An SMO algorithm in Fan et al., JMLR 6(2005), p. 1889--1918
389 // Solves:
390 //
391 // min 0.5(\alpha^T Q \alpha) + p^T \alpha
392 //
393 // y^T \alpha = \delta
394 // y_i = +1 or -1
395 // 0 <= alpha_i <= Cp for y_i = 1
396 // 0 <= alpha_i <= Cn for y_i = -1
397 //
398 // Given:
399 //
400 // Q, p, y, Cp, Cn, and an initial feasible point \alpha
401 // l is the size of vectors and matrices
402 // eps is the stopping tolerance
403 //
404 // solution will be put in \alpha, objective value will be put in obj
405 //
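// Editor's note (not part of the original source): the concrete formulations built
// later in this file all reduce to the generic problem above, e.g.
//   C-SVC     (solve_c_svc):       p_i = -1, y_i = class label (+1/-1), bounds Cp/Cn
//   one-class (solve_one_class):   p_i = 0,  y_i = +1,                  bound C = 1
//   eps-SVR   (solve_epsilon_svr): 2l variables, p_i = epsilon -/+ target, bound C = param->C
// so the same Solve() routine below is reused for every SVM type.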
406 class Solver
407 {
408 public:
409  Solver() {};
410  virtual ~Solver() {};
411 
412  struct SolutionInfo
413  {
414  double obj;
415  double rho;
416  double upper_bound_p;
417  double upper_bound_n;
418  double r; // for Solver_NU
419  };
420 
421  void Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
422  double *alpha_, double Cp, double Cn, double eps,
423  SolutionInfo* si, int shrinking);
424 protected:
425  int active_size;
426  schar *y;
427  double *G; // gradient of objective function
428  enum { LOWER_BOUND, UPPER_BOUND, FREE };
429  char *alpha_status; // LOWER_BOUND, UPPER_BOUND, FREE
430  double *alpha;
431  const QMatrix *Q;
432  const double *QD;
433  double eps;
434  double Cp, Cn;
435  double *p;
436  int *active_set;
437  double *G_bar; // gradient, if we treat free variables as 0
438  int l;
439  bool unshrink; // XXX
440 
441  double get_C(int i)
442  {
443  return (y[i] > 0) ? Cp : Cn;
444  }
445  void update_alpha_status(int i)
446  {
447  if (alpha[i] >= get_C(i))
448  alpha_status[i] = UPPER_BOUND;
449  else if (alpha[i] <= 0)
450  alpha_status[i] = LOWER_BOUND;
451  else alpha_status[i] = FREE;
452  }
453  bool is_upper_bound(int i)
454  {
455  return alpha_status[i] == UPPER_BOUND;
456  }
457  bool is_lower_bound(int i)
458  {
459  return alpha_status[i] == LOWER_BOUND;
460  }
461  bool is_free(int i)
462  {
463  return alpha_status[i] == FREE;
464  }
465  void swap_index(int i, int j);
466  void reconstruct_gradient();
467  virtual int select_working_set(int &i, int &j);
468  virtual double calculate_rho();
469  virtual void do_shrinking();
470 private:
471  bool be_shrunk(int i, double Gmax1, double Gmax2);
472 };
473 
474 void Solver::swap_index(int i, int j)
475 {
476  Q->swap_index(i, j);
477  swap(y[i], y[j]);
478  swap(G[i], G[j]);
479  swap(alpha_status[i], alpha_status[j]);
480  swap(alpha[i], alpha[j]);
481  swap(p[i], p[j]);
482  swap(active_set[i], active_set[j]);
483  swap(G_bar[i], G_bar[j]);
484 }
485 
486 void Solver::reconstruct_gradient()
487 {
488  // reconstruct inactive elements of G from G_bar and free variables
489 
490  if (active_size == l) return;
491 
492  int i, j;
493  int nr_free = 0;
494 
495  for (j = active_size; j < l; j++)
496  G[j] = G_bar[j] + p[j];
497 
498  for (j = 0; j < active_size; j++)
499  if (is_free(j))
500  nr_free++;
501 
502  if (2 * nr_free < active_size)
503  info("\nWARNING: using -h 0 may be faster\n");
504 
505  if (nr_free * l > 2 * active_size * (l - active_size))
506  {
507  for (i = active_size; i < l; i++)
508  {
509  const Qfloat *Q_i = Q->get_Q(i, active_size);
510  for (j = 0; j < active_size; j++)
511  if (is_free(j))
512  G[i] += alpha[j] * Q_i[j];
513  }
514  }
515  else
516  {
517  for (i = 0; i < active_size; i++)
518  if (is_free(i))
519  {
520  const Qfloat *Q_i = Q->get_Q(i, l);
521  double alpha_i = alpha[i];
522  for (j = active_size; j < l; j++)
523  G[j] += alpha_i * Q_i[j];
524  }
525  }
526 }
527 
528 void Solver::Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
529  double *alpha_, double Cp, double Cn, double eps,
530  SolutionInfo* si, int shrinking)
531 {
532  this->l = l;
533  this->Q = &Q;
534  QD = Q.get_QD();
535  clone(p, p_, l);
536  clone(y, y_, l);
537  clone(alpha, alpha_, l);
538  this->Cp = Cp;
539  this->Cn = Cn;
540  this->eps = eps;
541  unshrink = false;
542 
543  // initialize alpha_status
544  {
545  alpha_status = new char[l];
546  for (int i = 0; i < l; i++)
547  update_alpha_status(i);
548  }
549 
550  // initialize active set (for shrinking)
551  {
552  active_set = new int[l];
553  for (int i = 0; i < l; i++)
554  active_set[i] = i;
555  active_size = l;
556  }
557 
558  // initialize gradient
559  {
560  G = new double[l];
561  G_bar = new double[l];
562  int i;
563  for (i = 0; i < l; i++)
564  {
565  G[i] = p[i];
566  G_bar[i] = 0;
567  }
568  for (i = 0; i < l; i++)
569  if (!is_lower_bound(i))
570  {
571  const Qfloat *Q_i = Q.get_Q(i, l);
572  double alpha_i = alpha[i];
573  int j;
574  for (j = 0; j < l; j++)
575  G[j] += alpha_i * Q_i[j];
576  if (is_upper_bound(i))
577  for (j = 0; j < l; j++)
578  G_bar[j] += get_C(i) * Q_i[j];
579  }
580  }
581 
582  // optimization step
583 
584  int iter = 0;
585  int max_iter = max(10000000, l > INT_MAX / 100 ? INT_MAX : 100 * l);
586  int counter = min(l, 1000) + 1;
587 
588  while (iter < max_iter)
589  {
590  // show progress and do shrinking
591 
592  if (--counter == 0)
593  {
594  counter = min(l, 1000);
595  if (shrinking) do_shrinking();
596  //info(".");
597  }
598 
599  int i, j;
600  if (select_working_set(i, j) != 0)
601  {
602  // reconstruct the whole gradient
603  reconstruct_gradient();
604  // reset active set size and check
605  active_size = l;
606  //info("*");
607  if (select_working_set(i, j) != 0)
608  break;
609  else
610  counter = 1; // do shrinking next iteration
611  }
612 
613  ++iter;
614 
615  // update alpha[i] and alpha[j], handle bounds carefully
616 
617  const Qfloat *Q_i = Q.get_Q(i, active_size);
618  const Qfloat *Q_j = Q.get_Q(j, active_size);
619 
620  double C_i = get_C(i);
621  double C_j = get_C(j);
622 
623  double old_alpha_i = alpha[i];
624  double old_alpha_j = alpha[j];
625 
626  if (y[i] != y[j])
627  {
628  double quad_coef = QD[i] + QD[j] + 2 * Q_i[j];
629  if (quad_coef <= 0)
630  quad_coef = TAU;
631  double delta = (-G[i] - G[j]) / quad_coef;
632  double diff = alpha[i] - alpha[j];
633  alpha[i] += delta;
634  alpha[j] += delta;
635 
636  if (diff > 0)
637  {
638  if (alpha[j] < 0)
639  {
640  alpha[j] = 0;
641  alpha[i] = diff;
642  }
643  }
644  else
645  {
646  if (alpha[i] < 0)
647  {
648  alpha[i] = 0;
649  alpha[j] = -diff;
650  }
651  }
652  if (diff > C_i - C_j)
653  {
654  if (alpha[i] > C_i)
655  {
656  alpha[i] = C_i;
657  alpha[j] = C_i - diff;
658  }
659  }
660  else
661  {
662  if (alpha[j] > C_j)
663  {
664  alpha[j] = C_j;
665  alpha[i] = C_j + diff;
666  }
667  }
668  }
669  else
670  {
671  double quad_coef = QD[i] + QD[j] - 2 * Q_i[j];
672  if (quad_coef <= 0)
673  quad_coef = TAU;
674  double delta = (G[i] - G[j]) / quad_coef;
675  double sum = alpha[i] + alpha[j];
676  alpha[i] -= delta;
677  alpha[j] += delta;
678 
679  if (sum > C_i)
680  {
681  if (alpha[i] > C_i)
682  {
683  alpha[i] = C_i;
684  alpha[j] = sum - C_i;
685  }
686  }
687  else
688  {
689  if (alpha[j] < 0)
690  {
691  alpha[j] = 0;
692  alpha[i] = sum;
693  }
694  }
695  if (sum > C_j)
696  {
697  if (alpha[j] > C_j)
698  {
699  alpha[j] = C_j;
700  alpha[i] = sum - C_j;
701  }
702  }
703  else
704  {
705  if (alpha[i] < 0)
706  {
707  alpha[i] = 0;
708  alpha[j] = sum;
709  }
710  }
711  }
712 
713  // update G
714 
715  double delta_alpha_i = alpha[i] - old_alpha_i;
716  double delta_alpha_j = alpha[j] - old_alpha_j;
717 
718  for (int k = 0; k < active_size; k++)
719  {
720  G[k] += Q_i[k] * delta_alpha_i + Q_j[k] * delta_alpha_j;
721  }
722 
723  // update alpha_status and G_bar
724 
725  {
726  bool ui = is_upper_bound(i);
727  bool uj = is_upper_bound(j);
728  update_alpha_status(i);
729  update_alpha_status(j);
730  int k;
731  if (ui != is_upper_bound(i))
732  {
733  Q_i = Q.get_Q(i, l);
734  if (ui)
735  for (k = 0; k < l; k++)
736  G_bar[k] -= C_i * Q_i[k];
737  else
738  for (k = 0; k < l; k++)
739  G_bar[k] += C_i * Q_i[k];
740  }
741 
742  if (uj != is_upper_bound(j))
743  {
744  Q_j = Q.get_Q(j, l);
745  if (uj)
746  for (k = 0; k < l; k++)
747  G_bar[k] -= C_j * Q_j[k];
748  else
749  for (k = 0; k < l; k++)
750  G_bar[k] += C_j * Q_j[k];
751  }
752  }
753  }
754 
755  if (iter >= max_iter)
756  {
757  if (active_size < l)
758  {
759  // reconstruct the whole gradient to calculate objective value
760  reconstruct_gradient();
761  active_size = l;
762  //info("*");
763  }
764  fprintf(stderr, "\nWARNING: reaching max number of iterations\n");
765  }
766 
767  // calculate rho
768 
769  si->rho = calculate_rho();
770 
771  // calculate objective value
772  {
773  double v = 0;
774  int i;
775  for (i = 0; i < l; i++)
776  v += alpha[i] * (G[i] + p[i]);
777 
778  si->obj = v / 2;
779  }
780 
781  // put back the solution
782  {
783  for (int i = 0; i < l; i++)
784  alpha_[active_set[i]] = alpha[i];
785  }
786 
787  // juggle everything back
788  /*{
789  for(int i=0;i<l;i++)
790  while(active_set[i] != i)
791  swap_index(i,active_set[i]);
792  // or Q.swap_index(i,active_set[i]);
793  }*/
794 
795  si->upper_bound_p = Cp;
796  si->upper_bound_n = Cn;
797 
798  //info("\noptimization finished, #iter = %d\n",iter);
799 
800  delete[] p;
801  delete[] y;
802  delete[] alpha;
803  delete[] alpha_status;
804  delete[] active_set;
805  delete[] G;
806  delete[] G_bar;
807 }
808 
809 // return 1 if already optimal, return 0 otherwise
810 int Solver::select_working_set(int &out_i, int &out_j)
811 {
812  // return i,j such that
813  // i: maximizes -y_i * grad(f)_i, i in I_up(\alpha)
814  // j: minimizes the decrease of obj value
815  // (if quadratic coefficient <= 0, replace it with tau)
816  // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha)
817 
818  double Gmax = -INF;
819  double Gmax2 = -INF;
820  int Gmax_idx = -1;
821  int Gmin_idx = -1;
822  double obj_diff_min = INF;
823 
824  for (int t = 0; t < active_size; t++)
825  if (y[t] == +1)
826  {
827  if (!is_upper_bound(t))
828  if (-G[t] >= Gmax)
829  {
830  Gmax = -G[t];
831  Gmax_idx = t;
832  }
833  }
834  else
835  {
836  if (!is_lower_bound(t))
837  if (G[t] >= Gmax)
838  {
839  Gmax = G[t];
840  Gmax_idx = t;
841  }
842  }
843 
844  int i = Gmax_idx;
845  const Qfloat *Q_i = NULL;
846  if (i != -1) // NULL Q_i not accessed: Gmax=-INF if i=-1
847  Q_i = Q->get_Q(i, active_size);
848 
849  for (int j = 0; j < active_size; j++)
850  {
851  if (y[j] == +1)
852  {
853  if (!is_lower_bound(j))
854  {
855  double grad_diff = Gmax + G[j];
856  if (G[j] >= Gmax2)
857  Gmax2 = G[j];
858  if (grad_diff > 0)
859  {
860  double obj_diff;
861  double quad_coef = QD[i] + QD[j] - 2.0 * y[i] * Q_i[j];
862  if (quad_coef > 0)
863  obj_diff = -(grad_diff * grad_diff) / quad_coef;
864  else
865  obj_diff = -(grad_diff * grad_diff) / TAU;
866 
867  if (obj_diff <= obj_diff_min)
868  {
869  Gmin_idx = j;
870  obj_diff_min = obj_diff;
871  }
872  }
873  }
874  }
875  else
876  {
877  if (!is_upper_bound(j))
878  {
879  double grad_diff = Gmax - G[j];
880  if (-G[j] >= Gmax2)
881  Gmax2 = -G[j];
882  if (grad_diff > 0)
883  {
884  double obj_diff;
885  double quad_coef = QD[i] + QD[j] + 2.0 * y[i] * Q_i[j];
886  if (quad_coef > 0)
887  obj_diff = -(grad_diff * grad_diff) / quad_coef;
888  else
889  obj_diff = -(grad_diff * grad_diff) / TAU;
890 
891  if (obj_diff <= obj_diff_min)
892  {
893  Gmin_idx = j;
894  obj_diff_min = obj_diff;
895  }
896  }
897  }
898  }
899  }
900 
901  if (Gmax + Gmax2 < eps)
902  return 1;
903 
904  out_i = Gmax_idx;
905  out_j = Gmin_idx;
906  return 0;
907 }
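// Editor's note (not part of the original source): this is the second-order working
// set selection (WSS 2) of the Fan et al. paper cited above: i is a maximal violating
// index, and j is chosen among the remaining violating indices to minimize
//
//   obj_diff = -grad_diff^2 / quad_coef,  quad_coef = Q_ii + Q_jj - 2*y_i*y_j*Q_ij
//
// (quad_coef replaced by TAU when it is not positive), i.e. the pair promising the
// largest objective decrease from a single two-variable update.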
908 
909 bool Solver::be_shrunk(int i, double Gmax1, double Gmax2)
910 {
911  if (is_upper_bound(i))
912  {
913  if (y[i] == +1)
914  return (-G[i] > Gmax1);
915  else
916  return (-G[i] > Gmax2);
917  }
918  else if (is_lower_bound(i))
919  {
920  if (y[i] == +1)
921  return (G[i] > Gmax2);
922  else
923  return (G[i] > Gmax1);
924  }
925  else
926  return (false);
927 }
928 
929 void Solver::do_shrinking()
930 {
931  int i;
932  double Gmax1 = -INF; // max { -y_i * grad(f)_i | i in I_up(\alpha) }
933  double Gmax2 = -INF; // max { y_i * grad(f)_i | i in I_low(\alpha) }
934 
935  // find maximal violating pair first
936  for (i = 0; i < active_size; i++)
937  {
938  if (y[i] == +1)
939  {
940  if (!is_upper_bound(i))
941  {
942  if (-G[i] >= Gmax1)
943  Gmax1 = -G[i];
944  }
945  if (!is_lower_bound(i))
946  {
947  if (G[i] >= Gmax2)
948  Gmax2 = G[i];
949  }
950  }
951  else
952  {
953  if (!is_upper_bound(i))
954  {
955  if (-G[i] >= Gmax2)
956  Gmax2 = -G[i];
957  }
958  if (!is_lower_bound(i))
959  {
960  if (G[i] >= Gmax1)
961  Gmax1 = G[i];
962  }
963  }
964  }
965 
966  if (unshrink == false && Gmax1 + Gmax2 <= eps * 10)
967  {
968  unshrink = true;
969  reconstruct_gradient();
970  active_size = l;
971  //info("*");
972  }
973 
974  for (i = 0; i < active_size; i++)
975  if (be_shrunk(i, Gmax1, Gmax2))
976  {
977  active_size--;
978  while (active_size > i)
979  {
980  if (!be_shrunk(active_size, Gmax1, Gmax2))
981  {
982  swap_index(i, active_size);
983  break;
984  }
985  active_size--;
986  }
987  }
988 }
989 
990 double Solver::calculate_rho()
991 {
992  double r;
993  int nr_free = 0;
994  double ub = INF, lb = -INF, sum_free = 0;
995  for (int i = 0; i < active_size; i++)
996  {
997  double yG = y[i] * G[i];
998 
999  if (is_upper_bound(i))
1000  {
1001  if (y[i] == -1)
1002  ub = min(ub, yG);
1003  else
1004  lb = max(lb, yG);
1005  }
1006  else if (is_lower_bound(i))
1007  {
1008  if (y[i] == +1)
1009  ub = min(ub, yG);
1010  else
1011  lb = max(lb, yG);
1012  }
1013  else
1014  {
1015  ++nr_free;
1016  sum_free += yG;
1017  }
1018  }
1019 
1020  if (nr_free > 0)
1021  r = sum_free / nr_free;
1022  else
1023  r = (ub + lb) / 2;
1024 
1025  return r;
1026 }
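// Editor's note (not part of the original source): rho is the offset of the decision
// function, which svm_predict_values() evaluates as sum_i coef_i*K(x_i,x) - rho. At
// the optimum every free variable (0 < alpha_i < C) satisfies y_i*G_i = rho, so the
// average over free variables is the most stable estimate; when nothing is free, the
// midpoint of the feasible interval [lb, ub] is used instead.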
1027 
1028 //
1029 // Solver for nu-svm classification and regression
1030 //
1031 // additional constraint: e^T \alpha = constant
1032 //
1033 class Solver_NU: public Solver
1034 {
1035 public:
1036  Solver_NU() {}
1037  void Solve(int l, const QMatrix& Q, const double *p, const schar *y,
1038  double *alpha, double Cp, double Cn, double eps,
1039  SolutionInfo* si, int shrinking)
1040  {
1041  this->si = si;
1042  Solver::Solve(l, Q, p, y, alpha, Cp, Cn, eps, si, shrinking);
1043  }
1044 private:
1045  SolutionInfo *si;
1046  int select_working_set(int &i, int &j);
1047  double calculate_rho();
1048  bool be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4);
1049  void do_shrinking();
1050 };
1051 
1052 // return 1 if already optimal, return 0 otherwise
1053 int Solver_NU::select_working_set(int &out_i, int &out_j)
1054 {
1055  // return i,j such that y_i = y_j and
1056  // i: maximizes -y_i * grad(f)_i, i in I_up(\alpha)
1057  // j: minimizes the decrease of obj value
1058  // (if quadratic coefficient <= 0, replace it with tau)
1059  // -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha)
1060 
1061  double Gmaxp = -INF;
1062  double Gmaxp2 = -INF;
1063  int Gmaxp_idx = -1;
1064 
1065  double Gmaxn = -INF;
1066  double Gmaxn2 = -INF;
1067  int Gmaxn_idx = -1;
1068 
1069  int Gmin_idx = -1;
1070  double obj_diff_min = INF;
1071 
1072  for (int t = 0; t < active_size; t++)
1073  if (y[t] == +1)
1074  {
1075  if (!is_upper_bound(t))
1076  if (-G[t] >= Gmaxp)
1077  {
1078  Gmaxp = -G[t];
1079  Gmaxp_idx = t;
1080  }
1081  }
1082  else
1083  {
1084  if (!is_lower_bound(t))
1085  if (G[t] >= Gmaxn)
1086  {
1087  Gmaxn = G[t];
1088  Gmaxn_idx = t;
1089  }
1090  }
1091 
1092  int ip = Gmaxp_idx;
1093  int in = Gmaxn_idx;
1094  const Qfloat *Q_ip = NULL;
1095  const Qfloat *Q_in = NULL;
1096  if (ip != -1) // NULL Q_ip not accessed: Gmaxp=-INF if ip=-1
1097  Q_ip = Q->get_Q(ip, active_size);
1098  if (in != -1)
1099  Q_in = Q->get_Q(in, active_size);
1100 
1101  for (int j = 0; j < active_size; j++)
1102  {
1103  if (y[j] == +1)
1104  {
1105  if (!is_lower_bound(j))
1106  {
1107  double grad_diff = Gmaxp + G[j];
1108  if (G[j] >= Gmaxp2)
1109  Gmaxp2 = G[j];
1110  if (grad_diff > 0)
1111  {
1112  double obj_diff;
1113  double quad_coef = QD[ip] + QD[j] - 2 * Q_ip[j];
1114  if (quad_coef > 0)
1115  obj_diff = -(grad_diff * grad_diff) / quad_coef;
1116  else
1117  obj_diff = -(grad_diff * grad_diff) / TAU;
1118 
1119  if (obj_diff <= obj_diff_min)
1120  {
1121  Gmin_idx = j;
1122  obj_diff_min = obj_diff;
1123  }
1124  }
1125  }
1126  }
1127  else
1128  {
1129  if (!is_upper_bound(j))
1130  {
1131  double grad_diff = Gmaxn - G[j];
1132  if (-G[j] >= Gmaxn2)
1133  Gmaxn2 = -G[j];
1134  if (grad_diff > 0)
1135  {
1136  double obj_diff;
1137  double quad_coef = QD[in] + QD[j] - 2 * Q_in[j];
1138  if (quad_coef > 0)
1139  obj_diff = -(grad_diff * grad_diff) / quad_coef;
1140  else
1141  obj_diff = -(grad_diff * grad_diff) / TAU;
1142 
1143  if (obj_diff <= obj_diff_min)
1144  {
1145  Gmin_idx = j;
1146  obj_diff_min = obj_diff;
1147  }
1148  }
1149  }
1150  }
1151  }
1152 
1153  if (max(Gmaxp + Gmaxp2, Gmaxn + Gmaxn2) < eps)
1154  return 1;
1155 
1156  if (y[Gmin_idx] == +1)
1157  out_i = Gmaxp_idx;
1158  else
1159  out_i = Gmaxn_idx;
1160  out_j = Gmin_idx;
1161 
1162  return 0;
1163 }
1164 
1165 bool Solver_NU::be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4)
1166 {
1167  if (is_upper_bound(i))
1168  {
1169  if (y[i] == +1)
1170  return (-G[i] > Gmax1);
1171  else
1172  return (-G[i] > Gmax4);
1173  }
1174  else if (is_lower_bound(i))
1175  {
1176  if (y[i] == +1)
1177  return (G[i] > Gmax2);
1178  else
1179  return (G[i] > Gmax3);
1180  }
1181  else
1182  return (false);
1183 }
1184 
1185 void Solver_NU::do_shrinking()
1186 {
1187  double Gmax1 = -INF; // max { -y_i * grad(f)_i | y_i = +1, i in I_up(\alpha) }
1188  double Gmax2 = -INF; // max { y_i * grad(f)_i | y_i = +1, i in I_low(\alpha) }
1189  double Gmax3 = -INF; // max { -y_i * grad(f)_i | y_i = -1, i in I_up(\alpha) }
1190  double Gmax4 = -INF; // max { y_i * grad(f)_i | y_i = -1, i in I_low(\alpha) }
1191 
1192  // find maximal violating pair first
1193  int i;
1194  for (i = 0; i < active_size; i++)
1195  {
1196  if (!is_upper_bound(i))
1197  {
1198  if (y[i] == +1)
1199  {
1200  if (-G[i] > Gmax1) Gmax1 = -G[i];
1201  }
1202  else if (-G[i] > Gmax4) Gmax4 = -G[i];
1203  }
1204  if (!is_lower_bound(i))
1205  {
1206  if (y[i] == +1)
1207  {
1208  if (G[i] > Gmax2) Gmax2 = G[i];
1209  }
1210  else if (G[i] > Gmax3) Gmax3 = G[i];
1211  }
1212  }
1213 
1214  if (unshrink == false && max(Gmax1 + Gmax2, Gmax3 + Gmax4) <= eps * 10)
1215  {
1216  unshrink = true;
1217  reconstruct_gradient();
1218  active_size = l;
1219  }
1220 
1221  for (i = 0; i < active_size; i++)
1222  if (be_shrunk(i, Gmax1, Gmax2, Gmax3, Gmax4))
1223  {
1224  active_size--;
1225  while (active_size > i)
1226  {
1227  if (!be_shrunk(active_size, Gmax1, Gmax2, Gmax3, Gmax4))
1228  {
1229  swap_index(i, active_size);
1230  break;
1231  }
1232  active_size--;
1233  }
1234  }
1235 }
1236 
1237 double Solver_NU::calculate_rho()
1238 {
1239  int nr_free1 = 0, nr_free2 = 0;
1240  double ub1 = INF, ub2 = INF;
1241  double lb1 = -INF, lb2 = -INF;
1242  double sum_free1 = 0, sum_free2 = 0;
1243 
1244  for (int i = 0; i < active_size; i++)
1245  {
1246  if (y[i] == +1)
1247  {
1248  if (is_upper_bound(i))
1249  lb1 = max(lb1, G[i]);
1250  else if (is_lower_bound(i))
1251  ub1 = min(ub1, G[i]);
1252  else
1253  {
1254  ++nr_free1;
1255  sum_free1 += G[i];
1256  }
1257  }
1258  else
1259  {
1260  if (is_upper_bound(i))
1261  lb2 = max(lb2, G[i]);
1262  else if (is_lower_bound(i))
1263  ub2 = min(ub2, G[i]);
1264  else
1265  {
1266  ++nr_free2;
1267  sum_free2 += G[i];
1268  }
1269  }
1270  }
1271 
1272  double r1, r2;
1273  if (nr_free1 > 0)
1274  r1 = sum_free1 / nr_free1;
1275  else
1276  r1 = (ub1 + lb1) / 2;
1277 
1278  if (nr_free2 > 0)
1279  r2 = sum_free2 / nr_free2;
1280  else
1281  r2 = (ub2 + lb2) / 2;
1282 
1283  si->r = (r1 + r2) / 2;
1284  return (r1 - r2) / 2;
1285 }
1286 
1287 //
1288 // Q matrices for various formulations
1289 //
1290 class SVC_Q: public Kernel
1291 {
1292 public:
1293  SVC_Q(const svm_problem& prob, const svm_parameter& param, const schar *y_)
1294  : Kernel(prob.l, prob.x, param)
1295  {
1296  clone(y, y_, prob.l);
1297  cache = new Cache(prob.l, (long int)(param.cache_size * (1 << 20)));
1298  QD = new double[prob.l];
1299  for (int i = 0; i < prob.l; i++)
1300  QD[i] = (this->*kernel_function)(i, i);
1301  }
1302 
1303  Qfloat *get_Q(int i, int len) const
1304  {
1305  Qfloat *data;
1306  int start, j;
1307  if ((start = cache->get_data(i, &data, len)) < len)
1308  {
1309  for (j = start; j < len; j++)
1310  data[j] = (Qfloat)(y[i] * y[j] * (this->*kernel_function)(i, j));
1311  }
1312  return data;
1313  }
1314 
1315  double *get_QD() const
1316  {
1317  return QD;
1318  }
1319 
1320  void swap_index(int i, int j) const
1321  {
1322  cache->swap_index(i, j);
1323  Kernel::swap_index(i, j);
1324  swap(y[i], y[j]);
1325  swap(QD[i], QD[j]);
1326  }
1327 
1328  ~SVC_Q()
1329  {
1330  delete[] y;
1331  delete cache;
1332  delete[] QD;
1333  }
1334 private:
1335  schar *y;
1336  Cache *cache;
1337  double *QD;
1338 };
1339 
1340 class ONE_CLASS_Q: public Kernel
1341 {
1342 public:
1343  ONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param)
1344  : Kernel(prob.l, prob.x, param)
1345  {
1346  cache = new Cache(prob.l, (long int)(param.cache_size * (1 << 20)));
1347  QD = new double[prob.l];
1348  for (int i = 0; i < prob.l; i++)
1349  QD[i] = (this->*kernel_function)(i, i);
1350  }
1351 
1352  Qfloat *get_Q(int i, int len) const
1353  {
1354  Qfloat *data;
1355  int start, j;
1356  if ((start = cache->get_data(i, &data, len)) < len)
1357  {
1358  for (j = start; j < len; j++)
1359  data[j] = (Qfloat)(this->*kernel_function)(i, j);
1360  }
1361  return data;
1362  }
1363 
1364  double *get_QD() const
1365  {
1366  return QD;
1367  }
1368 
1369  void swap_index(int i, int j) const
1370  {
1371  cache->swap_index(i, j);
1372  Kernel::swap_index(i, j);
1373  swap(QD[i], QD[j]);
1374  }
1375 
1376  ~ONE_CLASS_Q()
1377  {
1378  delete cache;
1379  delete[] QD;
1380  }
1381 private:
1382  Cache *cache;
1383  double *QD;
1384 };
1385 
1386 class SVR_Q: public Kernel
1387 {
1388 public:
1389  SVR_Q(const svm_problem& prob, const svm_parameter& param)
1390  : Kernel(prob.l, prob.x, param)
1391  {
1392  l = prob.l;
1393  cache = new Cache(l, (long int)(param.cache_size * (1 << 20)));
1394  QD = new double[2 * l];
1395  sign = new schar[2 * l];
1396  index = new int[2 * l];
1397  for (int k = 0; k < l; k++)
1398  {
1399  sign[k] = 1;
1400  sign[k + l] = -1;
1401  index[k] = k;
1402  index[k + l] = k;
1403  QD[k] = (this->*kernel_function)(k, k);
1404  QD[k + l] = QD[k];
1405  }
1406  buffer[0] = new Qfloat[2 * l];
1407  buffer[1] = new Qfloat[2 * l];
1408  next_buffer = 0;
1409  }
1410 
1411  void swap_index(int i, int j) const
1412  {
1413  swap(sign[i], sign[j]);
1414  swap(index[i], index[j]);
1415  swap(QD[i], QD[j]);
1416  }
1417 
1418  Qfloat *get_Q(int i, int len) const
1419  {
1420  Qfloat *data;
1421  int j, real_i = index[i];
1422  if (cache->get_data(real_i, &data, l) < l)
1423  {
1424  for (j = 0; j < l; j++)
1425  data[j] = (Qfloat)(this->*kernel_function)(real_i, j);
1426  }
1427 
1428  // reorder and copy
1429  Qfloat *buf = buffer[next_buffer];
1430  next_buffer = 1 - next_buffer;
1431  schar si = sign[i];
1432  for (j = 0; j < len; j++)
1433  buf[j] = (Qfloat) si * (Qfloat) sign[j] * data[index[j]];
1434  return buf;
1435  }
1436 
1437  double *get_QD() const
1438  {
1439  return QD;
1440  }
1441 
1442  ~SVR_Q()
1443  {
1444  delete cache;
1445  delete[] sign;
1446  delete[] index;
1447  delete[] buffer[0];
1448  delete[] buffer[1];
1449  delete[] QD;
1450  }
1451 private:
1452  int l;
1453  Cache *cache;
1454  schar *sign;
1455  int *index;
1456  mutable int next_buffer;
1457  mutable Qfloat *buffer[2];
1458  double *QD;
1459 };
1460 
1461 //
1462 // construct and solve various formulations
1463 //
1464 static void solve_c_svc(
1465  const svm_problem *prob, const svm_parameter* param,
1466  double *alpha, Solver::SolutionInfo* si, double Cp, double Cn)
1467 {
1468  int l = prob->l;
1469  double *minus_ones = new double[l];
1470  schar *y = new schar[l];
1471 
1472  int i;
1473 
1474  for (i = 0; i < l; i++)
1475  {
1476  alpha[i] = 0;
1477  minus_ones[i] = -1;
1478  if (prob->y[i] > 0) y[i] = +1;
1479  else y[i] = -1;
1480  }
1481 
1482  Solver s;
1483  s.Solve(l, SVC_Q(*prob, *param, y), minus_ones, y,
1484  alpha, Cp, Cn, param->eps, si, param->shrinking);
1485 
1486  double sum_alpha = 0;
1487  for (i = 0; i < l; i++)
1488  sum_alpha += alpha[i];
1489 
1490  //if (Cp==Cn)
1491  //info("nu = %f\n", sum_alpha/(Cp*prob->l));
1492 
1493  for (i = 0; i < l; i++)
1494  alpha[i] *= y[i];
1495 
1496  delete[] minus_ones;
1497  delete[] y;
1498 }
1499 
1500 static void solve_nu_svc(
1501  const svm_problem *prob, const svm_parameter *param,
1502  double *alpha, Solver::SolutionInfo* si)
1503 {
1504  int i;
1505  int l = prob->l;
1506  double nu = param->nu;
1507 
1508  schar *y = new schar[l];
1509 
1510  for (i = 0; i < l; i++)
1511  if (prob->y[i] > 0)
1512  y[i] = +1;
1513  else
1514  y[i] = -1;
1515 
1516  double sum_pos = nu * l / 2;
1517  double sum_neg = nu * l / 2;
1518 
1519  for (i = 0; i < l; i++)
1520  if (y[i] == +1)
1521  {
1522  alpha[i] = min(1.0, sum_pos);
1523  sum_pos -= alpha[i];
1524  }
1525  else
1526  {
1527  alpha[i] = min(1.0, sum_neg);
1528  sum_neg -= alpha[i];
1529  }
1530 
1531  double *zeros = new double[l];
1532 
1533  for (i = 0; i < l; i++)
1534  zeros[i] = 0;
1535 
1536  Solver_NU s;
1537  s.Solve(l, SVC_Q(*prob, *param, y), zeros, y,
1538  alpha, 1.0, 1.0, param->eps, si, param->shrinking);
1539  double r = si->r;
1540 
1541  //info("C = %f\n",1/r);
1542 
1543  for (i = 0; i < l; i++)
1544  alpha[i] *= y[i] / r;
1545 
1546  si->rho /= r;
1547  si->obj /= (r * r);
1548  si->upper_bound_p = 1 / r;
1549  si->upper_bound_n = 1 / r;
1550 
1551  delete[] y;
1552  delete[] zeros;
1553 }
1554 
1555 static void solve_one_class(
1556  const svm_problem *prob, const svm_parameter *param,
1557  double *alpha, Solver::SolutionInfo* si)
1558 {
1559  int l = prob->l;
1560  double *zeros = new double[l];
1561  schar *ones = new schar[l];
1562  int i;
1563 
1564  int n = (int)(param->nu * prob->l); // # of alpha's at upper bound
1565 
1566  for (i = 0; i < n; i++)
1567  alpha[i] = 1;
1568  if (n < prob->l)
1569  alpha[n] = param->nu * prob->l - n;
1570  for (i = n + 1; i < l; i++)
1571  alpha[i] = 0;
1572 
1573  for (i = 0; i < l; i++)
1574  {
1575  zeros[i] = 0;
1576  ones[i] = 1;
1577  }
1578 
1579  Solver s;
1580  s.Solve(l, ONE_CLASS_Q(*prob, *param), zeros, ones,
1581  alpha, 1.0, 1.0, param->eps, si, param->shrinking);
1582 
1583  delete[] zeros;
1584  delete[] ones;
1585 }
1586 
1587 static void solve_epsilon_svr(
1588  const svm_problem *prob, const svm_parameter *param,
1589  double *alpha, Solver::SolutionInfo* si)
1590 {
1591  int l = prob->l;
1592  double *alpha2 = new double[2 * l];
1593  double *linear_term = new double[2 * l];
1594  schar *y = new schar[2 * l];
1595  int i;
1596 
1597  for (i = 0; i < l; i++)
1598  {
1599  alpha2[i] = 0;
1600  linear_term[i] = param->p - prob->y[i];
1601  y[i] = 1;
1602 
1603  alpha2[i + l] = 0;
1604  linear_term[i + l] = param->p + prob->y[i];
1605  y[i + l] = -1;
1606  }
1607 
1608  Solver s;
1609  s.Solve(2 * l, SVR_Q(*prob, *param), linear_term, y,
1610  alpha2, param->C, param->C, param->eps, si, param->shrinking);
1611 
1612  double sum_alpha = 0;
1613  for (i = 0; i < l; i++)
1614  {
1615  alpha[i] = alpha2[i] - alpha2[i + l];
1616  sum_alpha += fabs(alpha[i]);
1617  }
1618  //info("nu = %f\n",sum_alpha/(param->C*l));
1619 
1620  delete[] alpha2;
1621  delete[] linear_term;
1622  delete[] y;
1623 }
1624 
1625 static void solve_nu_svr(
1626  const svm_problem *prob, const svm_parameter *param,
1627  double *alpha, Solver::SolutionInfo* si)
1628 {
1629  int l = prob->l;
1630  double C = param->C;
1631  double *alpha2 = new double[2 * l];
1632  double *linear_term = new double[2 * l];
1633  schar *y = new schar[2 * l];
1634  int i;
1635 
1636  double sum = C * param->nu * l / 2;
1637  for (i = 0; i < l; i++)
1638  {
1639  alpha2[i] = alpha2[i + l] = min(sum, C);
1640  sum -= alpha2[i];
1641 
1642  linear_term[i] = - prob->y[i];
1643  y[i] = 1;
1644 
1645  linear_term[i + l] = prob->y[i];
1646  y[i + l] = -1;
1647  }
1648 
1649  Solver_NU s;
1650  s.Solve(2 * l, SVR_Q(*prob, *param), linear_term, y,
1651  alpha2, C, C, param->eps, si, param->shrinking);
1652 
1653  //info("epsilon = %f\n",-si->r);
1654 
1655  for (i = 0; i < l; i++)
1656  alpha[i] = alpha2[i] - alpha2[i + l];
1657 
1658  delete[] alpha2;
1659  delete[] linear_term;
1660  delete[] y;
1661 }
1662 
1663 //
1664 // decision_function
1665 //
1666 struct decision_function
1667 {
1668  double *alpha;
1669  double rho;
1670 };
1671 
1672 static decision_function svm_train_one(
1673  const svm_problem *prob, const svm_parameter *param,
1674  double Cp, double Cn)
1675 {
1676  double *alpha = Malloc(double, prob->l);
1677  Solver::SolutionInfo si;
1678  switch (param->svm_type)
1679  {
1680  case C_SVC:
1681  solve_c_svc(prob, param, alpha, &si, Cp, Cn);
1682  break;
1683  case NU_SVC:
1684  solve_nu_svc(prob, param, alpha, &si);
1685  break;
1686  case ONE_CLASS:
1687  solve_one_class(prob, param, alpha, &si);
1688  break;
1689  case EPSILON_SVR:
1690  solve_epsilon_svr(prob, param, alpha, &si);
1691  break;
1692  case NU_SVR:
1693  solve_nu_svr(prob, param, alpha, &si);
1694  break;
1695  }
1696 
1697  //info("obj = %f, rho = %f\n",si.obj,si.rho);
1698 
1699  // output SVs
1700 
1701  int nSV = 0;
1702  int nBSV = 0;
1703  for (int i = 0; i < prob->l; i++)
1704  {
1705  if (fabs(alpha[i]) > 0)
1706  {
1707  ++nSV;
1708  if (prob->y[i] > 0)
1709  {
1710  if (fabs(alpha[i]) >= si.upper_bound_p)
1711  ++nBSV;
1712  }
1713  else
1714  {
1715  if (fabs(alpha[i]) >= si.upper_bound_n)
1716  ++nBSV;
1717  }
1718  }
1719  }
1720 
1721  //info("nSV = %d, nBSV = %d\n",nSV,nBSV);
1722 
1723  decision_function f;
1724  f.alpha = alpha;
1725  f.rho = si.rho;
1726  return f;
1727 }
1728 
1729 // Platt's binary SVM Probabilistic Output: an improvement from Lin et al.
1730 static void sigmoid_train(
1731  int l, const double *dec_values, const double *labels,
1732  double& A, double& B)
1733 {
1734  double prior1 = 0, prior0 = 0;
1735  int i;
1736 
1737  for (i = 0; i < l; i++)
1738  if (labels[i] > 0) prior1 += 1;
1739  else prior0 += 1;
1740 
1741  int max_iter = 100; // Maximal number of iterations
1742  double min_step = 1e-10; // Minimal step taken in line search
1743  double sigma = 1e-12; // For numerically strict PD of Hessian
1744  double eps = 1e-5;
1745  double hiTarget = (prior1 + 1.0) / (prior1 + 2.0);
1746  double loTarget = 1 / (prior0 + 2.0);
1747  double *t = Malloc(double, l);
1748  double fApB, p, q, h11, h22, h21, g1, g2, det, dA, dB, gd, stepsize;
1749  double newA, newB, newf, d1, d2;
1750  int iter;
1751 
1752  // Initial Point and Initial Fun Value
1753  A = 0.0;
1754  B = log((prior0 + 1.0) / (prior1 + 1.0));
1755  double fval = 0.0;
1756 
1757  for (i = 0; i < l; i++)
1758  {
1759  if (labels[i] > 0) t[i] = hiTarget;
1760  else t[i] = loTarget;
1761  fApB = dec_values[i] * A + B;
1762  if (fApB >= 0)
1763  fval += t[i] * fApB + log(1 + exp(-fApB));
1764  else
1765  fval += (t[i] - 1) * fApB + log(1 + exp(fApB));
1766  }
1767  for (iter = 0; iter < max_iter; iter++)
1768  {
1769  // Update Gradient and Hessian (use H' = H + sigma I)
1770  h11 = sigma; // numerically ensures strict PD
1771  h22 = sigma;
1772  h21 = 0.0;
1773  g1 = 0.0;
1774  g2 = 0.0;
1775  for (i = 0; i < l; i++)
1776  {
1777  fApB = dec_values[i] * A + B;
1778  if (fApB >= 0)
1779  {
1780  p = exp(-fApB) / (1.0 + exp(-fApB));
1781  q = 1.0 / (1.0 + exp(-fApB));
1782  }
1783  else
1784  {
1785  p = 1.0 / (1.0 + exp(fApB));
1786  q = exp(fApB) / (1.0 + exp(fApB));
1787  }
1788  d2 = p * q;
1789  h11 += dec_values[i] * dec_values[i] * d2;
1790  h22 += d2;
1791  h21 += dec_values[i] * d2;
1792  d1 = t[i] - p;
1793  g1 += dec_values[i] * d1;
1794  g2 += d1;
1795  }
1796 
1797  // Stopping Criteria
1798  if (fabs(g1) < eps && fabs(g2) < eps)
1799  break;
1800 
1801  // Finding Newton direction: -inv(H') * g
1802  det = h11 * h22 - h21 * h21;
1803  dA = -(h22 * g1 - h21 * g2) / det;
1804  dB = -(-h21 * g1 + h11 * g2) / det;
1805  gd = g1 * dA + g2 * dB;
1806 
1807 
1808  stepsize = 1; // Line Search
1809  while (stepsize >= min_step)
1810  {
1811  newA = A + stepsize * dA;
1812  newB = B + stepsize * dB;
1813 
1814  // New function value
1815  newf = 0.0;
1816  for (i = 0; i < l; i++)
1817  {
1818  fApB = dec_values[i] * newA + newB;
1819  if (fApB >= 0)
1820  newf += t[i] * fApB + log(1 + exp(-fApB));
1821  else
1822  newf += (t[i] - 1) * fApB + log(1 + exp(fApB));
1823  }
1824  // Check sufficient decrease
1825  if (newf < fval + 0.0001 * stepsize * gd)
1826  {
1827  A = newA;
1828  B = newB;
1829  fval = newf;
1830  break;
1831  }
1832  else
1833  stepsize = stepsize / 2.0;
1834  }
1835 
1836  if (stepsize < min_step)
1837  {
1838  //info("Line search fails in two-class probability estimates\n");
1839  break;
1840  }
1841  }
1842 
1843  if (iter >= max_iter)
1844  info("Reaching maximal iterations in two-class probability estimates\n");
1845  free(t);
1846 }
1847 
1848 static double sigmoid_predict(double decision_value, double A, double B)
1849 {
1850  double fApB = decision_value * A + B;
1851  // 1-p used later; avoid catastrophic cancellation
1852  if (fApB >= 0)
1853  return exp(-fApB) / (1.0 + exp(-fApB));
1854  else
1855  return 1.0 / (1 + exp(fApB)) ;
1856 }
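// Editor's sketch (illustrative, not part of the original source): both branches above
// compute the same Platt sigmoid P(y=+1|f) = 1/(1 + exp(A*f + B)); the branch is chosen
// so that exp() always sees a non-positive argument and cannot overflow. For example,
// with hypothetical parameters A = -2, B = 0.1 produced by sigmoid_train():
//
//   double p1 = sigmoid_predict( 2.0, -2.0, 0.1);  // fApB = -3.9  ->  ~0.980
//   double p0 = sigmoid_predict(-2.0, -2.0, 0.1);  // fApB = +4.1  ->  ~0.016
//
// The per-classifier A and B are stored in model->probA/probB by svm_train() below.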
1857 
1858 // Method 2 from the multiclass_prob paper by Wu, Lin, and Weng
1859 static void multiclass_probability(int k, double **r, double *p)
1860 {
1861  int t, j;
1862  int iter = 0, max_iter = max(100, k);
1863  double **Q = Malloc(double *, k);
1864  double *Qp = Malloc(double, k);
1865  double pQp, eps = 0.005 / k;
1866 
1867  for (t = 0; t < k; t++)
1868  {
1869  p[t] = 1.0 / k; // Valid if k = 1
1870  Q[t] = Malloc(double, k);
1871  Q[t][t] = 0;
1872  for (j = 0; j < t; j++)
1873  {
1874  Q[t][t] += r[j][t] * r[j][t];
1875  Q[t][j] = Q[j][t];
1876  }
1877  for (j = t + 1; j < k; j++)
1878  {
1879  Q[t][t] += r[j][t] * r[j][t];
1880  Q[t][j] = -r[j][t] * r[t][j];
1881  }
1882  }
1883  for (iter = 0; iter < max_iter; iter++)
1884  {
1885  // stopping condition, recalculate QP,pQP for numerical accuracy
1886  pQp = 0;
1887  for (t = 0; t < k; t++)
1888  {
1889  Qp[t] = 0;
1890  for (j = 0; j < k; j++)
1891  Qp[t] += Q[t][j] * p[j];
1892  pQp += p[t] * Qp[t];
1893  }
1894  double max_error = 0;
1895  for (t = 0; t < k; t++)
1896  {
1897  double error = fabs(Qp[t] - pQp);
1898  if (error > max_error)
1899  max_error = error;
1900  }
1901  if (max_error < eps) break;
1902 
1903  for (t = 0; t < k; t++)
1904  {
1905  double diff = (-Qp[t] + pQp) / Q[t][t];
1906  p[t] += diff;
1907  pQp = (pQp + diff * (diff * Q[t][t] + 2 * Qp[t])) / (1 + diff) / (1 + diff);
1908  for (j = 0; j < k; j++)
1909  {
1910  Qp[j] = (Qp[j] + diff * Q[t][j]) / (1 + diff);
1911  p[j] /= (1 + diff);
1912  }
1913  }
1914  }
1915  if (iter >= max_iter)
1916  info("Exceeds max_iter in multiclass_prob\n");
1917  for (t = 0; t < k; t++) free(Q[t]);
1918  free(Q);
1919  free(Qp);
1920 }
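// Editor's sketch (illustrative, not part of the original source): this routine couples
// pairwise estimates r[i][j] ~ P(y = i | y = i or j, x) into one distribution p using
// Method 2 of Wu, Lin & Weng. A k = 3 call could look like:
//
//   double r0[] = {0.0, 0.7, 0.8}, r1[] = {0.3, 0.0, 0.6}, r2[] = {0.2, 0.4, 0.0};
//   double *r[] = {r0, r1, r2};        // r[i][j] + r[j][i] = 1; diagonal is unused
//   double p[3];
//   multiclass_probability(3, r, p);   // p sums to 1; class 0 gets the largest share
//
// In libsvm, svm_predict_probability() builds r from the pairwise sigmoid outputs and
// then calls this function.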
1921 
1922 // Cross-validation decision values for probability estimates
1923 static void svm_binary_svc_probability(
1924  const svm_problem *prob, const svm_parameter *param,
1925  double Cp, double Cn, double& probA, double& probB)
1926 {
1927  int i;
1928  int nr_fold = 5;
1929  int *perm = Malloc(int, prob->l);
1930  double *dec_values = Malloc(double, prob->l);
1931 
1932  // random shuffle
1933  for (i = 0; i < prob->l; i++) perm[i] = i;
1934  for (i = 0; i < prob->l; i++)
1935  {
1936  int j = i + rand() % (prob->l - i);
1937  swap(perm[i], perm[j]);
1938  }
1939  for (i = 0; i < nr_fold; i++)
1940  {
1941  int begin = i * prob->l / nr_fold;
1942  int end = (i + 1) * prob->l / nr_fold;
1943  int j, k;
1944  struct svm_problem subprob;
1945 
1946  subprob.l = prob->l - (end - begin);
1947  subprob.x = Malloc(struct svm_node*, subprob.l);
1948  subprob.y = Malloc(double, subprob.l);
1949 
1950  k = 0;
1951  for (j = 0; j < begin; j++)
1952  {
1953  subprob.x[k] = prob->x[perm[j]];
1954  subprob.y[k] = prob->y[perm[j]];
1955  ++k;
1956  }
1957  for (j = end; j < prob->l; j++)
1958  {
1959  subprob.x[k] = prob->x[perm[j]];
1960  subprob.y[k] = prob->y[perm[j]];
1961  ++k;
1962  }
1963  int p_count = 0, n_count = 0;
1964  for (j = 0; j < k; j++)
1965  if (subprob.y[j] > 0)
1966  p_count++;
1967  else
1968  n_count++;
1969 
1970  if (p_count == 0 && n_count == 0)
1971  for (j = begin; j < end; j++)
1972  dec_values[perm[j]] = 0;
1973  else if (p_count > 0 && n_count == 0)
1974  for (j = begin; j < end; j++)
1975  dec_values[perm[j]] = 1;
1976  else if (p_count == 0 && n_count > 0)
1977  for (j = begin; j < end; j++)
1978  dec_values[perm[j]] = -1;
1979  else
1980  {
1981  svm_parameter subparam = *param;
1982  subparam.probability = 0;
1983  subparam.C = 1.0;
1984  subparam.nr_weight = 2;
1985  subparam.weight_label = Malloc(int, 2);
1986  subparam.weight = Malloc(double, 2);
1987  subparam.weight_label[0] = +1;
1988  subparam.weight_label[1] = -1;
1989  subparam.weight[0] = Cp;
1990  subparam.weight[1] = Cn;
1991  struct svm_model *submodel = svm_train(&subprob, &subparam);
1992  for (j = begin; j < end; j++)
1993  {
1994  svm_predict_values(submodel, prob->x[perm[j]], &(dec_values[perm[j]]));
1995  // ensure +1/-1 order; this is why the CV subroutine is not reused here
1996  dec_values[perm[j]] *= submodel->label[0];
1997  }
1998  svm_free_and_destroy_model(&submodel);
1999  svm_destroy_param(&subparam);
2000  }
2001  free(subprob.x);
2002  free(subprob.y);
2003  }
2004  sigmoid_train(prob->l, dec_values, prob->y, probA, probB);
2005  free(dec_values);
2006  free(perm);
2007 }
2008 
2009 // Return parameter of a Laplace distribution
2010 static double svm_svr_probability(
2011  const svm_problem *prob, const svm_parameter *param)
2012 {
2013  int i;
2014  int nr_fold = 5;
2015  double *ymv = Malloc(double, prob->l);
2016  double mae = 0;
2017 
2018  svm_parameter newparam = *param;
2019  newparam.probability = 0;
2020  svm_cross_validation(prob, &newparam, nr_fold, ymv);
2021  for (i = 0; i < prob->l; i++)
2022  {
2023  ymv[i] = prob->y[i] - ymv[i];
2024  mae += fabs(ymv[i]);
2025  }
2026  mae /= prob->l;
2027  double std = sqrt(2 * mae * mae);
2028  int count = 0;
2029  mae = 0;
2030  for (i = 0; i < prob->l; i++)
2031  if (fabs(ymv[i]) > 5 * std)
2032  count = count + 1;
2033  else
2034  mae += fabs(ymv[i]);
2035  mae /= (prob->l - count);
2036  info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma= %g\n", mae);
2037  free(ymv);
2038  return mae;
2039 }
2040 
2041 
2042 // label: label name, start: begin of each class, count: #data of classes, perm: indices to the original data
2043 // perm, length l, must be allocated before calling this subroutine
2044 static void svm_group_classes(const svm_problem *prob, int *nr_class_ret, int **label_ret, int **start_ret, int **count_ret, int *perm)
2045 {
2046  int l = prob->l;
2047  int max_nr_class = 16;
2048  int nr_class = 0;
2049  int *label = Malloc(int, max_nr_class);
2050  int *count = Malloc(int, max_nr_class);
2051  int *data_label = Malloc(int, l);
2052  int i;
2053 
2054  for (i = 0; i < l; i++)
2055  {
2056  int this_label = (int)prob->y[i];
2057  int j;
2058  for (j = 0; j < nr_class; j++)
2059  {
2060  if (this_label == label[j])
2061  {
2062  ++count[j];
2063  break;
2064  }
2065  }
2066  data_label[i] = j;
2067  if (j == nr_class)
2068  {
2069  if (nr_class == max_nr_class)
2070  {
2071  max_nr_class *= 2;
2072  label = (int *)realloc(label, max_nr_class * sizeof(int));
2073  count = (int *)realloc(count, max_nr_class * sizeof(int));
2074  }
2075  label[nr_class] = this_label;
2076  count[nr_class] = 1;
2077  ++nr_class;
2078  }
2079  }
2080 
2081  int *start = Malloc(int, nr_class);
2082  start[0] = 0;
2083  for (i = 1; i < nr_class; i++)
2084  start[i] = start[i - 1] + count[i - 1];
2085  for (i = 0; i < l; i++)
2086  {
2087  perm[start[data_label[i]]] = i;
2088  ++start[data_label[i]];
2089  }
2090  start[0] = 0;
2091  for (i = 1; i < nr_class; i++)
2092  start[i] = start[i - 1] + count[i - 1];
2093 
2094  *nr_class_ret = nr_class;
2095  *label_ret = label;
2096  *start_ret = start;
2097  *count_ret = count;
2098  free(data_label);
2099 }
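// Editor's sketch (illustrative, not part of the original source): for labels
// y = {2, 1, 2, 1, 3} this routine returns
//
//   nr_class = 3, label = {2, 1, 3}, count = {2, 2, 1}, start = {0, 2, 4}
//
// and perm = {0, 2, 1, 3, 4}, so prob->x[perm[0..1]] are the class-2 points,
// perm[2..3] the class-1 points and perm[4] the class-3 point. svm_train() below uses
// this grouping to form the nr_class*(nr_class-1)/2 pairwise sub-problems.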
2100 
2101 //
2102 // Interface functions
2103 //
2104 svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
2105 {
2106  svm_model *model = Malloc(svm_model, 1);
2107  model->param = *param;
2108  model->free_sv = 0; // XXX
2109 
2110  if (param->svm_type == ONE_CLASS ||
2111  param->svm_type == EPSILON_SVR ||
2112  param->svm_type == NU_SVR)
2113  {
2114  // regression or one-class-svm
2115  model->nr_class = 2;
2116  model->label = NULL;
2117  model->nSV = NULL;
2118  model->probA = NULL;
2119  model->probB = NULL;
2120  model->sv_coef = Malloc(double *, 1);
2121 
2122  if (param->probability &&
2123  (param->svm_type == EPSILON_SVR ||
2124  param->svm_type == NU_SVR))
2125  {
2126  model->probA = Malloc(double, 1);
2127  model->probA[0] = svm_svr_probability(prob, param);
2128  }
2129 
2130  decision_function f = svm_train_one(prob, param, 0, 0);
2131  model->rho = Malloc(double, 1);
2132  model->rho[0] = f.rho;
2133 
2134  int nSV = 0;
2135  int i;
2136  for (i = 0; i < prob->l; i++)
2137  if (fabs(f.alpha[i]) > 0) ++nSV;
2138  model->l = nSV;
2139  model->SV = Malloc(svm_node *, nSV);
2140  model->sv_coef[0] = Malloc(double, nSV);
2141  model->sv_indices = Malloc(int, nSV);
2142  int j = 0;
2143  for (i = 0; i < prob->l; i++)
2144  if (fabs(f.alpha[i]) > 0)
2145  {
2146  model->SV[j] = prob->x[i];
2147  model->sv_coef[0][j] = f.alpha[i];
2148  model->sv_indices[j] = i + 1;
2149  ++j;
2150  }
2151 
2152  free(f.alpha);
2153  }
2154  else
2155  {
2156  // classification
2157  int l = prob->l;
2158  int nr_class;
2159  int *label = NULL;
2160  int *start = NULL;
2161  int *count = NULL;
2162  int *perm = Malloc(int, l);
2163 
2164  // group training data of the same class
2165  svm_group_classes(prob, &nr_class, &label, &start, &count, perm);
2166  //if(nr_class == 1)
2167  //info("WARNING: training data in only one class. See README for details.\n");
2168 
2169  svm_node **x = Malloc(svm_node *, l);
2170  int i;
2171  for (i = 0; i < l; i++)
2172  x[i] = prob->x[perm[i]];
2173 
2174  // calculate weighted C
2175 
2176  double *weighted_C = Malloc(double, nr_class);
2177  for (i = 0; i < nr_class; i++)
2178  weighted_C[i] = param->C;
2179  for (i = 0; i < param->nr_weight; i++)
2180  {
2181  int j;
2182  for (j = 0; j < nr_class; j++)
2183  if (param->weight_label[i] == label[j])
2184  break;
2185  if (j == nr_class)
2186  fprintf(stderr, "WARNING: class label %d specified in weight is not found\n", param->weight_label[i]);
2187  else
2188  weighted_C[j] *= param->weight[i];
2189  }
2190 
2191  // train k*(k-1)/2 models
2192 
2193  bool *nonzero = Malloc(bool, l);
2194  for (i = 0; i < l; i++)
2195  nonzero[i] = false;
2196  decision_function *f = Malloc(decision_function, nr_class * (nr_class - 1) / 2);
2197 
2198  double *probA = NULL, *probB = NULL;
2199  if (param->probability)
2200  {
2201  probA = Malloc(double, nr_class * (nr_class - 1) / 2);
2202  probB = Malloc(double, nr_class * (nr_class - 1) / 2);
2203  }
2204 
2205  int p = 0;
2206  for (i = 0; i < nr_class; i++)
2207  for (int j = i + 1; j < nr_class; j++)
2208  {
2209  svm_problem sub_prob;
2210  int si = start[i], sj = start[j];
2211  int ci = count[i], cj = count[j];
2212  sub_prob.l = ci + cj;
2213  sub_prob.x = Malloc(svm_node *, sub_prob.l);
2214  sub_prob.y = Malloc(double, sub_prob.l);
2215  int k;
2216  for (k = 0; k < ci; k++)
2217  {
2218  sub_prob.x[k] = x[si + k];
2219  sub_prob.y[k] = +1;
2220  }
2221  for (k = 0; k < cj; k++)
2222  {
2223  sub_prob.x[ci + k] = x[sj + k];
2224  sub_prob.y[ci + k] = -1;
2225  }
2226 
2227  if (param->probability)
2228  svm_binary_svc_probability(&sub_prob, param, weighted_C[i], weighted_C[j], probA[p], probB[p]);
2229 
2230  f[p] = svm_train_one(&sub_prob, param, weighted_C[i], weighted_C[j]);
2231  for (k = 0; k < ci; k++)
2232  if (!nonzero[si + k] && fabs(f[p].alpha[k]) > 0)
2233  nonzero[si + k] = true;
2234  for (k = 0; k < cj; k++)
2235  if (!nonzero[sj + k] && fabs(f[p].alpha[ci + k]) > 0)
2236  nonzero[sj + k] = true;
2237  free(sub_prob.x);
2238  free(sub_prob.y);
2239  ++p;
2240  }
2241 
2242  // build output
2243 
2244  model->nr_class = nr_class;
2245 
2246  model->label = Malloc(int, nr_class);
2247  for (i = 0; i < nr_class; i++)
2248  model->label[i] = label[i];
2249 
2250  model->rho = Malloc(double, nr_class * (nr_class - 1) / 2);
2251  for (i = 0; i < nr_class * (nr_class - 1) / 2; i++)
2252  model->rho[i] = f[i].rho;
2253 
2254  if (param->probability)
2255  {
2256  model->probA = Malloc(double, nr_class * (nr_class - 1) / 2);
2257  model->probB = Malloc(double, nr_class * (nr_class - 1) / 2);
2258  for (i = 0; i < nr_class * (nr_class - 1) / 2; i++)
2259  {
2260  model->probA[i] = probA[i];
2261  model->probB[i] = probB[i];
2262  }
2263  }
2264  else
2265  {
2266  model->probA = NULL;
2267  model->probB = NULL;
2268  }
2269 
2270  int total_sv = 0;
2271  int *nz_count = Malloc(int, nr_class);
2272  model->nSV = Malloc(int, nr_class);
2273  for (i = 0; i < nr_class; i++)
2274  {
2275  int nSV = 0;
2276  for (int j = 0; j < count[i]; j++)
2277  if (nonzero[start[i] + j])
2278  {
2279  ++nSV;
2280  ++total_sv;
2281  }
2282  model->nSV[i] = nSV;
2283  nz_count[i] = nSV;
2284  }
2285 
2286  //info("Total nSV = %d\n",total_sv);
2287 
2288  model->l = total_sv;
2289  model->SV = Malloc(svm_node *, total_sv);
2290  model->sv_indices = Malloc(int, total_sv);
2291  p = 0;
2292  for (i = 0; i < l; i++)
2293  if (nonzero[i])
2294  {
2295  model->SV[p] = x[i];
2296  model->sv_indices[p++] = perm[i] + 1;
2297  }
2298 
2299  int *nz_start = Malloc(int, nr_class);
2300  nz_start[0] = 0;
2301  for (i = 1; i < nr_class; i++)
2302  nz_start[i] = nz_start[i - 1] + nz_count[i - 1];
2303 
2304  model->sv_coef = Malloc(double *, nr_class - 1);
2305  for (i = 0; i < nr_class - 1; i++)
2306  model->sv_coef[i] = Malloc(double, total_sv);
2307 
2308  p = 0;
2309  for (i = 0; i < nr_class; i++)
2310  for (int j = i + 1; j < nr_class; j++)
2311  {
2312  // classifier (i,j): coefficients with
2313  // i are in sv_coef[j-1][nz_start[i]...],
2314  // j are in sv_coef[i][nz_start[j]...]
2315 
2316  int si = start[i];
2317  int sj = start[j];
2318  int ci = count[i];
2319  int cj = count[j];
2320 
2321  int q = nz_start[i];
2322  int k;
2323  for (k = 0; k < ci; k++)
2324  if (nonzero[si + k])
2325  model->sv_coef[j - 1][q++] = f[p].alpha[k];
2326  q = nz_start[j];
2327  for (k = 0; k < cj; k++)
2328  if (nonzero[sj + k])
2329  model->sv_coef[i][q++] = f[p].alpha[ci + k];
2330  ++p;
2331  }
2332 
2333  free(label);
2334  free(probA);
2335  free(probB);
2336  free(count);
2337  free(perm);
2338  free(start);
2339  free(x);
2340  free(weighted_C);
2341  free(nonzero);
2342  for (i = 0; i < nr_class * (nr_class - 1) / 2; i++)
2343  free(f[i].alpha);
2344  free(f);
2345  free(nz_count);
2346  free(nz_start);
2347  }
2348  return model;
2349 }
2350 
2351 // Stratified cross validation
2352 void svm_cross_validation(const svm_problem *prob, const svm_parameter *param, int nr_fold, double *target)
2353 {
2354  int i;
2355  int *fold_start = Malloc(int, nr_fold + 1);
2356  int l = prob->l;
2357  int *perm = Malloc(int, l);
2358  int nr_class;
2359 
2360  // stratified cv may not give leave-one-out rate
2361  // (splitting each class into l folds would leave some folds with zero elements, hence the nr_fold < l check)
2362  if ((param->svm_type == C_SVC ||
2363  param->svm_type == NU_SVC) && nr_fold < l)
2364  {
2365  int *start = NULL;
2366  int *label = NULL;
2367  int *count = NULL;
2368  svm_group_classes(prob, &nr_class, &label, &start, &count, perm);
2369 
2370  // random shuffle and then data grouped by fold using the array perm
2371  int *fold_count = Malloc(int, nr_fold);
2372  int c;
2373  int *index = Malloc(int, l);
2374  for (i = 0; i < l; i++)
2375  index[i] = perm[i];
2376  for (c = 0; c < nr_class; c++)
2377  for (i = 0; i < count[c]; i++)
2378  {
2379  int j = i + rand() % (count[c] - i);
2380  swap(index[start[c] + j], index[start[c] + i]);
2381  }
2382  for (i = 0; i < nr_fold; i++)
2383  {
2384  fold_count[i] = 0;
2385  for (c = 0; c < nr_class; c++)
2386  fold_count[i] += (i + 1) * count[c] / nr_fold - i * count[c] / nr_fold;
2387  }
2388  fold_start[0] = 0;
2389  for (i = 1; i <= nr_fold; i++)
2390  fold_start[i] = fold_start[i - 1] + fold_count[i - 1];
2391  for (c = 0; c < nr_class; c++)
2392  for (i = 0; i < nr_fold; i++)
2393  {
2394  int begin = start[c] + i * count[c] / nr_fold;
2395  int end = start[c] + (i + 1) * count[c] / nr_fold;
2396  for (int j = begin; j < end; j++)
2397  {
2398  perm[fold_start[i]] = index[j];
2399  fold_start[i]++;
2400  }
2401  }
2402  fold_start[0] = 0;
2403  for (i = 1; i <= nr_fold; i++)
2404  fold_start[i] = fold_start[i - 1] + fold_count[i - 1];
2405  free(start);
2406  free(label);
2407  free(count);
2408  free(index);
2409  free(fold_count);
2410  }
2411  else
2412  {
2413  for (i = 0; i < l; i++) perm[i] = i;
2414  for (i = 0; i < l; i++)
2415  {
2416  int j = i + rand() % (l - i);
2417  swap(perm[i], perm[j]);
2418  }
2419  for (i = 0; i <= nr_fold; i++)
2420  fold_start[i] = i * l / nr_fold;
2421  }
2422 
2423  for (i = 0; i < nr_fold; i++)
2424  {
2425  int begin = fold_start[i];
2426  int end = fold_start[i + 1];
2427  int j, k;
2428  struct svm_problem subprob;
2429 
2430  subprob.l = l - (end - begin);
2431  subprob.x = Malloc(struct svm_node*, subprob.l);
2432  subprob.y = Malloc(double, subprob.l);
2433 
2434  k = 0;
2435  for (j = 0; j < begin; j++)
2436  {
2437  subprob.x[k] = prob->x[perm[j]];
2438  subprob.y[k] = prob->y[perm[j]];
2439  ++k;
2440  }
2441  for (j = end; j < l; j++)
2442  {
2443  subprob.x[k] = prob->x[perm[j]];
2444  subprob.y[k] = prob->y[perm[j]];
2445  ++k;
2446  }
2447  struct svm_model *submodel = svm_train(&subprob, param);
2448  if (param->probability &&
2449  (param->svm_type == C_SVC || param->svm_type == NU_SVC))
2450  {
2451  double *prob_estimates = Malloc(double, svm_get_nr_class(submodel));
2452  for (j = begin; j < end; j++)
2453  target[perm[j]] = svm_predict_probability(submodel, prob->x[perm[j]], prob_estimates);
2454  free(prob_estimates);
2455  }
2456  else
2457  for (j = begin; j < end; j++)
2458  {
2459  target[perm[j]] = svm_predict(submodel, prob->x[perm[j]]);
2460  }
2461  svm_free_and_destroy_model(&submodel);
2462  free(subprob.x);
2463  free(subprob.y);
2464  }
2465  free(fold_start);
2466  free(perm);
2467 }
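// Usage sketch (not part of svm.cpp, illustrative only): for a classification
// task (C_SVC/NU_SVC), given a filled svm_problem `prob` and svm_parameter
// `param`, k-fold cross validation writes one cross-validated prediction per
// training point into `target`, indexed the same way as prob.y:
//
//   double *target = Malloc(double, prob.l);
//   svm_cross_validation(&prob, &param, 5, target);
//   int correct = 0;
//   for (int i = 0; i < prob.l; i++)
//     if (target[i] == prob.y[i]) ++correct;
//   info("Cross validation accuracy = %g%%\n", 100.0 * correct / prob.l);
//   free(target);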
2468 
2469 
2470 int svm_get_svm_type(const svm_model *model)
2471 {
2472  return model->param.svm_type;
2473 }
2474 
2475 int svm_get_nr_class(const svm_model *model)
2476 {
2477  return model->nr_class;
2478 }
2479 
2480 void svm_get_labels(const svm_model *model, int* label)
2481 {
2482  if (model->label != NULL)
2483  for (int i = 0; i < model->nr_class; i++)
2484  label[i] = model->label[i];
2485 }
2486 
2487 void svm_get_sv_indices(const svm_model *model, int* indices)
2488 {
2489  if (model->sv_indices != NULL)
2490  for (int i = 0; i < model->l; i++)
2491  indices[i] = model->sv_indices[i];
2492 }
2493 
2494 int svm_get_nr_sv(const svm_model *model)
2495 {
2496  return model->l;
2497 }
2498 
2499 double svm_get_svr_probability(const svm_model *model)
2500 {
2501  if ((model->param.svm_type == EPSILON_SVR || model->param.svm_type == NU_SVR) &&
2502  model->probA != NULL)
2503  return model->probA[0];
2504  else
2505  {
2506  fprintf(stderr, "Model doesn't contain information for SVR probability inference\n");
2507  return 0;
2508  }
2509 }
2510 
2511 double svm_predict_values(const svm_model *model, const svm_node *x, double* dec_values)
2512 {
2513  int i;
2514  if (model->param.svm_type == ONE_CLASS ||
2515  model->param.svm_type == EPSILON_SVR ||
2516  model->param.svm_type == NU_SVR)
2517  {
2518  double *sv_coef = model->sv_coef[0];
2519  double sum = 0;
2520  for (i = 0; i < model->l; i++)
2521  sum += sv_coef[i] * Kernel::k_function(x, model->SV[i], model->param);
2522  sum -= model->rho[0];
2523  *dec_values = sum;
2524 
2525  if (model->param.svm_type == ONE_CLASS)
2526  return (sum > 0) ? 1 : -1;
2527  else
2528  return sum;
2529  }
2530  else
2531  {
2532  int nr_class = model->nr_class;
2533  int l = model->l;
2534 
2535  double *kvalue = Malloc(double, l);
2536  for (i = 0; i < l; i++)
2537  kvalue[i] = Kernel::k_function(x, model->SV[i], model->param);
2538 
2539  int *start = Malloc(int, nr_class);
2540  start[0] = 0;
2541  for (i = 1; i < nr_class; i++)
2542  start[i] = start[i - 1] + model->nSV[i - 1];
2543 
2544  int *vote = Malloc(int, nr_class);
2545  for (i = 0; i < nr_class; i++)
2546  vote[i] = 0;
2547 
2548  int p = 0;
2549  for (i = 0; i < nr_class; i++)
2550  for (int j = i + 1; j < nr_class; j++)
2551  {
2552  double sum = 0;
2553  int si = start[i];
2554  int sj = start[j];
2555  int ci = model->nSV[i];
2556  int cj = model->nSV[j];
2557 
2558  int k;
2559  double *coef1 = model->sv_coef[j - 1];
2560  double *coef2 = model->sv_coef[i];
2561  for (k = 0; k < ci; k++)
2562  sum += coef1[si + k] * kvalue[si + k];
2563  for (k = 0; k < cj; k++)
2564  sum += coef2[sj + k] * kvalue[sj + k];
2565  sum -= model->rho[p];
2566  dec_values[p] = sum;
2567 
2568  if (dec_values[p] > 0)
2569  ++vote[i];
2570  else
2571  ++vote[j];
2572  p++;
2573  }
2574 
2575  int vote_max_idx = 0;
2576  for (i = 1; i < nr_class; i++)
2577  if (vote[i] > vote[vote_max_idx])
2578  vote_max_idx = i;
2579 
2580  free(kvalue);
2581  free(start);
2582  free(vote);
2583  return model->label[vote_max_idx];
2584  }
2585 }
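// Note (not part of svm.cpp): for C_SVC/NU_SVC models, dec_values receives the
// nr_class*(nr_class-1)/2 pairwise decision values in the order
// (0,1), (0,2), ..., (0,n-1), (1,2), ..., (n-2,n-1), matching the double loop
// above; the returned label is chosen by one-vs-one majority vote.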
2586 
2587 double svm_predict(const svm_model *model, const svm_node *x)
2588 {
2589  int nr_class = model->nr_class;
2590  double *dec_values;
2591  if (model->param.svm_type == ONE_CLASS ||
2592  model->param.svm_type == EPSILON_SVR ||
2593  model->param.svm_type == NU_SVR)
2594  dec_values = Malloc(double, 1);
2595  else
2596  dec_values = Malloc(double, nr_class * (nr_class - 1) / 2);
2597  double pred_result = svm_predict_values(model, x, dec_values);
2598  free(dec_values);
2599  return pred_result;
2600 }
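// Usage sketch (not part of svm.cpp, illustrative only): a test vector is a
// sparse svm_node array terminated by index == -1; features that are not
// listed are treated as zero. `model` comes from svm_train() or
// svm_load_model().
//
//   svm_node x[3];
//   x[0].index = 1; x[0].value = 0.5;   // feature 1
//   x[1].index = 4; x[1].value = -1.2;  // feature 4
//   x[2].index = -1;                    // terminator
//   double predicted_label = svm_predict(model, x);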
2601 
2602 double svm_predict_probability(
2603  const svm_model *model, const svm_node *x, double *prob_estimates)
2604 {
2605  if ((model->param.svm_type == C_SVC || model->param.svm_type == NU_SVC) &&
2606  model->probA != NULL && model->probB != NULL)
2607  {
2608  int i;
2609  int nr_class = model->nr_class;
2610  double *dec_values = Malloc(double, nr_class * (nr_class - 1) / 2);
2611  svm_predict_values(model, x, dec_values);
2612 
2613  double min_prob = 1e-7;
2614  double **pairwise_prob = Malloc(double *, nr_class);
2615  for (i = 0; i < nr_class; i++)
2616  pairwise_prob[i] = Malloc(double, nr_class);
2617  int k = 0;
2618  for (i = 0; i < nr_class; i++)
2619  for (int j = i + 1; j < nr_class; j++)
2620  {
2621  pairwise_prob[i][j] = min(max(sigmoid_predict(dec_values[k], model->probA[k], model->probB[k]), min_prob), 1 - min_prob);
2622  pairwise_prob[j][i] = 1 - pairwise_prob[i][j];
2623  k++;
2624  }
2625  multiclass_probability(nr_class, pairwise_prob, prob_estimates);
2626 
2627  int prob_max_idx = 0;
2628  for (i = 1; i < nr_class; i++)
2629  if (prob_estimates[i] > prob_estimates[prob_max_idx])
2630  prob_max_idx = i;
2631  for (i = 0; i < nr_class; i++)
2632  free(pairwise_prob[i]);
2633  free(dec_values);
2634  free(pairwise_prob);
2635  return model->label[prob_max_idx];
2636  }
2637  else
2638  return svm_predict(model, x);
2639 }
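// Usage sketch (not part of svm.cpp, illustrative only): prob_estimates must
// hold nr_class entries and is filled in the order reported by
// svm_get_labels(); models trained with param.probability == 0 lack
// probA/probB, so the call silently falls back to svm_predict().
//
//   int nc = svm_get_nr_class(model);
//   int *labels = Malloc(int, nc);
//   double *probs = Malloc(double, nc);
//   svm_get_labels(model, labels);
//   double pred = svm_predict_probability(model, x, probs);
//   for (int i = 0; i < nc; i++)
//     info("P(y = %d) = %g\n", labels[i], probs[i]);
//   free(labels);
//   free(probs);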
2640 
2641 static const char *svm_type_table[] =
2642 {
2643  "c_svc", "nu_svc", "one_class", "epsilon_svr", "nu_svr", NULL
2644 };
2645 
2646 static const char *kernel_type_table[] =
2647 {
2648  "linear", "polynomial", "rbf", "sigmoid", "precomputed", NULL
2649 };
2650 
2651 int svm_save_model(const char *model_file_name, const svm_model *model)
2652 {
2653  FILE *fp = fopen(model_file_name, "w");
2654  if (fp == NULL) return -1;
2655 
2656  char *old_locale = strdup(setlocale(LC_ALL, NULL));
2657  setlocale(LC_ALL, "C");
2658 
2659  const svm_parameter& param = model->param;
2660 
2661  fprintf(fp, "svm_type %s\n", svm_type_table[param.svm_type]);
2662  fprintf(fp, "kernel_type %s\n", kernel_type_table[param.kernel_type]);
2663 
2664  if (param.kernel_type == POLY)
2665  fprintf(fp, "degree %d\n", param.degree);
2666 
2667  if (param.kernel_type == POLY || param.kernel_type == RBF || param.kernel_type == SIGMOID)
2668  fprintf(fp, "gamma %g\n", param.gamma);
2669 
2670  if (param.kernel_type == POLY || param.kernel_type == SIGMOID)
2671  fprintf(fp, "coef0 %g\n", param.coef0);
2672 
2673  int nr_class = model->nr_class;
2674  int l = model->l;
2675  fprintf(fp, "nr_class %d\n", nr_class);
2676  fprintf(fp, "total_sv %d\n", l);
2677 
2678  {
2679  fprintf(fp, "rho");
2680  for (int i = 0; i < nr_class * (nr_class - 1) / 2; i++)
2681  fprintf(fp, " %g", model->rho[i]);
2682  fprintf(fp, "\n");
2683  }
2684 
2685  if (model->label)
2686  {
2687  fprintf(fp, "label");
2688  for (int i = 0; i < nr_class; i++)
2689  fprintf(fp, " %d", model->label[i]);
2690  fprintf(fp, "\n");
2691  }
2692 
2693  if (model->probA) // regression has probA only
2694  {
2695  fprintf(fp, "probA");
2696  for (int i = 0; i < nr_class * (nr_class - 1) / 2; i++)
2697  fprintf(fp, " %g", model->probA[i]);
2698  fprintf(fp, "\n");
2699  }
2700  if (model->probB)
2701  {
2702  fprintf(fp, "probB");
2703  for (int i = 0; i < nr_class * (nr_class - 1) / 2; i++)
2704  fprintf(fp, " %g", model->probB[i]);
2705  fprintf(fp, "\n");
2706  }
2707 
2708  if (model->nSV)
2709  {
2710  fprintf(fp, "nr_sv");
2711  for (int i = 0; i < nr_class; i++)
2712  fprintf(fp, " %d", model->nSV[i]);
2713  fprintf(fp, "\n");
2714  }
2715 
2716  fprintf(fp, "SV\n");
2717  const double * const *sv_coef = model->sv_coef;
2718  const svm_node * const *SV = model->SV;
2719 
2720  for (int i = 0; i < l; i++)
2721  {
2722  for (int j = 0; j < nr_class - 1; j++)
2723  fprintf(fp, "%.16g ", sv_coef[j][i]);
2724 
2725  const svm_node *p = SV[i];
2726 
2727  if (param.kernel_type == PRECOMPUTED)
2728  fprintf(fp, "0:%d ", (int)(p->value));
2729  else
2730  while (p->index != -1)
2731  {
2732  fprintf(fp, "%d:%.8g ", p->index, p->value);
2733  p++;
2734  }
2735  fprintf(fp, "\n");
2736  }
2737 
2738  setlocale(LC_ALL, old_locale);
2739  free(old_locale);
2740 
2741  if (ferror(fp) != 0 || fclose(fp) != 0) return -1;
2742  else return 0;
2743 }
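// The file written above is plain text: "key value" header lines (svm_type,
// kernel_type, optional degree/gamma/coef0, nr_class, total_sv, rho, plus the
// optional label/probA/probB/nr_sv blocks), a line containing only "SV", then
// one line per support vector holding its nr_class-1 sv_coef entries followed
// by sparse index:value pairs. A hypothetical two-class RBF model might look
// like:
//
//   svm_type c_svc
//   kernel_type rbf
//   gamma 0.25
//   nr_class 2
//   total_sv 2
//   rho -0.1
//   label 1 -1
//   nr_sv 1 1
//   SV
//   1 1:0.5 3:-1.2
//   -1 2:0.7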
2744 
2745 static char *line = NULL;
2746 static int max_line_len;
2747 
2748 static char* readline(FILE *input)
2749 {
2750  int len;
2751 
2752  if (fgets(line, max_line_len, input) == NULL)
2753  return NULL;
2754 
2755  while (strrchr(line, '\n') == NULL)
2756  {
2757  max_line_len *= 2;
2758  line = (char *) realloc(line, max_line_len);
2759  len = (int) strlen(line);
2760  if (fgets(line + len, max_line_len - len, input) == NULL)
2761  break;
2762  }
2763  return line;
2764 }
2765 
2766 svm_model *svm_load_model(const char *model_file_name)
2767 {
2768  FILE *fp = fopen(model_file_name, "rb");
2769  if (fp == NULL) return NULL;
2770 
2771  char *old_locale = strdup(setlocale(LC_ALL, NULL));
2772  setlocale(LC_ALL, "C");
2773 
2774  // read parameters
2775 
2776  svm_model *model = Malloc(svm_model, 1);
2777  svm_parameter& param = model->param;
2778  model->rho = NULL;
2779  model->probA = NULL;
2780  model->probB = NULL;
2781  model->label = NULL;
2782  model->nSV = NULL;
2783 
2784  char cmd[81];
2785  while (1)
2786  {
2787  fscanf(fp, "%80s", cmd);
2788 
2789  if (strcmp(cmd, "svm_type") == 0)
2790  {
2791  fscanf(fp, "%80s", cmd);
2792  int i;
2793  for (i = 0; svm_type_table[i]; i++)
2794  {
2795  if (strcmp(svm_type_table[i], cmd) == 0)
2796  {
2797  param.svm_type = i;
2798  break;
2799  }
2800  }
2801  if (svm_type_table[i] == NULL)
2802  {
2803  fprintf(stderr, "unknown svm type.\n");
2804 
2805  setlocale(LC_ALL, old_locale);
2806  free(old_locale);
2807  free(model->rho);
2808  free(model->label);
2809  free(model->nSV);
2810  free(model);
2811  return NULL;
2812  }
2813  }
2814  else if (strcmp(cmd, "kernel_type") == 0)
2815  {
2816  fscanf(fp, "%80s", cmd);
2817  int i;
2818  for (i = 0; kernel_type_table[i]; i++)
2819  {
2820  if (strcmp(kernel_type_table[i], cmd) == 0)
2821  {
2822  param.kernel_type = i;
2823  break;
2824  }
2825  }
2826  if (kernel_type_table[i] == NULL)
2827  {
2828  fprintf(stderr, "unknown kernel function.\n");
2829 
2830  setlocale(LC_ALL, old_locale);
2831  free(old_locale);
2832  free(model->rho);
2833  free(model->label);
2834  free(model->nSV);
2835  free(model);
2836  return NULL;
2837  }
2838  }
2839  else if (strcmp(cmd, "degree") == 0)
2840  fscanf(fp, "%d", &param.degree);
2841  else if (strcmp(cmd, "gamma") == 0)
2842  fscanf(fp, "%lf", &param.gamma);
2843  else if (strcmp(cmd, "coef0") == 0)
2844  fscanf(fp, "%lf", &param.coef0);
2845  else if (strcmp(cmd, "nr_class") == 0)
2846  fscanf(fp, "%d", &model->nr_class);
2847  else if (strcmp(cmd, "total_sv") == 0)
2848  fscanf(fp, "%d", &model->l);
2849  else if (strcmp(cmd, "rho") == 0)
2850  {
2851  int n = model->nr_class * (model->nr_class - 1) / 2;
2852  model->rho = Malloc(double, n);
2853  for (int i = 0; i < n; i++)
2854  fscanf(fp, "%lf", &model->rho[i]);
2855  }
2856  else if (strcmp(cmd, "label") == 0)
2857  {
2858  int n = model->nr_class;
2859  model->label = Malloc(int, n);
2860  for (int i = 0; i < n; i++)
2861  fscanf(fp, "%d", &model->label[i]);
2862  }
2863  else if (strcmp(cmd, "probA") == 0)
2864  {
2865  int n = model->nr_class * (model->nr_class - 1) / 2;
2866  model->probA = Malloc(double, n);
2867  for (int i = 0; i < n; i++)
2868  fscanf(fp, "%lf", &model->probA[i]);
2869  }
2870  else if (strcmp(cmd, "probB") == 0)
2871  {
2872  int n = model->nr_class * (model->nr_class - 1) / 2;
2873  model->probB = Malloc(double, n);
2874  for (int i = 0; i < n; i++)
2875  fscanf(fp, "%lf", &model->probB[i]);
2876  }
2877  else if (strcmp(cmd, "nr_sv") == 0)
2878  {
2879  int n = model->nr_class;
2880  model->nSV = Malloc(int, n);
2881  for (int i = 0; i < n; i++)
2882  fscanf(fp, "%d", &model->nSV[i]);
2883  }
2884  else if (strcmp(cmd, "SV") == 0)
2885  {
2886  while (1)
2887  {
2888  int c = getc(fp);
2889  if (c == EOF || c == '\n') break;
2890  }
2891  break;
2892  }
2893  else
2894  {
2895  fprintf(stderr, "unknown text in model file: [%s]\n", cmd);
2896 
2897  setlocale(LC_ALL, old_locale);
2898  free(old_locale);
2899  free(model->rho);
2900  free(model->label);
2901  free(model->nSV);
2902  free(model);
2903  return NULL;
2904  }
2905  }
2906 
2907  // read sv_coef and SV
2908 
2909  int elements = 0;
2910  long pos = ftell(fp);
2911 
2912  max_line_len = 1024;
2913  line = Malloc(char, max_line_len);
2914  char *p, *endptr, *idx, *val;
2915 
2916  while (readline(fp) != NULL)
2917  {
2918  p = strtok(line, ":");
2919  while (1)
2920  {
2921  p = strtok(NULL, ":");
2922  if (p == NULL)
2923  break;
2924  ++elements;
2925  }
2926  }
2927  elements += model->l;
2928 
2929  fseek(fp, pos, SEEK_SET);
2930 
2931  int m = model->nr_class - 1;
2932  int l = model->l;
2933  model->sv_coef = Malloc(double *, m);
2934  int i;
2935  for (i = 0; i < m; i++)
2936  model->sv_coef[i] = Malloc(double, l);
2937  model->SV = Malloc(svm_node*, l);
2938  svm_node *x_space = NULL;
2939  if (l > 0) x_space = Malloc(svm_node, elements);
2940 
2941  int j = 0;
2942  for (i = 0; i < l; i++)
2943  {
2944  readline(fp);
2945  model->SV[i] = &x_space[j];
2946 
2947  p = strtok(line, " \t");
2948  model->sv_coef[0][i] = strtod(p, &endptr);
2949  for (int k = 1; k < m; k++)
2950  {
2951  p = strtok(NULL, " \t");
2952  model->sv_coef[k][i] = strtod(p, &endptr);
2953  }
2954 
2955  while (1)
2956  {
2957  idx = strtok(NULL, ":");
2958  val = strtok(NULL, " \t");
2959 
2960  if (val == NULL)
2961  break;
2962  x_space[j].index = (int) strtol(idx, &endptr, 10);
2963  x_space[j].value = strtod(val, &endptr);
2964 
2965  ++j;
2966  }
2967  x_space[j++].index = -1;
2968  }
2969  free(line);
2970 
2971  setlocale(LC_ALL, old_locale);
2972  free(old_locale);
2973 
2974  if (ferror(fp) != 0 || fclose(fp) != 0)
2975  return NULL;
2976 
2977  model->free_sv = 1; // XXX
2978  return model;
2979 }
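// Usage sketch (not part of svm.cpp, illustrative only): models returned by
// svm_load_model() own their SV storage (free_sv == 1), so release them with
// svm_free_and_destroy_model() rather than plain free(). The file name below
// is hypothetical.
//
//   svm_model *m = svm_load_model("trained.model");
//   if (m == NULL)
//     fprintf(stderr, "cannot load model file\n");
//   else
//   {
//     /* ... svm_predict(m, x) ... */
//     svm_free_and_destroy_model(&m);   // frees content and sets m to NULL
//   }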
2980 
2981 void svm_free_model_content(svm_model* model_ptr)
2982 {
2983  if (model_ptr->free_sv && model_ptr->l > 0 && model_ptr->SV != NULL)
2984  free((void *)(model_ptr->SV[0]));
2985  if (model_ptr->sv_coef)
2986  {
2987  for (int i = 0; i < model_ptr->nr_class - 1; i++)
2988  free(model_ptr->sv_coef[i]);
2989  }
2990 
2991  free(model_ptr->SV);
2992  model_ptr->SV = NULL;
2993 
2994  free(model_ptr->sv_coef);
2995  model_ptr->sv_coef = NULL;
2996 
2997  free(model_ptr->rho);
2998  model_ptr->rho = NULL;
2999 
3000  free(model_ptr->label);
3001  model_ptr->label = NULL;
3002 
3003  free(model_ptr->probA);
3004  model_ptr->probA = NULL;
3005 
3006  free(model_ptr->probB);
3007  model_ptr->probB = NULL;
3008 
3009  free(model_ptr->nSV);
3010  model_ptr->nSV = NULL;
3011 }
3012 
3013 void svm_free_and_destroy_model(svm_model** model_ptr_ptr)
3014 {
3015  if (model_ptr_ptr != NULL && *model_ptr_ptr != NULL)
3016  {
3017  svm_free_model_content(*model_ptr_ptr);
3018  free(*model_ptr_ptr);
3019  *model_ptr_ptr = NULL;
3020  }
3021 }
3022 
3023 void svm_destroy_param(svm_parameter* param)
3024 {
3025  free(param->weight_label);
3026  free(param->weight);
3027 }
3028 
3029 const char *svm_check_parameter(const svm_problem *prob, const svm_parameter *param)
3030 {
3031  // svm_type
3032 
3033  int svm_type = param->svm_type;
3034  if (svm_type != C_SVC &&
3035  svm_type != NU_SVC &&
3036  svm_type != ONE_CLASS &&
3037  svm_type != EPSILON_SVR &&
3038  svm_type != NU_SVR)
3039  return "unknown svm type";
3040 
3041  // kernel_type, degree
3042 
3043  int kernel_type = param->kernel_type;
3044  if (kernel_type != LINEAR &&
3045  kernel_type != POLY &&
3046  kernel_type != RBF &&
3047  kernel_type != SIGMOID &&
3048  kernel_type != PRECOMPUTED)
3049  return "unknown kernel type";
3050 
3051  if (param->gamma < 0)
3052  return "gamma < 0";
3053 
3054  if (param->degree < 0)
3055  return "degree of polynomial kernel < 0";
3056 
3057  // cache_size,eps,C,nu,p,shrinking
3058 
3059  if (param->cache_size <= 0)
3060  return "cache_size <= 0";
3061 
3062  if (param->eps <= 0)
3063  return "eps <= 0";
3064 
3065  if (svm_type == C_SVC ||
3066  svm_type == EPSILON_SVR ||
3067  svm_type == NU_SVR)
3068  if (param->C <= 0)
3069  return "C <= 0";
3070 
3071  if (svm_type == NU_SVC ||
3072  svm_type == ONE_CLASS ||
3073  svm_type == NU_SVR)
3074  if (param->nu <= 0 || param->nu > 1)
3075  return "nu <= 0 or nu > 1";
3076 
3077  if (svm_type == EPSILON_SVR)
3078  if (param->p < 0)
3079  return "p < 0";
3080 
3081  if (param->shrinking != 0 &&
3082  param->shrinking != 1)
3083  return "shrinking != 0 and shrinking != 1";
3084 
3085  if (param->probability != 0 &&
3086  param->probability != 1)
3087  return "probability != 0 and probability != 1";
3088 
3089  if (param->probability == 1 &&
3090  svm_type == ONE_CLASS)
3091  return "one-class SVM probability output not supported yet";
3092 
3093 
3094  // check whether nu-svc is feasible
3095 
3096  if (svm_type == NU_SVC)
3097  {
3098  int l = prob->l;
3099  int max_nr_class = 16;
3100  int nr_class = 0;
3101  int *label = Malloc(int, max_nr_class);
3102  int *count = Malloc(int, max_nr_class);
3103 
3104  int i;
3105  for (i = 0; i < l; i++)
3106  {
3107  int this_label = (int)prob->y[i];
3108  int j;
3109  for (j = 0; j < nr_class; j++)
3110  if (this_label == label[j])
3111  {
3112  ++count[j];
3113  break;
3114  }
3115  if (j == nr_class)
3116  {
3117  if (nr_class == max_nr_class)
3118  {
3119  max_nr_class *= 2;
3120  label = (int *)realloc(label, max_nr_class * sizeof(int));
3121  count = (int *)realloc(count, max_nr_class * sizeof(int));
3122  }
3123  label[nr_class] = this_label;
3124  count[nr_class] = 1;
3125  ++nr_class;
3126  }
3127  }
3128 
3129  for (i = 0; i < nr_class; i++)
3130  {
3131  int n1 = count[i];
3132  for (int j = i + 1; j < nr_class; j++)
3133  {
3134  int n2 = count[j];
3135  if (param->nu * (n1 + n2) / 2 > min(n1, n2))
3136  {
3137  free(label);
3138  free(count);
3139  return "specified nu is infeasible";
3140  }
3141  }
3142  }
3143  free(label);
3144  free(count);
3145  }
3146 
3147  return NULL;
3148 }
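// Usage sketch (not part of svm.cpp, illustrative only): svm_check_parameter()
// returns NULL when the parameters are usable and a static error string
// otherwise, so it should be called before svm_train():
//
//   const char *err = svm_check_parameter(&prob, &param);
//   if (err != NULL)
//     fprintf(stderr, "parameter error: %s\n", err);
//   else
//   {
//     svm_model *m = svm_train(&prob, &param);
//     /* ... use m, then svm_free_and_destroy_model(&m) ... */
//   }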
3149 
3150 int svm_check_probability_model(const svm_model *model)
3151 {
3152  return ((model->param.svm_type == C_SVC || model->param.svm_type == NU_SVC) &&
3153  model->probA != NULL && model->probB != NULL) ||
3154  ((model->param.svm_type == EPSILON_SVR || model->param.svm_type == NU_SVR) &&
3155  model->probA != NULL);
3156 }
3157 
3158 void svm_set_print_string_function(void (*print_func)(const char *))
3159 {
3160  if (print_func == NULL)
3161  svm_print_string = &print_string_stdout;
3162  else
3163  svm_print_string = print_func;
3164 }
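// Usage sketch (not part of svm.cpp, illustrative only): training progress is
// reported through info(), which forwards to svm_print_string. Installing a
// no-op printer silences that output; passing NULL restores the stdout
// default.
//
//   static void print_null(const char *) {}
//   ...
//   svm_set_print_string_function(&print_null);  // quiet training
//   svm_set_print_string_function(NULL);         // back to print_string_stdout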