  1. /**
  2. * @file VCSVMLight.cpp
  3. * @brief Simple Nearest Neighbour Implementation
  4. * @author Erik Rodner
  5. * @date 10/25/2007
  6. */
  7. #ifdef NICE_USELIB_SVMLIGHT
  8. #include <iostream>
  9. #include "vislearning/classifier/vclassifier/VCSVMLight.h"
  10. // just because of some macro problems
  11. # define SVM_LINEAR 0 /* linear kernel type */
  12. # define SVM_POLY 1 /* polynoial kernel type */
  13. # define SVM_RBF 2 /* rbf kernel type */
  14. # define SVM_SIGMOID 3 /* sigmoid kernel type */
  15. # define SVM_CUSTOM 4 /* custom kernel type */
  16. using namespace OBJREC;
  17. using namespace std;
  18. using namespace NICE;
  19. VCSVMLight::VCSVMLight ( const Config *_conf, const string & section )
  20. : VecClassifier ( _conf )
  21. {
  22. finalModel = NULL;
  23. std::string kernel_type_s = _conf->gS(section, "kernel", "rbf");
  24. if ( (kernel_type_s == "none") || (kernel_type_s == "linear") )
  25. {
  26. kernel_type = SVM_LINEAR;
  27. } else if ( (kernel_type_s == "poly") ) {
  28. kernel_type = SVM_POLY;
  29. } else if ( (kernel_type_s == "rbf") ) {
  30. kernel_type = SVM_RBF;
  31. } else if ( (kernel_type_s == "sigmoid") ) {
  32. kernel_type = SVM_SIGMOID;
  33. } else if ( (kernel_type_s == "custom") ) {
  34. kernel_type = SVM_CUSTOM;
  35. } else {
  36. fthrow ( Exception, "Kernel method " << kernel_type_s << " not supported." );
  37. }
  38. std::string normalization_type_s = _conf->gS(section, "normalization_type", "euclidean" );
  39. if ( normalization_type_s == "none" ) {
  40. normalization_type = SVM_NORMALIZATION_NONE;
  41. } else if ( normalization_type_s == "euclidean" ) {
  42. normalization_type = SVM_NORMALIZATION_EUCLIDEAN;
  43. } else if ( normalization_type_s == "maxmin" ) {
  44. normalization_type = SVM_NORMALIZATION_01;
  45. } else {
  46. fthrow ( Exception, "Normalization method " << normalization_type << " not supported." );
  47. }
  48. fprintf (stderr, "VCSVMLight: kernel %d (%s) normalization %d (%s)\n", kernel_type, kernel_type_s.c_str(),
  49. normalization_type, normalization_type_s.c_str() );
  50. poly_degree = _conf->gI(section, "poly_degree", 1);
  51. rbf_gamma = _conf->gD(section, "rbf_gamma", 1.0);
  52. sigmoidpoly_scale = _conf->gD(section, "sigmoidpoly_scale", 1.0);
  53. sigmoidpoly_bias = _conf->gD(section, "sigmoidpoly_bias", 1.0);
  54. svm_c = _conf->gD(section, "C", 0.0 );
  55. use_crossvalidation = _conf->gB(section, "use_crossvalidation", true );
  56. optimize_parameters = _conf->gB(section, "optimize_parameters", true );
  57. rbf_gamma_min = _conf->gD(section, "rbf_gamma_min", 0.1 );
  58. rbf_gamma_max = _conf->gD(section, "rbf_gamma_max", 2.0 );
  59. rbf_gamma_step = _conf->gD(section, "rbf_gamma_step", 0.3 );
  60. svm_c_min = _conf->gD(section, "svm_c_min", 0.0 );
  61. svm_c_max = _conf->gD(section, "svm_c_max", 9.0 );
  62. svm_c_step = _conf->gD(section, "svm_c_step", 1.0 );
  63. bool calibrate_probabilities = _conf->gB(section, "calibrate_probabilities", true );
  64. if ( calibrate_probabilities )
  65. {
  66. logreg = new VCLogisticRegression ( _conf );
  67. } else {
  68. logreg = NULL;
  69. }
  70. // used in SVMLight
  71. verbosity = _conf->gI(section, "verbose", 1 );
  72. if ( optimize_parameters && use_crossvalidation )
  73. {
  74. fprintf (stderr, "VCSVMLight: SVMLight verbosity activated as a workaround for a bug within SVMLight\n");
  75. verbosity = 1;
  76. }
  77. maxClassNo = 1;
  78. }
  79. VCSVMLight::VCSVMLight ( const VCSVMLight & classifier ) : VecClassifier()
  80. {
  81. this->max = classifier.max;
  82. this->min = classifier.min;
  83. this->normalization_type = classifier.normalization_type;
  84. this->kernel_type = classifier.kernel_type;
  85. this->poly_degree = classifier.poly_degree;
  86. this->rbf_gamma = classifier.rbf_gamma;
  87. this->sigmoidpoly_scale = classifier.sigmoidpoly_scale;
  88. this->sigmoidpoly_bias = classifier.sigmoidpoly_bias;
  89. this->svm_c = classifier.svm_c;
  90. this->use_crossvalidation = classifier.use_crossvalidation;
  91. this->optimize_parameters = classifier.optimize_parameters;
  92. this->rbf_gamma_min = classifier.rbf_gamma_min;
  93. this->rbf_gamma_max = classifier.rbf_gamma_max;
  94. this->rbf_gamma_step = classifier.rbf_gamma_step;
  95. this->svm_c_min = classifier.svm_c_min;
  96. this->svm_c_max = classifier.svm_c_max;
  97. this->svm_c_step = classifier.svm_c_step;
  98. if ( classifier.logreg != NULL )
  99. this->logreg = classifier.logreg->clone();
  100. else
  101. this->logreg = NULL;
  102. if ( classifier.finalModel != NULL )
  103. this->finalModel = copy_model ( classifier.finalModel );
  104. else
  105. this->finalModel = NULL;
  106. }
  107. VCSVMLight::~VCSVMLight()
  108. {
  109. }
  110. void VCSVMLight::normalizeVector ( int normalization_type, NICE::Vector & x ) const
  111. {
  112. assert ( x.size() > 0 );
  113. if ( normalization_type == SVM_NORMALIZATION_EUCLIDEAN )
  114. {
  115. double length = x.normL2();
  116. if ( fabs(length) > 1e-12 )
  117. for ( uint i = 0 ; i < x.size() ; i++ )
  118. x[i] /= length;
  119. } else if ( normalization_type == SVM_NORMALIZATION_01 ) {
  120. for ( uint i = 0 ; i < x.size() ; i++ )
  121. if ( fabs(max[i]-min[i]) > 1e-20 )
  122. x[i] = (x[i] - min[i]) / (max[i] - min[i]);
  123. }
  124. }
/**
 * Evaluate the trained SVM decision function for a single feature vector.
 *
 * @param x input feature vector (not modified; a normalized copy is used)
 * @return raw SVM score (signed distance from the decision boundary)
 *
 * Terminates the program if no model has been trained/loaded yet.
 */
double VCSVMLight::getSVMScore ( const NICE::Vector & x ) const
{
  if ( finalModel != NULL ) {
    DOC *example;
    // normalize a copy so the caller's vector stays untouched
    NICE::Vector x_normalized ( x );
    normalizeVector ( normalization_type, x_normalized );
    // SVMLight expects a WORD array with 1-based feature ids,
    // terminated by an entry with wnum == 0
    WORD *words = (WORD *)my_malloc(sizeof(WORD)*(x.size()+10));
    long wc;
    for(wc=0; wc<(long)x.size(); wc++){
      words[wc].weight = (FVAL)(x_normalized[wc]);
      words[wc].wnum = wc+1;
    }
    words[wc].wnum = 0; // sentinel terminating the sparse vector
    long queryid=0;
    long slackid=0;
    long costfactor=1;
    char comment [2] = "";
    // create_svector presumably copies `words` (the buffer is freed right
    // after classification) — NOTE(review): confirm in the SVMLight API
    example = create_example(0,queryid,slackid,costfactor,
        create_svector(words,comment,1.0));
    double score = classify_example ( finalModel, example );
    free (words);
    free_example (example,1); // deep free: also releases the embedded svector
    return score;
  } else {
    fprintf (stderr, "VCSVMLight: no model found !!\n");
    exit(-1);
  }
  return 0.0; // unreachable
}
  154. ClassificationResult VCSVMLight::classify ( const NICE::Vector & x ) const
  155. {
  156. double score = getSVMScore ( x );
  157. if ( logreg != NULL )
  158. {
  159. NICE::Vector y (1);
  160. y[0] = score;
  161. return logreg->classify ( y );
  162. } else {
  163. FullVector scores ( maxClassNo+1 );
  164. scores[0] = 0.0;
  165. scores[1] = score;
  166. // we are dealing only with binary classification
  167. if ( score < 0.0 )
  168. return ClassificationResult (0, scores);
  169. else
  170. return ClassificationResult (1, scores);
  171. }
  172. }
/**
 * Determine per-dimension minimum and maximum over the training set.
 * The bounds are stored in the members `min` / `max` and used later by
 * the [0,1] ("maxmin") feature normalization.
 *
 * @param train labeled training vectors to scan
 */
void VCSVMLight::estimateMaxMin ( const LabeledSetVector & train )
{
  max.resize(0);
  min.resize(0);
  LOOP_ALL(train)
  {
    EACH(classno,v);
    UNUSED_PARAMETER(classno);
    // the first example initializes both bounds ...
    if ( (max.size()<=0) && (min.size()<=0) )
    {
      max = Vector(v);
      min = Vector(v);
    } else {
      // ... every further example widens them element-wise
      for ( uint i = 0 ; i < v.size() ; i++ )
      {
        if ( v[i] > max[i] ) max[i] = v[i];
        if ( v[i] < min[i] ) min[i] = v[i];
      }
    }
  }
}
/**
 * Convert a labeled training set into SVMLight's DOC representation.
 *
 * @param docs [out] newly allocated array of training examples
 * @param label [out] newly allocated array of targets (+1 / -1)
 * @param totwords [out] number of feature dimensions (+1)
 * @param totdoc [out] number of converted examples
 * @param train labeled training vectors (class 0/1)
 *
 * Ownership of *docs and *label passes to the caller.
 */
void VCSVMLight::readDocuments(
    DOC ***docs,
    double **label,
    long int *totwords,
    long int *totdoc,
    const LabeledSetVector & train )
{
  // maxmin normalization needs the per-dimension bounds first
  if ( normalization_type == SVM_NORMALIZATION_01 )
    estimateMaxMin ( train );
  char comment [2] = "";
  WORD *words;
  long vectorcount = train.count();
  long vectorsize = train.dimension();
  long dnum=0,dpos=0,dneg=0,dunlab=0,queryid,slackid,max_docs=vectorcount+2;
  long max_words_doc=vectorsize;
  double costfactor;
  long int wc=0;
  (*docs) = (DOC **)my_malloc(sizeof(DOC *)*max_docs); /* feature vectors */
  (*label) = (double *)my_malloc(sizeof(double)*max_docs); /* target values */
  // scratch buffer, reused for every example and freed at the end
  words = (WORD *)my_malloc(sizeof(WORD)*(max_words_doc+10));
  (*totwords)=0;
  (*totwords) = vectorsize+1;
  queryid=0; slackid=0; costfactor=1.;
  dnum=0;
  LOOP_ALL(train)
  {
    EACH(classno,v);
    NICE::Vector v_normalized ( v );
    normalizeVector ( normalization_type, v_normalized );
    // 1-based feature ids; the entry with wnum == 0 terminates the vector
    for(wc=0; wc<vectorsize; wc++){
      words[wc].weight = (FVAL)(v_normalized[wc]);
      words[wc].wnum = wc+1;
    }
    words[wc].wnum = 0;
    // binary mapping: class 1 -> +1, everything else -> -1
    (*label)[dnum]= (classno==1) ? 1 : -1;
    if (classno == 1) dpos++;
    if (classno == 0) dneg++;
    // NOTE(review): dunlab uses the same condition as dneg — looks like a
    // copy&paste leftover; the counters are not used afterwards anyway.
    if (classno == 0) dunlab++;
    if((*totwords) > MAXFEATNUM) {
      printf("\nMaximum feature number exceeds limit defined in MAXFEATNUM!\n");
      exit(1);
    }
    (*docs)[dnum] = create_example(dnum,queryid,slackid,costfactor,
        create_svector(words,comment,1.0));
    dnum++;
  }
  free(words);
  (*totdoc)=dnum;
}
  243. void VCSVMLight::initMainParameters ( LEARN_PARM *learn_parm,
  244. KERNEL_PARM *kernel_parm )
  245. {
  246. learn_parm->svm_c=svm_c;
  247. kernel_parm->kernel_type=kernel_type;
  248. kernel_parm->poly_degree=poly_degree;
  249. kernel_parm->rbf_gamma=rbf_gamma;
  250. kernel_parm->coef_lin=sigmoidpoly_scale;
  251. kernel_parm->coef_const=sigmoidpoly_bias;
  252. strcpy(kernel_parm->custom,"empty");
  253. if(learn_parm->svm_c<0) {
  254. printf("\nThe C parameter must be greater than zero!\n\n");
  255. exit(-1);
  256. }
  257. }
/**
 * Fill both SVMLight parameter structs with defaults and validate them.
 *
 * @param learn_parm [out] learning parameters (defaults + sanity checks)
 * @param kernel_parm [in] only kernel_type is inspected here
 *
 * Terminates the program when a validation check fails.
 */
void VCSVMLight::initParameters (
    LEARN_PARM *learn_parm,
    KERNEL_PARM *kernel_parm )
{
  /* set default */
  strcpy (learn_parm->predfile, "");
  strcpy (learn_parm->alphafile, "");
  learn_parm->biased_hyperplane=1;
  learn_parm->sharedslack=0;
  learn_parm->remove_inconsistent=0;
  learn_parm->skip_final_opt_check=0;
  learn_parm->svm_maxqpsize=10;
  learn_parm->svm_newvarsinqp=0;
  // linear kernels can shrink aggressively; others need more iterations
  if( kernel_type == SVM_LINEAR )
    learn_parm->svm_iter_to_shrink=2;
  else
    learn_parm->svm_iter_to_shrink=100;
  learn_parm->maxiter=100000;
  learn_parm->kernel_cache_size=40;
  learn_parm->eps=0.1;
  learn_parm->transduction_posratio=-1.0;
  learn_parm->svm_costratio=1.0;
  learn_parm->svm_costratio_unlab=1.0;
  learn_parm->svm_unlabbound=1E-5;
  learn_parm->epsilon_crit=0.001;
  learn_parm->epsilon_a=1E-15;
  learn_parm->rho=1.0;
  learn_parm->xa_depth=0;
  // cross validation: compute_loo enables leave-one-out error estimation
  learn_parm->compute_loo = (use_crossvalidation) ? 1 : 0;
  learn_parm->type=CLASSIFICATION;
  // --- validation checks (mirroring SVMLight's command-line frontend) ---
  if((learn_parm->skip_final_opt_check)
      && (kernel_parm->kernel_type == SVM_LINEAR)) {
    printf("\nIt does not make sense to skip the final optimality check for linear kernels.\n\n");
    learn_parm->skip_final_opt_check=0;
  }
  if((learn_parm->skip_final_opt_check)
      && (learn_parm->remove_inconsistent)) {
    printf("\nIt is necessary to do the final optimality check when removing inconsistent \nexamples.\n");
    exit(-1);
  }
  if((learn_parm->svm_maxqpsize<2)) {
    printf("\nMaximum size of QP-subproblems not in valid range: %ld [2..]\n",learn_parm->svm_maxqpsize);
    exit(-1);
  }
  if((learn_parm->svm_maxqpsize<learn_parm->svm_newvarsinqp)) {
    printf("\nMaximum size of QP-subproblems [%ld] must be larger than the number of\n",learn_parm->svm_maxqpsize);
    printf("new variables [%ld] entering the working set in each iteration.\n",learn_parm->svm_newvarsinqp);
    exit(-1);
  }
  if(learn_parm->svm_iter_to_shrink<1) {
    printf("\nMaximum number of iterations for shrinking not in valid range: %ld [1,..]\n",learn_parm->svm_iter_to_shrink);
    exit(-1);
  }
  if(learn_parm->transduction_posratio>1) {
    printf("\nThe fraction of unlabeled examples to classify as positives must\n");
    printf("be less than 1.0 !!!\n\n");
    exit(-1);
  }
  if(learn_parm->svm_costratio<=0) {
    printf("\nThe COSTRATIO parameter must be greater than zero!\n\n");
    exit(-1);
  }
  if(learn_parm->epsilon_crit<=0) {
    printf("\nThe epsilon parameter must be greater than zero!\n\n");
    exit(-1);
  }
  if(learn_parm->rho<0) {
    printf("\nThe parameter rho for xi/alpha-estimates and leave-one-out pruning must\n");
    printf("be greater than zero (typically 1.0 or 2.0, see T. Joachims, Estimating the\n");
    printf("Generalization Performance of an SVM Efficiently, ICML, 2000.)!\n\n");
    exit(-1);
  }
  if((learn_parm->xa_depth<0) || (learn_parm->xa_depth>100)) {
    printf("\nThe parameter depth for ext. xi/alpha-estimates must be in [0..100] (zero\n");
    printf("for switching to the conventional xa/estimates described in T. Joachims,\n");
    printf("Estimating the Generalization Performance of an SVM Efficiently, ICML, 2000.)\n");
    exit(-1);
  }
}
/**
 * Coordinate-descent grid search over the SVM hyper-parameters: C, and
 * additionally rbf_gamma for the RBF kernel. Each candidate is trained
 * via singleTraining() and scored by leave-one-out error (if
 * use_crossvalidation) or the Xi/Alpha estimate; the search repeats until
 * a full sweep yields no improvement.
 *
 * @param docs training examples (SVMLight representation)
 * @param target training labels (+1/-1)
 * @param totwords number of feature dimensions
 * @param totdoc number of training examples
 * @param model [out] receives the best model found
 * @param kernel_parm [in/out] updated with the optimal kernel parameters
 * @param learn_parm [in/out] updated with the optimal learning parameters
 * @return the (input) model pointer, filled with the best model
 */
MODEL *VCSVMLight::optimizeParameters ( DOC **docs,
    double *target, long int totwords, long int totdoc,
    MODEL *model, KERNEL_PARM *kernel_parm,
    LEARN_PARM *learn_parm )
{
  double best_error = numeric_limits<double>::max();
  double old_best_error = best_error;
  // one tunable parameter (C); two for RBF (C and gamma)
  const int numberOfParameters = (kernel_type == SVM_RBF ? 2 : 1 );
  KERNEL_PARM *currentKernelParm = (KERNEL_PARM *)my_malloc(sizeof(KERNEL_PARM));
  LEARN_PARM *currentLearnParm = (LEARN_PARM *)my_malloc(sizeof(LEARN_PARM));
  MODEL *currentModel = (MODEL *)my_malloc(sizeof(MODEL));
  initParameters ( learn_parm, kernel_parm );
  initMainParameters ( learn_parm, kernel_parm );
  // NOTE(review): runtime-sized arrays (VLAs) are a compiler extension,
  // not standard C++ — confirm all targeted compilers accept this.
  double *parametersToOptimize [ numberOfParameters ];
  double minValue [ numberOfParameters ];
  double maxValue [ numberOfParameters ];
  double steps [ numberOfParameters ];
  // parameter 0: regularization constant C
  parametersToOptimize[0] = &(currentLearnParm->svm_c);
  minValue[0] = svm_c_min;
  maxValue[0] = svm_c_max;
  steps[0] = svm_c_step;
  // parameter 1 (RBF only): kernel width gamma
  if ( kernel_type == SVM_RBF ) {
    parametersToOptimize[1] = &(currentKernelParm->rbf_gamma);
    minValue[1] = rbf_gamma_min;
    maxValue[1] = rbf_gamma_max;
    steps[1] = rbf_gamma_step;
  }
  if ( use_crossvalidation )
  {
    fprintf (stderr, "VCSVMLight: error estimate type: leave-one-out error\n");
  } else {
    fprintf (stderr, "VCSVMLight: error estimate type: Xi/Alpha error\n");
  }
  do {
    old_best_error = best_error;
    for ( int i = 0 ; i < numberOfParameters ; i++ )
    {
      for ( double value = minValue[i] ; value < maxValue[i] ; value += steps[i] )
      {
        // start each candidate from the current best parameter set,
        // with only parameter i replaced by the grid value
        memcpy ( currentKernelParm, kernel_parm, sizeof(KERNEL_PARM) );
        memcpy ( currentLearnParm, learn_parm, sizeof(LEARN_PARM) );
        double *parameter = parametersToOptimize[i];
        *parameter = value;
        singleTraining ( docs, target, totwords, totdoc, currentModel,
            currentKernelParm, currentLearnParm );
        fprintf (stderr, "VCSVMLight: optimize rbf_gamma=%f C=%f err=%f xa_err=%f (%f, %f)\n",
            currentKernelParm->rbf_gamma, currentLearnParm->svm_c, currentModel->loo_error,
            currentModel->xa_error,
            kernel_parm->rbf_gamma, learn_parm->svm_c );
        double currentError = (use_crossvalidation) ? currentModel->loo_error : currentModel->xa_error;
        // negative error indicates SVMLight did not compute the estimate
        if ( currentError < 0 ) {
          fthrow ( Exception, "Error < 0; check your settings; SVMLight verbosity bug ?");
        }
        if ( currentError < best_error )
        {
          fprintf (stderr, "VCSVMLight: new optimum found !\n");
          best_error = currentError;
          memcpy ( kernel_parm, currentKernelParm, sizeof(KERNEL_PARM) );
          memcpy ( learn_parm, currentLearnParm, sizeof(LEARN_PARM) );
          // NOTE(review): struct-level memcpy makes `model` alias the
          // internal arrays of currentModel (supvec, alpha, ...)
          memcpy ( model, currentModel, sizeof(MODEL) );
        }
      }
    }
  } while ( old_best_error > best_error );
  // NOTE(review): if the best candidate was the most recently trained one,
  // free_model(currentModel,0) releases buffers that `model` still points
  // to (see the memcpy above) — possible dangling pointers; verify against
  // the SVMLight sources.
  free_model(currentModel,0);
  free (currentLearnParm);
  free (currentKernelParm);
  // rewrite the member copies of the optimized parameters
  rbf_gamma = kernel_parm->rbf_gamma;
  svm_c = learn_parm->svm_c;
  fprintf (stderr, "VCSVMLight: optimimum rbf_gamma=%f C=%f !\n", rbf_gamma, svm_c );
  return model;
}
  411. MODEL *VCSVMLight::singleTraining ( DOC **docs,
  412. double *target, long int totwords, long int totdoc,
  413. MODEL *model, KERNEL_PARM *kernel_parm,
  414. LEARN_PARM *learn_parm )
  415. {
  416. double *alpha_in=NULL;
  417. KERNEL_CACHE *kernel_cache;
  418. if(kernel_parm->kernel_type == SVM_LINEAR) { /* don't need the cache */
  419. kernel_cache=NULL;
  420. }
  421. else {
  422. /* Always get a new kernel cache. It is not possible to use the
  423. same cache for two different training runs */
  424. kernel_cache = kernel_cache_init(totdoc,learn_parm->kernel_cache_size);
  425. }
  426. svm_learn_classification(docs,target,totdoc,totwords,learn_parm,
  427. kernel_parm,kernel_cache,model,alpha_in);
  428. if(kernel_cache) {
  429. /* Free the memory used for the cache. */
  430. kernel_cache_cleanup(kernel_cache);
  431. }
  432. free(alpha_in);
  433. return model;
  434. }
/**
 * Full SVMLight training pipeline: convert the training set, initialize
 * parameters, train (with or without hyper-parameter optimization), and
 * store the resulting model in `finalModel`.
 *
 * @param trainSet labeled binary training set (classes 0 and 1)
 */
void VCSVMLight::svmLightTraining (
    const LabeledSetVector & trainSet )
{
  double *target;
  long int totwords,totdoc,i;
  DOC **docs; /* training examples */
  LEARN_PARM learn_parm;
  KERNEL_PARM kernel_parm;
  MODEL *model=(MODEL *)my_malloc(sizeof(MODEL));
  readDocuments( &docs,&target,&totwords,&totdoc,
      trainSet);
  initParameters(&learn_parm, &kernel_parm);
  initMainParameters(&learn_parm, &kernel_parm);
  if ( optimize_parameters )
    optimizeParameters ( docs, target, totwords, totdoc, model, &kernel_parm, &learn_parm );
  else
    singleTraining ( docs, target, totwords, totdoc, model, &kernel_parm, &learn_parm );
  // keep a private copy of the trained model; release the temporary
  finalModel = copy_model(model);
  free_model(model,0);
  // NOTE(review): copy_model() appears to copy the supvec DOC pointers
  // shallowly, while the loop below deep-frees all docs — for non-linear
  // kernels finalModel might then reference freed examples at classify
  // time; verify against the SVMLight sources.
  for(i=0;i<totdoc;i++)
    free_example(docs[i],1);
  free(docs);
  free(target);
};
/**
 * Train the binary SVM and, if enabled, the probability calibration.
 *
 * @param _teachSet labeled training set; must contain exactly the class
 *                  numbers 0 and 1 (maxClassno == 1)
 * @throws Exception for non-binary problems
 */
void VCSVMLight::teach ( const LabeledSetVector & _teachSet )
{
  maxClassNo = _teachSet.getMaxClassno();
  // only binary problems are supported
  if ( _teachSet.getMaxClassno() != 1 )
    fthrow ( Exception, "This classifier is only suitable for binary classification problems!");
  svmLightTraining ( _teachSet );
  if ( logreg != NULL )
  {
    // probability calibration: fit the logistic regression on the raw
    // SVM scores of the training examples
    LabeledSetVector scoreSet;
    LOOP_ALL ( _teachSet )
    {
      EACH(classno,x);
      double score = getSVMScore ( x );
      Vector y (1);
      y[0] = score;
      scoreSet.add ( classno, y );
    }
    logreg->teach ( scoreSet );
    scoreSet.clear();
  }
}
/** No deferred work: teach() performs the complete training, so this is a no-op. */
void VCSVMLight::finishTeaching()
{
}
  483. /** clone this object */
  484. VCSVMLight *VCSVMLight::clone(void) const
  485. {
  486. VCSVMLight *classifier = new VCSVMLight( *this );
  487. return classifier;
  488. }
  489. void VCSVMLight::clear ()
  490. {
  491. if ( finalModel != NULL )
  492. {
  493. free(finalModel);
  494. finalModel = NULL;
  495. }
  496. }
/**
 * Load a previously saved SVMLight model from the file named `s`.
 * @param s model file name
 * @param format unused
 */
void VCSVMLight::read (const string& s, int format)
{
  // NOTE(review): an already present finalModel is overwritten without
  // being freed — possible leak when read() follows teach() or a prior
  // read(); verify intended usage.
  finalModel = read_model ( const_cast<char *>(s.c_str()) );
}
/**
 * Write the trained SVMLight model to the file named `s`.
 * @param s target file name
 * @param format unused
 */
void VCSVMLight::save (const string& s, int format) const
{
  // NOTE(review): no guard against finalModel == NULL — presumably
  // write_model would then misbehave; confirm callers train/read first.
  write_model ( const_cast<char *>(s.c_str()), finalModel );
}
  505. void VCSVMLight::store ( std::ostream & os, int format ) const
  506. {
  507. fprintf (stderr, "VCSVMLight: unable to write to stream! please use read()\n");
  508. }
  509. void VCSVMLight::restore ( std::istream & is, int format )
  510. {
  511. fprintf (stderr, "VCSVMLight: unable to read from stream! please use save()\n");
  512. exit (-1);
  513. }
  514. #endif