pktools  2.6.7
Processing Kernel for geospatial data
pkoptsvm.cc
/**********************************************************************
pkoptsvm.cc: program to optimize parameters for support vector machine classifier pksvm
Copyright (C) 2008-2014 Pieter Kempeneers

This file is part of pktools

pktools is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

pktools is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with pktools. If not, see <http://www.gnu.org/licenses/>.
***********************************************************************/
#include <iostream>
#include <sstream>
#include <fstream>
#include <vector>
#include <math.h>
//#include <nlopt.hpp>
#include "base/Optionpk.h"
#include "algorithms/ConfusionMatrix.h"
#include "algorithms/FeatureSelector.h"
//#include "algorithms/OptFactory.h"
#include "algorithms/CostFactorySVM.h"
#include "algorithms/svm.h"
#include "imageclasses/ImgReaderOgr.h"

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

/******************************************************************************/
using namespace std;

#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
//declare objective function
double objFunction(const std::vector<double> &x, std::vector<double> &grad, void *my_func_data);

//global parameters used in objective function
map<string,short> classValueMap;
vector<std::string> nameVector;
vector<unsigned int> nctraining;
vector<unsigned int> nctest;
Optionpk<std::string> svm_type_opt("svmt", "svmtype", "type of SVM (C_SVC, nu_SVC, one_class, epsilon_SVR, nu_SVR)","C_SVC");
Optionpk<std::string> kernel_type_opt("kt", "kerneltype", "type of kernel function (linear, polynomial, radial, sigmoid)","radial");
Optionpk<unsigned short> kernel_degree_opt("kd", "kd", "degree in kernel function",3);
Optionpk<float> coef0_opt("c0", "coef0", "coef0 in kernel function",0);
Optionpk<float> nu_opt("nu", "nu", "the parameter nu of nu-SVC, one-class SVM, and nu-SVR",0.5);
Optionpk<float> epsilon_loss_opt("eloss", "eloss", "the epsilon in loss function of epsilon-SVR",0.1);
Optionpk<int> cache_opt("cache", "cache", "cache memory size in MB",100);
Optionpk<float> epsilon_tol_opt("etol", "etol", "the tolerance of termination criterion",0.001);
Optionpk<bool> shrinking_opt("shrink", "shrink", "whether to use the shrinking heuristics",false);
Optionpk<bool> prob_est_opt("pe", "probest", "whether to train an SVC or SVR model for probability estimates",true,2);
Optionpk<bool> costfunction_opt("cf", "cf", "use Overall Accuracy instead of kappa",false);
// Optionpk<bool> weight_opt("wi", "wi", "set the parameter C of class i to weight*C, for C-SVC",true);
Optionpk<unsigned short> cv_opt("cv", "cv", "n-fold cross validation mode",2);
Optionpk<string> classname_opt("c", "class", "list of class names.");
Optionpk<short> classvalue_opt("r", "reclass", "list of class values (use same order as in class opt).");
Optionpk<short> verbose_opt("v", "verbose", "use 1 to output intermediate results for plotting",0,2);

double objFunction(const std::vector<double> &x, std::vector<double> &grad, void *my_func_data){

  assert(grad.empty());
  vector<Vector2d<float> > *tf=reinterpret_cast<vector<Vector2d<float> >*> (my_func_data);
  float ccost=x[0];
  float gamma=x[1];
  double error=1.0/epsilon_tol_opt[0];
  double kappa=1.0;
  double oa=1.0;

  CostFactorySVM costfactory(svm_type_opt[0], kernel_type_opt[0], kernel_degree_opt[0], gamma, coef0_opt[0], ccost, nu_opt[0], epsilon_loss_opt[0], cache_opt[0], epsilon_tol_opt[0], shrinking_opt[0], prob_est_opt[0], cv_opt[0], verbose_opt[0]);

  assert(tf->size());
  // if(nctest>0)
  //   costfactory.setCv(0);

  costfactory.setCv(cv_opt[0]);

  if(classname_opt.size()){
    assert(classname_opt.size()==classvalue_opt.size());
    for(int iclass=0;iclass<classname_opt.size();++iclass)
      costfactory.setClassValueMap(classname_opt[iclass],classvalue_opt[iclass]);
  }
  //set names in confusion matrix using nameVector
  costfactory.setNameVector(nameVector);
  // vector<string> nameVector=costfactory.getNameVector();
  for(int iname=0;iname<nameVector.size();++iname){
    if(costfactory.getClassValueMap().empty()){
      costfactory.pushBackClassName(nameVector[iname]);
      // cm.pushBackClassName(nameVector[iname]);
    }
    else if(costfactory.getClassIndex(type2string<short>((costfactory.getClassValueMap())[nameVector[iname]]))<0)
      costfactory.pushBackClassName(type2string<short>((costfactory.getClassValueMap())[nameVector[iname]]));
  }

  costfactory.setNcTraining(nctraining);
  costfactory.setNcTest(nctest);

  kappa=costfactory.getCost(*tf);
  return(kappa);
}
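
// Note on the returned cost: CostFactorySVM::getCost() evaluates the SVM for
// the current (ccost, gamma) pair on the cross-validation folds (or on the
// appended test samples when a test file is given). Judging from the -cf
// option and the variable names used below, the default score is Cohen's
// kappa,
//   kappa = (p_o - p_e) / (1 - p_e),
// where p_o is the overall accuracy and p_e the chance agreement derived from
// the confusion-matrix marginals; the grid search below maximizes this value.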

int main(int argc, char *argv[])
{
  map<short,int> reclassMap;
  vector<int> vreclass;
  Optionpk<string> training_opt("t", "training", "training vector file. A single vector file contains all training features (must be set as: b0, b1, b2,...) for all classes (class numbers identified by label option).");
  Optionpk<float> ccost_opt("cc", "ccost", "min and max boundaries for the parameter C of C-SVC, epsilon-SVR, and nu-SVR (optional: initial value)",1);
  Optionpk<float> gamma_opt("g", "gamma", "min and max boundaries for gamma in kernel function (optional: initial value)",0);
  Optionpk<double> stepcc_opt("stepcc","stepcc","multiplicative step for ccost in GRID search",2);
  Optionpk<double> stepg_opt("stepg","stepg","multiplicative step for gamma in GRID search",2);
  Optionpk<string> input_opt("i", "input", "input test vector file");
  Optionpk<string> tlayer_opt("tln", "tln", "training layer name(s)");
  Optionpk<string> label_opt("label", "label", "identifier for class label in training vector file.","label");
  // Optionpk<unsigned short> reclass_opt("\0", "rc", "reclass code (e.g. --rc=12 --rc=23 to reclass first two classes to 12 and 23 resp.).", 0);
  Optionpk<unsigned int> balance_opt("bal", "balance", "balance the input data to this number of samples for each class", 0);
  Optionpk<bool> random_opt("random","random", "in case of balance, randomize input data", true);
  Optionpk<int> minSize_opt("min", "min", "if number of training pixels is less than min, do not take this class into account", 0);
  Optionpk<unsigned short> band_opt("b", "band", "band index (starting from 0, either use band option or use start to end)");
  Optionpk<unsigned short> bstart_opt("sband", "startband", "Start band sequence number");
  Optionpk<unsigned short> bend_opt("eband", "endband", "End band sequence number");
  Optionpk<double> offset_opt("offset", "offset", "offset value for each spectral band input features: refl[band]=(DN[band]-offset[band])/scale[band]", 0.0);
  Optionpk<double> scale_opt("scale", "scale", "scale value for each spectral band input features: refl[band]=(DN[band]-offset[band])/scale[band] (use 0 to scale min and max in each band to -1.0 and 1.0)", 0.0);
  Optionpk<unsigned int> maxit_opt("maxit","maxit","maximum number of iterations",500);
  //Optionpk<string> algorithm_opt("a", "algorithm", "GRID, or any optimization algorithm from http://ab-initio.mit.edu/wiki/index.php/NLopt_Algorithms","GRID");
  Optionpk<double> tolerance_opt("tol","tolerance","relative tolerance for stopping criterion",0.0001);

  input_opt.setHide(1);
  tlayer_opt.setHide(1);
  label_opt.setHide(1);
  balance_opt.setHide(1);
  random_opt.setHide(1);
  minSize_opt.setHide(1);
  band_opt.setHide(1);
  bstart_opt.setHide(1);
  bend_opt.setHide(1);
  offset_opt.setHide(1);
  scale_opt.setHide(1);
  svm_type_opt.setHide(1);
  kernel_type_opt.setHide(1);
  kernel_degree_opt.setHide(1);
  coef0_opt.setHide(1);
  nu_opt.setHide(1);
  epsilon_loss_opt.setHide(1);
  cache_opt.setHide(1);
  epsilon_tol_opt.setHide(1);
  shrinking_opt.setHide(1);
  prob_est_opt.setHide(1);
  cv_opt.setHide(1);
  costfunction_opt.setHide(1);
  maxit_opt.setHide(1);
  tolerance_opt.setHide(1);
  // algorithm_opt.setHide(1);
  classname_opt.setHide(1);
  classvalue_opt.setHide(1);

  bool doProcess;//stop process when program was invoked with help option (-h --help)
  try{
    doProcess=training_opt.retrieveOption(argc,argv);
    ccost_opt.retrieveOption(argc,argv);
    gamma_opt.retrieveOption(argc,argv);
    stepcc_opt.retrieveOption(argc,argv);
    stepg_opt.retrieveOption(argc,argv);
    input_opt.retrieveOption(argc,argv);
    tlayer_opt.retrieveOption(argc,argv);
    label_opt.retrieveOption(argc,argv);
    balance_opt.retrieveOption(argc,argv);
    random_opt.retrieveOption(argc,argv);
    minSize_opt.retrieveOption(argc,argv);
    band_opt.retrieveOption(argc,argv);
    bstart_opt.retrieveOption(argc,argv);
    bend_opt.retrieveOption(argc,argv);
    offset_opt.retrieveOption(argc,argv);
    scale_opt.retrieveOption(argc,argv);
    svm_type_opt.retrieveOption(argc,argv);
    kernel_type_opt.retrieveOption(argc,argv);
    kernel_degree_opt.retrieveOption(argc,argv);
    coef0_opt.retrieveOption(argc,argv);
    nu_opt.retrieveOption(argc,argv);
    epsilon_loss_opt.retrieveOption(argc,argv);
    cache_opt.retrieveOption(argc,argv);
    epsilon_tol_opt.retrieveOption(argc,argv);
    shrinking_opt.retrieveOption(argc,argv);
    prob_est_opt.retrieveOption(argc,argv);
    cv_opt.retrieveOption(argc,argv);
    costfunction_opt.retrieveOption(argc,argv);
    maxit_opt.retrieveOption(argc,argv);
    tolerance_opt.retrieveOption(argc,argv);
    // algorithm_opt.retrieveOption(argc,argv);
    classname_opt.retrieveOption(argc,argv);
    classvalue_opt.retrieveOption(argc,argv);
    verbose_opt.retrieveOption(argc,argv);
  }
  catch(string predefinedString){
    std::cout << predefinedString << std::endl;
    exit(0);
  }
  if(!doProcess){
    cout << endl;
    cout << "Usage: pkoptsvm -t training" << endl;
    cout << endl;
    std::cout << "short option -h shows basic options only, use long option --help to show all options" << std::endl;
    exit(0);//help was invoked, stop processing
  }

  assert(training_opt.size());
  if(input_opt.size())
    cv_opt[0]=0;

  if(verbose_opt[0]>=1){
    if(input_opt.size())
      std::cout << "input filename: " << input_opt[0] << std::endl;
    std::cout << "training vector file: " << std::endl;
    for(int ifile=0;ifile<training_opt.size();++ifile)
      std::cout << training_opt[ifile] << std::endl;
    std::cout << "verbose: " << verbose_opt[0] << std::endl;
  }

  unsigned int totalSamples=0;
  unsigned int totalTestSamples=0;

  unsigned short nclass=0;
  int nband=0;
  int startBand=2;//first two bands represent X and Y pos

  vector<double> offset;
  vector<double> scale;
  vector< Vector2d<float> > trainingPixels;//[class][sample][band]
  vector< Vector2d<float> > testPixels;//[class][sample][band]

  // if(priors_opt.size()>1){//priors from argument list
  //   priors.resize(priors_opt.size());
  //   double normPrior=0;
  //   for(int iclass=0;iclass<priors_opt.size();++iclass){
  //     priors[iclass]=priors_opt[iclass];
  //     normPrior+=priors[iclass];
  //   }
  //   //normalize
  //   for(int iclass=0;iclass<priors_opt.size();++iclass)
  //     priors[iclass]/=normPrior;
  // }

  //convert start and end band options to vector of band indexes
  try{
    if(bstart_opt.size()){
      if(bend_opt.size()!=bstart_opt.size()){
        string errorstring="Error: options for start and end band indexes must be provided as pairs, missing end band";
        throw(errorstring);
      }
      band_opt.clear();
      for(int ipair=0;ipair<bstart_opt.size();++ipair){
        if(bend_opt[ipair]<=bstart_opt[ipair]){
          string errorstring="Error: index for end band must be larger than start band";
          throw(errorstring);
        }
        for(int iband=bstart_opt[ipair];iband<=bend_opt[ipair];++iband)
          band_opt.push_back(iband);
      }
    }
  }
  catch(string error){
    cerr << error << std::endl;
    exit(1);
  }
  //sort bands
  if(band_opt.size())
    std::sort(band_opt.begin(),band_opt.end());

  // map<string,short> classValueMap;//global variable for now (due to getCost)
  if(classname_opt.size()){
    assert(classname_opt.size()==classvalue_opt.size());
    for(int iclass=0;iclass<classname_opt.size();++iclass)
      classValueMap[classname_opt[iclass]]=classvalue_opt[iclass];
  }

  //----------------------------------- Training -------------------------------
  struct svm_problem prob;
  vector<string> fields;
  //organize training data
  trainingPixels.clear();
  testPixels.clear();
  map<string,Vector2d<float> > trainingMap;
  map<string,Vector2d<float> > testMap;
  if(verbose_opt[0]>=1)
    std::cout << "reading training file " << training_opt[0] << std::endl;
  try{
    ImgReaderOgr trainingReader(training_opt[0]);
    if(band_opt.size()){
      totalSamples=trainingReader.readDataImageOgr(trainingMap,fields,band_opt,label_opt[0],tlayer_opt,verbose_opt[0]);
      if(input_opt.size()){
        ImgReaderOgr inputReader(input_opt[0]);
        totalTestSamples=inputReader.readDataImageOgr(testMap,fields,band_opt,label_opt[0],tlayer_opt,verbose_opt[0]);
        inputReader.close();
      }
    }
    else{
      totalSamples=trainingReader.readDataImageOgr(trainingMap,fields,0,0,label_opt[0],tlayer_opt,verbose_opt[0]);
      if(input_opt.size()){
        ImgReaderOgr inputReader(input_opt[0]);
        totalTestSamples=inputReader.readDataImageOgr(testMap,fields,0,0,label_opt[0],tlayer_opt,verbose_opt[0]);
        inputReader.close();
      }
      trainingReader.close();
    }
    if(trainingMap.size()<2){
      // map<string,Vector2d<float> >::iterator mapit=trainingMap.begin();
      // while(mapit!=trainingMap.end())
      //   cerr << mapit->first << " -> " << classValueMap[mapit->first] << std::endl;
      string errorstring="Error: could not read at least two classes from training input file";
      throw(errorstring);
    }
    if(input_opt.size()&&testMap.size()<2){
      string errorstring="Error: could not read at least two classes from test input file";
      throw(errorstring);
    }
  }
  catch(string error){
    cerr << error << std::endl;
    exit(1);
  }
  catch(...){
    cerr << "error caught" << std::endl;
    exit(1);
  }
  //todo delete class 0 ?
  // if(verbose_opt[0]>=1)
  //   std::cout << "erasing class 0 from training set (" << trainingMap[0].size() << " from " << totalSamples << ") samples" << std::endl;
  // totalSamples-=trainingMap[0].size();
  // trainingMap.erase(0);

  if(verbose_opt[0]>1)
    std::cout << "training pixels: " << std::endl;
  map<string,Vector2d<float> >::iterator mapit;
  mapit=trainingMap.begin();
  while(mapit!=trainingMap.end()){
    if(classValueMap.size()){
      //check if name in training is covered by classname_opt (values can not be 0)
      if(classValueMap[mapit->first]>0){
        if(verbose_opt[0])
          std::cout << mapit->first << " -> " << classValueMap[mapit->first] << std::endl;
      }
      else{
        std::cerr << "Error: names in classname option are not complete, please check names in training vector and make sure classvalue is > 0" << std::endl;
        exit(1);
      }
    }
    //delete small classes (erase via post-increment so the iterator remains valid)
    if((mapit->second).size()<minSize_opt[0]){
      trainingMap.erase(mapit++);
      continue;
    }
    nameVector.push_back(mapit->first);
    trainingPixels.push_back(mapit->second);
    if(verbose_opt[0]>1)
      std::cout << mapit->first << ": " << (mapit->second).size() << " samples" << std::endl;
    // trainingPixels.push_back(mapit->second); ??
    // ++iclass;
    ++mapit;
  }
  nclass=trainingPixels.size();
  if(classname_opt.size())
    assert(nclass==classname_opt.size());
  nband=trainingPixels[0][0].size()-2;//X and Y//trainingPixels[0][0].size();

  mapit=testMap.begin();
  while(mapit!=testMap.end()){
    if(classValueMap.size()){
      //check if name in test is covered by classname_opt (values can not be 0)
      if(classValueMap[mapit->first]>0){
        ;//ok, no need to print to std::cout
      }
      else{
        std::cerr << "Error: names in classname option are not complete, please check names in test vector and make sure classvalue is > 0" << std::endl;
        exit(1);
      }
    }
    //no need to delete small classes for test sample
    testPixels.push_back(mapit->second);
    if(verbose_opt[0]>1)
      std::cout << mapit->first << ": " << (mapit->second).size() << " samples" << std::endl;
    ++mapit;
  }
  if(input_opt.size()){
    assert(nclass==testPixels.size());
    assert(nband==testPixels[0][0].size()-2);//X and Y//testPixels[0][0].size();
    assert(!cv_opt[0]);
  }

  //do not remove outliers here: could easily be obtained through ogr2ogr -where 'B2<110' output.shp input.shp
  //balance training data
  if(balance_opt[0]>0){
    if(random_opt[0])
      srand(time(NULL));
    totalSamples=0;
    for(int iclass=0;iclass<nclass;++iclass){
      if(trainingPixels[iclass].size()>balance_opt[0]){
        while(trainingPixels[iclass].size()>balance_opt[0]){
          int index=rand()%trainingPixels[iclass].size();
          trainingPixels[iclass].erase(trainingPixels[iclass].begin()+index);
        }
      }
      else{
        int oldsize=trainingPixels[iclass].size();
        for(int isample=trainingPixels[iclass].size();isample<balance_opt[0];++isample){
          int index = rand()%oldsize;
          trainingPixels[iclass].push_back(trainingPixels[iclass][index]);
        }
      }
      totalSamples+=trainingPixels[iclass].size();
    }
    assert(totalSamples==nclass*balance_opt[0]);
  }
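  // Descriptive note: balancing brings every class to exactly balance_opt[0]
  // samples: larger classes are randomly thinned one sample at a time, smaller
  // classes are padded by duplicating randomly chosen samples, so the final
  // totalSamples equals nclass*balance_opt[0].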

  //no need to balance test sample
  //set scale and offset
  offset.resize(nband);
  scale.resize(nband);
  if(offset_opt.size()>1)
    assert(offset_opt.size()==nband);
  if(scale_opt.size()>1)
    assert(scale_opt.size()==nband);
  for(int iband=0;iband<nband;++iband){
    if(verbose_opt[0]>1)
      std::cout << "scaling for band " << iband << std::endl;
    offset[iband]=(offset_opt.size()==1)?offset_opt[0]:offset_opt[iband];
    scale[iband]=(scale_opt.size()==1)?scale_opt[0]:scale_opt[iband];
    //search for min and max
    if(scale[iband]<=0){
      float theMin=trainingPixels[0][0][iband+startBand];
      float theMax=trainingPixels[0][0][iband+startBand];
      for(int iclass=0;iclass<nclass;++iclass){
        for(int isample=0;isample<trainingPixels[iclass].size();++isample){
          if(theMin>trainingPixels[iclass][isample][iband+startBand])
            theMin=trainingPixels[iclass][isample][iband+startBand];
          if(theMax<trainingPixels[iclass][isample][iband+startBand])
            theMax=trainingPixels[iclass][isample][iband+startBand];
        }
      }
      offset[iband]=theMin+(theMax-theMin)/2.0;
      scale[iband]=(theMax-theMin)/2.0;
      if(verbose_opt[0]>1){
        std::cout << "Extreme image values for band " << iband << ": [" << theMin << "," << theMax << "]" << std::endl;
        std::cout << "Using offset, scale: " << offset[iband] << ", " << scale[iband] << std::endl;
        std::cout << "scaled values for band " << iband << ": [" << (theMin-offset[iband])/scale[iband] << "," << (theMax-offset[iband])/scale[iband] << "]" << std::endl;
      }
    }
  }
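  // Descriptive note: with offset=(min+max)/2 and scale=(max-min)/2 per band,
  // the transform (value-offset)/scale maps the observed training range of
  // each band onto [-1,1], the input range commonly recommended for libsvm.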

  // if(priors_opt.size()==1){//default: equal priors for each class
  //   priors.resize(nclass);
  //   for(int iclass=0;iclass<nclass;++iclass)
  //     priors[iclass]=1.0/nclass;
  // }
  // assert(priors_opt.size()==1||priors_opt.size()==nclass);

  if(verbose_opt[0]>=1){
    std::cout << "number of bands: " << nband << std::endl;
    std::cout << "number of classes: " << nclass << std::endl;
    // std::cout << "priors:";
    // for(int iclass=0;iclass<nclass;++iclass)
    //   std::cout << " " << priors[iclass];
    // std::cout << std::endl;
  }

  //Calculate features of training (and test) set
  nctraining.resize(nclass);
  nctest.resize(nclass);
  vector< Vector2d<float> > trainingFeatures(nclass);
  for(int iclass=0;iclass<nclass;++iclass){
    if(verbose_opt[0]>=1)
      std::cout << "calculating features for class " << iclass << std::endl;
    nctraining[iclass]=trainingPixels[iclass].size();
    if(verbose_opt[0]>=1)
      std::cout << "nctraining[" << iclass << "]: " << nctraining[iclass] << std::endl;
    if(testPixels.size()>iclass){
      nctest[iclass]=testPixels[iclass].size();
      if(verbose_opt[0]>=1){
        std::cout << "nctest[" << iclass << "]: " << nctest[iclass] << std::endl;
      }
    }
    else
      nctest[iclass]=0;
    // trainingFeatures[iclass].resize(nctraining[iclass]);
    trainingFeatures[iclass].resize(nctraining[iclass]+nctest[iclass]);
    for(int isample=0;isample<nctraining[iclass];++isample){
      //scale pixel values according to scale and offset!!!
      for(int iband=0;iband<nband;++iband){
        assert(trainingPixels[iclass].size()>isample);
        assert(trainingPixels[iclass][isample].size()>iband+startBand);
        assert(offset.size()>iband);
        assert(scale.size()>iband);
        float value=trainingPixels[iclass][isample][iband+startBand];
        trainingFeatures[iclass][isample].push_back((value-offset[iband])/scale[iband]);
      }
    }
    // assert(trainingFeatures[iclass].size()==nctraining[iclass]);
    for(int isample=0;isample<nctest[iclass];++isample){
      //scale pixel values according to scale and offset!!!
      for(int iband=0;iband<nband;++iband){
        assert(testPixels[iclass].size()>isample);
        assert(testPixels[iclass][isample].size()>iband+startBand);
        assert(offset.size()>iband);
        assert(scale.size()>iband);
        float value=testPixels[iclass][isample][iband+startBand];
        // testFeatures[iclass][isample].push_back((value-offset[iband])/scale[iband]);
        trainingFeatures[iclass][nctraining[iclass]+isample].push_back((value-offset[iband])/scale[iband]);
      }
    }
    assert(trainingFeatures[iclass].size()==nctraining[iclass]+nctest[iclass]);
  }
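  // Descriptive note: trainingFeatures[iclass] now holds the nctraining[iclass]
  // scaled training samples followed by the nctest[iclass] scaled test samples;
  // the global nctraining/nctest vectors passed to CostFactorySVM let the cost
  // function recover that split.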

  assert(ccost_opt.size()>1);//must have boundaries at least (initial value is optional)
  if(ccost_opt.size()<3)//create initial value
    ccost_opt.push_back(sqrt(ccost_opt[0]*ccost_opt[1]));
  assert(gamma_opt.size()>1);//must have boundaries at least (initial value is optional)
  if(gamma_opt.size()<3)//create initial value
    gamma_opt.push_back(sqrt(gamma_opt[0]*gamma_opt[1]));//will be translated to 1.0/nFeatures
  assert(ccost_opt.size()==3);//min, max, init
  assert(gamma_opt.size()==3);//min, max, init
  assert(gamma_opt[0]<gamma_opt[1]);
  assert(gamma_opt[0]<gamma_opt[2]);
  assert(gamma_opt[2]<gamma_opt[1]);
  assert(ccost_opt[0]<ccost_opt[1]);
  assert(ccost_opt[0]<ccost_opt[2]);
  assert(ccost_opt[2]<ccost_opt[1]);
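  // The default initial value sqrt(min*max) is the midpoint of [min,max] on a
  // logarithmic scale, consistent with the multiplicative GRID steps; in this
  // build it only serves the commented-out NLopt path further down, since the
  // grid search always sweeps the full [min,max] range.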

  std::vector<double> x(2);
  // if(algorithm_opt[0]=="GRID"){
  if (1){
    // double minError=1000;
    // double minCost=0;
    // double minGamma=0;
    double maxKappa=0;
    double maxCost=0;
    double maxGamma=0;
    const char* pszMessage=NULL;//not used by GDALTermProgress, initialize to be safe
    void* pProgressArg=NULL;
    GDALProgressFunc pfnProgress=GDALTermProgress;
    double progress=0;
    if(!verbose_opt[0])
      pfnProgress(progress,pszMessage,pProgressArg);
    double ncost=log(ccost_opt[1])/log(stepcc_opt[0])-log(ccost_opt[0])/log(stepcc_opt[0]);
    double ngamma=log(gamma_opt[1])/log(stepg_opt[0])-log(gamma_opt[0])/log(stepg_opt[0]);
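    // ncost and ngamma are the number of multiplicative steps per dimension,
    // i.e. log_step(max/min): for example, -cc 1 -cc 1000 with the default
    // step of 2 gives log2(1000) ~ 10 cost values. The progress bar advances
    // by 1/(ncost*ngamma) per evaluated grid cell.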
    for(double ccost=ccost_opt[0];ccost<=ccost_opt[1];ccost*=stepcc_opt[0]){
      for(double gamma=gamma_opt[0];gamma<=gamma_opt[1];gamma*=stepg_opt[0]){
        x[0]=ccost;
        x[1]=gamma;
        std::vector<double> theGrad;
        double kappa=0;
        kappa=objFunction(x,theGrad,&trainingFeatures);
        if(kappa>maxKappa){
          maxKappa=kappa;
          maxCost=ccost;
          maxGamma=gamma;
        }
        if(verbose_opt[0])
          std::cout << ccost << " " << gamma << " " << kappa << std::endl;
        progress+=1.0/ncost/ngamma;
        if(!verbose_opt[0])
          pfnProgress(progress,pszMessage,pProgressArg);
      }
    }
    progress=1.0;
    if(!verbose_opt[0])
      pfnProgress(progress,pszMessage,pProgressArg);
    x[0]=maxCost;
    x[1]=maxGamma;
  }
  //else{
  //   nlopt::opt optimizer=OptFactory::getOptimizer(algorithm_opt[0],2);
  //   if(verbose_opt[0]>1)
  //     std::cout << "optimization algorithm: " << optimizer.get_algorithm_name() << "..." << std::endl;
  //   std::vector<double> lb(2);
  //   std::vector<double> init(2);
  //   std::vector<double> ub(2);

  //   lb[0]=ccost_opt[0];
  //   lb[1]=(gamma_opt[0]>0)? gamma_opt[0] : 1.0/trainingFeatures[0][0].size();
  //   init[0]=ccost_opt[2];
  //   init[1]=(gamma_opt[2]>0)? gamma_opt[1] : 1.0/trainingFeatures[0][0].size();
  //   ub[0]=ccost_opt[1];
  //   ub[1]=(gamma_opt[1]>0)? gamma_opt[1] : 1.0/trainingFeatures[0][0].size();
  //   // optimizer.set_min_objective(objFunction, &trainingFeatures);
  //   optimizer.set_max_objective(objFunction, &trainingFeatures);
  //   optimizer.set_lower_bounds(lb);
  //   optimizer.set_upper_bounds(ub);
  //   if(verbose_opt[0]>1)
  //     std::cout << "set stopping criteria" << std::endl;
  //   //set stopping criteria
  //   if(maxit_opt[0])
  //     optimizer.set_maxeval(maxit_opt[0]);
  //   else
  //     optimizer.set_xtol_rel(tolerance_opt[0]);
  //   double minf=0;
  //   x=init;
  //   try{
  //     optimizer.optimize(x, minf);
  //   }
  //   catch(string error){
  //     cerr << error << std::endl;
  //     exit(1);
  //   }
  //   catch (exception& e){
  //     cout << e.what() << endl;
  //   }
  //   catch(...){
  //     cerr << "error caught" << std::endl;
  //     exit(1);
  //   }

  //   double ccost=x[0];
  //   double gamma=x[1];
  //   if(verbose_opt[0])
  //     std::cout << "optimized with " << optimizer.get_algorithm_name() << "..." << std::endl;
  //}
  std::cout << " --ccost " << x[0];
  std::cout << " --gamma " << x[1];
  std::cout << std::endl;
}