Log In | Get Help   
Home My Page Projects Code Snippets Project Openings Mareframe
Summary Activity Forums Tracker Lists Tasks Docs Surveys News SCM Files
[mareframe] Annotation of /trunk/gadget/optinfo.h
[mareframe] / trunk / gadget / optinfo.h Repository:
ViewVC logotype

Annotation of /trunk/gadget/optinfo.h

Parent Directory Parent Directory | Revision Log Revision Log


Revision 20 - (view) (download)

#ifndef optinfo_h
#define optinfo_h

#include "maininfo.h"
#include "doublematrix.h"
#include "doublevector.h"
#include "intvector.h"
#include "intmatrix.h"
#include "seq_optimize_template.h"

/**
 * \brief Enumerated type used to identify which optimisation algorithm a
 * concrete OptInfo subclass implements (see OptInfo::getType())
 */
enum OptType { OPTHOOKE = 1, OPTSIMANN, OPTBFGS, OPTPSO };
12 : agomez 1
13 :     /**
14 :     * \class OptInfo
15 :     * \brief This is the base class used to perform the optimisation calculation for the model
16 :     *
17 :     * \note This will always be overridden by the derived classes that actually perform the optimisation calculation
18 :     */
19 :     class OptInfo {
20 :     public:
21 :     /**
22 :     * \brief This is the default OptInfo constructor
23 :     */
24 :     OptInfo() { converge = 0; iters = 0; score = 0.0; };
25 :     /**
26 :     * \brief This is the default OptInfo destructor
27 :     */
28 :     ~OptInfo() {};
29 :     /**
30 :     * \brief This is the function used to read in the optimisation parameters
31 :     * \param infile is the CommentStream to read the optimisation parameters from
32 :     * \param text is a text string used to compare parameter names
33 :     */
34 :     virtual void read(CommentStream& infile, char* text) {};
35 :     /**
36 :     * \brief This function will print information from the optimisation algorithm
37 :     * \param outfile is the ofstream that the optimisation information gets sent to
38 :     * \param prec is the precision to use in the output file
39 :     */
40 :     virtual void Print(ofstream& outfile, int prec) {};
41 :     /**
42 :     * \brief This is the function used to call the optimisation algorithms
43 :     */
44 :     virtual void OptimiseLikelihood() {};
45 : agomez 20 #ifdef _OPENMP
46 : ulcessvp 12 /**
47 :     * \brief This is the function used to call the optimisation algorithms parallelized with OpenMP of the reproducible version
48 :     */
49 : ulcessvp 11 virtual void OptimiseLikelihoodOMP() {};
50 : agomez 20 virtual void OptimiseLikelihoodREP() {};
51 :     #endif
52 : ulcessvp 12 /**
53 :     * \brief This function set the seeds used in SA
54 :     * \param val array of unsigned int with the seeds
55 :     */
56 : ulcessvp 11 void setSeed(unsigned* val) {seed = val[0]; seedM = val[1]; seedP = val[2];};
57 : agomez 1 /**
58 :     * \brief This will return the type of optimisation class
59 :     * \return type
60 :     */
61 :     OptType getType() const { return type; };
62 :     protected:
63 :     /**
64 :     * \brief This is the flag used to denote whether the optimisation converged or not
65 :     */
66 :     int converge;
67 :     /**
68 :     * \brief This is the number of iterations that took place during the optimisation
69 :     */
70 :     int iters;
71 :     /**
72 :     * \brief This is the value of the best likelihood score from the optimisation
73 :     */
74 :     double score;
75 :     /**
76 :     * \brief This denotes what type of optimisation class has been created
77 :     */
78 :     OptType type;
79 : ulcessvp 12 /**
80 :     * \brief This is the seed used for the calculation of the new value of the parameters
81 :     */
82 : ulcessvp 11 unsigned seed;
83 : ulcessvp 12 /**
84 :     * \brief This is the seed used for the acceptance of the metropolis
85 :     */
86 : ulcessvp 11 unsigned seedM;
87 : ulcessvp 12 /**
88 :     * \brief This is the seed used to change the order of the parameters
89 :     */
90 : ulcessvp 11 unsigned seedP;
91 : agomez 1 };
92 :    
/**
 * \class OptInfoHooke
 * \brief This is the class used for the Hooke & Jeeves optimisation
 *
 * The Hooke & Jeeves optimisation is the default optimisation, and is a simple and fast optimising method, but somewhat unreliable, which is often described as a "hill climbing" technique. From the initial starting point the algorithm takes a step in various directions, and conducts a new model run. If the new likelihood score is better than the old one then the algorithm uses the new point as its best guess. If it is worse then the algorithm retains the old point. The search proceeds in series of these steps, each step slightly smaller than the previous one. When the algorithm finds a point which it cannot improve on with a small step in any direction then it accepts this point as being the "solution", and exits. It is recommended that you re-run the optimisation, using the final point of one run as the start of the next.
 *
 * The Hooke & Jeeves algorithm used in Gadget is derived from that originally presented by R. Hooke and T. A. Jeeves, ''Direct Search Solution of Numerical and Statistical Problems'' in the April 1961 (Vol. 8, pp. 212-229) issue of the Journal of the ACM, with improvements presented by Arthur F Kaupe Jr., ''Algorithm 178: Direct Search'' in the June 1963 (Vol 6, pp.313-314) issue of the Communications of the ACM.
 */
class OptInfoHooke : public OptInfo {
public:
  /**
   * \brief This is the default OptInfoHooke constructor
   */
  OptInfoHooke();
  /**
   * \brief This is the default OptInfoHooke destructor
   */
  virtual ~OptInfoHooke() {};
  /**
   * \brief This is the function used to read in the Hooke & Jeeves parameters
   * \param infile is the CommentStream to read the optimisation parameters from
   * \param text is a text string used to compare parameter names
   */
  virtual void read(CommentStream& infile, char* text);
  /**
   * \brief This function will print information from the optimisation algorithm
   * \param outfile is the ofstream that the optimisation information gets sent to
   * \param prec is the precision to use in the output file
   */
  virtual void Print(ofstream& outfile, int prec);
  /**
   * \brief This is the function that will calculate the likelihood score using the Hooke & Jeeves optimiser
   */
  virtual void OptimiseLikelihood();
#ifdef _OPENMP
  /**
   * \brief This is the function that will calculate the likelihood score using the Hooke & Jeeves
   * optimiser parallelised with OpenMP (uses the speculative search, see bestNearbySpec())
   */
  virtual void OptimiseLikelihoodOMP();
  /**
   * \brief This is the function that will calculate the likelihood score using the Hooke & Jeeves
   * optimiser parallelised with the reproducible OpenMP version (see bestNearbyRepro())
   */
  virtual void OptimiseLikelihoodREP();
#endif
private:
  /**
   * \brief This function will calculate the best point that can be found close to the current point
   * \param delta is the DoubleVector of the steps to take when looking for the best point
   * \param point is the DoubleVector that will contain the parameters corresponding to the best function value found from the search
   * \param prevbest is the current best point value
   * \param param is the IntVector containing the order that the parameters should be searched in
   * \return the best function value found from the search
   */
  double bestNearby(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param);
  /**
   * \brief This function, the reproducible version implemented with OpenMP, will calculate the best point that can be found close to the current point
   * \param delta is the DoubleVector of the steps to take when looking for the best point
   * \param point is the DoubleVector that will contain the parameters corresponding to the best function value found from the search
   * \param prevbest is the current best point value
   * \param param is the IntVector containing the order that the parameters should be searched in
   * \return the best function value found from the search
   */
  double bestNearbyRepro(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param);
  /**
   * \brief This function, the speculative version implemented with OpenMP, will calculate the best point that can be found close to the current point
   * \param delta is the DoubleVector of the steps to take when looking for the best point
   * \param point is the DoubleVector that will contain the parameters corresponding to the best function value found from the search
   * \param prevbest is the current best point value
   * \param param is the IntVector containing the order that the parameters should be searched in
   * \return the best function value found from the search
   */
  double bestNearbySpec(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param);
  /**
   * \brief This is the maximum number of iterations for the Hooke & Jeeves optimisation
   */
  int hookeiter;
  /**
   * \brief This is the reduction factor for the step length
   */
  double rho;
  /**
   * \brief This is the initial step length
   */
  double lambda;
  /**
   * \brief This is the minimum step length, used as the halt criteria for the optimisation process
   */
  double hookeeps;
  /**
   * \brief This is the limit when checking if a parameter is stuck on the bound
   */
  double bndcheck;
};
185 :    
/**
 * \class OptInfoSimann
 * \brief This is the class used for the Simulated Annealing optimisation
 *
 * Simulated Annealing is a global optimisation method that distinguishes different local optima. Starting from an initial point, the algorithm takes a step and the function is evaluated. When minimizing a function, any downhill step is accepted and the process repeats from this new point. An uphill step may be accepted (thus, it can escape from local optima). This uphill decision is made by the Metropolis criteria. It uses a parameter known as "temperature" and the size of the uphill step in a probabilistic manner, and varying the temperature will affect the number of the uphill moves that are accepted. As the optimisation process proceeds, the length of the steps decline and the algorithm closes in on the global optimum.
 *
 * The Simulated Annealing algorithm used in Gadget is derived from that presented by Corana et al, ''Minimising Multimodal Functions of Continuous Variables with the 'Simulated Annealing' Algorithm'' in the September 1987 (Vol. 13, pp. 262-280) issue of the ACM Transactions on Mathematical Software and Goffe et al, ''Global Optimisation of Statistical Functions with Simulated Annealing'' in the January/February 1994 (Vol. 60, pp. 65-100) issue of the Journal of Econometrics.
 */
class OptInfoSimann : public OptInfo {
public:
  /**
   * \brief This is the default OptInfoSimann constructor
   */
  OptInfoSimann();
  /**
   * \brief This is the default OptInfoSimann destructor
   */
  virtual ~OptInfoSimann() {};
  /**
   * \brief This is the function used to read in the Simulated Annealing parameters
   * \param infile is the CommentStream to read the optimisation parameters from
   * \param text is a text string used to compare parameter names
   */
  virtual void read(CommentStream& infile, char* text);
  /**
   * \brief This function will print information from the optimisation algorithm
   * \param outfile is the ofstream that the optimisation information gets sent to
   * \param prec is the precision to use in the output file
   */
  virtual void Print(ofstream& outfile, int prec);
  /**
   * \brief This is the function that will calculate the likelihood score using the Simulated Annealing optimiser
   */
  virtual void OptimiseLikelihood();
#ifdef _OPENMP
//#ifdef SPECULATIVE
  /**
   * \brief This is the function that will calculate the likelihood score using the Simulated Annealing optimiser parallelised with OpenMP
   */
  virtual void OptimiseLikelihoodOMP();
  /**
   * \brief This is the function that will calculate the likelihood score using the Simulated Annealing optimiser parallelised with the reproducible OpenMP version
   */
  virtual void OptimiseLikelihoodREP();
//#endif
#endif
  /**
   * \brief This function calculates a new value for parameter l
   * \param nvars the number of variables to be optimised
   * \param l the parameter to change
   * \param param IntVector with the order of the parameters
   * \param trialx DoubleVector that stores the values of the parameters to evaluate during this iteration
   * \param x DoubleVector that stores the best values of the parameters
   * \param lowerb DoubleVector with the lower bounds of the variables to be optimised
   * \param upperb DoubleVector with the upper bounds of the variables to be optimised
   * \param vm DoubleVector with the value for the maximum step length for each parameter
   */
  virtual void newValue(int nvars, int l, IntVector& param, DoubleVector& trialx,
    DoubleVector& x, DoubleVector& lowerb, DoubleVector& upperb, DoubleVector& vm);
private:
  /**
   * \brief This is the temperature reduction factor
   */
  double rt;
  /**
   * \brief This is the halt criteria for the Simulated Annealing algorithm
   */
  double simanneps;
  /**
   * \brief This is the number of loops before the step length is adjusted
   */
  int ns;
  /**
   * \brief This is the number of loops before the temperature is adjusted
   */
  int nt;
  /**
   * \brief This is the "temperature" used for the Simulated Annealing algorithm
   */
  double t;
  /**
   * \brief This is the factor used to adjust the step length
   */
  double cs;
  /**
   * \brief This is the initial value for the maximum step length
   */
  double vminit;
  /**
   * \brief This is the maximum number of function evaluations for the Simulated Annealing optimisation
   */
  int simanniter;
  /**
   * \brief This is the upper bound when adjusting the step length
   */
  double uratio;
  /**
   * \brief This is the lower bound when adjusting the step length
   */
  double lratio;
  /**
   * \brief This is the number of temperature loops to check when testing for convergence
   */
  int tempcheck;
  /**
   * \brief This is the flag to denote whether the parameters should be scaled or not (default 0, not scaled)
   */
  int scale;
};
292 :    
/**
 * \class OptInfoBFGS
 * \brief This is the class used for the BFGS optimisation
 *
 * BFGS is a quasi-Newton global optimisation method that uses information about the gradient of the function at the current point to calculate the best direction to look in to find a better point. Using this information, the BFGS algorithm can iteratively calculate a better approximation to the inverse Hessian matrix, which will lead to a better approximation of the minimum value. From an initial starting point, the gradient of the function is calculated and then the algorithm uses this information to calculate the best direction to perform a linesearch for a point that is ''sufficiently better''. The linesearch that is used in Gadget to look for a better point in this direction is the ''Armijo'' linesearch. The algorithm will then adjust the current estimate of the inverse Hessian matrix, and restart from this new point. If a better point cannot be found, then the inverse Hessian matrix is reset and the algorithm restarts from the last accepted point.
 *
 * The BFGS algorithm used in Gadget is derived from that presented by Dimitri P Bertsekas, ''Nonlinear Programming'' (2nd edition, pp22-61) published by Athena Scientific.
 */
class OptInfoBFGS : public OptInfo {
public:
  /**
   * \brief This is the default OptInfoBFGS constructor
   */
  OptInfoBFGS();
  /**
   * \brief This is the default OptInfoBFGS destructor
   */
  ~OptInfoBFGS() {};
  /**
   * \brief This is the function used to read in the BFGS parameters
   * \param infile is the CommentStream to read the optimisation parameters from
   * \param text is a text string used to compare parameter names
   */
  virtual void read(CommentStream& infile, char* text);
  /**
   * \brief This function will print information from the optimisation algorithm
   * \param outfile is the ofstream that the optimisation information gets sent to
   * \param prec is the precision to use in the output file
   */
  virtual void Print(ofstream& outfile, int prec);
  /**
   * \brief This is the function that will calculate the likelihood score using the BFGS optimiser
   */
  virtual void OptimiseLikelihood();
#ifdef _OPENMP
//#ifdef SPECULATIVE
  /**
   * \brief This function calls the sequential function; BFGS isn't implemented with OpenMP
   */
  virtual void OptimiseLikelihoodOMP();
  /**
   * \brief This function calls the sequential function; BFGS isn't implemented with OpenMP
   */
  virtual void OptimiseLikelihoodREP();
//#endif
#endif
private:
  /**
   * \brief This function will numerically calculate the gradient of the function at the current point
   * \param point is the DoubleVector that contains the parameters corresponding to the current function value
   * \param pointvalue is the current function value
   * \param newgrad is the DoubleVector that will contain the gradient vector for the current point
   */
  void gradient(DoubleVector& point, double pointvalue, DoubleVector& newgrad);
  /**
   * \brief This function will calculate the smallest eigenvalue of the inverse Hessian matrix
   * \param M is the DoubleMatrix containing the inverse Hessian matrix
   * \return the smallest eigen value of the matrix
   * \note NOTE(review): M is passed by value, which copies the whole matrix on
   * every call — consider const DoubleMatrix& (requires updating the definition
   * in the corresponding .cpp file as well)
   */
  double getSmallestEigenValue(DoubleMatrix M);
  /**
   * \brief This is the maximum number of function evaluations for the BFGS optimisation
   */
  int bfgsiter;
  /**
   * \brief This is the halt criteria for the BFGS algorithm
   */
  double bfgseps;
  /**
   * \brief This is the adjustment factor in the Armijo linesearch
   */
  double beta;
  /**
   * \brief This is the halt criteria for the Armijo linesearch
   */
  double sigma;
  /**
   * \brief This is the initial step size for the Armijo linesearch
   */
  double step;
  /**
   * \brief This is the accuracy term used when calculating the gradient
   */
  double gradacc;
  /**
   * \brief This is the factor used to adjust the gradient accuracy term
   */
  double gradstep;
  /**
   * \brief This is the halt criteria for the gradient accuracy term
   */
  double gradeps;
};
383 :    
/**
 * \class OptInfoPso
 * \brief This is the class used for the PSO optimisation
 *
 * PSO or Particle Swarm Optimization
 *
 * The PSO algorithm used in Gadget is derived from that presented by Kyriakos Kentzoglanakis.
 */
class OptInfoPso : public OptInfo {
public:
  /**
   * \brief This is the default OptInfoPso constructor
   */
  OptInfoPso();
  /**
   * \brief This is the default OptInfoPso destructor
   */
  ~OptInfoPso() {};
  /**
   * \brief This is the function used to read in the PSO parameters
   * \param infile is the CommentStream to read the optimisation parameters from
   * \param text is a text string used to compare parameter names
   */
  virtual void read(CommentStream& infile, char* text);
  /**
   * \brief This function will print information from the optimisation algorithm
   * \param outfile is the ofstream that the optimisation information gets sent to
   * \param prec is the precision to use in the output file
   */
  virtual void Print(ofstream& outfile, int prec);
  /**
   * \brief This is the function that will calculate the likelihood score using the PSO optimiser
   */
  virtual void OptimiseLikelihood();
#ifdef _OPENMP
//#ifdef SPECULATIVE
  /**
   * \brief This function calls the sequential function; PSO isn't implemented with OpenMP
   */
  virtual void OptimiseLikelihoodOMP();
  /**
   * \brief This function calls the sequential function; PSO isn't implemented with OpenMP
   */
  virtual void OptimiseLikelihoodREP();
//#endif
#endif
private:

  /**
   * \brief CONSTANTS: max swarm size
   */
#define PSO_MAX_SIZE 100

  /**
   * \brief CONSTANTS: default value of w (see clerc02)
   */
#define PSO_INERTIA 0.7298

  /**
   * \brief NEIGHBORHOOD SCHEMES: global best topology
   */
#define PSO_NHOOD_GLOBAL 0

  /**
   * \brief NEIGHBORHOOD SCHEMES: ring topology
   */
#define PSO_NHOOD_RING 1

  /**
   * \brief NEIGHBORHOOD SCHEMES: Random neighborhood topology. see http://clerc.maurice.free.fr/pso/random_topology.pdf
   */
#define PSO_NHOOD_RANDOM 2

  /**
   * \brief INERTIA WEIGHT UPDATE FUNCTIONS
   */
#define PSO_W_CONST 0
#define PSO_W_LIN_DEC 1

  /**
   * \brief PSO SOLUTION -- Initialized by the user
   */
  typedef struct {

    double error;
    double *gbest; // should contain DIM elements!!

  } pso_result_t;

  /**
   * \brief optimization goal (error threshold)
   */
  double goal;

  /**
   * \brief swarm size (number of particles)
   */
  int size;

  /**
   * \brief maximum number of iterations
   */
  int psoiter;

  /**
   * \brief cognitive coefficient
   */
  double c1;

  /**
   * \brief social coefficient
   */
  double c2;

  /**
   * \brief max inertia weight value
   */
  double w_max;

  /**
   * \brief min inertia weight value
   */
  double w_min;

  /**
   * \brief whether to keep particle position within defined bounds (TRUE) or apply periodic boundary conditions (FALSE)
   */
  int clamp_pos;

  /**
   * \brief neighborhood strategy (see PSO_NHOOD_*)
   */
  int nhood_strategy;

  /**
   * \brief neighborhood size
   */
  int nhood_size;

  /**
   * \brief inertia weight strategy (see PSO_W_*)
   */
  int w_strategy;

  /**
   * \brief seed for the generator
   * \note NOTE(review): this `long seed` shadows the inherited `unsigned seed`
   * member of OptInfo — confirm which one the PSO implementation intends to use
   */
  long seed;

  /**
   * \brief return the swarm size based on dimensionality
   */
  int pso_calc_swarm_size(int dim);

  // Inertia weight update functions: constant and linearly-decreasing schemes
  double calc_inertia_const(int step);
  double calc_inertia_lin_dec(int step);
  // Neighborhood update functions for the topologies defined by PSO_NHOOD_*
  void inform_global(IntMatrix& comm, DoubleMatrix& pos_nb,
      DoubleMatrix& pos_b, DoubleVector& fit_b,
      DoubleVector& gbest, int improved);
  void inform_ring(IntMatrix& comm, DoubleMatrix& pos_nb,
      DoubleMatrix& pos_b, DoubleVector& fit_b,
      DoubleVector& gbest, int improved);
  void inform_random(IntMatrix& comm, DoubleMatrix& pos_nb,
      DoubleMatrix& pos_b, DoubleVector& fit_b,
      DoubleVector& gbest, int improved);
  void inform(IntMatrix& comm, DoubleMatrix& pos_nb, DoubleMatrix& pos_b, DoubleVector& fit_b, int improved);
  // Communication-matrix initialisers for the ring and random topologies
  void init_comm_ring(IntMatrix& comm);
  void init_comm_random(IntMatrix& comm) ;

  typedef void (OptInfoPso::*Inform_fun)(IntMatrix&, DoubleMatrix&, DoubleMatrix&, DoubleVector&, DoubleVector&, int); // neighborhood update function
  typedef double (OptInfoPso::*Calc_inertia_fun)(int); // inertia weight update function

};
564 :     #endif

root@forge.cesga.es
ViewVC Help
Powered by ViewVC 1.0.0  

Powered By FusionForge