[mareframe] Annotation of /trunk/gadget/hooke.cc
Revision 20
/* Nonlinear Optimization using the algorithm of Hooke and Jeeves */
/* 12 February 1994 author: Mark G. Johnson */
//
/* Find a point X where the nonlinear function f(X) has a local */
/* minimum. X is an n-vector and f(X) is a scalar. In mathematical */
/* notation f: R^n -> R^1. The objective function f() is not */
/* required to be continuous. Nor does f() need to be */
/* differentiable. The program does not use or require */
/* derivatives of f(). */
//
/* The software user supplies three things: a subroutine that */
/* computes f(X), an initial "starting guess" of the minimum point */
/* X, and values for the algorithm convergence parameters. Then */
/* the program searches for a local minimum, beginning from the */
/* starting guess, using the Direct Search algorithm of Hooke and */
/* Jeeves. */
//
/* This C program is adapted from the Algol pseudocode found in */
/* "Algorithm 178: Direct Search" by Arthur F. Kaupe Jr., */
/* Communications of the ACM, Vol 6. p.313 (June 1963). It includes */
/* the improvements suggested by Bell and Pike (CACM v.9, p. 684, */
/* Sept 1966) and those of Tomlin and Smith, "Remark on Algorithm */
/* 178" (CACM v.12). The original paper, which I don't recommend as */
/* highly as the one by A. Kaupe, is: R. Hooke and T. A. Jeeves, */
/* "Direct Search Solution of Numerical and Statistical Problems", */
/* Journal of the ACM, Vol. 8, April 1961, pp. 212-229. */
/* Calling sequence: */
/*     int hooke(nvars, startpt, endpt, rho, epsilon, itermax) */
/* */
/*     nvars {an integer} */
/*         This is the number of dimensions in the domain of f(). */
/*         It is the number of coordinates of the starting point */
/*         (and the minimum point.) */
/*     startpt {an array of doubles} */
/*         This is the user-supplied guess at the minimum. */
/*     endpt {an array of doubles} */
/*         This is the calculated location of the local minimum */
/*     rho {a double} */
/*         This is a user-supplied convergence parameter (more */
/*         detail below), which should be set to a value between */
/*         0.0 and 1.0. Larger values of rho give greater */
/*         probability of convergence on highly nonlinear */
/*         functions, at a cost of more function evaluations. */
/*         Smaller values of rho reduce the number of evaluations */
/*         (and the program running time), but increase the risk */
/*         of nonconvergence. See below. */
/*     epsilon {a double} */
/*         This is the criterion for halting the search for a */
/*         minimum. When the algorithm begins to make less and */
/*         less progress on each iteration, it checks the halting */
/*         criterion: if the stepsize is below epsilon, terminate */
/*         the iteration and return the current best estimate of */
/*         the minimum. Larger values of epsilon (such as 1.0e-4) */
/*         give quicker running time, but a less accurate estimate */
/*         of the minimum. Smaller values of epsilon (such as */
/*         1.0e-7) give longer running time, but a more accurate */
/*         estimate of the minimum. */
/*     itermax {an integer} A second, rarely used, halting */
/*         criterion. If the algorithm uses >= itermax */
/*         iterations, halt. */
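//
/* For illustration only -- a sketch of how the classic standalone */
/* interface above would be driven, assuming a user-supplied f(x, n). */
/* Note that in this file the standalone hooke() entry point has been */
/* replaced by the OptInfoHooke methods below (see the JMB note): */
/*     double startpt[2] = { -1.2, 1.0 }, endpt[2]; */
/*     int iters_used = hooke(2, startpt, endpt, 0.5, 1.0e-6, 5000); */
/*     // endpt now holds the best estimate of the minimum */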
//
/* The user-supplied objective function f(x,n) should return a C */
/* "double". Its arguments are x -- an array of doubles, and */
/* n -- an integer. x is the point at which f(x) should be */
/* evaluated, and n is the number of coordinates of x. That is, */
/* n is the number of coefficients being fitted. */
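/* In other words, the expected prototype is: */
/*     double f(double x[], int n); */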
//
/* rho, the algorithm convergence control */
//
/* The algorithm works by taking "steps" from one estimate of */
/* a minimum, to another (hopefully better) estimate. Taking */
/* big steps gets to the minimum more quickly, at the risk of */
/* "stepping right over" an excellent point. The stepsize is */
/* controlled by a user-supplied parameter called rho. At each */
/* iteration, the stepsize is multiplied by rho (0 < rho < 1), */
/* so the stepsize is successively reduced. */
/*     Small values of rho correspond to big stepsize changes, */
/* which make the algorithm run more quickly. However, there */
/* is a chance (especially with highly nonlinear functions) */
/* that these big changes will accidentally overlook a */
/* promising search vector, leading to nonconvergence. */
/*     Large values of rho correspond to small stepsize changes, */
/* which force the algorithm to carefully examine nearby points */
/* instead of optimistically forging ahead. This improves the */
/* probability of convergence. */
/*     The stepsize is reduced until it is equal to (or smaller */
/* than) epsilon. So the number of iterations performed by */
/* Hooke-Jeeves is determined by rho and epsilon: */
/*     rho**(number_of_iterations) = epsilon */
/* In general it is a good idea to set rho to an aggressively */
/* small value like 0.5 (hoping for fast convergence). Then, */
/* if the user suspects that the reported minimum is incorrect */
/* (or perhaps not accurate enough), the program can be run */
/* again with a larger value of rho such as 0.85, using the */
/* result of the first minimization as the starting guess to */
/* begin the second minimization. */
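/* Worked example: with rho = 0.5 and epsilon = 1.0e-6 the stepsize */
/* is halved at each reduction, so roughly */
/*     number_of_iterations = log(epsilon) / log(rho) */
/*                          = log(1.0e-6) / log(0.5) =~ 20 */
/* stepsize reductions happen before the halting test fires. */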
//
/* Normal use: */
/*     (1) Code your function f() in the C language */
/*     (2) Install your starting guess {or read it in} */
/*     (3) Run the program */
/*     (4) {for the skeptical}: Use the computed minimum */
/*         as the starting point for another run */
//
/* Data Fitting: */
/*     Code your function f() to be the sum of the squares of the */
/* errors (differences) between the computed values and the */
/* measured values. Then minimize f() using Hooke-Jeeves. */
/*     EXAMPLE: you have 20 datapoints (ti, yi) and you want to */
/* find A,B,C such that (A*t*t) + (B*exp(t)) + (C*tan(t)) */
/* fits the data as closely as possible. Then f() is just */
/*     f(x) = SUM (measured_y[i] - ((A*t[i]*t[i]) + (B*exp(t[i])) */
/*                 + (C*tan(t[i]))))^2 */
/* where x[] is a 3-vector consisting of {A, B, C}. */
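/* A minimal sketch of that objective in C (illustrative only -- t[] */
/* and y[] stand for the 20 measured datapoints, assumed in scope): */
/*     double f(double x[], int n) { */
/*       double sum = 0.0; */
/*       int i; */
/*       for (i = 0; i < 20; i++) { */
/*         double model = x[0]*t[i]*t[i] + x[1]*exp(t[i]) + x[2]*tan(t[i]); */
/*         sum += (y[i] - model) * (y[i] - model); */
/*       } */
/*       return sum; */
/*     } */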
//
/* The author of this software is M.G. Johnson. */
/* Permission to use, copy, modify, and distribute this software */
/* for any purpose without fee is hereby granted, provided that */
/* this entire notice is included in all copies of any software */
/* which is or includes a copy or modification of this software */
/* and in all copies of the supporting documentation for such */
/* software. THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT */
/* ANY EXPRESS OR IMPLIED WARRANTY. IN PARTICULAR, NEITHER THE */
/* AUTHOR NOR AT&T MAKE ANY REPRESENTATION OR WARRANTY OF ANY */
/* KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS */
/* FITNESS FOR ANY PARTICULAR PURPOSE. */
//
/* JMB this has been modified to work with the gadget object structure */
/* This means that the function has been replaced by a call to the */
/* Ecosystem object, and we can use the vector objects that have been */
/* defined */

#include "gadget.h"
#include "optinfo.h"
#include "mathfunc.h"
#include "doublevector.h"
#include "intvector.h"
#include "errorhandler.h"
#include "ecosystem.h"
#include "global.h"

#ifdef _OPENMP
#include "omp.h"
#endif

extern Ecosystem* EcoSystem;
#ifdef _OPENMP
extern Ecosystem** EcoSystems;
#endif
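
/* EcoSystem is the model instance used for the sequential evaluations;
   EcoSystems[] (one replica per OpenMP thread) lets the parallel
   bestNearby variants below evaluate several trial parameter vectors
   concurrently without sharing state */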

/* given a point, look for a better one nearby, one coord at a time */
double OptInfoHooke::bestNearby(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {

  double minf, ftmp;
  int i;
  DoubleVector z(point);

  minf = prevbest;
  for (i = 0; i < point.Size(); i++) {
    /* first try a step of +delta along coordinate param[i] */
    z[param[i]] = point[param[i]] + delta[param[i]];
    ftmp = EcoSystem->SimulateAndUpdate(z);
    if (ftmp < minf) {
      minf = ftmp;
    } else {
      /* +delta was no improvement, so try -delta instead */
      delta[param[i]] = 0.0 - delta[param[i]];
      z[param[i]] = point[param[i]] + delta[param[i]];
      ftmp = EcoSystem->SimulateAndUpdate(z);
      if (ftmp < minf)
        minf = ftmp;
      else
        z[param[i]] = point[param[i]];  /* neither direction helped, restore */
    }
  }

  for (i = 0; i < point.Size(); i++)
    point[i] = z[i];
  return minf;
}

/* given a point, look for a better one nearby, one coord at a time */
#ifdef _OPENMP
/*
 * bestNearbyRepro: bestNearby parallelized with OpenMP
 *  - 2 threads per coord, to compute the +delta and -delta trials in parallel
 *  - several coords are evaluated in parallel within one batch
 */
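/*
 * Illustrative layout (assuming omp_get_max_threads() returns 8, so
 * paral_tokens = 4): threads j = 0..3 evaluate point + delta for
 * parameters param[i]..param[i+3], while threads j = 4..7 evaluate
 * point - delta for the same parameters, each on its own
 * EcoSystems[j] replica.
 */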
double OptInfoHooke::bestNearbyRepro(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
  double minf;
  int i, j, k;
  DoubleVector z(point);

  struct Storage {
    DoubleVector z;
    DoubleVector delta;
    double ftmp;
    int iters;
  };

  minf = prevbest;
  i = 0;

  int paral_tokens, numThr, nvars = point.Size();
  numThr = omp_get_max_threads();

  if ((numThr % 2) == 0)
    paral_tokens = numThr / 2;
  else
    return -1;  /* an odd thread count cannot be split into +/- pairs */

  Storage* storage = new Storage[numThr];
//  omp_set_dynamic(0);
//  omp_set_nested(1); // permit nested parallelism
  while (i < nvars) {
    if ((i + paral_tokens - 1) >= nvars)
      paral_tokens = nvars - i;
#pragma omp parallel for num_threads(paral_tokens*2) private(k) // parallelize over the parameters
    for (j = 0; j < (paral_tokens*2); ++j) {
      storage[j].z = z;
      storage[j].delta = delta;
      DoubleVector v(z);

      if (j < paral_tokens) {
        k = param[i+j];
        v[k] += delta[k];
      }
      else {
        k = param[i+j-paral_tokens];
        v[k] -= delta[k];
      }

      storage[j].ftmp = EcoSystems[j]->SimulateAndUpdate(v);
      storage[j].z[k] = v[k];
    }
    for (j = 0; j < paral_tokens; ++j) {
      k = param[i+j];
      if (storage[j].ftmp < minf) {
        storage[j].iters = 1;
      } else {
        /* +delta failed for this coord, so fall back to the -delta result */
        storage[j].iters = 2;
        storage[j].delta[k] = 0.0 - delta[k];
        if (storage[j+paral_tokens].ftmp < minf) {
          storage[j].ftmp = storage[j+paral_tokens].ftmp;
          storage[j].z[k] = storage[j+paral_tokens].z[k];
        }
      }
    }

    /* accept the first improving coordinate, in parameter order, so the
       result matches what the sequential bestNearby would have chosen */
    for (j = 0; j < paral_tokens; ++j) {
      i++;
      iters += storage[j].iters;
      if (storage[j].ftmp < minf) {
        minf = storage[j].ftmp;
        z = storage[j].z;
        delta = storage[j].delta;
        break;
      }
    }
  }
  delete[] storage;
  for (i = 0; i < nvars; ++i)
    point[i] = z[i];
  return minf;
}
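
/* reproducible parallel driver: the same Hooke & Jeeves main loop as
   OptimiseLikelihood below, but using bestNearbyRepro so that several
   parameters are evaluated concurrently */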
void OptInfoHooke::OptimiseLikelihoodREP() {

  double oldf, newf, bestf, steplength, tmp;
  int i, offset;
  int rchange, rcheck, rnumber; //Used to randomise the order of the parameters

  handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);
  DoubleVector trialx(nvars);
  DoubleVector bestx(nvars);
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector init(nvars);
  DoubleVector initialstep(nvars, rho);
  DoubleVector delta(nvars);
  IntVector param(nvars, 0);
  IntVector lbound(nvars, 0);
  IntVector rbounds(nvars, 0);
  IntVector trapped(nvars, 0);

  EcoSystem->scaleVariables();
  int numThr = omp_get_max_threads();
  for (i = 0; i < numThr; i++) // scale the variables in each thread's Ecosystem
    EcoSystems[i]->scaleVariables();
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    // Scaling the bounds, because the parameters are scaled
    lowerb[i] = lowerb[i] / init[i];
    upperb[i] = upperb[i] / init[i];
    if (lowerb[i] > upperb[i]) {
      tmp = lowerb[i];
      lowerb[i] = upperb[i];
      upperb[i] = tmp;
    }

    bestx[i] = x[i];
    trialx[i] = x[i];
    param[i] = i;
    delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
  }

  bestf = EcoSystem->SimulateAndUpdate(trialx);
  if (bestf != bestf) { //check for NaN
    handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
  newf = bestf;
  oldf = bestf;
  steplength = lambda;
  if (isZero(steplength))
    steplength = rho;

  iters = 0;

  while (1) {
    if (isZero(bestf)) {
      iters = EcoSystem->getFuncEval() - offset;
      handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
      converge = -1;
      return;
    }

    /* randomize the order of the parameters once in a while */
    rchange = 0;
    while (rchange < nvars) {
      rnumber = rand() % nvars;
      rcheck = 1;
      for (i = 0; i < rchange; i++)
        if (param[i] == rnumber)
          rcheck = 0;
      if (rcheck) {
        param[rchange] = rnumber;
        rchange++;
      }
    }

    /* find best new point, one coord at a time */
    for (i = 0; i < nvars; i++)
      trialx[i] = x[i];
    newf = this->bestNearbyRepro(delta, trialx, bestf, param);
    if (newf == -1) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
      return;
    }
    /* if too many function evaluations occur, terminate the algorithm */

    iters = EcoSystem->getFuncEval() - offset;
    if (iters > hookeiter) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

      score = EcoSystem->SimulateAndUpdate(trialx);
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      for (i = 0; i < nvars; i++)
        bestx[i] = trialx[i] * init[i];
      EcoSystem->storeVariables(score, bestx);
      return;
    }

    /* if we made some improvements, pursue that direction */
    while (newf < bestf) {
      for (i = 0; i < nvars; i++) {
        /* if it has been trapped but f has now gotten better (bndcheck) */
        /* we assume that we are out of the trap, reset the counters */
        /* and go back to the stepsize we had when we got trapped */
        if ((trapped[i]) && (newf < oldf * bndcheck)) {
          trapped[i] = 0;
          lbound[i] = 0;
          rbounds[i] = 0;
          delta[i] = initialstep[i];

        } else if (trialx[i] < (lowerb[i] + verysmall)) {
          lbound[i]++;
          trialx[i] = lowerb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (lbound[i] >= 2)
            delta[i] /= rho;

        } else if (trialx[i] > (upperb[i] - verysmall)) {
          rbounds[i]++;
          trialx[i] = upperb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (rbounds[i] >= 2)
            delta[i] /= rho;
        }
      }

      for (i = 0; i < nvars; i++) {
        /* firstly, arrange the sign of delta[] */
        if (trialx[i] < x[i])
          delta[i] = 0.0 - fabs(delta[i]);
        else
          delta[i] = fabs(delta[i]);

        /* now, move further in this direction (pattern move: reflect the
           old base point through the new one) */
        tmp = x[i];
        x[i] = trialx[i];
        trialx[i] = trialx[i] + trialx[i] - tmp;
      }

      /* only move forward if this is really an improvement */
      oldf = newf;
      newf = EcoSystem->SimulateAndUpdate(trialx);
      iters++;
      if ((isEqual(newf, oldf)) || (newf > oldf)) {
        newf = oldf; //JMB no improvement, so reset the value of newf
        break;
      }

      /* OK, it's better, so update variables and look around */
      bestf = newf;
      for (i = 0; i < nvars; i++)
        x[i] = trialx[i];

      newf = this->bestNearbyRepro(delta, trialx, bestf, param);
      if (newf == -1) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
        return;
      }
      if (isEqual(newf, bestf))
        break;

      /* if too many function evaluations occur, terminate the algorithm */
      iters = EcoSystem->getFuncEval() - offset;
      if (iters > hookeiter) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
        handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
        handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
        handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

        score = EcoSystem->SimulateAndUpdate(trialx);
        handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
        for (i = 0; i < nvars; i++)
          bestx[i] = trialx[i] * init[i];
        EcoSystem->storeVariables(score, bestx);
        return;
      }
    } // while (newf < bestf)

    iters = EcoSystem->getFuncEval() - offset;
    if (newf < bestf) {
      for (i = 0; i < nvars; i++)
        bestx[i] = x[i] * init[i];
      bestf = newf;
      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
      EcoSystem->storeVariables(bestf, bestx);
      EcoSystem->writeBestValues();

    } else
      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");

    /* if the step length is less than hookeeps, terminate the algorithm */
    if (steplength < hookeeps) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      converge = 1;
      score = bestf;
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      EcoSystem->storeVariables(bestf, bestx);
      return;
    }

    steplength *= rho;
    handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
    for (i = 0; i < nvars; i++)
      delta[i] *= rho;
  }
}
#endif

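/* the standard sequential driver: the main Hooke & Jeeves loop, using
   the serial bestNearby above */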
void OptInfoHooke::OptimiseLikelihood() {

  double oldf, newf, bestf, steplength, tmp;
  int i, offset;
  int rchange, rcheck, rnumber; //Used to randomise the order of the parameters

  handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);
  DoubleVector trialx(nvars);
  DoubleVector bestx(nvars);
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector init(nvars);
  DoubleVector initialstep(nvars, rho);
  DoubleVector delta(nvars);
  IntVector param(nvars, 0);
  IntVector lbound(nvars, 0);
  IntVector rbounds(nvars, 0);
  IntVector trapped(nvars, 0);

  EcoSystem->scaleVariables();
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    // Scaling the bounds, because the parameters are scaled
    lowerb[i] = lowerb[i] / init[i];
    upperb[i] = upperb[i] / init[i];
    if (lowerb[i] > upperb[i]) {
      tmp = lowerb[i];
      lowerb[i] = upperb[i];
      upperb[i] = tmp;
    }

    bestx[i] = x[i];
    trialx[i] = x[i];
    param[i] = i;
    delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
  }

  bestf = EcoSystem->SimulateAndUpdate(trialx);
  if (bestf != bestf) { //check for NaN
    handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
  newf = bestf;
  oldf = bestf;
  steplength = lambda;
  if (isZero(steplength))
    steplength = rho;

  iters = 0;

  while (1) {
    if (isZero(bestf)) {
      iters = EcoSystem->getFuncEval() - offset;
      handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
      converge = -1;
      return;
    }

    /* randomize the order of the parameters once in a while */
    rchange = 0;
    while (rchange < nvars) {
      rnumber = rand() % nvars;
      rcheck = 1;
      for (i = 0; i < rchange; i++)
        if (param[i] == rnumber)
          rcheck = 0;
      if (rcheck) {
        param[rchange] = rnumber;
        rchange++;
      }
    }

    /* find best new point, one coord at a time */
    for (i = 0; i < nvars; i++)
      trialx[i] = x[i];
    newf = this->bestNearby(delta, trialx, bestf, param);

    /* if too many function evaluations occur, terminate the algorithm */
    iters = EcoSystem->getFuncEval() - offset;
    if (iters > hookeiter) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

      score = EcoSystem->SimulateAndUpdate(trialx);
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      for (i = 0; i < nvars; i++)
        bestx[i] = trialx[i] * init[i];
      EcoSystem->storeVariables(score, bestx);
      return;
    }

    /* if we made some improvements, pursue that direction */
    while (newf < bestf) {
      for (i = 0; i < nvars; i++) {
        /* if it has been trapped but f has now gotten better (bndcheck) */
        /* we assume that we are out of the trap, reset the counters */
        /* and go back to the stepsize we had when we got trapped */
        if ((trapped[i]) && (newf < oldf * bndcheck)) {
          trapped[i] = 0;
          lbound[i] = 0;
          rbounds[i] = 0;
          delta[i] = initialstep[i];

        } else if (trialx[i] < (lowerb[i] + verysmall)) {
          lbound[i]++;
          trialx[i] = lowerb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (lbound[i] >= 2)
            delta[i] /= rho;

        } else if (trialx[i] > (upperb[i] - verysmall)) {
          rbounds[i]++;
          trialx[i] = upperb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (rbounds[i] >= 2)
            delta[i] /= rho;
        }
      }

      for (i = 0; i < nvars; i++) {
        /* firstly, arrange the sign of delta[] */
        if (trialx[i] < x[i])
          delta[i] = 0.0 - fabs(delta[i]);
        else
          delta[i] = fabs(delta[i]);

        /* now, move further in this direction (pattern move: reflect the
           old base point through the new one) */
        tmp = x[i];
        x[i] = trialx[i];
        trialx[i] = trialx[i] + trialx[i] - tmp;
      }

      /* only move forward if this is really an improvement */
      oldf = newf;
      newf = EcoSystem->SimulateAndUpdate(trialx);
      if ((isEqual(newf, oldf)) || (newf > oldf)) {
        newf = oldf; //JMB no improvement, so reset the value of newf
        break;
      }

      /* OK, it's better, so update variables and look around */
      bestf = newf;
      for (i = 0; i < nvars; i++)
        x[i] = trialx[i];
      newf = this->bestNearby(delta, trialx, bestf, param);
      if (isEqual(newf, bestf))
        break;

      /* if too many function evaluations occur, terminate the algorithm */
      iters = EcoSystem->getFuncEval() - offset;
      if (iters > hookeiter) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
        handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
        handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
        handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

        score = EcoSystem->SimulateAndUpdate(trialx);
        handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
        for (i = 0; i < nvars; i++)
          bestx[i] = trialx[i] * init[i];
        EcoSystem->storeVariables(score, bestx);
        return;
      }
    } // while (newf < bestf)

    iters = EcoSystem->getFuncEval() - offset;
    if (newf < bestf) {
      for (i = 0; i < nvars; i++)
        bestx[i] = x[i] * init[i];
      bestf = newf;
      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
      EcoSystem->storeVariables(bestf, bestx);
      EcoSystem->writeBestValues();

    } else
      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");

    /* if the step length is less than hookeeps, terminate the algorithm */
    if (steplength < hookeeps) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      converge = 1;
      score = bestf;
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      EcoSystem->storeVariables(bestf, bestx);
      return;
    }

    steplength *= rho;
    handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
    for (i = 0; i < nvars; i++)
      delta[i] *= rho;
  }
}

/* Functions to parallelize the Hooke & Jeeves algorithm with OpenMP */
#ifdef _OPENMP
//#ifdef SPECULATIVE
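/*
 * bestNearbySpec: speculative variant of bestNearby. Unlike
 * bestNearbyRepro, which accepts the first improving parameter in
 * sequence (so its result matches the serial search), this version
 * keeps the best candidate of each whole batch and carries on,
 * speculatively evaluating parameters that the serial algorithm
 * might never have reached.
 */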
double OptInfoHooke::bestNearbySpec(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
  double minf;
  int i, j, k, ii;
  DoubleVector z(point);
  int bestId = 0;
  struct Storage {
    DoubleVector z;
    DoubleVector delta;
    double ftmp;
    int iters;
  };

  minf = prevbest;

  int paral_tokens, numThr, nvars = point.Size();
  numThr = omp_get_max_threads();

  if ((numThr % 2) == 0)
    paral_tokens = numThr / 2;
  else
    return -1;  /* an odd thread count cannot be split into +/- pairs */

  Storage* storage = new Storage[numThr];
//  omp_set_dynamic(0);
//  omp_set_nested(1); // permit nested parallelism
  for (ii = 0; ii < paral_tokens; ii++) {
    i = 0;
    while (i < nvars) {
      if ((i + paral_tokens - 1) >= nvars)
        paral_tokens = nvars - i;
#pragma omp parallel for num_threads(paral_tokens*2) private(k)
      for (j = 0; j < (paral_tokens*2); ++j) {
        storage[j].z = z;
        storage[j].delta = delta;
        DoubleVector v(z);

        if (j < paral_tokens) {
          k = param[i+j];
          v[k] += delta[k];
        }
        else {
          k = param[i+j-paral_tokens];
          v[k] -= delta[k];
        }

        storage[j].ftmp = EcoSystems[j]->SimulateAndUpdate(v);
        storage[j].z[k] = v[k];
      }

      for (j = 0; j < paral_tokens; ++j) {
        k = param[i+j];
        if (storage[j].ftmp < minf) {
          storage[j].iters = 1;
        } else {
          /* +delta failed for this coord, so fall back to the -delta result */
          storage[j].iters = 2;
          storage[j].delta[k] = 0.0 - delta[k];
          if (storage[j+paral_tokens].ftmp < minf) {
            storage[j].ftmp = storage[j+paral_tokens].ftmp;
            storage[j].z[k] = storage[j+paral_tokens].z[k];
          }
          else iters += 2;
        }
      }

      /* keep the best candidate of the whole batch, if it improves minf */
      bestId = 0;
      for (j = 1; j < paral_tokens; ++j) {
        if (storage[j].ftmp < storage[bestId].ftmp)
          bestId = j;
      }
      if (storage[bestId].ftmp < minf) {
        iters += storage[bestId].iters;
        minf = storage[bestId].ftmp;
        z = storage[bestId].z;
        delta = storage[bestId].delta;
      }

      i += paral_tokens;
    }
    paral_tokens = numThr / 2;
  }

  delete[] storage;
  for (i = 0; i < nvars; ++i)
    point[i] = z[i];

  return minf;
}

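/* parallel driver: the main Hooke & Jeeves loop, calling the speculative
   bestNearbySpec when compiled with OpenMP and falling back to the serial
   bestNearby otherwise */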
void OptInfoHooke::OptimiseLikelihoodOMP() {
  double oldf, newf, bestf, steplength, tmp;
  int i, offset;
  int rchange, rcheck, rnumber; //Used to randomise the order of the parameters

  handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);
  DoubleVector trialx(nvars);
  DoubleVector bestx(nvars);
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector init(nvars);
  DoubleVector initialstep(nvars, rho);
  DoubleVector delta(nvars);
  IntVector param(nvars, 0);
  IntVector lbound(nvars, 0);
  IntVector rbounds(nvars, 0);
  IntVector trapped(nvars, 0);

  EcoSystem->scaleVariables();
  int numThr = omp_get_max_threads();
  for (i = 0; i < numThr; i++) // scale the variables in each thread's Ecosystem
    EcoSystems[i]->scaleVariables();
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    // Scaling the bounds, because the parameters are scaled
    lowerb[i] = lowerb[i] / init[i];
    upperb[i] = upperb[i] / init[i];
    if (lowerb[i] > upperb[i]) {
      tmp = lowerb[i];
      lowerb[i] = upperb[i];
      upperb[i] = tmp;
    }

    bestx[i] = x[i];
    trialx[i] = x[i];
    param[i] = i;
    delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
  }

  bestf = EcoSystem->SimulateAndUpdate(trialx);
  if (bestf != bestf) { //check for NaN
    handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
  newf = bestf;
  oldf = bestf;
  steplength = lambda;
  if (isZero(steplength))
    steplength = rho;

  iters = 0;

  while (1) {
    if (isZero(bestf)) {
#ifndef _OPENMP
      iters = EcoSystem->getFuncEval() - offset;
#endif
      handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
      converge = -1;
      return;
    }

    /* randomize the order of the parameters once in a while */
    rchange = 0;
    while (rchange < nvars) {
      rnumber = rand() % nvars;
      rcheck = 1;
      for (i = 0; i < rchange; i++)
        if (param[i] == rnumber)
          rcheck = 0;
      if (rcheck) {
        param[rchange] = rnumber;
        rchange++;
      }
    }

    /* find best new point, one coord at a time */
    for (i = 0; i < nvars; i++)
      trialx[i] = x[i];
#ifdef _OPENMP
    newf = this->bestNearbySpec(delta, trialx, bestf, param);
    if (newf == -1) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
      return;
    }
#else
    newf = this->bestNearby(delta, trialx, bestf, param);
#endif
    /* if too many function evaluations occur, terminate the algorithm */

#ifndef _OPENMP
    iters = EcoSystem->getFuncEval() - offset;
#endif
    if (iters > hookeiter) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

      score = EcoSystem->SimulateAndUpdate(trialx);
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      for (i = 0; i < nvars; i++)
        bestx[i] = trialx[i] * init[i];
      EcoSystem->storeVariables(score, bestx);
      return;
    }

    /* if we made some improvements, pursue that direction */
    while (newf < bestf) {
      for (i = 0; i < nvars; i++) {
        /* if it has been trapped but f has now gotten better (bndcheck) */
        /* we assume that we are out of the trap, reset the counters */
        /* and go back to the stepsize we had when we got trapped */
        if ((trapped[i]) && (newf < oldf * bndcheck)) {
          trapped[i] = 0;
          lbound[i] = 0;
          rbounds[i] = 0;
          delta[i] = initialstep[i];

        } else if (trialx[i] < (lowerb[i] + verysmall)) {
          lbound[i]++;
          trialx[i] = lowerb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (lbound[i] >= 2)
            delta[i] /= rho;

        } else if (trialx[i] > (upperb[i] - verysmall)) {
          rbounds[i]++;
          trialx[i] = upperb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (rbounds[i] >= 2)
            delta[i] /= rho;
        }
      }

      for (i = 0; i < nvars; i++) {
        /* firstly, arrange the sign of delta[] */
        if (trialx[i] < x[i])
          delta[i] = 0.0 - fabs(delta[i]);
        else
          delta[i] = fabs(delta[i]);

        /* now, move further in this direction (pattern move: reflect the
           old base point through the new one) */
        tmp = x[i];
        x[i] = trialx[i];
        trialx[i] = trialx[i] + trialx[i] - tmp;
      }

      /* only move forward if this is really an improvement */
      oldf = newf;
      newf = EcoSystem->SimulateAndUpdate(trialx);
#ifdef _OPENMP
      iters++;
#endif
      if ((isEqual(newf, oldf)) || (newf > oldf)) {
        newf = oldf; //JMB no improvement, so reset the value of newf
        break;
      }

      /* OK, it's better, so update variables and look around */
      bestf = newf;
      for (i = 0; i < nvars; i++)
        x[i] = trialx[i];

#ifdef _OPENMP
      newf = this->bestNearbySpec(delta, trialx, bestf, param);
      if (newf == -1) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
        return;
      }
#else
      newf = this->bestNearby(delta, trialx, bestf, param);
#endif
      if (isEqual(newf, bestf))
        break;

      /* if too many function evaluations occur, terminate the algorithm */
#ifndef _OPENMP
      iters = EcoSystem->getFuncEval() - offset;
#endif
      if (iters > hookeiter) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
        handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
        handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
        handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

        score = EcoSystem->SimulateAndUpdate(trialx);
        handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
        for (i = 0; i < nvars; i++)
          bestx[i] = trialx[i] * init[i];
        EcoSystem->storeVariables(score, bestx);
        return;
      }
    } // while (newf < bestf)

#ifndef _OPENMP
    iters = EcoSystem->getFuncEval() - offset;
#endif
    if (newf < bestf) {
      for (i = 0; i < nvars; i++)
        bestx[i] = x[i] * init[i];
      bestf = newf;
      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
      EcoSystem->storeVariables(bestf, bestx);
      EcoSystem->writeBestValues();

    } else
      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");

    /* if the step length is less than hookeeps, terminate the algorithm */
    if (steplength < hookeeps) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      converge = 1;
      score = bestf;
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      EcoSystem->storeVariables(bestf, bestx);
      return;
    }

    steplength *= rho;
    handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
    for (i = 0; i < nvars; i++)
      delta[i] *= rho;
  }
}
//#endif
#endif
