Log In | Get Help   
Home My Page Projects Code Snippets Project Openings Mareframe
Summary Activity Forums Tracker Lists Tasks Docs Surveys News SCM Files
[mareframe] Annotation of /trunk/gadget/hooke.cc
[mareframe] / trunk / gadget / hooke.cc Repository:
ViewVC logotype

Annotation of /trunk/gadget/hooke.cc

Parent Directory Parent Directory | Revision Log Revision Log


Revision 26 - (view) (download)

1 : agomez 1 /* Nonlinear Optimization using the algorithm of Hooke and Jeeves */
2 :     /* 12 February 1994 author: Mark G. Johnson */
3 : ulcessvp 11 //
4 : agomez 1 /* Find a point X where the nonlinear function f(X) has a local */
5 :     /* minimum. X is an n-vector and f(X) is a scalar. In mathe- */
6 :     /* matical notation f: R^n -> R^1. The objective function f() */
7 :     /* is not required to be continuous. Nor does f() need to be */
8 :     /* differentiable. The program does not use or require */
9 :     /* derivatives of f(). */
10 : ulcessvp 11 //
11 : agomez 1 /* The software user supplies three things: a subroutine that */
12 :     /* computes f(X), an initial "starting guess" of the minimum point */
13 :     /* X, and values for the algorithm convergence parameters. Then */
14 :     /* the program searches for a local minimum, beginning from the */
15 :     /* starting guess, using the Direct Search algorithm of Hooke and */
16 :     /* Jeeves. */
17 : ulcessvp 11 //
18 : agomez 1 /* This C program is adapted from the Algol pseudocode found in */
19 :     /* "Algorithm 178: Direct Search" by Arthur F. Kaupe Jr., Commun- */
20 :     /* ications of the ACM, Vol 6. p.313 (June 1963). It includes the */
21 :     /* improvements suggested by Bell and Pike (CACM v.9, p. 684, Sept */
22 :     /* 1966) and those of Tomlin and Smith, "Remark on Algorithm 178" */
23 :     /* (CACM v.12). The original paper, which I don't recommend as */
24 :     /* highly as the one by A. Kaupe, is: R. Hooke and T. A. Jeeves, */
25 :     /* "Direct Search Solution of Numerical and Statistical Problems", */
26 :     /* Journal of the ACM, Vol. 8, April 1961, pp. 212-229. */
27 : ulcessvp 11 //
28 : agomez 1 /* Calling sequence: */
29 :     /* int hooke(nvars, startpt, endpt, rho, epsilon, itermax) */
30 :     /* */
31 :     /* nvars {an integer} */
32 :     /* This is the number of dimensions in the domain of f(). */
33 :     /* It is the number of coordinates of the starting point */
34 :     /* (and the minimum point.) */
35 :     /* startpt {an array of doubles} */
36 :     /* This is the user-supplied guess at the minimum. */
37 :     /* endpt {an array of doubles} */
38 :     /* This is the calculated location of the local minimum */
39 :     /* rho {a double} */
40 :     /* This is a user-supplied convergence parameter (more */
41 :     /* detail below), which should be set to a value between */
42 :     /* 0.0 and 1.0. Larger values of rho give greater */
43 :     /* probability of convergence on highly nonlinear */
44 :     /* functions, at a cost of more function evaluations. */
45 :     /* Smaller values of rho reduces the number of evaluations */
46 :     /* (and the program running time), but increases the risk */
47 :     /* of nonconvergence. See below. */
48 :     /* epsilon {a double} */
49 :     /* This is the criterion for halting the search for a */
50 :     /* minimum. When the algorithm begins to make less and */
51 :     /* less progress on each iteration, it checks the halting */
52 :     /* criterion: if the stepsize is below epsilon, terminate */
53 :     /* the iteration and return the current best estimate of */
54 :     /* the minimum. Larger values of epsilon (such as 1.0e-4) */
55 :     /* give quicker running time, but a less accurate estimate */
56 :     /* of the minimum. Smaller values of epsilon (such as */
57 :     /* 1.0e-7) give longer running time, but a more accurate */
58 :     /* estimate of the minimum. */
59 :     /* itermax {an integer} A second, rarely used, halting */
60 :     /* criterion. If the algorithm uses >= itermax */
61 :     /* iterations, halt. */
62 : ulcessvp 11 //
63 : agomez 1 /* The user-supplied objective function f(x,n) should return a C */
64 :     /* "double". Its arguments are x -- an array of doubles, and */
65 :     /* n -- an integer. x is the point at which f(x) should be */
66 :     /* evaluated, and n is the number of coordinates of x. That is, */
67 :     /* n is the number of coefficients being fitted. */
68 : ulcessvp 11 //
69 : agomez 1 /* rho, the algorithm convergence control */
70 : ulcessvp 11 //
71 : agomez 1 /* The algorithm works by taking "steps" from one estimate of */
72 :     /* a minimum, to another (hopefully better) estimate. Taking */
73 :     /* big steps gets to the minimum more quickly, at the risk of */
74 :     /* "stepping right over" an excellent point. The stepsize is */
75 :     /* controlled by a user supplied parameter called rho. At each */
76 :     /* iteration, the stepsize is multiplied by rho (0 < rho < 1), */
77 :     /* so the stepsize is successively reduced. */
78 :     /* Small values of rho correspond to big stepsize changes, */
79 :     /* which make the algorithm run more quickly. However, there */
80 :     /* is a chance (especially with highly nonlinear functions) */
81 :     /* that these big changes will accidentally overlook a */
82 :     /* promising search vector, leading to nonconvergence. */
83 :     /* Large values of rho correspond to small stepsize changes, */
84 :     /* which force the algorithm to carefully examine nearby points */
85 :     /* instead of optimistically forging ahead. This improves the */
86 :     /* probability of convergence. */
87 :     /* The stepsize is reduced until it is equal to (or smaller */
88 :     /* than) epsilon. So the number of iterations performed by */
89 :     /* Hooke-Jeeves is determined by rho and epsilon: */
90 :     /* rho**(number_of_iterations) = epsilon */
91 :     /* In general it is a good idea to set rho to an aggressively */
92 :     /* small value like 0.5 (hoping for fast convergence). Then, */
93 :     /* if the user suspects that the reported minimum is incorrect */
94 :     /* (or perhaps not accurate enough), the program can be run */
95 :     /* again with a larger value of rho such as 0.85, using the */
96 :     /* result of the first minimization as the starting guess to */
97 :     /* begin the second minimization. */
98 : ulcessvp 11 //
99 : agomez 1 /* Normal use: */
100 :     /* (1) Code your function f() in the C language */
101 :     /* (2) Install your starting guess {or read it in} */
102 :     /* (3) Run the program */
103 :     /* (4) {for the skeptical}: Use the computed minimum */
104 :     /* as the starting point for another run */
105 : ulcessvp 11 //
106 : agomez 1 /* Data Fitting: */
107 :     /* Code your function f() to be the sum of the squares of the */
108 :     /* errors (differences) between the computed values and the */
109 :     /* measured values. Then minimize f() using Hooke-Jeeves. */
110 :     /* EXAMPLE: you have 20 datapoints (ti, yi) and you want to */
111 :     /* find A,B,C such that (A*t*t) + (B*exp(t)) + (C*tan(t)) */
112 :     /* fits the data as closely as possible. Then f() is just */
113 :     /* f(x) = SUM (measured_y[i] - ((A*t[i]*t[i]) + (B*exp(t[i])) */
114 :     /* + (C*tan(t[i]))))^2 */
115 :     /* where x[] is a 3-vector consisting of {A, B, C}. */
116 : ulcessvp 11 //
117 : agomez 1 /* The author of this software is M.G. Johnson. */
118 :     /* Permission to use, copy, modify, and distribute this software */
119 :     /* for any purpose without fee is hereby granted, provided that */
120 :     /* this entire notice is included in all copies of any software */
121 :     /* which is or includes a copy or modification of this software */
122 :     /* and in all copies of the supporting documentation for such */
123 :     /* software. THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT */
124 :     /* ANY EXPRESS OR IMPLIED WARRANTY. IN PARTICULAR, NEITHER THE */
125 :     /* AUTHOR NOR AT&T MAKE ANY REPRESENTATION OR WARRANTY OF ANY */
126 :     /* KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS */
127 :     /* FITNESS FOR ANY PARTICULAR PURPOSE. */
128 : ulcessvp 11 //
129 : agomez 1 /* JMB this has been modified to work with the gadget object structure */
130 :     /* This means that the function has been replaced by a call to ecosystem */
131 :     /* object, and we can use the vector objects that have been defined */
132 :    
133 :     #include "gadget.h"
134 :     #include "optinfo.h"
135 :     #include "mathfunc.h"
136 :     #include "doublevector.h"
137 :     #include "intvector.h"
138 :     #include "errorhandler.h"
139 :     #include "ecosystem.h"
140 :     #include "global.h"
141 :    
142 : ulcessvp 16 #ifdef _OPENMP
143 : ulcessvp 11 #include "omp.h"
144 :     #endif
145 :    
146 : agomez 1 extern Ecosystem* EcoSystem;
147 : ulcessvp 16 #ifdef _OPENMP
148 : ulcessvp 11 extern Ecosystem** EcoSystems;
149 :     #endif
150 : agomez 1
151 :     /* given a point, look for a better one nearby, one coord at a time */
152 :     double OptInfoHooke::bestNearby(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
153 :    
154 :     double minf, ftmp;
155 :     int i;
156 :     DoubleVector z(point);
157 :    
158 :     minf = prevbest;
159 :     for (i = 0; i < point.Size(); i++) {
160 :     z[param[i]] = point[param[i]] + delta[param[i]];
161 :     ftmp = EcoSystem->SimulateAndUpdate(z);
162 :     if (ftmp < minf) {
163 :     minf = ftmp;
164 :     } else {
165 :     delta[param[i]] = 0.0 - delta[param[i]];
166 :     z[param[i]] = point[param[i]] + delta[param[i]];
167 :     ftmp = EcoSystem->SimulateAndUpdate(z);
168 :     if (ftmp < minf)
169 :     minf = ftmp;
170 :     else
171 :     z[param[i]] = point[param[i]];
172 :     }
173 :     }
174 :    
175 :     for (i = 0; i < point.Size(); i++)
176 :     point[i] = z[i];
177 :     return minf;
178 :     }
179 :    
180 : ulcessvp 11 /* given a point, look for a better one nearby, one coord at a time */
181 : ulcessvp 16 #ifdef _OPENMP
/*
 * function bestNearbyRepro parallelized with OpenMP
 * · 2 threads per coord to parallelize the calculation of +delta/-delta
 * · parallelize the calculation of the best nearby of the coord
 */
187 : ulcessvp 15 double OptInfoHooke::bestNearbyRepro(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
188 : ulcessvp 11 double minf;//, ftmp;
189 :     int i, j, k;
190 :     DoubleVector z(point);
191 :    
192 :     struct Storage {
193 :     DoubleVector z;
194 :     DoubleVector delta;
195 :     double ftmp;
196 :     int iters;
197 :     };
198 :    
199 :     minf = prevbest;
200 :     i = 0;
201 :    
202 :     int paral_tokens, numThr, nvars = point.Size();
203 :     numThr = omp_get_max_threads ( );
204 :    
205 :     Storage* storage = new Storage[numThr];
206 :     if ((numThr % 2) == 0)
207 :     paral_tokens = numThr / 2;
208 :     else {
209 :     return -1;
210 :     }
211 :    
212 : ulcessvp 19 // omp_set_dynamic(0);
213 :     // omp_set_nested(1); //permit the nested parallelization
214 : ulcessvp 11 while ( i < nvars) {
215 :     if ((i + paral_tokens -1) >= nvars)
216 :     paral_tokens = nvars - i;
217 : ulcessvp 19 #pragma omp parallel for num_threads(paral_tokens*2) private(k) //parallelize the parameters (numThr)
218 :     for (j = 0; j < (paral_tokens*2); ++j) {
219 : ulcessvp 11 storage[j].z = z;
220 :     storage[j].delta = delta;
221 : ulcessvp 19 DoubleVector v(z);
222 : ulcessvp 11
223 : ulcessvp 19 if (j<paral_tokens) {
224 :     k = param[i+j];
225 :     v[k] += delta[k];
226 : ulcessvp 11 }
227 : ulcessvp 19 else {
228 :     k = param[i+j-paral_tokens];
229 :     v[k] -= delta[k];
230 :     }
231 : ulcessvp 11
232 : ulcessvp 19 storage[j].ftmp = EcoSystems[j]->SimulateAndUpdate(v);
233 :     storage[j].z[k] = v[k];
234 :     }
235 :     for (j = 0; j < paral_tokens; ++j) {
236 :     k = param[i+j];
237 :     if (storage[j].ftmp < minf) {
238 : ulcessvp 11 storage[j].iters = 1;
239 : ulcessvp 19 // storage[j].z[k] = v1[k];
240 : ulcessvp 11 } else {
241 :     storage[j].iters = 2;
242 :     storage[j].delta[k] = 0.0 - delta[k];
243 :     if (storage[j+paral_tokens].ftmp < minf) {
244 :     storage[j].ftmp = storage[j+paral_tokens].ftmp;
245 : ulcessvp 19 storage[j].z[k] = storage[j+paral_tokens].z[k];;
246 : ulcessvp 11 }
247 :     }
248 :     }
249 :    
250 :     for (j = 0; j < paral_tokens; ++j) {
251 :     i++;
252 :     iters += storage[j].iters;
253 :     if (storage[j].ftmp < minf) {
254 :     minf = storage[j].ftmp;
255 :     z = storage[j].z;
256 :     delta = storage[j].delta;
257 :     break;
258 :     }
259 :     }
260 :     }
261 : ulcessvp 15 delete[] storage;
262 : ulcessvp 11 for (i = 0; i < nvars; ++i)
263 :     point[i] = z[i];
264 :     return minf;
265 :     }
/* Hooke & Jeeves direct-search optimisation, reproducible parallel variant.
 * Same algorithm as OptimiseLikelihood() but delegates the coordinate search
 * to bestNearbyRepro(), which evaluates coordinates with OpenMP threads.
 * All work is done on scaled parameters (scaled by the initial values);
 * results are unscaled (multiplied by init[]) before being stored.
 * Sets the member fields converge, score and iters as its outputs.
 * NOTE(review): iters is accumulated inside bestNearbyRepro and via iters++
 * here, rather than from EcoSystem->getFuncEval() as in the serial version
 * (those lines are left commented out) - presumably because the per-thread
 * replicas do their evaluations on EcoSystems[j], not EcoSystem. */
void OptInfoHooke::OptimiseLikelihoodREP() {

  double oldf, newf, bestf, steplength, tmp;
  int i, offset;
  int rchange, rcheck, rnumber; //Used to randomise the order of the parameters

  handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);           // current accepted point (scaled)
  DoubleVector trialx(nvars);      // candidate point being evaluated
  DoubleVector bestx(nvars);       // best point found, in UNSCALED units
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector init(nvars);        // initial values, used as scale factors
  DoubleVector initialstep(nvars, rho);  // stepsize remembered when a parameter gets trapped
  DoubleVector delta(nvars);       // per-parameter signed stepsize
  IntVector param(nvars, 0);       // randomised visiting order of the parameters
  IntVector lbound(nvars, 0);      // times each parameter hit its lower bound
  IntVector rbounds(nvars, 0);     // times each parameter hit its upper bound
  IntVector trapped(nvars, 0);     // flag: parameter currently stuck on a bound

  EcoSystem->scaleVariables();
  int numThr = omp_get_max_threads ( );
  for (i = 0; i < numThr; i++) // scale the variables for the ecosystem of every thread
    EcoSystems[i]->scaleVariables();
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    // Scaling the bounds, because the parameters are scaled
    lowerb[i] = lowerb[i] / init[i];
    upperb[i] = upperb[i] / init[i];
    // a negative init[] flips the bounds, so swap them back into order
    if (lowerb[i] > upperb[i]) {
      tmp = lowerb[i];
      lowerb[i] = upperb[i];
      upperb[i] = tmp;
    }

    bestx[i] = x[i];
    trialx[i] = x[i];
    param[i] = i;
    delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
  }

  bestf = EcoSystem->SimulateAndUpdate(trialx);
  if (bestf != bestf) { //check for NaN
    handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
  newf = bestf;
  oldf = bestf;
  steplength = lambda;
  if (isZero(steplength))
    steplength = rho;

  iters = 0;

  /* outer loop: one pass per steplength; ends by return on convergence,
   * iteration limit, f(x)=0 error, or a bad thread count */
  while (1) {
    if (isZero(bestf)) {
//      iters = EcoSystem->getFuncEval() - offset;
      handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
      converge = -1;
      return;
    }

    /* randomize the order of the parameters once in a while */
    /* (rejection sampling: draw indices until param[] is a permutation) */
    rchange = 0;
    while (rchange < nvars) {
      rnumber = rand() % nvars;
      rcheck = 1;
      for (i = 0; i < rchange; i++)
        if (param[i] == rnumber)
          rcheck = 0;
      if (rcheck) {
        param[rchange] = rnumber;
        rchange++;
      }
    }

    /* find best new point, one coord at a time */
    for (i = 0; i < nvars; i++)
      trialx[i] = x[i];
    newf = this->bestNearbyRepro(delta, trialx, bestf, param);
    /* -1 is the sentinel for "thread count is not a multiple of 2" */
    if (newf == -1) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
      return;
    }
    /* if too many function evaluations occur, terminate the algorithm */

//    iters = EcoSystem->getFuncEval() - offset;
    if (iters > hookeiter) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

      score = EcoSystem->SimulateAndUpdate(trialx);
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      for (i = 0; i < nvars; i++)
        bestx[i] = trialx[i] * init[i];   // unscale before storing
      EcoSystem->storeVariables(score, bestx);
      return;
    }

    /* if we made some improvements, pursue that direction */
    while (newf < bestf) {
      for (i = 0; i < nvars; i++) {
        /* if it has been trapped but f has now gotten better (bndcheck) */
        /* we assume that we are out of the trap, reset the counters */
        /* and go back to the stepsize we had when we got trapped */
        if ((trapped[i]) && (newf < oldf * bndcheck)) {
          trapped[i] = 0;
          lbound[i] = 0;
          rbounds[i] = 0;
          delta[i] = initialstep[i];

        } else if (trialx[i] < (lowerb[i] + verysmall)) {
          /* parameter fell below its lower bound: clamp it and remember
           * the stepsize in force when it first got stuck */
          lbound[i]++;
          trialx[i] = lowerb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (lbound[i] >= 2)
            delta[i] /= rho;

        } else if (trialx[i] > (upperb[i] - verysmall)) {
          /* symmetric handling for the upper bound */
          rbounds[i]++;
          trialx[i] = upperb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (rbounds[i] >= 2)
            delta[i] /= rho;
        }
      }

      for (i = 0; i < nvars; i++) {
        /* firstly, arrange the sign of delta[] */
        if (trialx[i] < x[i])
          delta[i] = 0.0 - fabs(delta[i]);
        else
          delta[i] = fabs(delta[i]);

        /* now, move further in this direction */
        /* (pattern move: trialx = 2*trialx - x, i.e. extrapolate the step) */
        tmp = x[i];
        x[i] = trialx[i];
        trialx[i] = trialx[i] + trialx[i] - tmp;
      }

      /* only move forward if this is really an improvement */
      oldf = newf;
      newf = EcoSystem->SimulateAndUpdate(trialx);
      iters++;
      if ((isEqual(newf, oldf)) || (newf > oldf)) {
        newf = oldf; //JMB no improvement, so reset the value of newf
        break;
      }

      /* OK, it's better, so update variables and look around */
      bestf = newf;
      for (i = 0; i < nvars; i++)
        x[i] = trialx[i];

      newf = this->bestNearbyRepro(delta, trialx, bestf, param);
      if (newf == -1) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
        return;
      }
      if (isEqual(newf, bestf))
        break;

      /* if too many function evaluations occur, terminate the algorithm */
//      iters = EcoSystem->getFuncEval() - offset;
      if (iters > hookeiter) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
        handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
        handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
        handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

        score = EcoSystem->SimulateAndUpdate(trialx);
        handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
        for (i = 0; i < nvars; i++)
          bestx[i] = trialx[i] * init[i];
        EcoSystem->storeVariables(score, bestx);
        return;
      }
    } // while (newf < bestf)

//    iters = EcoSystem->getFuncEval() - offset;
    if (newf < bestf) {
      for (i = 0; i < nvars; i++)
        bestx[i] = x[i] * init[i];
      bestf = newf;
      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
      EcoSystem->storeVariables(bestf, bestx);
      EcoSystem->writeBestValues();

    } else
      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");

    /* if the step length is less than hookeeps, terminate the algorithm */
    if (steplength < hookeeps) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      converge = 1;
      score = bestf;
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      EcoSystem->storeVariables(bestf, bestx);
      return;
    }

    /* no convergence yet: shrink the steplength and every delta by rho */
    steplength *= rho;
    handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
    for (i = 0; i < nvars; i++)
      delta[i] *= rho;
  }
}
501 : ulcessvp 11 #endif
502 : agomez 20
/* Hooke & Jeeves direct-search optimisation (serial version).
 * Searches for a local minimum of the likelihood score starting from the
 * current (scaled) parameter values, alternating an exploratory search
 * (bestNearby) with pattern moves, and shrinking the stepsize by rho each
 * time no improvement is found.  Terminates when the steplength drops
 * below hookeeps (converged), when more than hookeiter evaluations have
 * been used, or on error (f(x) = NaN at the start, or f(x) = 0).
 * All work is done on scaled parameters; results are unscaled (multiplied
 * by init[]) before being stored.  Sets the member fields converge, score
 * and iters as its outputs. */
void OptInfoHooke::OptimiseLikelihood() {

  double oldf, newf, bestf, steplength, tmp;
  int i, offset;
  int rchange, rcheck, rnumber; //Used to randomise the order of the parameters

  handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);           // current accepted point (scaled)
  DoubleVector trialx(nvars);      // candidate point being evaluated
  DoubleVector bestx(nvars);       // best point found, in UNSCALED units
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector init(nvars);        // initial values, used as scale factors
  DoubleVector initialstep(nvars, rho);  // stepsize remembered when a parameter gets trapped
  DoubleVector delta(nvars);       // per-parameter signed stepsize
  IntVector param(nvars, 0);       // randomised visiting order of the parameters
  IntVector lbound(nvars, 0);      // times each parameter hit its lower bound
  IntVector rbounds(nvars, 0);     // times each parameter hit its upper bound
  IntVector trapped(nvars, 0);     // flag: parameter currently stuck on a bound

  EcoSystem->scaleVariables();
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    // Scaling the bounds, because the parameters are scaled
    lowerb[i] = lowerb[i] / init[i];
    upperb[i] = upperb[i] / init[i];
    // a negative init[] flips the bounds, so swap them back into order
    if (lowerb[i] > upperb[i]) {
      tmp = lowerb[i];
      lowerb[i] = upperb[i];
      upperb[i] = tmp;
    }

    bestx[i] = x[i];
    trialx[i] = x[i];
    param[i] = i;
    delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
  }

  bestf = EcoSystem->SimulateAndUpdate(trialx);
  if (bestf != bestf) { //check for NaN
    handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
  newf = bestf;
  oldf = bestf;
  steplength = lambda;
  if (isZero(steplength))
    steplength = rho;

  iters = 0;

  /* outer loop: one pass per steplength; ends by return on convergence,
   * iteration limit, or f(x) = 0 error */
  while (1) {
    if (isZero(bestf)) {
      iters = EcoSystem->getFuncEval() - offset;
      handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
      converge = -1;
      return;
    }

    /* randomize the order of the parameters once in a while */
    /* (rejection sampling: draw indices until param[] is a permutation) */
    rchange = 0;
    while (rchange < nvars) {
      rnumber = rand() % nvars;
      rcheck = 1;
      for (i = 0; i < rchange; i++)
        if (param[i] == rnumber)
          rcheck = 0;
      if (rcheck) {
        param[rchange] = rnumber;
        rchange++;
      }
    }

    /* find best new point, one coord at a time */
    for (i = 0; i < nvars; i++)
      trialx[i] = x[i];
    newf = this->bestNearby(delta, trialx, bestf, param);
    /* if too many function evaluations occur, terminate the algorithm */

    iters = EcoSystem->getFuncEval() - offset;
    if (iters > hookeiter) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

      score = EcoSystem->SimulateAndUpdate(trialx);
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      for (i = 0; i < nvars; i++)
        bestx[i] = trialx[i] * init[i];   // unscale before storing
      EcoSystem->storeVariables(score, bestx);
      return;
    }

    /* if we made some improvements, pursue that direction */
    while (newf < bestf) {
      for (i = 0; i < nvars; i++) {
        /* if it has been trapped but f has now gotten better (bndcheck) */
        /* we assume that we are out of the trap, reset the counters */
        /* and go back to the stepsize we had when we got trapped */
        if ((trapped[i]) && (newf < oldf * bndcheck)) {
          trapped[i] = 0;
          lbound[i] = 0;
          rbounds[i] = 0;
          delta[i] = initialstep[i];

        } else if (trialx[i] < (lowerb[i] + verysmall)) {
          /* parameter fell below its lower bound: clamp it and remember
           * the stepsize in force when it first got stuck */
          lbound[i]++;
          trialx[i] = lowerb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (lbound[i] >= 2)
            delta[i] /= rho;

        } else if (trialx[i] > (upperb[i] - verysmall)) {
          /* symmetric handling for the upper bound */
          rbounds[i]++;
          trialx[i] = upperb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (rbounds[i] >= 2)
            delta[i] /= rho;
        }
      }

      for (i = 0; i < nvars; i++) {
        /* firstly, arrange the sign of delta[] */
        if (trialx[i] < x[i])
          delta[i] = 0.0 - fabs(delta[i]);
        else
          delta[i] = fabs(delta[i]);

        /* now, move further in this direction */
        /* (pattern move: trialx = 2*trialx - x, i.e. extrapolate the step) */
        tmp = x[i];
        x[i] = trialx[i];
        trialx[i] = trialx[i] + trialx[i] - tmp;
      }

      /* only move forward if this is really an improvement */
      oldf = newf;
      newf = EcoSystem->SimulateAndUpdate(trialx);
      if ((isEqual(newf, oldf)) || (newf > oldf)) {
        newf = oldf; //JMB no improvement, so reset the value of newf
        break;
      }

      /* OK, it's better, so update variables and look around */
      bestf = newf;
      for (i = 0; i < nvars; i++)
        x[i] = trialx[i];
      newf = this->bestNearby(delta, trialx, bestf, param);
      if (isEqual(newf, bestf))
        break;

      /* if too many function evaluations occur, terminate the algorithm */
      iters = EcoSystem->getFuncEval() - offset;
      if (iters > hookeiter) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
        handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
        handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
        handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

        score = EcoSystem->SimulateAndUpdate(trialx);
        handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
        for (i = 0; i < nvars; i++)
          bestx[i] = trialx[i] * init[i];
        EcoSystem->storeVariables(score, bestx);
        return;
      }
    } // while (newf < bestf)

    iters = EcoSystem->getFuncEval() - offset;
    if (newf < bestf) {
      for (i = 0; i < nvars; i++)
        bestx[i] = x[i] * init[i];
      bestf = newf;
      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
      EcoSystem->storeVariables(bestf, bestx);
      EcoSystem->writeBestValues();

    } else
      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");

    /* if the step length is less than hookeeps, terminate the algorithm */
    if (steplength < hookeeps) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      converge = 1;
      score = bestf;
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      EcoSystem->storeVariables(bestf, bestx);
      return;
    }

    /* no convergence yet: shrink the steplength and every delta by rho */
    steplength *= rho;
    handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
    for (i = 0; i < nvars; i++)
      delta[i] *= rho;
  }
}
723 : ulcessvp 11
724 : ulcessvp 12 /* Functions to perform the parallelization of the algorithm of HJ with OpenMP*/
725 : agomez 20 #ifdef _OPENMP
726 :     //#ifdef SPECULATIVE
727 : ulcessvp 15 double OptInfoHooke::bestNearbySpec(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
728 : ulcessvp 12 double minf;
729 : ulcessvp 11 int i, j, k, ii;
730 :     DoubleVector z(point);
731 :     int bestId = 0;
732 :     struct Storage {
733 :     DoubleVector z;
734 :     DoubleVector delta;
735 :     double ftmp;
736 :     int iters;
737 :     };
738 :    
739 :     minf = prevbest;
740 :    
741 :     int paral_tokens, numThr, nvars = point.Size();
742 :     numThr = omp_get_max_threads ( );
743 :    
744 :     Storage* storage = new Storage[numThr];
745 :     if ((numThr % 2) == 0)
746 :     paral_tokens = numThr / 2;
747 :     else {
748 :     return -1;
749 :     }
750 :    
751 : ulcessvp 19 // omp_set_dynamic(0);
752 :     // omp_set_nested(1); //permit the nested parallelization
753 : ulcessvp 11 for (ii=0; ii< paral_tokens; ii++) {
754 :     i = 0;
755 :     while ( i < nvars) {
756 :     if ((i + paral_tokens -1) >= nvars)
757 :     paral_tokens = nvars - i;
758 : ulcessvp 19 #pragma omp parallel for num_threads(paral_tokens*2) private(k)
759 :     for (j = 0; j < (paral_tokens*2); ++j) {
760 : ulcessvp 11 storage[j].z = z;
761 :     storage[j].delta = delta;
762 : ulcessvp 19 DoubleVector v(z);
763 : ulcessvp 11
764 : ulcessvp 19 if (j<paral_tokens) {
765 :     k = param[i+j];
766 :     v[k] += delta[k];
767 : ulcessvp 11 }
768 : ulcessvp 19 else {
769 :     k = param[i+j-paral_tokens];
770 :     v[k] -= delta[k];
771 :     }
772 :    
773 :     storage[j].ftmp = EcoSystems[j]->SimulateAndUpdate(v);
774 :     storage[j].z[k] = v[k];
775 :     }
776 :    
777 :     for (j = 0; j < paral_tokens; ++j) {
778 :     k = param[i+j];
779 : ulcessvp 11 if (storage[j].ftmp < minf) {
780 :     storage[j].iters = 1;
781 : ulcessvp 19 // storage[j].z[k] = v1[k];
782 : ulcessvp 11 } else {
783 :     storage[j].iters = 2;
784 :     storage[j].delta[k] = 0.0 - delta[k];
785 :     if (storage[j+paral_tokens].ftmp < minf) {
786 :     storage[j].ftmp = storage[j+paral_tokens].ftmp;
787 : ulcessvp 19 storage[j].z[k] = storage[j+paral_tokens].z[k];
788 : ulcessvp 11 }
789 :     else iters += 2;
790 :     }
791 :     }
792 :    
793 :     bestId = 0;
794 :     for (j = 1; j < paral_tokens; ++j) {
795 :     if (storage[j].ftmp < storage[bestId].ftmp)
796 :     bestId = j;
797 :     }
798 :     if (storage[bestId].ftmp < minf) {
799 :     iters += storage[bestId].iters;
800 :     minf = storage[bestId].ftmp;
801 :     z = storage[bestId].z;
802 :     delta = storage[bestId].delta;
803 :     }
804 :    
805 :     i += paral_tokens;
806 :     }
807 : ulcessvp 19 paral_tokens = numThr / 2;
808 : ulcessvp 11 }
809 :    
810 :     delete[] storage;
811 :     for (i = 0; i < nvars; ++i)
812 :     point[i] = z[i];
813 :    
814 :     return minf;
815 :     }
816 :    
/* OpenMP version of the Hooke & Jeeves direct-search optimisation.
 * Mirrors the serial OptimiseLikelihood(), but evaluates the trial points
 * around the current point in parallel through bestNearbySpec(), which
 * requires an even number of OpenMP threads (it returns -1 otherwise).
 * Terminates when the steplength shrinks below hookeeps (converged),
 * when more than hookeiter function evaluations have been made, or on
 * error (f(x) is NaN at the start, or f(x) reaches 0). */
void OptInfoHooke::OptimiseLikelihoodOMP() {
  double oldf, newf, bestf, steplength, tmp;
  int i, offset;
  int rchange, rcheck, rnumber; //Used to randomise the order of the parameters

  handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);            // current base point (scaled)
  DoubleVector trialx(nvars);       // trial point under evaluation (scaled)
  DoubleVector bestx(nvars);        // best point found so far (unscaled on store)
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector init(nvars);         // initial values, used to scale/unscale
  DoubleVector initialstep(nvars, rho);  // step sizes saved when a bound traps a parameter
  DoubleVector delta(nvars);        // per-parameter search step
  IntVector param(nvars, 0);        // randomised order of the parameters
  IntVector lbound(nvars, 0);       // counts of consecutive lower-bound hits
  IntVector rbounds(nvars, 0);      // counts of consecutive upper-bound hits
  IntVector trapped(nvars, 0);      // flag: parameter stuck at a bound

  EcoSystem->scaleVariables();
  int numThr = omp_get_max_threads ( );
  for (i = 0; i < numThr; i++) // scale the variables for the ecosystem of every thread
    EcoSystems[i]->scaleVariables();
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    // Scaling the bounds, because the parameters are scaled
    lowerb[i] = lowerb[i] / init[i];
    upperb[i] = upperb[i] / init[i];
    // a negative init[i] flips the scaled bounds; swap them back into order
    if (lowerb[i] > upperb[i]) {
      tmp = lowerb[i];
      lowerb[i] = upperb[i];
      upperb[i] = tmp;
    }

    bestx[i] = x[i];
    trialx[i] = x[i];
    param[i] = i;
    delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
  }

  bestf = EcoSystem->SimulateAndUpdate(trialx);
  if (bestf != bestf) { //check for NaN
    handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
  newf = bestf;
  oldf = bestf;
  steplength = lambda;
  if (isZero(steplength))
    steplength = rho;

  // in the OMP version iters is accumulated explicitly (by bestNearbySpec
  // and this loop) rather than read back from EcoSystem->getFuncEval()
  iters = 0;

  while (1) {
    if (isZero(bestf)) {
      // #ifndef _OPENMP
      // iters = EcoSystem->getFuncEval() - offset;
      // #endif
      handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
      converge = -1;
      return;
    }

    /* randomize the order of the parameters once in a while */
    // rejection sampling: draw until each index 0..nvars-1 appears exactly once
    rchange = 0;
    while (rchange < nvars) {
      rnumber = rand() % nvars;
      rcheck = 1;
      for (i = 0; i < rchange; i++)
        if (param[i] == rnumber)
          rcheck = 0;
      if (rcheck) {
        param[rchange] = rnumber;
        rchange++;
      }
    }

    /* find best new point, one coord at a time */
    for (i = 0; i < nvars; i++)
      trialx[i] = x[i];
    // #ifdef _OPENMP
    newf = this->bestNearbySpec(delta, trialx, bestf, param);
    // bestNearbySpec returns -1 when the thread count is not even
    if (newf == -1) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
      return;
    }
    // #else
    // newf = this->bestNearby(delta, trialx, bestf, param);
    // #endif
    /* if too many function evaluations occur, terminate the algorithm */

    // #ifndef _OPENMP
    // iters = EcoSystem->getFuncEval() - offset;
    // #endif
    if (iters > hookeiter) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

      score = EcoSystem->SimulateAndUpdate(trialx);
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      for (i = 0; i < nvars; i++)
        bestx[i] = trialx[i] * init[i];  // unscale before storing
      EcoSystem->storeVariables(score, bestx);
      return;
    }

    /* if we made some improvements, pursue that direction */
    while (newf < bestf) {
      for (i = 0; i < nvars; i++) {
        /* if it has been trapped but f has now gotten better (bndcheck) */
        /* we assume that we are out of the trap, reset the counters */
        /* and go back to the stepsize we had when we got trapped */
        if ((trapped[i]) && (newf < oldf * bndcheck)) {
          trapped[i] = 0;
          lbound[i] = 0;
          rbounds[i] = 0;
          delta[i] = initialstep[i];

        } else if (trialx[i] < (lowerb[i] + verysmall)) {
          // clamp to the lower bound and remember the step we had on entry
          lbound[i]++;
          trialx[i] = lowerb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (lbound[i] >= 2)
            delta[i] /= rho;

        } else if (trialx[i] > (upperb[i] - verysmall)) {
          // clamp to the upper bound, symmetric to the lower-bound case
          rbounds[i]++;
          trialx[i] = upperb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (rbounds[i] >= 2)
            delta[i] /= rho;
        }
      }

      for (i = 0; i < nvars; i++) {
        /* firstly, arrange the sign of delta[] */
        if (trialx[i] < x[i])
          delta[i] = 0.0 - fabs(delta[i]);
        else
          delta[i] = fabs(delta[i]);

        /* now, move further in this direction */
        // pattern move: trialx = 2*trialx - x, base point advances to trialx
        tmp = x[i];
        x[i] = trialx[i];
        trialx[i] = trialx[i] + trialx[i] - tmp;
      }

      /* only move forward if this is really an improvement */
      oldf = newf;
      newf = EcoSystem->SimulateAndUpdate(trialx);
      // #ifdef _OPENMP
      iters++;
      // #endif
      if ((isEqual(newf, oldf)) || (newf > oldf)) {
        newf = oldf; //JMB no improvement, so reset the value of newf
        break;
      }

      /* OK, it's better, so update variables and look around */
      bestf = newf;
      for (i = 0; i < nvars; i++)
        x[i] = trialx[i];

      // #ifdef _OPENMP
      newf = this->bestNearbySpec(delta, trialx, bestf, param);
      if (newf == -1) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
        return;
      }
      // #else
      // newf = this->bestNearby(delta, trialx, bestf, param);
      // #endif
      if (isEqual(newf, bestf))
        break;

      /* if too many function evaluations occur, terminate the algorithm */
      // #ifndef _OPENMP
      // iters = EcoSystem->getFuncEval() - offset;
      // #endif
      if (iters > hookeiter) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
        handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
        handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
        handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

        score = EcoSystem->SimulateAndUpdate(trialx);
        handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
        for (i = 0; i < nvars; i++)
          bestx[i] = trialx[i] * init[i];
        EcoSystem->storeVariables(score, bestx);
        return;
      }
    }

    // #ifndef _OPENMP
    // iters = EcoSystem->getFuncEval() - offset;
    // #endif
    if (newf < bestf) {
      for (i = 0; i < nvars; i++)
        bestx[i] = x[i] * init[i];  // unscale the new optimum before storing
      bestf = newf;
      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
      EcoSystem->storeVariables(bestf, bestx);
      EcoSystem->writeBestValues();

    } else
      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");

    /* if the step length is less than hookeeps, terminate the algorithm */
    if (steplength < hookeeps) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      converge = 1;
      score = bestf;
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      EcoSystem->storeVariables(bestf, bestx);
      return;
    }

    // no improvement at this scale: contract the search and try again
    steplength *= rho;
    handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
    for (i = 0; i < nvars; i++)
      delta[i] *= rho;
  }
}
1069 : agomez 20 //#endif
1070 : ulcessvp 11 #endif

root@forge.cesga.es
ViewVC Help
Powered by ViewVC 1.0.0  

Powered By FusionForge