Log In | Get Help   
Home My Page Projects Code Snippets Project Openings Mareframe
Summary Activity Forums Tracker Lists Tasks Docs Surveys News SCM Files
[mareframe] Annotation of /trunk/gadget/hooke.cc
[mareframe] / trunk / gadget / hooke.cc Repository:
ViewVC logotype

Annotation of /trunk/gadget/hooke.cc

Parent Directory Parent Directory | Revision Log Revision Log


Revision 12 - (view) (download)

1 : agomez 1 /* Nonlinear Optimization using the algorithm of Hooke and Jeeves */
2 :     /* 12 February 1994 author: Mark G. Johnson */
3 : ulcessvp 11 //
4 : agomez 1 /* Find a point X where the nonlinear function f(X) has a local */
5 :     /* minimum. X is an n-vector and f(X) is a scalar. In mathe- */
6 :     /* matical notation f: R^n -> R^1. The objective function f() */
7 :     /* is not required to be continuous. Nor does f() need to be */
8 :     /* differentiable. The program does not use or require */
9 :     /* derivatives of f(). */
10 : ulcessvp 11 //
11 : agomez 1 /* The software user supplies three things: a subroutine that */
12 :     /* computes f(X), an initial "starting guess" of the minimum point */
13 :     /* X, and values for the algorithm convergence parameters. Then */
14 :     /* the program searches for a local minimum, beginning from the */
15 :     /* starting guess, using the Direct Search algorithm of Hooke and */
16 :     /* Jeeves. */
17 : ulcessvp 11 //
18 : agomez 1 /* This C program is adapted from the Algol pseudocode found in */
19 :     /* "Algorithm 178: Direct Search" by Arthur F. Kaupe Jr., Commun- */
20 :     /* ications of the ACM, Vol 6. p.313 (June 1963). It includes the */
21 :     /* improvements suggested by Bell and Pike (CACM v.9, p. 684, Sept */
22 :     /* 1966) and those of Tomlin and Smith, "Remark on Algorithm 178" */
23 :     /* (CACM v.12). The original paper, which I don't recommend as */
24 :     /* highly as the one by A. Kaupe, is: R. Hooke and T. A. Jeeves, */
25 :     /* "Direct Search Solution of Numerical and Statistical Problems", */
26 :     /* Journal of the ACM, Vol. 8, April 1961, pp. 212-229. */
27 : ulcessvp 11 //
28 : agomez 1 /* Calling sequence: */
29 :     /* int hooke(nvars, startpt, endpt, rho, epsilon, itermax) */
30 :     /* */
31 :     /* nvars {an integer} */
32 :     /* This is the number of dimensions in the domain of f(). */
33 :     /* It is the number of coordinates of the starting point */
34 :     /* (and the minimum point.) */
35 :     /* startpt {an array of doubles} */
36 :     /* This is the user-supplied guess at the minimum. */
37 :     /* endpt {an array of doubles} */
38 :     /* This is the calculated location of the local minimum */
39 :     /* rho {a double} */
40 :     /* This is a user-supplied convergence parameter (more */
41 :     /* detail below), which should be set to a value between */
42 :     /* 0.0 and 1.0. Larger values of rho give greater */
43 :     /* probability of convergence on highly nonlinear */
44 :     /* functions, at a cost of more function evaluations. */
45 :     /* Smaller values of rho reduces the number of evaluations */
46 :     /* (and the program running time), but increases the risk */
47 :     /* of nonconvergence. See below. */
48 :     /* epsilon {a double} */
49 :     /* This is the criterion for halting the search for a */
50 :     /* minimum. When the algorithm begins to make less and */
51 :     /* less progress on each iteration, it checks the halting */
52 :     /* criterion: if the stepsize is below epsilon, terminate */
53 :     /* the iteration and return the current best estimate of */
54 :     /* the minimum. Larger values of epsilon (such as 1.0e-4) */
55 :     /* give quicker running time, but a less accurate estimate */
56 :     /* of the minimum. Smaller values of epsilon (such as */
57 :     /* 1.0e-7) give longer running time, but a more accurate */
58 :     /* estimate of the minimum. */
59 :     /* itermax {an integer} A second, rarely used, halting */
60 :     /* criterion. If the algorithm uses >= itermax */
61 :     /* iterations, halt. */
62 : ulcessvp 11 //
63 : agomez 1 /* The user-supplied objective function f(x,n) should return a C */
64 :     /* "double". Its arguments are x -- an array of doubles, and */
65 :     /* n -- an integer. x is the point at which f(x) should be */
66 :     /* evaluated, and n is the number of coordinates of x. That is, */
67 :     /* n is the number of coefficients being fitted. */
68 : ulcessvp 11 //
69 : agomez 1 /* rho, the algorithm convergence control */
70 : ulcessvp 11 //
71 : agomez 1 /* The algorithm works by taking "steps" from one estimate of */
72 :     /* a minimum, to another (hopefully better) estimate. Taking */
73 :     /* big steps gets to the minimum more quickly, at the risk of */
74 :     /* "stepping right over" an excellent point. The stepsize is */
75 :     /* controlled by a user supplied parameter called rho. At each */
76 :     /* iteration, the stepsize is multiplied by rho (0 < rho < 1), */
77 :     /* so the stepsize is successively reduced. */
78 :     /* Small values of rho correspond to big stepsize changes, */
79 :     /* which make the algorithm run more quickly. However, there */
80 :     /* is a chance (especially with highly nonlinear functions) */
81 :     /* that these big changes will accidentally overlook a */
82 :     /* promising search vector, leading to nonconvergence. */
83 :     /* Large values of rho correspond to small stepsize changes, */
84 :     /* which force the algorithm to carefully examine nearby points */
85 :     /* instead of optimistically forging ahead. This improves the */
86 :     /* probability of convergence. */
87 :     /* The stepsize is reduced until it is equal to (or smaller */
88 :     /* than) epsilon. So the number of iterations performed by */
89 :     /* Hooke-Jeeves is determined by rho and epsilon: */
90 :     /* rho**(number_of_iterations) = epsilon */
91 :     /* In general it is a good idea to set rho to an aggressively */
92 :     /* small value like 0.5 (hoping for fast convergence). Then, */
93 :     /* if the user suspects that the reported minimum is incorrect */
94 :     /* (or perhaps not accurate enough), the program can be run */
95 :     /* again with a larger value of rho such as 0.85, using the */
96 :     /* result of the first minimization as the starting guess to */
97 :     /* begin the second minimization. */
98 : ulcessvp 11 //
99 : agomez 1 /* Normal use: */
100 :     /* (1) Code your function f() in the C language */
101 :     /* (2) Install your starting guess {or read it in} */
102 :     /* (3) Run the program */
103 :     /* (4) {for the skeptical}: Use the computed minimum */
104 :     /* as the starting point for another run */
105 : ulcessvp 11 //
106 : agomez 1 /* Data Fitting: */
107 :     /* Code your function f() to be the sum of the squares of the */
108 :     /* errors (differences) between the computed values and the */
109 :     /* measured values. Then minimize f() using Hooke-Jeeves. */
110 :     /* EXAMPLE: you have 20 datapoints (ti, yi) and you want to */
111 :     /* find A,B,C such that (A*t*t) + (B*exp(t)) + (C*tan(t)) */
112 :     /* fits the data as closely as possible. Then f() is just */
113 :     /* f(x) = SUM (measured_y[i] - ((A*t[i]*t[i]) + (B*exp(t[i])) */
114 :     /* + (C*tan(t[i]))))^2 */
115 :     /* where x[] is a 3-vector consisting of {A, B, C}. */
116 : ulcessvp 11 //
117 : agomez 1 /* The author of this software is M.G. Johnson. */
118 :     /* Permission to use, copy, modify, and distribute this software */
119 :     /* for any purpose without fee is hereby granted, provided that */
120 :     /* this entire notice is included in all copies of any software */
121 :     /* which is or includes a copy or modification of this software */
122 :     /* and in all copies of the supporting documentation for such */
123 :     /* software. THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT */
124 :     /* ANY EXPRESS OR IMPLIED WARRANTY. IN PARTICULAR, NEITHER THE */
125 :     /* AUTHOR NOR AT&T MAKE ANY REPRESENTATION OR WARRANTY OF ANY */
126 :     /* KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS */
127 :     /* FITNESS FOR ANY PARTICULAR PURPOSE. */
128 : ulcessvp 11 //
129 : agomez 1 /* JMB this has been modified to work with the gadget object structure */
130 :     /* This means that the function has been replaced by a call to ecosystem */
131 :     /* object, and we can use the vector objects that have been defined */
132 :    
133 :     #include "gadget.h"
134 :     #include "optinfo.h"
135 :     #include "mathfunc.h"
136 :     #include "doublevector.h"
137 :     #include "intvector.h"
138 :     #include "errorhandler.h"
139 :     #include "ecosystem.h"
140 :     #include "global.h"
141 :    
142 : ulcessvp 11 #ifndef NO_OPENMP
143 :     #include "omp.h"
144 :     #endif
145 :    
146 : agomez 1 extern Ecosystem* EcoSystem;
147 : ulcessvp 11 #ifndef NO_OPENMP
148 :     extern Ecosystem** EcoSystems;
149 :     #endif
150 : agomez 1
151 :     /* given a point, look for a better one nearby, one coord at a time */
152 :     double OptInfoHooke::bestNearby(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
153 :    
154 :     double minf, ftmp;
155 :     int i;
156 :     DoubleVector z(point);
157 :    
158 :     minf = prevbest;
159 : ulcessvp 11 // for (int k=0;k<point.Size(); k++)
160 :     // cout << z[k] ;
161 :     // cout << endl;
162 : agomez 1 for (i = 0; i < point.Size(); i++) {
163 : ulcessvp 11
164 :     // for (int k=0;k<point.Size(); k++)
165 :     // cout << z[k] << " " ;
166 :     //cout << endl;
167 : agomez 1 z[param[i]] = point[param[i]] + delta[param[i]];
168 :     ftmp = EcoSystem->SimulateAndUpdate(z);
169 : ulcessvp 11 // cout << i <<"-z["<< param[i]<<"]:" <<z[param[i]] << " - " << ftmp << endl;
170 : agomez 1 if (ftmp < minf) {
171 :     minf = ftmp;
172 :     } else {
173 :     delta[param[i]] = 0.0 - delta[param[i]];
174 :     z[param[i]] = point[param[i]] + delta[param[i]];
175 :     ftmp = EcoSystem->SimulateAndUpdate(z);
176 :     if (ftmp < minf)
177 :     minf = ftmp;
178 :     else
179 :     z[param[i]] = point[param[i]];
180 :     }
181 : ulcessvp 11 // cout << i <<"-z["<< param[i]<<"]:" <<z[param[i]] << " - " << ftmp <<" - " << prevbest << endl;
182 : agomez 1 }
183 :    
184 :     for (i = 0; i < point.Size(); i++)
185 :     point[i] = z[i];
186 :     return minf;
187 :     }
188 :    
189 : ulcessvp 11 /* given a point, look for a better one nearby, one coord at a time */
190 :     #ifndef NO_OPENMP
191 : ulcessvp 12 /*
192 :     * function bestBeraby parallelized with OpenMP
193 :     * · 2 threads per coord to parallelize the calculation of +delta/-delta
194 :     * · parallelize the calculation of the best nearby of the coord
195 :     */
196 : ulcessvp 11 double OptInfoHooke::bestNearbyOMP(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
197 :     double minf;//, ftmp;
198 :     int i, j, k;
199 :     DoubleVector z(point);
200 :    
201 :     struct Storage {
202 :     DoubleVector z;
203 :     DoubleVector delta;
204 :     double ftmp;
205 :     int iters;
206 :     };
207 :    
208 :     minf = prevbest;
209 :     i = 0;
210 :    
211 :     int paral_tokens, numThr, nvars = point.Size();
212 :     numThr = omp_get_max_threads ( );
213 :    
214 :     Storage* storage = new Storage[numThr];
215 :     if ((numThr % 2) == 0)
216 :     paral_tokens = numThr / 2;
217 :     else {
218 :     return -1;
219 :     }
220 :    
221 :     while ( i < nvars) {
222 :     if ((i + paral_tokens -1) >= nvars)
223 :     paral_tokens = nvars - i;
224 :     omp_set_dynamic(0);
225 :     omp_set_nested(1);
226 :     #pragma omp parallel for num_threads(paral_tokens) private(k)
227 :     for (j = 0; j < paral_tokens; ++j) {
228 :     storage[j].z = z;
229 :     storage[j].delta = delta;
230 :     DoubleVector v1(z);
231 :     DoubleVector v2(z);
232 :     k = param[i+j];
233 :     v1[k] += delta[k];
234 :     v2[k] -= delta[k];
235 :    
236 :     #pragma omp parallel sections num_threads(2)
237 :     {
238 : ulcessvp 12 #pragma omp section
239 : ulcessvp 11 {
240 :     storage[j].ftmp = EcoSystems[j]->SimulateAndUpdate(v1);
241 :     }
242 : ulcessvp 12 #pragma omp section
243 : ulcessvp 11 {
244 :     storage[j+paral_tokens].ftmp = EcoSystems[j+paral_tokens]->SimulateAndUpdate(v2);
245 :     }
246 :     }
247 :    
248 :     if (storage[j].ftmp < minf) {
249 :     storage[j].iters = 1;
250 :     storage[j].z[k] = v1[k];
251 :     } else {
252 :     storage[j].iters = 2;
253 :     storage[j].delta[k] = 0.0 - delta[k];
254 :     if (storage[j+paral_tokens].ftmp < minf) {
255 :     storage[j].ftmp = storage[j+paral_tokens].ftmp;
256 :     storage[j].z[k] = v2[k];
257 :     }
258 :     }
259 :     }
260 :    
261 :     for (j = 0; j < paral_tokens; ++j) {
262 :     i++;
263 :     iters += storage[j].iters;
264 :     if (storage[j].ftmp < minf) {
265 :     minf = storage[j].ftmp;
266 :     z = storage[j].z;
267 :     delta = storage[j].delta;
268 :     break;
269 :     }
270 :     }
271 :     }
272 :    
273 :     for (i = 0; i < nvars; ++i)
274 :     point[i] = z[i];
275 :     return minf;
276 :     }
277 :     #endif
278 :    
/* Hooke & Jeeves direct-search minimisation of the likelihood score. */
/* Operates on the scaled parameter vector: each round probes every coord */
/* with bestNearby (or bestNearbyOMP when OpenMP is enabled), then makes */
/* pattern moves while f keeps improving; the steplength is multiplied by */
/* rho each round until it falls below hookeeps (converged) or the */
/* evaluation budget hookeiter is exhausted. */
void OptInfoHooke::OptimiseLikelihood() {

  double oldf, newf, bestf, steplength, tmp;
  int i, offset;
  int rchange, rcheck, rnumber; //Used to randomise the order of the parameters

  handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);
  DoubleVector trialx(nvars);
  DoubleVector bestx(nvars);
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector init(nvars);
  DoubleVector initialstep(nvars, rho);
  DoubleVector delta(nvars);
  IntVector param(nvars, 0);
  IntVector lbound(nvars, 0);   // counts hits on the lower bound, per coord
  IntVector rbounds(nvars, 0);  // counts hits on the upper bound, per coord
  IntVector trapped(nvars, 0);  // 1 while a coord is stuck at a bound

  EcoSystem->scaleVariables();
#ifndef NO_OPENMP
  // keep every per-thread ecosystem replica consistent with the main one
  int numThr = omp_get_max_threads ( );
  for (i = 0; i < numThr; i++)
    EcoSystems[i]->scaleVariables();
#endif
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    // Scaling the bounds, because the parameters are scaled
    lowerb[i] = lowerb[i] / init[i];
    upperb[i] = upperb[i] / init[i];
    // dividing by a negative init[i] flips the bounds, so reorder them
    if (lowerb[i] > upperb[i]) {
      tmp = lowerb[i];
      lowerb[i] = upperb[i];
      upperb[i] = tmp;
    }

    bestx[i] = x[i];
    trialx[i] = x[i];
    param[i] = i;
    delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
  }

  bestf = EcoSystem->SimulateAndUpdate(trialx);
  if (bestf != bestf) { //check for NaN
    handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
  newf = bestf;
  oldf = bestf;
  steplength = lambda;
  if (isZero(steplength))
    steplength = rho;

  iters = 0;

  while (1) {
    if (isZero(bestf)) {
#ifdef NO_OPENMP
      // serial build: derive iters from the ecosystem's evaluation counter
      iters = EcoSystem->getFuncEval() - offset;
#endif
      handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
      converge = -1;
      return;
    }

    /* randomize the order of the parameters once in a while */
    rchange = 0;
    while (rchange < nvars) {
      rnumber = rand() % nvars;
      rcheck = 1;
      for (i = 0; i < rchange; i++)
        if (param[i] == rnumber)
          rcheck = 0;
      if (rcheck) {
        param[rchange] = rnumber;
        rchange++;
      }
    }

    /* find best new point, one coord at a time */
    for (i = 0; i < nvars; i++)
      trialx[i] = x[i];
#ifndef NO_OPENMP
    newf = this->bestNearbyOMP(delta, trialx, bestf, param);
    // bestNearbyOMP returns -1 when the thread count is not even
    if (newf == -1) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
      return;
    }
#else
    newf = this->bestNearby(delta, trialx, bestf, param);
#endif
    /* if too many function evaluations occur, terminate the algorithm */

#ifdef NO_OPENMP
    iters = EcoSystem->getFuncEval() - offset;
#endif
    if (iters > hookeiter) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

      score = EcoSystem->SimulateAndUpdate(trialx);
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      // un-scale back to the caller's parameter space before storing
      for (i = 0; i < nvars; i++)
        bestx[i] = trialx[i] * init[i];
      EcoSystem->storeVariables(score, bestx);
      return;
    }

    /* if we made some improvements, pursue that direction */
    while (newf < bestf) {
      for (i = 0; i < nvars; i++) {
        /* if it has been trapped but f has now gotten better (bndcheck) */
        /* we assume that we are out of the trap, reset the counters */
        /* and go back to the stepsize we had when we got trapped */
        if ((trapped[i]) && (newf < oldf * bndcheck)) {
          trapped[i] = 0;
          lbound[i] = 0;
          rbounds[i] = 0;
          delta[i] = initialstep[i];

        } else if (trialx[i] < (lowerb[i] + verysmall)) {
          // clamp to the lower bound and remember we are trapped there
          lbound[i]++;
          trialx[i] = lowerb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (lbound[i] >= 2)
            delta[i] /= rho;

        } else if (trialx[i] > (upperb[i] - verysmall)) {
          // clamp to the upper bound and remember we are trapped there
          rbounds[i]++;
          trialx[i] = upperb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (rbounds[i] >= 2)
            delta[i] /= rho;
        }
      }

      for (i = 0; i < nvars; i++) {
        /* firstly, arrange the sign of delta[] */
        if (trialx[i] < x[i])
          delta[i] = 0.0 - fabs(delta[i]);
        else
          delta[i] = fabs(delta[i]);

        /* now, move further in this direction */
        /* (the classic pattern move: reflect trialx through x) */
        tmp = x[i];
        x[i] = trialx[i];
        trialx[i] = trialx[i] + trialx[i] - tmp;
      }

      /* only move forward if this is really an improvement */
      oldf = newf;
      newf = EcoSystem->SimulateAndUpdate(trialx);
#ifndef NO_OPENMP
      iters++;
#endif
      if ((isEqual(newf, oldf)) || (newf > oldf)) {
        newf = oldf; //JMB no improvement, so reset the value of newf
        break;
      }

      /* OK, it's better, so update variables and look around */
      bestf = newf;
      for (i = 0; i < nvars; i++)
        x[i] = trialx[i];

#ifndef NO_OPENMP
      newf = this->bestNearbyOMP(delta, trialx, bestf, param);
      if (newf == -1) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
        return;
      }
#else
      newf = this->bestNearby(delta, trialx, bestf, param);
#endif
      if (isEqual(newf, bestf))
        break;

      /* if too many function evaluations occur, terminate the algorithm */
#ifdef NO_OPENMP
      iters = EcoSystem->getFuncEval() - offset;
#endif
      if (iters > hookeiter) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
        handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
        handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
        handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

        score = EcoSystem->SimulateAndUpdate(trialx);
        handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
        for (i = 0; i < nvars; i++)
          bestx[i] = trialx[i] * init[i];
        EcoSystem->storeVariables(score, bestx);
        return;
      }
    }

#ifdef NO_OPENMP
    iters = EcoSystem->getFuncEval() - offset;
#endif
    if (newf < bestf) {
      // record the new optimum (un-scaled) and report it
      for (i = 0; i < nvars; i++)
        bestx[i] = x[i] * init[i];
      bestf = newf;
      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
      EcoSystem->storeVariables(bestf, bestx);
      EcoSystem->writeBestValues();

    } else
      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");

    /* if the step length is less than hookeeps, terminate the algorithm */
    if (steplength < hookeeps) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      converge = 1;
      score = bestf;
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      EcoSystem->storeVariables(bestf, bestx);
      return;
    }

    steplength *= rho;
    handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
    for (i = 0; i < nvars; i++)
      delta[i] *= rho;
  }
}
534 : ulcessvp 11
535 : ulcessvp 12 /* Functions to perform the parallelization of the algorithm of HJ with OpenMP*/
536 : ulcessvp 11 #ifdef GADGET_OPENMP
537 :     double OptInfoHooke::bestNearbyOMP2(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
538 : ulcessvp 12 double minf;
539 : ulcessvp 11 int i, j, k, ii;
540 :     DoubleVector z(point);
541 :     int bestId = 0;
542 :     struct Storage {
543 :     DoubleVector z;
544 :     DoubleVector delta;
545 :     double ftmp;
546 :     int iters;
547 :     };
548 :    
549 :     minf = prevbest;
550 :    
551 :     int paral_tokens, numThr, nvars = point.Size();
552 :     numThr = omp_get_max_threads ( );
553 :    
554 :     Storage* storage = new Storage[numThr];
555 :     if ((numThr % 2) == 0)
556 :     paral_tokens = numThr / 2;
557 :     else {
558 :     return -1;
559 :     }
560 :    
561 :     for (ii=0; ii< paral_tokens; ii++) {
562 :     i = 0;
563 :     while ( i < nvars) {
564 :     if ((i + paral_tokens -1) >= nvars)
565 :     paral_tokens = nvars - i;
566 :     omp_set_dynamic(0);
567 :     omp_set_nested(1);
568 :     #pragma omp parallel for num_threads(paral_tokens) private(k)
569 :     for (j = 0; j < paral_tokens; ++j) {
570 :     storage[j].z = z;
571 :     storage[j].delta = delta;
572 :     DoubleVector v1(z);
573 :     DoubleVector v2(z);
574 :     k = param[i+j];
575 :     v1[k] += delta[k];
576 :     v2[k] -= delta[k];
577 :    
578 : ulcessvp 12 #pragma omp parallel sections num_threads(2)
579 : ulcessvp 11 {
580 :     #pragma omp section
581 :     {
582 :     storage[j].ftmp = EcoSystems[j]->SimulateAndUpdate(v1);
583 :     }
584 :     #pragma omp section
585 :     {
586 :     storage[j+paral_tokens].ftmp = EcoSystems[j+paral_tokens]->SimulateAndUpdate(v2);
587 :     }
588 :     }
589 :     if (storage[j].ftmp < minf) {
590 :     storage[j].iters = 1;
591 :     storage[j].z[k] = v1[k];
592 :     } else {
593 :     storage[j].iters = 2;
594 :     storage[j].delta[k] = 0.0 - delta[k];
595 :     if (storage[j+paral_tokens].ftmp < minf) {
596 :     storage[j].ftmp = storage[j+paral_tokens].ftmp;
597 :     storage[j].z[k] = v2[k];
598 :     }
599 :     else iters += 2;
600 :     }
601 :     }
602 :    
603 :     bestId = 0;
604 :     for (j = 1; j < paral_tokens; ++j) {
605 :     if (storage[j].ftmp < storage[bestId].ftmp)
606 :     bestId = j;
607 :     }
608 :     if (storage[bestId].ftmp < minf) {
609 :     iters += storage[bestId].iters;
610 :     minf = storage[bestId].ftmp;
611 :     z = storage[bestId].z;
612 :     delta = storage[bestId].delta;
613 :     }
614 :    
615 :     i += paral_tokens;
616 :     }
617 :     }
618 :    
619 :     delete[] storage;
620 :     for (i = 0; i < nvars; ++i)
621 :     point[i] = z[i];
622 :    
623 :     return minf;
624 :     }
625 :    
626 :     void OptInfoHooke::OptimiseLikelihoodOMP() {
627 :     double oldf, newf, bestf, steplength, tmp;
628 :     int i, offset;
629 :     int rchange, rcheck, rnumber; //Used to randomise the order of the parameters
630 :    
631 :     handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
632 :     int nvars = EcoSystem->numOptVariables();
633 :     DoubleVector x(nvars);
634 :     DoubleVector trialx(nvars);
635 :     DoubleVector bestx(nvars);
636 :     DoubleVector lowerb(nvars);
637 :     DoubleVector upperb(nvars);
638 :     DoubleVector init(nvars);
639 :     DoubleVector initialstep(nvars, rho);
640 :     DoubleVector delta(nvars);
641 :     IntVector param(nvars, 0);
642 :     IntVector lbound(nvars, 0);
643 :     IntVector rbounds(nvars, 0);
644 :     IntVector trapped(nvars, 0);
645 :    
646 :     EcoSystem->scaleVariables();
647 :     #ifndef NO_OPENMP
648 :     int numThr = omp_get_max_threads ( );
649 :     for (i = 0; i < numThr; i++)
650 :     EcoSystems[i]->scaleVariables();
651 :     #endif
652 :     EcoSystem->getOptScaledValues(x);
653 :     EcoSystem->getOptLowerBounds(lowerb);
654 :     EcoSystem->getOptUpperBounds(upperb);
655 :     EcoSystem->getOptInitialValues(init);
656 :    
657 :     for (i = 0; i < nvars; i++) {
658 :     // Scaling the bounds, because the parameters are scaled
659 :     lowerb[i] = lowerb[i] / init[i];
660 :     upperb[i] = upperb[i] / init[i];
661 :     if (lowerb[i] > upperb[i]) {
662 :     tmp = lowerb[i];
663 :     lowerb[i] = upperb[i];
664 :     upperb[i] = tmp;
665 :     }
666 :    
667 :     bestx[i] = x[i];
668 :     trialx[i] = x[i];
669 :     param[i] = i;
670 :     delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
671 :     }
672 :    
673 :     bestf = EcoSystem->SimulateAndUpdate(trialx);
674 :     if (bestf != bestf) { //check for NaN
675 :     handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
676 :     converge = -1;
677 :     iters = 1;
678 :     return;
679 :     }
680 :    
681 :     offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
682 :     newf = bestf;
683 :     oldf = bestf;
684 :     steplength = lambda;
685 :     if (isZero(steplength))
686 :     steplength = rho;
687 :    
688 :     iters = 0;
689 :    
690 :     while (1) {
691 :     if (isZero(bestf)) {
692 :     #ifdef NO_OPENMP
693 :     iters = EcoSystem->getFuncEval() - offset;
694 :     #endif
695 :     handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
696 :     converge = -1;
697 :     return;
698 :     }
699 :    
700 :     /* randomize the order of the parameters once in a while */
701 :     rchange = 0;
702 :     while (rchange < nvars) {
703 :     rnumber = rand() % nvars;
704 :     rcheck = 1;
705 :     for (i = 0; i < rchange; i++)
706 :     if (param[i] == rnumber)
707 :     rcheck = 0;
708 :     if (rcheck) {
709 :     param[rchange] = rnumber;
710 :     rchange++;
711 :     }
712 :     }
713 :    
714 :     /* find best new point, one coord at a time */
715 :     for (i = 0; i < nvars; i++)
716 :     trialx[i] = x[i];
717 :     #ifndef NO_OPENMP
718 :     newf = this->bestNearbyOMP2(delta, trialx, bestf, param);
719 :     if (newf == -1) {
720 :     handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
721 :     handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
722 :     return;
723 :     }
724 :     #else
725 :     newf = this->bestNearby(delta, trialx, bestf, param);
726 :     #endif
727 :     /* if too many function evaluations occur, terminate the algorithm */
728 :    
729 :     #ifdef NO_OPENMP
730 :     iters = EcoSystem->getFuncEval() - offset;
731 :     #endif
732 :     if (iters > hookeiter) {
733 :     handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
734 :     handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
735 :     handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
736 :     handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
737 :     handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
738 :    
739 :     score = EcoSystem->SimulateAndUpdate(trialx);
740 :     handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
741 :     for (i = 0; i < nvars; i++)
742 :     bestx[i] = trialx[i] * init[i];
743 :     EcoSystem->storeVariables(score, bestx);
744 :     return;
745 :     }
746 :    
747 :     /* if we made some improvements, pursue that direction */
748 :     while (newf < bestf) {
749 :     for (i = 0; i < nvars; i++) {
750 :     /* if it has been trapped but f has now gotten better (bndcheck) */
751 :     /* we assume that we are out of the trap, reset the counters */
752 :     /* and go back to the stepsize we had when we got trapped */
753 :     if ((trapped[i]) && (newf < oldf * bndcheck)) {
754 :     trapped[i] = 0;
755 :     lbound[i] = 0;
756 :     rbounds[i] = 0;
757 :     delta[i] = initialstep[i];
758 :    
759 :     } else if (trialx[i] < (lowerb[i] + verysmall)) {
760 :     lbound[i]++;
761 :     trialx[i] = lowerb[i];
762 :     if (!trapped[i]) {
763 :     initialstep[i] = delta[i];
764 :     trapped[i] = 1;
765 :     }
766 :     /* if it has hit the bounds 2 times then increase the stepsize */
767 :     if (lbound[i] >= 2)
768 :     delta[i] /= rho;
769 :    
770 :     } else if (trialx[i] > (upperb[i] - verysmall)) {
771 :     rbounds[i]++;
772 :     trialx[i] = upperb[i];
773 :     if (!trapped[i]) {
774 :     initialstep[i] = delta[i];
775 :     trapped[i] = 1;
776 :     }
777 :     /* if it has hit the bounds 2 times then increase the stepsize */
778 :     if (rbounds[i] >= 2)
779 :     delta[i] /= rho;
780 :     }
781 :     }
782 :    
783 :     for (i = 0; i < nvars; i++) {
784 :     /* firstly, arrange the sign of delta[] */
785 :     if (trialx[i] < x[i])
786 :     delta[i] = 0.0 - fabs(delta[i]);
787 :     else
788 :     delta[i] = fabs(delta[i]);
789 :    
790 :     /* now, move further in this direction */
791 :     tmp = x[i];
792 :     x[i] = trialx[i];
793 :     trialx[i] = trialx[i] + trialx[i] - tmp;
794 :     }
795 :    
796 :     /* only move forward if this is really an improvement */
797 :     oldf = newf;
798 :     newf = EcoSystem->SimulateAndUpdate(trialx);
799 :     #ifndef NO_OPENMP
800 :     iters++;
801 :     #endif
802 :     if ((isEqual(newf, oldf)) || (newf > oldf)) {
803 :     newf = oldf; //JMB no improvement, so reset the value of newf
804 :     break;
805 :     }
806 :    
807 :     /* OK, it's better, so update variables and look around */
808 :     bestf = newf;
809 :     for (i = 0; i < nvars; i++)
810 :     x[i] = trialx[i];
811 :    
812 :     #ifndef NO_OPENMP
813 :     newf = this->bestNearbyOMP2(delta, trialx, bestf, param);
814 :     if (newf == -1) {
815 :     handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
816 :     handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
817 :     return;
818 :     }
819 :     #else
820 :     newf = this->bestNearby(delta, trialx, bestf, param);
821 :     #endif
822 :     if (isEqual(newf, bestf))
823 :     break;
824 :    
825 :     /* if too many function evaluations occur, terminate the algorithm */
826 :     #ifdef NO_OPENMP
827 :     iters = EcoSystem->getFuncEval() - offset;
828 :     #endif
829 :     if (iters > hookeiter) {
830 :     handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
831 :     handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
832 :     handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
833 :     handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
834 :     handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
835 :    
836 :     score = EcoSystem->SimulateAndUpdate(trialx);
837 :     handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
838 :     for (i = 0; i < nvars; i++)
839 :     bestx[i] = trialx[i] * init[i];
840 :     EcoSystem->storeVariables(score, bestx);
841 :     return;
842 :     }
843 :     }
844 :    
845 :     #ifdef NO_OPENMP
846 :     iters = EcoSystem->getFuncEval() - offset;
847 :     #endif
848 :     if (newf < bestf) {
849 :     for (i = 0; i < nvars; i++)
850 :     bestx[i] = x[i] * init[i];
851 :     bestf = newf;
852 :     handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
853 :     handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
854 :     EcoSystem->storeVariables(bestf, bestx);
855 :     EcoSystem->writeBestValues();
856 :    
857 :     } else
858 :     handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");
859 :    
860 :     /* if the step length is less than hookeeps, terminate the algorithm */
861 :     if (steplength < hookeeps) {
862 :     handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
863 :     handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
864 :     handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
865 :     handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");
866 :    
867 :     converge = 1;
868 :     score = bestf;
869 :     handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
870 :     EcoSystem->storeVariables(bestf, bestx);
871 :     return;
872 :     }
873 :    
874 :     steplength *= rho;
875 :     handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
876 :     for (i = 0; i < nvars; i++)
877 :     delta[i] *= rho;
878 :     }
879 :     }
880 :     #endif

root@forge.cesga.es
ViewVC Help
Powered by ViewVC 1.0.0  

Powered By FusionForge