Log In | Get Help   
Home My Page Projects Code Snippets Project Openings Mareframe
Summary Activity Forums Tracker Lists Tasks Docs Surveys News SCM Files
[mareframe] Annotation of /trunk/gadget/hooke.cc
[mareframe] / trunk / gadget / hooke.cc Repository:
ViewVC logotype

Annotation of /trunk/gadget/hooke.cc

Parent Directory Parent Directory | Revision Log Revision Log


Revision 11 - (view) (download)

1 : agomez 1 /* Nonlinear Optimization using the algorithm of Hooke and Jeeves */
2 :     /* 12 February 1994 author: Mark G. Johnson */
3 : ulcessvp 11 //
4 : agomez 1 /* Find a point X where the nonlinear function f(X) has a local */
5 :     /* minimum. X is an n-vector and f(X) is a scalar. In mathe- */
6 :     /* matical notation f: R^n -> R^1. The objective function f() */
7 :     /* is not required to be continuous. Nor does f() need to be */
8 :     /* differentiable. The program does not use or require */
9 :     /* derivatives of f(). */
10 : ulcessvp 11 //
11 : agomez 1 /* The software user supplies three things: a subroutine that */
12 :     /* computes f(X), an initial "starting guess" of the minimum point */
13 :     /* X, and values for the algorithm convergence parameters. Then */
14 :     /* the program searches for a local minimum, beginning from the */
15 :     /* starting guess, using the Direct Search algorithm of Hooke and */
16 :     /* Jeeves. */
17 : ulcessvp 11 //
18 : agomez 1 /* This C program is adapted from the Algol pseudocode found in */
19 :     /* "Algorithm 178: Direct Search" by Arthur F. Kaupe Jr., Commun- */
20 :     /* ications of the ACM, Vol 6. p.313 (June 1963). It includes the */
21 :     /* improvements suggested by Bell and Pike (CACM v.9, p. 684, Sept */
22 :     /* 1966) and those of Tomlin and Smith, "Remark on Algorithm 178" */
23 :     /* (CACM v.12). The original paper, which I don't recommend as */
24 :     /* highly as the one by A. Kaupe, is: R. Hooke and T. A. Jeeves, */
25 :     /* "Direct Search Solution of Numerical and Statistical Problems", */
26 :     /* Journal of the ACM, Vol. 8, April 1961, pp. 212-229. */
27 : ulcessvp 11 //
28 : agomez 1 /* Calling sequence: */
29 :     /* int hooke(nvars, startpt, endpt, rho, epsilon, itermax) */
30 :     /* */
31 :     /* nvars {an integer} */
32 :     /* This is the number of dimensions in the domain of f(). */
33 :     /* It is the number of coordinates of the starting point */
34 :     /* (and the minimum point.) */
35 :     /* startpt {an array of doubles} */
36 :     /* This is the user-supplied guess at the minimum. */
37 :     /* endpt {an array of doubles} */
38 :     /* This is the calculated location of the local minimum */
39 :     /* rho {a double} */
40 :     /* This is a user-supplied convergence parameter (more */
41 :     /* detail below), which should be set to a value between */
42 :     /* 0.0 and 1.0. Larger values of rho give greater */
43 :     /* probability of convergence on highly nonlinear */
44 :     /* functions, at a cost of more function evaluations. */
45 :     /* Smaller values of rho reduces the number of evaluations */
46 :     /* (and the program running time), but increases the risk */
47 :     /* of nonconvergence. See below. */
48 :     /* epsilon {a double} */
49 :     /* This is the criterion for halting the search for a */
50 :     /* minimum. When the algorithm begins to make less and */
51 :     /* less progress on each iteration, it checks the halting */
52 :     /* criterion: if the stepsize is below epsilon, terminate */
53 :     /* the iteration and return the current best estimate of */
54 :     /* the minimum. Larger values of epsilon (such as 1.0e-4) */
55 :     /* give quicker running time, but a less accurate estimate */
56 :     /* of the minimum. Smaller values of epsilon (such as */
57 :     /* 1.0e-7) give longer running time, but a more accurate */
58 :     /* estimate of the minimum. */
59 :     /* itermax {an integer} A second, rarely used, halting */
60 :     /* criterion. If the algorithm uses >= itermax */
61 :     /* iterations, halt. */
62 : ulcessvp 11 //
63 : agomez 1 /* The user-supplied objective function f(x,n) should return a C */
64 :     /* "double". Its arguments are x -- an array of doubles, and */
65 :     /* n -- an integer. x is the point at which f(x) should be */
66 :     /* evaluated, and n is the number of coordinates of x. That is, */
67 :     /* n is the number of coefficients being fitted. */
68 : ulcessvp 11 //
69 : agomez 1 /* rho, the algorithm convergence control */
70 : ulcessvp 11 //
71 : agomez 1 /* The algorithm works by taking "steps" from one estimate of */
72 :     /* a minimum, to another (hopefully better) estimate. Taking */
73 :     /* big steps gets to the minimum more quickly, at the risk of */
74 :     /* "stepping right over" an excellent point. The stepsize is */
75 :     /* controlled by a user supplied parameter called rho. At each */
76 :     /* iteration, the stepsize is multiplied by rho (0 < rho < 1), */
77 :     /* so the stepsize is successively reduced. */
78 :     /* Small values of rho correspond to big stepsize changes, */
79 :     /* which make the algorithm run more quickly. However, there */
80 :     /* is a chance (especially with highly nonlinear functions) */
81 :     /* that these big changes will accidentally overlook a */
82 :     /* promising search vector, leading to nonconvergence. */
83 :     /* Large values of rho correspond to small stepsize changes, */
84 :     /* which force the algorithm to carefully examine nearby points */
85 :     /* instead of optimistically forging ahead. This improves the */
86 :     /* probability of convergence. */
87 :     /* The stepsize is reduced until it is equal to (or smaller */
88 :     /* than) epsilon. So the number of iterations performed by */
89 :     /* Hooke-Jeeves is determined by rho and epsilon: */
90 :     /* rho**(number_of_iterations) = epsilon */
91 :     /* In general it is a good idea to set rho to an aggressively */
92 :     /* small value like 0.5 (hoping for fast convergence). Then, */
93 :     /* if the user suspects that the reported minimum is incorrect */
94 :     /* (or perhaps not accurate enough), the program can be run */
95 :     /* again with a larger value of rho such as 0.85, using the */
96 :     /* result of the first minimization as the starting guess to */
97 :     /* begin the second minimization. */
98 : ulcessvp 11 //
99 : agomez 1 /* Normal use: */
100 :     /* (1) Code your function f() in the C language */
101 :     /* (2) Install your starting guess {or read it in} */
102 :     /* (3) Run the program */
103 :     /* (4) {for the skeptical}: Use the computed minimum */
104 :     /* as the starting point for another run */
105 : ulcessvp 11 //
106 : agomez 1 /* Data Fitting: */
107 :     /* Code your function f() to be the sum of the squares of the */
108 :     /* errors (differences) between the computed values and the */
109 :     /* measured values. Then minimize f() using Hooke-Jeeves. */
110 :     /* EXAMPLE: you have 20 datapoints (ti, yi) and you want to */
111 :     /* find A,B,C such that (A*t*t) + (B*exp(t)) + (C*tan(t)) */
112 :     /* fits the data as closely as possible. Then f() is just */
113 :     /* f(x) = SUM (measured_y[i] - ((A*t[i]*t[i]) + (B*exp(t[i])) */
114 :     /* + (C*tan(t[i]))))^2 */
115 :     /* where x[] is a 3-vector consisting of {A, B, C}. */
116 : ulcessvp 11 //
117 : agomez 1 /* The author of this software is M.G. Johnson. */
118 :     /* Permission to use, copy, modify, and distribute this software */
119 :     /* for any purpose without fee is hereby granted, provided that */
120 :     /* this entire notice is included in all copies of any software */
121 :     /* which is or includes a copy or modification of this software */
122 :     /* and in all copies of the supporting documentation for such */
123 :     /* software. THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT */
124 :     /* ANY EXPRESS OR IMPLIED WARRANTY. IN PARTICULAR, NEITHER THE */
125 :     /* AUTHOR NOR AT&T MAKE ANY REPRESENTATION OR WARRANTY OF ANY */
126 :     /* KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS */
127 :     /* FITNESS FOR ANY PARTICULAR PURPOSE. */
128 : ulcessvp 11 //
129 : agomez 1 /* JMB this has been modified to work with the gadget object structure */
130 :     /* This means that the function has been replaced by a call to ecosystem */
131 :     /* object, and we can use the vector objects that have been defined */
132 :    
133 :     #include "gadget.h"
134 :     #include "optinfo.h"
135 :     #include "mathfunc.h"
136 :     #include "doublevector.h"
137 :     #include "intvector.h"
138 :     #include "errorhandler.h"
139 :     #include "ecosystem.h"
140 :     #include "global.h"
141 :    
142 : ulcessvp 11 #ifndef NO_OPENMP
143 :     #include "omp.h"
144 :     #endif
145 :    
146 : agomez 1 extern Ecosystem* EcoSystem;
147 : ulcessvp 11 #ifndef NO_OPENMP
148 :     extern Ecosystem** EcoSystems;
149 :     #endif
150 : agomez 1
151 :     /* given a point, look for a better one nearby, one coord at a time */
152 :     double OptInfoHooke::bestNearby(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
153 :    
154 :     double minf, ftmp;
155 :     int i;
156 :     DoubleVector z(point);
157 :    
158 :     minf = prevbest;
159 : ulcessvp 11 // for (int k=0;k<point.Size(); k++)
160 :     // cout << z[k] ;
161 :     // cout << endl;
162 : agomez 1 for (i = 0; i < point.Size(); i++) {
163 : ulcessvp 11
164 :     // for (int k=0;k<point.Size(); k++)
165 :     // cout << z[k] << " " ;
166 :     //cout << endl;
167 : agomez 1 z[param[i]] = point[param[i]] + delta[param[i]];
168 :     ftmp = EcoSystem->SimulateAndUpdate(z);
169 : ulcessvp 11 // cout << i <<"-z["<< param[i]<<"]:" <<z[param[i]] << " - " << ftmp << endl;
170 : agomez 1 if (ftmp < minf) {
171 :     minf = ftmp;
172 :     } else {
173 :     delta[param[i]] = 0.0 - delta[param[i]];
174 :     z[param[i]] = point[param[i]] + delta[param[i]];
175 :     ftmp = EcoSystem->SimulateAndUpdate(z);
176 :     if (ftmp < minf)
177 :     minf = ftmp;
178 :     else
179 :     z[param[i]] = point[param[i]];
180 :     }
181 : ulcessvp 11 // cout << i <<"-z["<< param[i]<<"]:" <<z[param[i]] << " - " << ftmp <<" - " << prevbest << endl;
182 : agomez 1 }
183 :    
184 :     for (i = 0; i < point.Size(); i++)
185 :     point[i] = z[i];
186 :     return minf;
187 :     }
188 :    
189 : ulcessvp 11 /* given a point, look for a better one nearby, one coord at a time */
190 :     #ifndef NO_OPENMP
191 :     double OptInfoHooke::bestNearbyOMP(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
192 :     //cout << "beastNearbyINI" << endl;
193 :     double minf;//, ftmp;
194 :     int i, j, k;
195 :     DoubleVector z(point);
196 :    
197 :     struct Storage {
198 :     DoubleVector z;
199 :     DoubleVector delta;
200 :     double ftmp;
201 :     int iters;
202 :     };
203 :    
204 :     minf = prevbest;
205 :     i = 0;
206 :    
207 :     int paral_tokens, numThr, nvars = point.Size();
208 :     numThr = omp_get_max_threads ( );
209 :    
210 :     Storage* storage = new Storage[numThr];
211 :     if ((numThr % 2) == 0)
212 :     paral_tokens = numThr / 2;
213 :     else {
214 :     return -1;
215 :     }
216 :    
217 :     while ( i < nvars) {
218 :     // for (int k=0;k<point.Size(); k++)
219 :     // cout << z[k] << " " ;
220 :     // cout << endl;
221 :     if ((i + paral_tokens -1) >= nvars)
222 :     paral_tokens = nvars - i;
223 :     //cout << "i:" << i << endl;
224 :     omp_set_dynamic(0);
225 :     omp_set_nested(1);
226 :     #pragma omp parallel for num_threads(paral_tokens) private(k)
227 :     for (j = 0; j < paral_tokens; ++j) {
228 :     // cout << "thr_for:" << omp_get_num_threads()<<endl;
229 :     // cout << "J:" << j << endl;
230 :     storage[j].z = z;
231 :     storage[j].delta = delta;
232 :     DoubleVector v1(z);
233 :     DoubleVector v2(z);
234 :     k = param[i+j];
235 :     v1[k] += delta[k];
236 :     v2[k] -= delta[k];
237 :    
238 :     #pragma omp parallel sections num_threads(2)
239 :     {
240 :     // cout << "thr_sec:" << omp_get_num_threads()<<endl;
241 :     #pragma omp section
242 :     {
243 :     storage[j].ftmp = EcoSystems[j]->SimulateAndUpdate(v1);
244 :     // cout << "->" << j << endl;
245 :     }
246 :     #pragma omp section
247 :     {
248 :     // cout << "->" << j+paral_tokens << endl;
249 :     storage[j+paral_tokens].ftmp = EcoSystems[j+paral_tokens]->SimulateAndUpdate(v2);
250 :     }
251 :     }
252 :     //cout << "-----" << endl;
253 :     // cout <<i+j<< "-z["<< param[i+j]<<"]:" << (storage[j].z)[param[i+j]] << " - " << storage[j].ftmp << endl;
254 :    
255 :     if (storage[j].ftmp < minf) {
256 :     storage[j].iters = 1;
257 :     storage[j].z[k] = v1[k];
258 :     // storage[j].delta[k] += delta[k];
259 :     // minf = ftmp;
260 :     } else {
261 :     storage[j].iters = 2;
262 :     storage[j].delta[k] = 0.0 - delta[k];
263 :     if (storage[j+paral_tokens].ftmp < minf) {
264 :     // minf = ftmp;
265 :     storage[j].ftmp = storage[j+paral_tokens].ftmp;
266 :     storage[j].z[k] = v2[k];
267 :     }
268 :     // else
269 :     // storage[j].z[k] = point[k];
270 :     }
271 :     }
272 :    
273 :     // cout << i <<"-z["<< param[i]<<"]:" <<z[param[i]] << " - " ;
274 :     for (j = 0; j < paral_tokens; ++j) {
275 :     i++;
276 :     iters += storage[j].iters;
277 :     if (storage[j].ftmp < minf) {
278 :     minf = storage[j].ftmp;
279 :     z = storage[j].z;
280 :     delta = storage[j].delta;
281 :     // cout << storage[j].ftmp;
282 :     break;
283 :     }
284 :     }
285 :     }
286 :    
287 :     // omp_set_num_threads(numThr);
288 :    
289 :     for (i = 0; i < nvars; ++i)
290 :     point[i] = z[i];
291 :     return minf;
292 :     }
293 :     #endif
294 :    
/* Main Hooke & Jeeves driver: repeatedly performs an exploratory search
 * (bestNearby / bestNearbyOMP) followed by pattern moves, shrinking the
 * steplength by rho each round until it falls below hookeeps or the
 * iteration budget (hookeiter) is exhausted.  Results are stored back
 * into the EcoSystem via storeVariables().  Sets the member fields
 * converge, iters and score. */
void OptInfoHooke::OptimiseLikelihood() {

  double oldf, newf, bestf, steplength, tmp;
  int i, offset;
  int rchange, rcheck, rnumber; //Used to randomise the order of the parameters

  handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);            // current accepted point (scaled)
  DoubleVector trialx(nvars);       // candidate point being probed
  DoubleVector bestx(nvars);        // best point found, in UNSCALED units
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector init(nvars);         // initial values used as scaling factors
  DoubleVector initialstep(nvars, rho);
  DoubleVector delta(nvars);        // per-parameter exploratory step
  IntVector param(nvars, 0);        // randomised visiting order
  IntVector lbound(nvars, 0);       // consecutive lower-bound hits
  IntVector rbounds(nvars, 0);      // consecutive upper-bound hits
  IntVector trapped(nvars, 0);      // 1 while a parameter is stuck at a bound

  EcoSystem->scaleVariables();
#ifndef NO_OPENMP
  /* keep every replicated ecosystem consistent with the main one */
  int numThr = omp_get_max_threads ( );
  for (i = 0; i < numThr; i++)
    EcoSystems[i]->scaleVariables();
#endif
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    // Scaling the bounds, because the parameters are scaled
    lowerb[i] = lowerb[i] / init[i];
    upperb[i] = upperb[i] / init[i];
    // a negative init[i] flips the bounds, so swap them back into order
    if (lowerb[i] > upperb[i]) {
      tmp = lowerb[i];
      lowerb[i] = upperb[i];
      upperb[i] = tmp;
    }

    bestx[i] = x[i];
    trialx[i] = x[i];
    param[i] = i;
    //FIXME rand_r - rand() is not reentrant/thread-safe
    delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
  }

  bestf = EcoSystem->SimulateAndUpdate(trialx);
  if (bestf != bestf) { //check for NaN (NaN is the only value != itself)
    handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
  newf = bestf;
  oldf = bestf;
  steplength = lambda;
  if (isZero(steplength))
    steplength = rho;

  /* with OpenMP, iters is counted explicitly by the search routines
   * instead of being derived from EcoSystem->getFuncEval() */
  iters = 0;

  while (1) {
    if (isZero(bestf)) {
#ifdef NO_OPENMP
      iters = EcoSystem->getFuncEval() - offset;
#endif
      handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
      converge = -1;
      return;
    }

    /* randomize the order of the parameters once in a while */
    rchange = 0;
    while (rchange < nvars) {
      //FIXME rand_r - rand() is not reentrant/thread-safe
      rnumber = rand() % nvars;
      rcheck = 1;
      for (i = 0; i < rchange; i++)
        if (param[i] == rnumber)
          rcheck = 0;
      if (rcheck) {
        param[rchange] = rnumber;
        rchange++;
      }
    }

    /* find best new point, one coord at a time */
    for (i = 0; i < nvars; i++)
      trialx[i] = x[i];
#ifndef NO_OPENMP
    newf = this->bestNearbyOMP(delta, trialx, bestf, param);
    if (newf == -1) {   // sentinel: thread count was not a multiple of 2
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
      return;
    }
#else
    newf = this->bestNearby(delta, trialx, bestf, param);
#endif
    /* if too many function evaluations occur, terminate the algorithm */

#ifdef NO_OPENMP
    iters = EcoSystem->getFuncEval() - offset;
#endif
    if (iters > hookeiter) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

      score = EcoSystem->SimulateAndUpdate(trialx);
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      for (i = 0; i < nvars; i++)
        bestx[i] = trialx[i] * init[i];   // unscale before storing
      EcoSystem->storeVariables(score, bestx);
      return;
    }

    /* if we made some improvements, pursue that direction */
    while (newf < bestf) {
      for (i = 0; i < nvars; i++) {
        /* if it has been trapped but f has now gotten better (bndcheck) */
        /* we assume that we are out of the trap, reset the counters */
        /* and go back to the stepsize we had when we got trapped */
        if ((trapped[i]) && (newf < oldf * bndcheck)) {
          trapped[i] = 0;
          lbound[i] = 0;
          rbounds[i] = 0;
          delta[i] = initialstep[i];

        } else if (trialx[i] < (lowerb[i] + verysmall)) {
          lbound[i]++;
          trialx[i] = lowerb[i];   // clamp to the bound
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (lbound[i] >= 2)
            delta[i] /= rho;

        } else if (trialx[i] > (upperb[i] - verysmall)) {
          rbounds[i]++;
          trialx[i] = upperb[i];   // clamp to the bound
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (rbounds[i] >= 2)
            delta[i] /= rho;
        }
      }

      for (i = 0; i < nvars; i++) {
        /* firstly, arrange the sign of delta[] */
        if (trialx[i] < x[i])
          delta[i] = 0.0 - fabs(delta[i]);
        else
          delta[i] = fabs(delta[i]);

        /* now, move further in this direction (the "pattern move":
         * trialx = 2*trialx - x, i.e. extrapolate past the new point) */
        tmp = x[i];
        x[i] = trialx[i];
        trialx[i] = trialx[i] + trialx[i] - tmp;
      }

      /* only move forward if this is really an improvement */
      oldf = newf;
      newf = EcoSystem->SimulateAndUpdate(trialx);
#ifndef NO_OPENMP
      iters++;
#endif
      if ((isEqual(newf, oldf)) || (newf > oldf)) {
        newf = oldf; //JMB no improvement, so reset the value of newf
        break;
      }

      /* OK, it's better, so update variables and look around */
      bestf = newf;
      for (i = 0; i < nvars; i++)
        x[i] = trialx[i];

#ifndef NO_OPENMP
      newf = this->bestNearbyOMP(delta, trialx, bestf, param);
      if (newf == -1) {   // sentinel: thread count was not a multiple of 2
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
        return;
      }
#else
      newf = this->bestNearby(delta, trialx, bestf, param);
#endif
      if (isEqual(newf, bestf))
        break;

      /* if too many function evaluations occur, terminate the algorithm */
#ifdef NO_OPENMP
      iters = EcoSystem->getFuncEval() - offset;
#endif
      if (iters > hookeiter) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
        handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
        handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
        handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

        score = EcoSystem->SimulateAndUpdate(trialx);
        handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
        for (i = 0; i < nvars; i++)
          bestx[i] = trialx[i] * init[i];   // unscale before storing
        EcoSystem->storeVariables(score, bestx);
        return;
      }
    }

#ifdef NO_OPENMP
    iters = EcoSystem->getFuncEval() - offset;
#endif
    if (newf < bestf) {
      for (i = 0; i < nvars; i++)
        bestx[i] = x[i] * init[i];   // unscale before storing
      bestf = newf;
      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
      EcoSystem->storeVariables(bestf, bestx);
      EcoSystem->writeBestValues();

    } else
      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");

    /* if the step length is less than hookeeps, terminate the algorithm */
    if (steplength < hookeeps) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      converge = 1;
      score = bestf;
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      EcoSystem->storeVariables(bestf, bestx);
      return;
    }

    /* no convergence yet: shrink the steps by rho and go around again */
    steplength *= rho;
    handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
    for (i = 0; i < nvars; i++)
      delta[i] *= rho;
  }
}
554 : ulcessvp 11
555 :     #ifdef GADGET_OPENMP
556 :     //TODO doc
557 :     double OptInfoHooke::bestNearbyOMP2(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
558 :     //cout << "beastNearbyINI" << endl;
559 :     double minf;//, ftmp;
560 :     int i, j, k, ii;
561 :     DoubleVector z(point);
562 :     int bestId = 0;
563 :     struct Storage {
564 :     DoubleVector z;
565 :     DoubleVector delta;
566 :     double ftmp;
567 :     int iters;
568 :     };
569 :    
570 :     minf = prevbest;
571 :    
572 :    
573 :     int paral_tokens, numThr, nvars = point.Size();
574 :     numThr = omp_get_max_threads ( );
575 :    
576 :     Storage* storage = new Storage[numThr];
577 :     if ((numThr % 2) == 0)
578 :     paral_tokens = numThr / 2;
579 :     else {
580 :     return -1;
581 :     }
582 :    
583 :     for (ii=0; ii< paral_tokens; ii++) {
584 :     i = 0;
585 :     while ( i < nvars) {
586 :     // for (int k=0;k<point.Size(); k++)
587 :     // cout << z[k] << " " ;
588 :     // cout << endl;
589 :     if ((i + paral_tokens -1) >= nvars)
590 :     paral_tokens = nvars - i;
591 :     //cout << "i:" << i << endl;
592 :     omp_set_dynamic(0);
593 :     omp_set_nested(1);
594 :     #pragma omp parallel for num_threads(paral_tokens) private(k)
595 :     for (j = 0; j < paral_tokens; ++j) {
596 :     // cout << "thr_for:" << omp_get_num_threads()<<endl;
597 :     // cout << "J:" << j << endl;
598 :     storage[j].z = z;
599 :     storage[j].delta = delta;
600 :     DoubleVector v1(z);
601 :     DoubleVector v2(z);
602 :     k = param[i+j];
603 :     v1[k] += delta[k];
604 :     v2[k] -= delta[k];
605 :    
606 :     #pragma omp parallel sections num_threads(2)
607 :     {
608 :     // cout << "thr_sec:" << omp_get_num_threads()<<endl;
609 :     #pragma omp section
610 :     {
611 :     storage[j].ftmp = EcoSystems[j]->SimulateAndUpdate(v1);
612 :     // cout << "->" << j << endl;
613 :     }
614 :     #pragma omp section
615 :     {
616 :     // cout << "->" << j+paral_tokens << endl;
617 :     storage[j+paral_tokens].ftmp = EcoSystems[j+paral_tokens]->SimulateAndUpdate(v2);
618 :     }
619 :     }
620 :     //cout << "-----" << endl;
621 :     // cout <<i+j<< "-z["<< param[i+j]<<"]:" << (storage[j].z)[param[i+j]] << " - " << storage[j].ftmp << endl;
622 :    
623 :     if (storage[j].ftmp < minf) {
624 :     storage[j].iters = 1;
625 :     storage[j].z[k] = v1[k];
626 :     // storage[j].delta[k] += delta[k];
627 :     // minf = ftmp;
628 :     } else {
629 :     storage[j].iters = 2;
630 :     storage[j].delta[k] = 0.0 - delta[k];
631 :     if (storage[j+paral_tokens].ftmp < minf) {
632 :     // minf = ftmp;
633 :     storage[j].ftmp = storage[j+paral_tokens].ftmp;
634 :     storage[j].z[k] = v2[k];
635 :     }
636 :     else iters += 2;
637 :     // storage[j].z[k] = point[k];
638 :     }
639 :     }
640 :    
641 :     // cout << i <<"-z["<< param[i]<<"]:" <<z[param[i]] << " - " ;
642 :     bestId = 0;
643 :     for (j = 1; j < paral_tokens; ++j) {
644 :     if (storage[j].ftmp < storage[bestId].ftmp)
645 :     bestId = j;
646 :     }
647 :     if (storage[bestId].ftmp < minf) {
648 :     iters += storage[bestId].iters;
649 :     minf = storage[bestId].ftmp;
650 :     z = storage[bestId].z;
651 :     delta = storage[bestId].delta;
652 :     // cout << storage[j].ftmp;
653 :    
654 :     }
655 :    
656 :     i += paral_tokens;
657 :     }
658 :     }
659 :    
660 :     // omp_set_num_threads(numThr);
661 :     delete[] storage;
662 :     for (i = 0; i < nvars; ++i)
663 :     point[i] = z[i];
664 :    
665 :     return minf;
666 :     }
667 :    
668 :     void OptInfoHooke::OptimiseLikelihoodOMP() {
669 :     double oldf, newf, bestf, steplength, tmp;
670 :     int i, offset;
671 :     int rchange, rcheck, rnumber; //Used to randomise the order of the parameters
672 :    
673 :     handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
674 :     int nvars = EcoSystem->numOptVariables();
675 :     DoubleVector x(nvars);
676 :     DoubleVector trialx(nvars);
677 :     DoubleVector bestx(nvars);
678 :     DoubleVector lowerb(nvars);
679 :     DoubleVector upperb(nvars);
680 :     DoubleVector init(nvars);
681 :     DoubleVector initialstep(nvars, rho);
682 :     DoubleVector delta(nvars);
683 :     IntVector param(nvars, 0);
684 :     IntVector lbound(nvars, 0);
685 :     IntVector rbounds(nvars, 0);
686 :     IntVector trapped(nvars, 0);
687 :    
688 :     EcoSystem->scaleVariables();
689 :     #ifndef NO_OPENMP
690 :     int numThr = omp_get_max_threads ( );
691 :     for (i = 0; i < numThr; i++)
692 :     EcoSystems[i]->scaleVariables();
693 :     #endif
694 :     EcoSystem->getOptScaledValues(x);
695 :     EcoSystem->getOptLowerBounds(lowerb);
696 :     EcoSystem->getOptUpperBounds(upperb);
697 :     EcoSystem->getOptInitialValues(init);
698 :    
699 :     for (i = 0; i < nvars; i++) {
700 :     // Scaling the bounds, because the parameters are scaled
701 :     lowerb[i] = lowerb[i] / init[i];
702 :     upperb[i] = upperb[i] / init[i];
703 :     if (lowerb[i] > upperb[i]) {
704 :     tmp = lowerb[i];
705 :     lowerb[i] = upperb[i];
706 :     upperb[i] = tmp;
707 :     }
708 :    
709 :     bestx[i] = x[i];
710 :     trialx[i] = x[i];
711 :     param[i] = i;
712 :     //FIXME rand_r
713 :     delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
714 :     }
715 :    
716 :     bestf = EcoSystem->SimulateAndUpdate(trialx);
717 :     if (bestf != bestf) { //check for NaN
718 :     handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
719 :     converge = -1;
720 :     iters = 1;
721 :     return;
722 :     }
723 :    
724 :     offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
725 :     newf = bestf;
726 :     oldf = bestf;
727 :     steplength = lambda;
728 :     if (isZero(steplength))
729 :     steplength = rho;
730 :    
731 :     iters = 0;
732 :    
733 :     while (1) {
734 :     if (isZero(bestf)) {
735 :     #ifdef NO_OPENMP
736 :     iters = EcoSystem->getFuncEval() - offset;
737 :     #endif
738 :     handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
739 :     converge = -1;
740 :     return;
741 :     }
742 :    
743 :     /* randomize the order of the parameters once in a while */
744 :     rchange = 0;
745 :     while (rchange < nvars) {
746 :     //FIXME rand_r
747 :     rnumber = rand() % nvars;
748 :     rcheck = 1;
749 :     for (i = 0; i < rchange; i++)
750 :     if (param[i] == rnumber)
751 :     rcheck = 0;
752 :     if (rcheck) {
753 :     param[rchange] = rnumber;
754 :     rchange++;
755 :     }
756 :     }
757 :    
758 :     /* find best new point, one coord at a time */
759 :     for (i = 0; i < nvars; i++)
760 :     trialx[i] = x[i];
761 :     #ifndef NO_OPENMP
762 :     newf = this->bestNearbyOMP2(delta, trialx, bestf, param);
763 :     if (newf == -1) {
764 :     handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
765 :     handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
766 :     return;
767 :     }
768 :     #else
769 :     newf = this->bestNearby(delta, trialx, bestf, param);
770 :     #endif
771 :     /* if too many function evaluations occur, terminate the algorithm */
772 :    
773 :     #ifdef NO_OPENMP
774 :     iters = EcoSystem->getFuncEval() - offset;
775 :     #endif
776 :     // cout << "newf:" << newf << "-" << iters<<endl;
777 :     if (iters > hookeiter) {
778 :     handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
779 :     handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
780 :     handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
781 :     handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
782 :     handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
783 :    
784 :     score = EcoSystem->SimulateAndUpdate(trialx);
785 :     handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
786 :     for (i = 0; i < nvars; i++)
787 :     bestx[i] = trialx[i] * init[i];
788 :     EcoSystem->storeVariables(score, bestx);
789 :     // EcoSystem->writeOptValuesOMP();
790 :     return;
791 :     }
792 :    
793 :     /* if we made some improvements, pursue that direction */
794 :     while (newf < bestf) {
795 :     for (i = 0; i < nvars; i++) {
796 :     /* if it has been trapped but f has now gotten better (bndcheck) */
797 :     /* we assume that we are out of the trap, reset the counters */
798 :     /* and go back to the stepsize we had when we got trapped */
799 :     if ((trapped[i]) && (newf < oldf * bndcheck)) {
800 :     trapped[i] = 0;
801 :     lbound[i] = 0;
802 :     rbounds[i] = 0;
803 :     delta[i] = initialstep[i];
804 :    
805 :     } else if (trialx[i] < (lowerb[i] + verysmall)) {
806 :     lbound[i]++;
807 :     trialx[i] = lowerb[i];
808 :     if (!trapped[i]) {
809 :     initialstep[i] = delta[i];
810 :     trapped[i] = 1;
811 :     }
812 :     /* if it has hit the bounds 2 times then increase the stepsize */
813 :     if (lbound[i] >= 2)
814 :     delta[i] /= rho;
815 :    
816 :     } else if (trialx[i] > (upperb[i] - verysmall)) {
817 :     rbounds[i]++;
818 :     trialx[i] = upperb[i];
819 :     if (!trapped[i]) {
820 :     initialstep[i] = delta[i];
821 :     trapped[i] = 1;
822 :     }
823 :     /* if it has hit the bounds 2 times then increase the stepsize */
824 :     if (rbounds[i] >= 2)
825 :     delta[i] /= rho;
826 :     }
827 :     }
828 :    
829 :     for (i = 0; i < nvars; i++) {
830 :     /* firstly, arrange the sign of delta[] */
831 :     if (trialx[i] < x[i])
832 :     delta[i] = 0.0 - fabs(delta[i]);
833 :     else
834 :     delta[i] = fabs(delta[i]);
835 :    
836 :     /* now, move further in this direction */
837 :     tmp = x[i];
838 :     x[i] = trialx[i];
839 :     trialx[i] = trialx[i] + trialx[i] - tmp;
840 :     }
841 :    
842 :     /* only move forward if this is really an improvement */
843 :     oldf = newf;
844 :     newf = EcoSystem->SimulateAndUpdate(trialx);
845 :     #ifndef NO_OPENMP
846 :     iters++;
847 :     #endif
848 :     if ((isEqual(newf, oldf)) || (newf > oldf)) {
849 :     newf = oldf; //JMB no improvement, so reset the value of newf
850 :     break;
851 :     }
852 :    
853 :     /* OK, it's better, so update variables and look around */
854 :     bestf = newf;
855 :     for (i = 0; i < nvars; i++)
856 :     x[i] = trialx[i];
857 :    
858 :     #ifndef NO_OPENMP
859 :     newf = this->bestNearbyOMP2(delta, trialx, bestf, param);
860 :     if (newf == -1) {
861 :     handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
862 :     handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
863 :     return;
864 :     }
865 :     #else
866 :     newf = this->bestNearby(delta, trialx, bestf, param);
867 :     #endif
868 :     if (isEqual(newf, bestf))
869 :     break;
870 :    
871 :     /* if too many function evaluations occur, terminate the algorithm */
872 :     #ifdef NO_OPENMP
873 :     iters = EcoSystem->getFuncEval() - offset;
874 :     #endif
875 :     // cout << "newf:" << newf << "-" << iters<<endl;
876 :     if (iters > hookeiter) {
877 :     handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
878 :     handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
879 :     handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
880 :     handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
881 :     handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
882 :    
883 :     score = EcoSystem->SimulateAndUpdate(trialx);
884 :     handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
885 :     for (i = 0; i < nvars; i++)
886 :     bestx[i] = trialx[i] * init[i];
887 :     EcoSystem->storeVariables(score, bestx);
888 :     // EcoSystem->writeOptValuesOMP();
889 :     return;
890 :     }
891 :     }
892 :    
893 :     #ifdef NO_OPENMP
894 :     iters = EcoSystem->getFuncEval() - offset;
895 :     #endif
896 :     if (newf < bestf) {
897 :     for (i = 0; i < nvars; i++)
898 :     bestx[i] = x[i] * init[i];
899 :     bestf = newf;
900 :     handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
901 :     handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
902 :     EcoSystem->storeVariables(bestf, bestx);
903 :     EcoSystem->writeBestValues();
904 :    
905 :     } else
906 :     handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");
907 :    
908 :     /* if the step length is less than hookeeps, terminate the algorithm */
909 :     if (steplength < hookeeps) {
910 :     handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
911 :     handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
912 :     handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
913 :     handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");
914 :    
915 :     converge = 1;
916 :     score = bestf;
917 :     handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
918 :     EcoSystem->storeVariables(bestf, bestx);
919 :     // EcoSystem->writeOptValuesOMP();
920 :     return;
921 :     }
922 :    
923 :     steplength *= rho;
924 :     handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
925 :     for (i = 0; i < nvars; i++)
926 :     delta[i] *= rho;
927 :     }
928 :     }
929 :     #endif

root@forge.cesga.es
ViewVC Help
Powered by ViewVC 1.0.0  

Powered By FusionForge