[mareframe] /trunk/gadget/hooke.cc (revision 1)
/* Nonlinear Optimization using the algorithm of Hooke and Jeeves */
/* 12 February 1994    author: Mark G. Johnson */

/* Find a point X where the nonlinear function f(X) has a local */
/* minimum. X is an n-vector and f(X) is a scalar. In mathematical */
/* notation f: R^n -> R^1. The objective function f() is not */
/* required to be continuous, nor does f() need to be */
/* differentiable. The program does not use or require */
/* derivatives of f(). */

/* The software user supplies three things: a subroutine that */
/* computes f(X), an initial "starting guess" of the minimum point */
/* X, and values for the algorithm convergence parameters. Then */
/* the program searches for a local minimum, beginning from the */
/* starting guess, using the Direct Search algorithm of Hooke and */
/* Jeeves. */

/* This C program is adapted from the Algol pseudocode found in */
/* "Algorithm 178: Direct Search" by Arthur F. Kaupe Jr., */
/* Communications of the ACM, Vol. 6, p. 313 (June 1963). It */
/* includes the improvements suggested by Bell and Pike (CACM v.9, */
/* p. 684, Sept 1966) and those of Tomlin and Smith, "Remark on */
/* Algorithm 178" (CACM v.12). The original paper, which I don't */
/* recommend as highly as the one by A. Kaupe, is: R. Hooke and */
/* T. A. Jeeves, "Direct Search Solution of Numerical and */
/* Statistical Problems", Journal of the ACM, Vol. 8, April 1961, */
/* pp. 212-229. */

/* Calling sequence: */
/*   int hooke(nvars, startpt, endpt, rho, epsilon, itermax) */
/* */
/*   nvars {an integer} */
/*     This is the number of dimensions in the domain of f(). */
/*     It is the number of coordinates of the starting point */
/*     (and the minimum point). */
/*   startpt {an array of doubles} */
/*     This is the user-supplied guess at the minimum. */
/*   endpt {an array of doubles} */
/*     This is the calculated location of the local minimum. */
/*   rho {a double} */
/*     This is a user-supplied convergence parameter (more */
/*     detail below), which should be set to a value between */
/*     0.0 and 1.0. Larger values of rho give a greater */
/*     probability of convergence on highly nonlinear */
/*     functions, at a cost of more function evaluations. */
/*     Smaller values of rho reduce the number of evaluations */
/*     (and the program running time), but increase the risk */
/*     of nonconvergence. See below. */
/*   epsilon {a double} */
/*     This is the criterion for halting the search for a */
/*     minimum. When the algorithm begins to make less and */
/*     less progress on each iteration, it checks the halting */
/*     criterion: if the stepsize is below epsilon, terminate */
/*     the iteration and return the current best estimate of */
/*     the minimum. Larger values of epsilon (such as 1.0e-4) */
/*     give quicker running time, but a less accurate estimate */
/*     of the minimum. Smaller values of epsilon (such as */
/*     1.0e-7) give longer running time, but a more accurate */
/*     estimate of the minimum. */
/*   itermax {an integer} */
/*     A second, rarely used, halting criterion. If the */
/*     algorithm uses >= itermax iterations, halt. */

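/* For illustration only (not code in this file; in gadget the entry */
/* point is OptInfoHooke::OptimiseLikelihood below instead), a call */
/* in the original standalone form might look like this, where the */
/* array size 16 and the parameter values are hypothetical: */
/* */
/*   double startpt[16], endpt[16]; */
/*   int itns; */
/*   ... fill startpt[] with the starting guess ... */
/*   itns = hooke(16, startpt, endpt, 0.5, 1.0e-6, 5000); */
/*   ... endpt[] now holds the estimated minimum point ... */
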
/* The user-supplied objective function f(x,n) should return a C */
/* "double". Its arguments are x -- an array of doubles, and */
/* n -- an integer. x is the point at which f(x) should be */
/* evaluated, and n is the number of coordinates of x. That is, */
/* n is the number of coefficients being fitted. */

/* rho, the algorithm convergence control */

/* The algorithm works by taking "steps" from one estimate of */
/* a minimum to another (hopefully better) estimate. Taking */
/* big steps gets to the minimum more quickly, at the risk of */
/* "stepping right over" an excellent point. The stepsize is */
/* controlled by a user-supplied parameter called rho. At each */
/* iteration, the stepsize is multiplied by rho (0 < rho < 1), */
/* so the stepsize is successively reduced. */
/* Small values of rho correspond to big stepsize changes, */
/* which make the algorithm run more quickly. However, there */
/* is a chance (especially with highly nonlinear functions) */
/* that these big changes will accidentally overlook a */
/* promising search vector, leading to nonconvergence. */
/* Large values of rho correspond to small stepsize changes, */
/* which force the algorithm to carefully examine nearby points */
/* instead of optimistically forging ahead. This improves the */
/* probability of convergence. */
/* The stepsize is reduced until it is equal to (or smaller */
/* than) epsilon. So the number of iterations performed by */
/* Hooke-Jeeves is determined by rho and epsilon: */
/*   rho**(number_of_iterations) = epsilon */
/* In general it is a good idea to set rho to an aggressively */
/* small value like 0.5 (hoping for fast convergence). Then, */
/* if the user suspects that the reported minimum is incorrect */
/* (or perhaps not accurate enough), the program can be run */
/* again with a larger value of rho such as 0.85, using the */
/* result of the first minimization as the starting guess to */
/* begin the second minimization. */
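/* For example (a rough estimate, assuming an initial stepsize of */
/* 1.0): with rho = 0.5 and epsilon = 1.0e-6 the stepsize is halved */
/* about log(1.0e-6) / log(0.5) = 19.9, i.e. roughly 20, times */
/* before the halting criterion is satisfied. */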

/* Normal use: */
/*   (1) Code your function f() in the C language */
/*   (2) Install your starting guess {or read it in} */
/*   (3) Run the program */
/*   (4) {for the skeptical}: Use the computed minimum */
/*       as the starting point for another run */

/* Data Fitting: */
/* Code your function f() to be the sum of the squares of the */
/* errors (differences) between the computed values and the */
/* measured values. Then minimize f() using Hooke-Jeeves. */
/* EXAMPLE: you have 20 datapoints (ti, yi) and you want to */
/* find A, B, C such that (A*t*t) + (B*exp(t)) + (C*tan(t)) */
/* fits the data as closely as possible. Then f() is just */
/*   f(x) = SUM (measured_y[i] - ((A*t[i]*t[i]) + (B*exp(t[i])) */
/*              + (C*tan(t[i]))))^2 */
/* where x[] is a 3-vector consisting of {A, B, C}. */
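/* A minimal sketch of such an f() (illustrative only, not part of */
/* this file; the names t[], measured_y[] and NPTS are hypothetical, */
/* assuming the 20 datapoints are stored in those global arrays): */
/* */
/*   double f(double x[], int n) { */
/*     double sum = 0.0, err; */
/*     int i; */
/*     for (i = 0; i < NPTS; i++) { */
/*       err = measured_y[i] - (x[0] * t[i] * t[i] */
/*             + x[1] * exp(t[i]) + x[2] * tan(t[i])); */
/*       sum += err * err; */
/*     } */
/*     return sum; */
/*   } */
/* */
/* (here x[0], x[1], x[2] play the roles of A, B and C, and n == 3) */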

/* The author of this software is M.G. Johnson. */
/* Permission to use, copy, modify, and distribute this software */
/* for any purpose without fee is hereby granted, provided that */
/* this entire notice is included in all copies of any software */
/* which is or includes a copy or modification of this software */
/* and in all copies of the supporting documentation for such */
/* software. THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT */
/* ANY EXPRESS OR IMPLIED WARRANTY. IN PARTICULAR, NEITHER THE */
/* AUTHOR NOR AT&T MAKE ANY REPRESENTATION OR WARRANTY OF ANY */
/* KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS */
/* FITNESS FOR ANY PARTICULAR PURPOSE. */

/* JMB this has been modified to work with the gadget object structure */
/* This means that the function has been replaced by a call to the */
/* ecosystem object, and we can use the vector objects that have */
/* been defined */

#include "gadget.h"
#include "optinfo.h"
#include "mathfunc.h"
#include "doublevector.h"
#include "intvector.h"
#include "errorhandler.h"
#include "ecosystem.h"
#include "global.h"

extern Ecosystem* EcoSystem;


/* given a point, look for a better one nearby, one coord at a time */
double OptInfoHooke::bestNearby(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {

  double minf, ftmp;
  int i;
  DoubleVector z(point);

  minf = prevbest;
  for (i = 0; i < point.Size(); i++) {
    /* try a step of +delta along this coordinate */
    z[param[i]] = point[param[i]] + delta[param[i]];
    ftmp = EcoSystem->SimulateAndUpdate(z);
    if (ftmp < minf) {
      minf = ftmp;
    } else {
      /* the positive step failed, so try a step of -delta instead */
      delta[param[i]] = 0.0 - delta[param[i]];
      z[param[i]] = point[param[i]] + delta[param[i]];
      ftmp = EcoSystem->SimulateAndUpdate(z);
      if (ftmp < minf)
        minf = ftmp;
      else
        /* neither direction improved, so restore the old value */
        z[param[i]] = point[param[i]];
    }
  }

  for (i = 0; i < point.Size(); i++)
    point[i] = z[i];
  return minf;
}

void OptInfoHooke::OptimiseLikelihood() {

  double oldf, newf, bestf, steplength, tmp;
  int i, offset;
  int rchange, rcheck, rnumber;  //Used to randomise the order of the parameters

  handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);
  DoubleVector trialx(nvars);
  DoubleVector bestx(nvars);
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector init(nvars);
  DoubleVector initialstep(nvars, rho);
  DoubleVector delta(nvars);
  IntVector param(nvars, 0);
  IntVector lbound(nvars, 0);
  IntVector rbounds(nvars, 0);
  IntVector trapped(nvars, 0);

  EcoSystem->scaleVariables();
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    // Scaling the bounds, because the parameters are scaled
    lowerb[i] = lowerb[i] / init[i];
    upperb[i] = upperb[i] / init[i];
    if (lowerb[i] > upperb[i]) {
      tmp = lowerb[i];
      lowerb[i] = upperb[i];
      upperb[i] = tmp;
    }

    bestx[i] = x[i];
    trialx[i] = x[i];
    param[i] = i;
    delta[i] = ((2 * (rand() % 2)) - 1) * rho;  //JMB - randomise the sign
  }

  bestf = EcoSystem->SimulateAndUpdate(trialx);
  if (bestf != bestf) {  //check for NaN, the only value that compares unequal to itself
    handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = NaN");
    converge = -1;
    iters = 1;
    return;
  }

  offset = EcoSystem->getFuncEval();  //number of function evaluations done before loop
  newf = bestf;
  oldf = bestf;
  steplength = lambda;
  if (isZero(steplength))
    steplength = rho;

  while (1) {
    if (isZero(bestf)) {
      iters = EcoSystem->getFuncEval() - offset;
      handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
      converge = -1;
      return;
    }

    /* randomize the order of the parameters once in a while */
    rchange = 0;
    while (rchange < nvars) {
      rnumber = rand() % nvars;
      rcheck = 1;
      for (i = 0; i < rchange; i++)
        if (param[i] == rnumber)
          rcheck = 0;
      if (rcheck) {
        param[rchange] = rnumber;
        rchange++;
      }
    }
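
    /* (An equivalent, single-pass alternative to the rejection loop */
    /* above would be a Fisher-Yates shuffle of param[], sketched */
    /* here with hypothetical int variables j and itmp: */
    /* */
    /*   for (i = nvars - 1; i > 0; i--) { */
    /*     j = rand() % (i + 1); */
    /*     itmp = param[i]; param[i] = param[j]; param[j] = itmp; */
    /*   } */
    /* */
    /* the rejection loop is kept as-is so the behaviour is unchanged) */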

    /* find best new point, one coord at a time */
    for (i = 0; i < nvars; i++)
      trialx[i] = x[i];
    newf = this->bestNearby(delta, trialx, bestf, param);

    /* if too many function evaluations occur, terminate the algorithm */
    iters = EcoSystem->getFuncEval() - offset;
    if (iters > hookeiter) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

      score = EcoSystem->SimulateAndUpdate(trialx);
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      for (i = 0; i < nvars; i++)
        bestx[i] = trialx[i] * init[i];
      EcoSystem->storeVariables(score, bestx);
      return;
    }

    /* if we made some improvements, pursue that direction */
    while (newf < bestf) {
      for (i = 0; i < nvars; i++) {
        /* if it has been trapped but f has now gotten better (bndcheck) */
        /* we assume that we are out of the trap, reset the counters */
        /* and go back to the stepsize we had when we got trapped */
        if ((trapped[i]) && (newf < oldf * bndcheck)) {
          trapped[i] = 0;
          lbound[i] = 0;
          rbounds[i] = 0;
          delta[i] = initialstep[i];

        } else if (trialx[i] < (lowerb[i] + verysmall)) {
          lbound[i]++;
          trialx[i] = lowerb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (lbound[i] >= 2)
            delta[i] /= rho;

        } else if (trialx[i] > (upperb[i] - verysmall)) {
          rbounds[i]++;
          trialx[i] = upperb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (rbounds[i] >= 2)
            delta[i] /= rho;
        }
      }

      for (i = 0; i < nvars; i++) {
        /* firstly, arrange the sign of delta[] */
        if (trialx[i] < x[i])
          delta[i] = 0.0 - fabs(delta[i]);
        else
          delta[i] = fabs(delta[i]);

        /* now, move further in this direction, */
        /* i.e. the new trial point is 2*trialx - x (the pattern move) */
        tmp = x[i];
        x[i] = trialx[i];
        trialx[i] = trialx[i] + trialx[i] - tmp;
      }

      /* only move forward if this is really an improvement */
      oldf = newf;
      newf = EcoSystem->SimulateAndUpdate(trialx);
      if ((isEqual(newf, oldf)) || (newf > oldf)) {
        newf = oldf;  //JMB no improvement, so reset the value of newf
        break;
      }

      /* OK, it's better, so update variables and look around */
      bestf = newf;
      for (i = 0; i < nvars; i++)
        x[i] = trialx[i];
      newf = this->bestNearby(delta, trialx, bestf, param);
      if (isEqual(newf, bestf))
        break;

      /* if too many function evaluations occur, terminate the algorithm */
      iters = EcoSystem->getFuncEval() - offset;
      if (iters > hookeiter) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
        handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
        handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
        handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

        score = EcoSystem->SimulateAndUpdate(trialx);
        handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
        for (i = 0; i < nvars; i++)
          bestx[i] = trialx[i] * init[i];
        EcoSystem->storeVariables(score, bestx);
        return;
      }
    }

    iters = EcoSystem->getFuncEval() - offset;
    if (newf < bestf) {
      for (i = 0; i < nvars; i++)
        bestx[i] = x[i] * init[i];
      bestf = newf;
      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
      EcoSystem->storeVariables(bestf, bestx);
      EcoSystem->writeBestValues();

    } else
      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");

    /* if the step length is less than hookeeps, terminate the algorithm */
    if (steplength < hookeeps) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      converge = 1;
      score = bestf;
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      EcoSystem->storeVariables(bestf, bestx);
      return;
    }

    steplength *= rho;
    handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
    for (i = 0; i < nvars; i++)
      delta[i] *= rho;
  }
}
