1 : |
agomez |
1 |
/* Nonlinear Optimization using the algorithm of Hooke and Jeeves */
|
2 : |
|
|
/* 12 February 1994 author: Mark G. Johnson */
|
3 : |
ulcessvp |
11 |
//
|
4 : |
agomez |
1 |
/* Find a point X where the nonlinear function f(X) has a local */
|
5 : |
|
|
/* minimum. X is an n-vector and f(X) is a scalar. In mathe- */
|
6 : |
|
|
/* matical notation f: R^n -> R^1. The objective function f() */
|
7 : |
|
|
/* is not required to be continuous. Nor does f() need to be */
|
8 : |
|
|
/* differentiable. The program does not use or require */
|
9 : |
|
|
/* derivatives of f(). */
|
10 : |
ulcessvp |
11 |
//
|
11 : |
agomez |
1 |
/* The software user supplies three things: a subroutine that */
|
12 : |
|
|
/* computes f(X), an initial "starting guess" of the minimum point */
|
13 : |
|
|
/* X, and values for the algorithm convergence parameters. Then */
|
14 : |
|
|
/* the program searches for a local minimum, beginning from the */
|
15 : |
|
|
/* starting guess, using the Direct Search algorithm of Hooke and */
|
16 : |
|
|
/* Jeeves. */
|
17 : |
ulcessvp |
11 |
//
|
18 : |
agomez |
1 |
/* This C program is adapted from the Algol pseudocode found in */
|
19 : |
|
|
/* "Algorithm 178: Direct Search" by Arthur F. Kaupe Jr., Commun- */
|
20 : |
|
|
/* ications of the ACM, Vol 6. p.313 (June 1963). It includes the */
|
21 : |
|
|
/* improvements suggested by Bell and Pike (CACM v.9, p. 684, Sept */
|
22 : |
|
|
/* 1966) and those of Tomlin and Smith, "Remark on Algorithm 178" */
|
23 : |
|
|
/* (CACM v.12). The original paper, which I don't recommend as */
|
24 : |
|
|
/* highly as the one by A. Kaupe, is: R. Hooke and T. A. Jeeves, */
|
25 : |
|
|
/* "Direct Search Solution of Numerical and Statistical Problems", */
|
26 : |
|
|
/* Journal of the ACM, Vol. 8, April 1961, pp. 212-229. */
|
27 : |
ulcessvp |
11 |
//
|
28 : |
agomez |
1 |
/* Calling sequence: */
|
29 : |
|
|
/* int hooke(nvars, startpt, endpt, rho, epsilon, itermax) */
|
30 : |
|
|
/* */
|
31 : |
|
|
/* nvars {an integer} */
|
32 : |
|
|
/* This is the number of dimensions in the domain of f(). */
|
33 : |
|
|
/* It is the number of coordinates of the starting point */
|
34 : |
|
|
/* (and the minimum point.) */
|
35 : |
|
|
/* startpt {an array of doubles} */
|
36 : |
|
|
/* This is the user-supplied guess at the minimum. */
|
37 : |
|
|
/* endpt {an array of doubles} */
|
38 : |
|
|
/* This is the calculated location of the local minimum */
|
39 : |
|
|
/* rho {a double} */
|
40 : |
|
|
/* This is a user-supplied convergence parameter (more */
|
41 : |
|
|
/* detail below), which should be set to a value between */
|
42 : |
|
|
/* 0.0 and 1.0. Larger values of rho give greater */
|
43 : |
|
|
/* probability of convergence on highly nonlinear */
|
44 : |
|
|
/* functions, at a cost of more function evaluations. */
|
45 : |
|
|
/* Smaller values of rho reduces the number of evaluations */
|
46 : |
|
|
/* (and the program running time), but increases the risk */
|
47 : |
|
|
/* of nonconvergence. See below. */
|
48 : |
|
|
/* epsilon {a double} */
|
49 : |
|
|
/* This is the criterion for halting the search for a */
|
50 : |
|
|
/* minimum. When the algorithm begins to make less and */
|
51 : |
|
|
/* less progress on each iteration, it checks the halting */
|
52 : |
|
|
/* criterion: if the stepsize is below epsilon, terminate */
|
53 : |
|
|
/* the iteration and return the current best estimate of */
|
54 : |
|
|
/* the minimum. Larger values of epsilon (such as 1.0e-4) */
|
55 : |
|
|
/* give quicker running time, but a less accurate estimate */
|
56 : |
|
|
/* of the minimum. Smaller values of epsilon (such as */
|
57 : |
|
|
/* 1.0e-7) give longer running time, but a more accurate */
|
58 : |
|
|
/* estimate of the minimum. */
|
59 : |
|
|
/* itermax {an integer} A second, rarely used, halting */
|
60 : |
|
|
/* criterion. If the algorithm uses >= itermax */
|
61 : |
|
|
/* iterations, halt. */
|
62 : |
ulcessvp |
11 |
//
|
63 : |
agomez |
1 |
/* The user-supplied objective function f(x,n) should return a C */
|
64 : |
|
|
/* "double". Its arguments are x -- an array of doubles, and */
|
65 : |
|
|
/* n -- an integer. x is the point at which f(x) should be */
|
66 : |
|
|
/* evaluated, and n is the number of coordinates of x. That is, */
|
67 : |
|
|
/* n is the number of coefficients being fitted. */
|
68 : |
ulcessvp |
11 |
//
|
69 : |
agomez |
1 |
/* rho, the algorithm convergence control */
|
70 : |
ulcessvp |
11 |
//
|
71 : |
agomez |
1 |
/* The algorithm works by taking "steps" from one estimate of */
|
72 : |
|
|
/* a minimum, to another (hopefully better) estimate. Taking */
|
73 : |
|
|
/* big steps gets to the minimum more quickly, at the risk of */
|
74 : |
|
|
/* "stepping right over" an excellent point. The stepsize is */
|
75 : |
|
|
/* controlled by a user supplied parameter called rho. At each */
|
76 : |
|
|
/* iteration, the stepsize is multiplied by rho (0 < rho < 1), */
|
77 : |
|
|
/* so the stepsize is successively reduced. */
|
78 : |
|
|
/* Small values of rho correspond to big stepsize changes, */
|
79 : |
|
|
/* which make the algorithm run more quickly. However, there */
|
80 : |
|
|
/* is a chance (especially with highly nonlinear functions) */
|
81 : |
|
|
/* that these big changes will accidentally overlook a */
|
82 : |
|
|
/* promising search vector, leading to nonconvergence. */
|
83 : |
|
|
/* Large values of rho correspond to small stepsize changes, */
|
84 : |
|
|
/* which force the algorithm to carefully examine nearby points */
|
85 : |
|
|
/* instead of optimistically forging ahead. This improves the */
|
86 : |
|
|
/* probability of convergence. */
|
87 : |
|
|
/* The stepsize is reduced until it is equal to (or smaller */
|
88 : |
|
|
/* than) epsilon. So the number of iterations performed by */
|
89 : |
|
|
/* Hooke-Jeeves is determined by rho and epsilon: */
|
90 : |
|
|
/* rho**(number_of_iterations) = epsilon */
|
91 : |
|
|
/* In general it is a good idea to set rho to an aggressively */
|
92 : |
|
|
/* small value like 0.5 (hoping for fast convergence). Then, */
|
93 : |
|
|
/* if the user suspects that the reported minimum is incorrect */
|
94 : |
|
|
/* (or perhaps not accurate enough), the program can be run */
|
95 : |
|
|
/* again with a larger value of rho such as 0.85, using the */
|
96 : |
|
|
/* result of the first minimization as the starting guess to */
|
97 : |
|
|
/* begin the second minimization. */
|
98 : |
ulcessvp |
11 |
//
|
99 : |
agomez |
1 |
/* Normal use: */
|
100 : |
|
|
/* (1) Code your function f() in the C language */
|
101 : |
|
|
/* (2) Install your starting guess {or read it in} */
|
102 : |
|
|
/* (3) Run the program */
|
103 : |
|
|
/* (4) {for the skeptical}: Use the computed minimum */
|
104 : |
|
|
/* as the starting point for another run */
|
105 : |
ulcessvp |
11 |
//
|
106 : |
agomez |
1 |
/* Data Fitting: */
|
107 : |
|
|
/* Code your function f() to be the sum of the squares of the */
|
108 : |
|
|
/* errors (differences) between the computed values and the */
|
109 : |
|
|
/* measured values. Then minimize f() using Hooke-Jeeves. */
|
110 : |
|
|
/* EXAMPLE: you have 20 datapoints (ti, yi) and you want to */
|
111 : |
|
|
/* find A,B,C such that (A*t*t) + (B*exp(t)) + (C*tan(t)) */
|
112 : |
|
|
/* fits the data as closely as possible. Then f() is just */
|
113 : |
|
|
/* f(x) = SUM (measured_y[i] - ((A*t[i]*t[i]) + (B*exp(t[i])) */
|
114 : |
|
|
/* + (C*tan(t[i]))))^2 */
|
115 : |
|
|
/* where x[] is a 3-vector consisting of {A, B, C}. */
|
116 : |
ulcessvp |
11 |
//
|
117 : |
agomez |
1 |
/* The author of this software is M.G. Johnson. */
|
118 : |
|
|
/* Permission to use, copy, modify, and distribute this software */
|
119 : |
|
|
/* for any purpose without fee is hereby granted, provided that */
|
120 : |
|
|
/* this entire notice is included in all copies of any software */
|
121 : |
|
|
/* which is or includes a copy or modification of this software */
|
122 : |
|
|
/* and in all copies of the supporting documentation for such */
|
123 : |
|
|
/* software. THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT */
|
124 : |
|
|
/* ANY EXPRESS OR IMPLIED WARRANTY. IN PARTICULAR, NEITHER THE */
|
125 : |
|
|
/* AUTHOR NOR AT&T MAKE ANY REPRESENTATION OR WARRANTY OF ANY */
|
126 : |
|
|
/* KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS */
|
127 : |
|
|
/* FITNESS FOR ANY PARTICULAR PURPOSE. */
|
128 : |
ulcessvp |
11 |
//
|
129 : |
agomez |
1 |
/* JMB this has been modified to work with the gadget object structure */
|
130 : |
|
|
/* This means that the function has been replaced by a call to ecosystem */
|
131 : |
|
|
/* object, and we can use the vector objects that have been defined */
|
132 : |
|
|
|
133 : |
|
|
#include "gadget.h" |
134 : |
|
|
#include "optinfo.h" |
135 : |
|
|
#include "mathfunc.h" |
136 : |
|
|
#include "doublevector.h" |
137 : |
|
|
#include "intvector.h" |
138 : |
|
|
#include "errorhandler.h" |
139 : |
|
|
#include "ecosystem.h" |
140 : |
|
|
#include "global.h" |
141 : |
|
|
|
142 : |
ulcessvp |
16 |
#ifdef _OPENMP
|
143 : |
ulcessvp |
11 |
#include "omp.h"
|
144 : |
|
|
#endif
|
145 : |
|
|
|
146 : |
agomez |
1 |
extern Ecosystem* EcoSystem;
|
147 : |
ulcessvp |
16 |
#ifdef _OPENMP
|
148 : |
ulcessvp |
11 |
extern Ecosystem** EcoSystems;
|
149 : |
|
|
#endif
|
150 : |
agomez |
1 |
|
151 : |
|
|
/* given a point, look for a better one nearby, one coord at a time */
|
152 : |
|
|
double OptInfoHooke::bestNearby(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {
|
153 : |
|
|
|
154 : |
|
|
double minf, ftmp;
|
155 : |
|
|
int i;
|
156 : |
|
|
DoubleVector z(point);
|
157 : |
|
|
|
158 : |
|
|
minf = prevbest;
|
159 : |
|
|
for (i = 0; i < point.Size(); i++) {
|
160 : |
|
|
z[param[i]] = point[param[i]] + delta[param[i]];
|
161 : |
|
|
ftmp = EcoSystem->SimulateAndUpdate(z);
|
162 : |
|
|
if (ftmp < minf) {
|
163 : |
|
|
minf = ftmp;
|
164 : |
|
|
} else {
|
165 : |
|
|
delta[param[i]] = 0.0 - delta[param[i]];
|
166 : |
|
|
z[param[i]] = point[param[i]] + delta[param[i]];
|
167 : |
|
|
ftmp = EcoSystem->SimulateAndUpdate(z);
|
168 : |
|
|
if (ftmp < minf)
|
169 : |
|
|
minf = ftmp;
|
170 : |
|
|
else
|
171 : |
|
|
z[param[i]] = point[param[i]];
|
172 : |
|
|
}
|
173 : |
|
|
}
|
174 : |
|
|
|
175 : |
|
|
for (i = 0; i < point.Size(); i++)
|
176 : |
|
|
point[i] = z[i];
|
177 : |
|
|
return minf;
|
178 : |
|
|
}
|
179 : |
|
|
|
180 : |
ulcessvp |
11 |
/* given a point, look for a better one nearby, one coord at a time */
|
181 : |
ulcessvp |
16 |
#ifdef _OPENMP
|
182 : |
ulcessvp |
12 |
/*
|
183 : |
|
|
* function bestBeraby parallelized with OpenMP
|
184 : |
|
|
* · 2 threads per coord to parallelize the calculation of +delta/-delta
|
185 : |
|
|
* · parallelize the calculation of the best nearby of the coord
|
186 : |
|
|
*/
|
187 : |
ulcessvp |
15 |
/* Parallel (OpenMP) version of bestNearby that reproduces the serial search
 * order: each coordinate gets a pair of threads, one evaluating +delta and
 * one evaluating -delta, and paral_tokens coordinates are examined per
 * parallel region.  The first coordinate (in param order) that improves the
 * best score wins, matching the serial algorithm's "accept the first
 * improvement" behaviour.  On return, point holds the best point found and
 * the function returns its objective value; the member counter `iters` is
 * advanced by the number of function evaluations consumed.
 *
 * Requires an even number of OpenMP threads (one +/- pair per coordinate);
 * returns -1 otherwise so the caller can abort with a clear message.
 *
 * Fix over the previous revision: the odd-thread-count check now happens
 * BEFORE the Storage array is allocated, so the early `return -1` no longer
 * leaks the allocation. */
double OptInfoHooke::bestNearbyRepro(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {

  double minf;
  int i, j, k;
  DoubleVector z(point);

  /* per-thread scratch: candidate point, (possibly sign-flipped) step,
   * objective value, and the number of evaluations the candidate cost */
  struct Storage {
    DoubleVector z;
    DoubleVector delta;
    double ftmp;
    int iters;
  };

  minf = prevbest;
  i = 0;

  int paral_tokens, numThr, nvars = point.Size();
  numThr = omp_get_max_threads ( );

  if ((numThr % 2) != 0) {
    /* need one +delta/-delta thread pair per coordinate; checked before
     * allocating so nothing leaks on this error path */
    return -1;
  }
  paral_tokens = numThr / 2;
  Storage* storage = new Storage[numThr];

  while ( i < nvars) {
    /* shrink the batch near the end so we never index past param */
    if ((i + paral_tokens -1) >= nvars)
      paral_tokens = nvars - i;
#pragma omp parallel for num_threads(paral_tokens*2) private(k) //parallelize the parameters (numThr)
    for (j = 0; j < (paral_tokens*2); ++j) {
      storage[j].z = z;
      storage[j].delta = delta;
      DoubleVector v(z);  /* thread-private candidate */

      if (j < paral_tokens) {
        /* first half of the threads probe +delta */
        k = param[i+j];
        v[k] += delta[k];
      } else {
        /* second half probe -delta for the same coordinates */
        k = param[i+j-paral_tokens];
        v[k] -= delta[k];
      }

      storage[j].ftmp = EcoSystems[j]->SimulateAndUpdate(v);
      storage[j].z[k] = v[k];
    }

    /* merge each +/- pair: prefer +delta; otherwise record the sign flip
     * and take the -delta result if it improves */
    for (j = 0; j < paral_tokens; ++j) {
      k = param[i+j];
      if (storage[j].ftmp < minf) {
        storage[j].iters = 1;
      } else {
        storage[j].iters = 2;
        storage[j].delta[k] = 0.0 - delta[k];
        if (storage[j+paral_tokens].ftmp < minf) {
          storage[j].ftmp = storage[j+paral_tokens].ftmp;
          storage[j].z[k] = storage[j+paral_tokens].z[k];
        }
      }
    }

    /* accept the FIRST improving coordinate (serial order), charging the
     * evaluations of every coordinate inspected along the way */
    for (j = 0; j < paral_tokens; ++j) {
      i++;
      iters += storage[j].iters;
      if (storage[j].ftmp < minf) {
        minf = storage[j].ftmp;
        z = storage[j].z;
        delta = storage[j].delta;
        break;
      }
    }
  }
  delete[] storage;
  for (i = 0; i < nvars; ++i)
    point[i] = z[i];
  return minf;
}
|
266 : |
|
|
#endif
|
267 : |
|
|
|
268 : |
agomez |
1 |
/* Run the Hooke & Jeeves direct-search optimisation on the global EcoSystem.
 * Works on parameters scaled by their initial values (so bounds are scaled
 * too), alternates exploratory moves (bestNearby / bestNearbyRepro) with
 * pattern moves, and shrinks the steplength by rho until it drops below
 * hookeeps (converged) or the evaluation budget hookeiter is exhausted.
 * Results are written back through EcoSystem->storeVariables(). */
void OptInfoHooke::OptimiseLikelihood() {

  double oldf, newf, bestf, steplength, tmp;
  int i, offset;
  int rchange, rcheck, rnumber; //Used to randomise the order of the parameters

  handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);            // current accepted point (scaled)
  DoubleVector trialx(nvars);       // candidate produced by the pattern move
  DoubleVector bestx(nvars);        // best point found, unscaled for storage
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector init(nvars);         // initial values used as scaling factors
  DoubleVector initialstep(nvars, rho);  // stepsize remembered when a coord gets trapped
  DoubleVector delta(nvars);        // per-coordinate exploratory step
  IntVector param(nvars, 0);        // randomised visiting order of the parameters
  IntVector lbound(nvars, 0);       // consecutive lower-bound hits per coord
  IntVector rbounds(nvars, 0);      // consecutive upper-bound hits per coord
  IntVector trapped(nvars, 0);      // 1 while a coord is stuck at a bound

  EcoSystem->scaleVariables();
#ifdef _OPENMP
  int numThr = omp_get_max_threads ( );
  for (i = 0; i < numThr; i++) // scale the variables for the ecosystem of every thread
    EcoSystems[i]->scaleVariables();
#endif
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    // Scaling the bounds, because the parameters are scaled
    lowerb[i] = lowerb[i] / init[i];
    upperb[i] = upperb[i] / init[i];
    // dividing by a negative init[i] flips the bounds, so re-order them
    if (lowerb[i] > upperb[i]) {
      tmp = lowerb[i];
      lowerb[i] = upperb[i];
      upperb[i] = tmp;
    }

    bestx[i] = x[i];
    trialx[i] = x[i];
    param[i] = i;
    delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
  }

  bestf = EcoSystem->SimulateAndUpdate(trialx);
  if (bestf != bestf) { //check for NaN
    handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
  newf = bestf;
  oldf = bestf;
  steplength = lambda;
  if (isZero(steplength))
    steplength = rho;

  // under OpenMP the parallel helpers count evaluations into `iters`
  // themselves; the serial build recomputes it from getFuncEval() below
  iters = 0;

  while (1) {
    if (isZero(bestf)) {
#ifndef _OPENMP
      iters = EcoSystem->getFuncEval() - offset;
#endif
      handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
      converge = -1;
      return;
    }

    /* randomize the order of the parameters once in a while */
    rchange = 0;
    while (rchange < nvars) {
      rnumber = rand() % nvars;
      rcheck = 1;
      for (i = 0; i < rchange; i++)
        if (param[i] == rnumber)
          rcheck = 0;
      if (rcheck) {
        param[rchange] = rnumber;
        rchange++;
      }
    }

    /* find best new point, one coord at a time */
    for (i = 0; i < nvars; i++)
      trialx[i] = x[i];
#ifdef _OPENMP
    newf = this->bestNearbyRepro(delta, trialx, bestf, param);
    if (newf == -1) {  // helper refused to run: thread count was odd
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
      return;
    }
#else
    newf = this->bestNearby(delta, trialx, bestf, param);
#endif
    /* if too many function evaluations occur, terminate the algorithm */

#ifndef _OPENMP
    iters = EcoSystem->getFuncEval() - offset;
#endif
    if (iters > hookeiter) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

      score = EcoSystem->SimulateAndUpdate(trialx);
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      for (i = 0; i < nvars; i++)
        bestx[i] = trialx[i] * init[i];  // unscale before storing
      EcoSystem->storeVariables(score, bestx);
      return;
    }

    /* if we made some improvements, pursue that direction */
    while (newf < bestf) {
      for (i = 0; i < nvars; i++) {
        /* if it has been trapped but f has now gotten better (bndcheck) */
        /* we assume that we are out of the trap, reset the counters */
        /* and go back to the stepsize we had when we got trapped */
        if ((trapped[i]) && (newf < oldf * bndcheck)) {
          trapped[i] = 0;
          lbound[i] = 0;
          rbounds[i] = 0;
          delta[i] = initialstep[i];

        } else if (trialx[i] < (lowerb[i] + verysmall)) {
          // clamp to the lower bound and remember the stepsize at trapping time
          lbound[i]++;
          trialx[i] = lowerb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (lbound[i] >= 2)
            delta[i] /= rho;

        } else if (trialx[i] > (upperb[i] - verysmall)) {
          // clamp to the upper bound and remember the stepsize at trapping time
          rbounds[i]++;
          trialx[i] = upperb[i];
          if (!trapped[i]) {
            initialstep[i] = delta[i];
            trapped[i] = 1;
          }
          /* if it has hit the bounds 2 times then increase the stepsize */
          if (rbounds[i] >= 2)
            delta[i] /= rho;
        }
      }

      for (i = 0; i < nvars; i++) {
        /* firstly, arrange the sign of delta[] */
        if (trialx[i] < x[i])
          delta[i] = 0.0 - fabs(delta[i]);
        else
          delta[i] = fabs(delta[i]);

        /* now, move further in this direction */
        tmp = x[i];
        x[i] = trialx[i];
        trialx[i] = trialx[i] + trialx[i] - tmp;  // pattern move: x + (x - x_old)
      }

      /* only move forward if this is really an improvement */
      oldf = newf;
      newf = EcoSystem->SimulateAndUpdate(trialx);
#ifdef _OPENMP
      iters++;
#endif
      if ((isEqual(newf, oldf)) || (newf > oldf)) {
        newf = oldf; //JMB no improvement, so reset the value of newf
        break;
      }

      /* OK, it's better, so update variables and look around */
      bestf = newf;
      for (i = 0; i < nvars; i++)
        x[i] = trialx[i];

#ifdef _OPENMP
      newf = this->bestNearbyRepro(delta, trialx, bestf, param);
      if (newf == -1) {  // helper refused to run: thread count was odd
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
        return;
      }
#else
      newf = this->bestNearby(delta, trialx, bestf, param);
#endif
      if (isEqual(newf, bestf))
        break;

      /* if too many function evaluations occur, terminate the algorithm */
#ifndef _OPENMP
      iters = EcoSystem->getFuncEval() - offset;
#endif
      if (iters > hookeiter) {
        handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
        handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
        handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
        handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
        handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");

        score = EcoSystem->SimulateAndUpdate(trialx);
        handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
        for (i = 0; i < nvars; i++)
          bestx[i] = trialx[i] * init[i];  // unscale before storing
        EcoSystem->storeVariables(score, bestx);
        return;
      }
    } // while (newf < bestf)

#ifndef _OPENMP
    iters = EcoSystem->getFuncEval() - offset;
#endif
    if (newf < bestf) {
      for (i = 0; i < nvars; i++)
        bestx[i] = x[i] * init[i];  // unscale before storing
      bestf = newf;
      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
      EcoSystem->storeVariables(bestf, bestx);
      EcoSystem->writeBestValues();

    } else
      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");

    /* if the step length is less than hookeeps, terminate the algorithm */
    if (steplength < hookeeps) {
      handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      converge = 1;
      score = bestf;
      handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
      EcoSystem->storeVariables(bestf, bestx);
      return;
    }

    // not converged yet: shrink the steplength and all exploratory steps
    steplength *= rho;
    handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
    for (i = 0; i < nvars; i++)
      delta[i] *= rho;
  }
}
|
523 : |
ulcessvp |
11 |
|
524 : |
ulcessvp |
12 |
/* Functions to perform the parallelization of the algorithm of HJ with OpenMP*/
|
525 : |
ulcessvp |
15 |
#ifdef SPECULATIVE
|
526 : |
|
|
/* Speculative parallel (OpenMP) variant of bestNearby.  Like
 * bestNearbyRepro, each coordinate gets a +delta/-delta thread pair and
 * paral_tokens coordinates are evaluated per parallel region, but instead
 * of accepting the first improving coordinate in serial order, the BEST
 * candidate of the whole batch is accepted — a speculative relaxation of
 * the serial search order that allows more work per sweep.  The outer ii
 * loop repeats the sweep paral_tokens times.  On return, point holds the
 * best point found and the function returns its objective value; the
 * member counter `iters` is advanced by the evaluations consumed.
 *
 * Requires an even number of OpenMP threads; returns -1 otherwise.
 *
 * Fix over the previous revision: the odd-thread-count check now happens
 * BEFORE the Storage array is allocated, so the early `return -1` no longer
 * leaks the allocation. */
double OptInfoHooke::bestNearbySpec(DoubleVector& delta, DoubleVector& point, double prevbest, IntVector& param) {

  double minf;
  int i, j, k, ii;
  DoubleVector z(point);
  int bestId = 0;

  /* per-thread scratch: candidate point, (possibly sign-flipped) step,
   * objective value, and the number of evaluations the candidate cost */
  struct Storage {
    DoubleVector z;
    DoubleVector delta;
    double ftmp;
    int iters;
  };

  minf = prevbest;

  int paral_tokens, numThr, nvars = point.Size();
  numThr = omp_get_max_threads ( );

  if ((numThr % 2) != 0) {
    /* need one +delta/-delta thread pair per coordinate; checked before
     * allocating so nothing leaks on this error path */
    return -1;
  }
  paral_tokens = numThr / 2;
  Storage* storage = new Storage[numThr];

  for (ii = 0; ii < paral_tokens; ii++) {
    i = 0;
    while ( i < nvars) {
      /* shrink the batch near the end so we never index past param */
      if ((i + paral_tokens -1) >= nvars)
        paral_tokens = nvars - i;
#pragma omp parallel for num_threads(paral_tokens*2) private(k)
      for (j = 0; j < (paral_tokens*2); ++j) {
        storage[j].z = z;
        storage[j].delta = delta;
        DoubleVector v(z);  /* thread-private candidate */

        if (j < paral_tokens) {
          /* first half of the threads probe +delta */
          k = param[i+j];
          v[k] += delta[k];
        } else {
          /* second half probe -delta for the same coordinates */
          k = param[i+j-paral_tokens];
          v[k] -= delta[k];
        }

        storage[j].ftmp = EcoSystems[j]->SimulateAndUpdate(v);
        storage[j].z[k] = v[k];
      }

      /* merge each +/- pair: prefer +delta; otherwise record the sign flip
       * and take the -delta result if it improves; if neither improved,
       * charge both evaluations immediately */
      for (j = 0; j < paral_tokens; ++j) {
        k = param[i+j];
        if (storage[j].ftmp < minf) {
          storage[j].iters = 1;
        } else {
          storage[j].iters = 2;
          storage[j].delta[k] = 0.0 - delta[k];
          if (storage[j+paral_tokens].ftmp < minf) {
            storage[j].ftmp = storage[j+paral_tokens].ftmp;
            storage[j].z[k] = storage[j+paral_tokens].z[k];
          }
          else iters += 2;
        }
      }

      /* speculative step: pick the single best candidate of the batch */
      bestId = 0;
      for (j = 1; j < paral_tokens; ++j) {
        if (storage[j].ftmp < storage[bestId].ftmp)
          bestId = j;
      }
      if (storage[bestId].ftmp < minf) {
        iters += storage[bestId].iters;
        minf = storage[bestId].ftmp;
        z = storage[bestId].z;
        delta = storage[bestId].delta;
      }

      i += paral_tokens;
    }
    /* restore the batch size before the next sweep (it may have been
     * shrunk at the tail of the parameter list) */
    paral_tokens = numThr / 2;
  }

  delete[] storage;
  for (i = 0; i < nvars; ++i)
    point[i] = z[i];

  return minf;
}
|
615 : |
|
|
|
616 : |
|
|
void OptInfoHooke::OptimiseLikelihoodOMP() {
|
617 : |
|
|
double oldf, newf, bestf, steplength, tmp;
|
618 : |
|
|
int i, offset;
|
619 : |
|
|
int rchange, rcheck, rnumber; //Used to randomise the order of the parameters
|
620 : |
|
|
|
621 : |
|
|
handle.logMessage(LOGINFO, "\nStarting Hooke & Jeeves optimisation algorithm\n");
|
622 : |
|
|
int nvars = EcoSystem->numOptVariables();
|
623 : |
|
|
DoubleVector x(nvars);
|
624 : |
|
|
DoubleVector trialx(nvars);
|
625 : |
|
|
DoubleVector bestx(nvars);
|
626 : |
|
|
DoubleVector lowerb(nvars);
|
627 : |
|
|
DoubleVector upperb(nvars);
|
628 : |
|
|
DoubleVector init(nvars);
|
629 : |
|
|
DoubleVector initialstep(nvars, rho);
|
630 : |
|
|
DoubleVector delta(nvars);
|
631 : |
|
|
IntVector param(nvars, 0);
|
632 : |
|
|
IntVector lbound(nvars, 0);
|
633 : |
|
|
IntVector rbounds(nvars, 0);
|
634 : |
|
|
IntVector trapped(nvars, 0);
|
635 : |
|
|
|
636 : |
|
|
EcoSystem->scaleVariables();
|
637 : |
|
|
int numThr = omp_get_max_threads ( );
|
638 : |
ulcessvp |
14 |
for (i = 0; i < numThr; i++) // scale the variables for the ecosystem of every thread
|
639 : |
ulcessvp |
11 |
EcoSystems[i]->scaleVariables();
|
640 : |
|
|
EcoSystem->getOptScaledValues(x);
|
641 : |
|
|
EcoSystem->getOptLowerBounds(lowerb);
|
642 : |
|
|
EcoSystem->getOptUpperBounds(upperb);
|
643 : |
|
|
EcoSystem->getOptInitialValues(init);
|
644 : |
|
|
|
645 : |
|
|
for (i = 0; i < nvars; i++) {
|
646 : |
|
|
// Scaling the bounds, because the parameters are scaled
|
647 : |
|
|
lowerb[i] = lowerb[i] / init[i];
|
648 : |
|
|
upperb[i] = upperb[i] / init[i];
|
649 : |
|
|
if (lowerb[i] > upperb[i]) {
|
650 : |
|
|
tmp = lowerb[i];
|
651 : |
|
|
lowerb[i] = upperb[i];
|
652 : |
|
|
upperb[i] = tmp;
|
653 : |
|
|
}
|
654 : |
|
|
|
655 : |
|
|
bestx[i] = x[i];
|
656 : |
|
|
trialx[i] = x[i];
|
657 : |
|
|
param[i] = i;
|
658 : |
|
|
delta[i] = ((2 * (rand() % 2)) - 1) * rho; //JMB - randomise the sign
|
659 : |
|
|
}
|
660 : |
|
|
|
661 : |
|
|
bestf = EcoSystem->SimulateAndUpdate(trialx);
|
662 : |
|
|
if (bestf != bestf) { //check for NaN
|
663 : |
|
|
handle.logMessage(LOGINFO, "Error starting Hooke & Jeeves optimisation with f(x) = infinity");
|
664 : |
|
|
converge = -1;
|
665 : |
|
|
iters = 1;
|
666 : |
|
|
return;
|
667 : |
|
|
}
|
668 : |
|
|
|
669 : |
|
|
offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
|
670 : |
|
|
newf = bestf;
|
671 : |
|
|
oldf = bestf;
|
672 : |
|
|
steplength = lambda;
|
673 : |
|
|
if (isZero(steplength))
|
674 : |
|
|
steplength = rho;
|
675 : |
|
|
|
676 : |
|
|
iters = 0;
|
677 : |
|
|
|
678 : |
|
|
while (1) {
|
679 : |
|
|
if (isZero(bestf)) {
|
680 : |
ulcessvp |
16 |
#ifndef _OPENMP
|
681 : |
ulcessvp |
11 |
iters = EcoSystem->getFuncEval() - offset;
|
682 : |
|
|
#endif
|
683 : |
|
|
handle.logMessage(LOGINFO, "Error in Hooke & Jeeves optimisation after", iters, "function evaluations, f(x) = 0");
|
684 : |
|
|
converge = -1;
|
685 : |
|
|
return;
|
686 : |
|
|
}
|
687 : |
|
|
|
688 : |
|
|
/* randomize the order of the parameters once in a while */
|
689 : |
|
|
rchange = 0;
|
690 : |
|
|
while (rchange < nvars) {
|
691 : |
|
|
rnumber = rand() % nvars;
|
692 : |
|
|
rcheck = 1;
|
693 : |
|
|
for (i = 0; i < rchange; i++)
|
694 : |
|
|
if (param[i] == rnumber)
|
695 : |
|
|
rcheck = 0;
|
696 : |
|
|
if (rcheck) {
|
697 : |
|
|
param[rchange] = rnumber;
|
698 : |
|
|
rchange++;
|
699 : |
|
|
}
|
700 : |
|
|
}
|
701 : |
|
|
|
702 : |
|
|
/* find best new point, one coord at a time */
|
703 : |
|
|
for (i = 0; i < nvars; i++)
|
704 : |
|
|
trialx[i] = x[i];
|
705 : |
ulcessvp |
16 |
#ifdef _OPENMP
|
706 : |
ulcessvp |
15 |
newf = this->bestNearbySpec(delta, trialx, bestf, param);
|
707 : |
ulcessvp |
11 |
if (newf == -1) {
|
708 : |
|
|
handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
|
709 : |
|
|
handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
|
710 : |
|
|
return;
|
711 : |
|
|
}
|
712 : |
|
|
#else
|
713 : |
|
|
newf = this->bestNearby(delta, trialx, bestf, param);
|
714 : |
|
|
#endif
|
715 : |
|
|
/* if too many function evaluations occur, terminate the algorithm */
|
716 : |
|
|
|
717 : |
ulcessvp |
16 |
#ifndef _OPENMP
|
718 : |
ulcessvp |
11 |
iters = EcoSystem->getFuncEval() - offset;
|
719 : |
|
|
#endif
|
720 : |
|
|
if (iters > hookeiter) {
|
721 : |
|
|
handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
|
722 : |
|
|
handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
|
723 : |
|
|
handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
|
724 : |
|
|
handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
|
725 : |
|
|
handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
|
726 : |
|
|
|
727 : |
|
|
score = EcoSystem->SimulateAndUpdate(trialx);
|
728 : |
|
|
handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
|
729 : |
|
|
for (i = 0; i < nvars; i++)
|
730 : |
|
|
bestx[i] = trialx[i] * init[i];
|
731 : |
|
|
EcoSystem->storeVariables(score, bestx);
|
732 : |
|
|
return;
|
733 : |
|
|
}
|
734 : |
|
|
|
735 : |
|
|
/* if we made some improvements, pursue that direction */
|
736 : |
|
|
while (newf < bestf) {
|
737 : |
|
|
for (i = 0; i < nvars; i++) {
|
738 : |
|
|
/* if it has been trapped but f has now gotten better (bndcheck) */
|
739 : |
|
|
/* we assume that we are out of the trap, reset the counters */
|
740 : |
|
|
/* and go back to the stepsize we had when we got trapped */
|
741 : |
|
|
if ((trapped[i]) && (newf < oldf * bndcheck)) {
|
742 : |
|
|
trapped[i] = 0;
|
743 : |
|
|
lbound[i] = 0;
|
744 : |
|
|
rbounds[i] = 0;
|
745 : |
|
|
delta[i] = initialstep[i];
|
746 : |
|
|
|
747 : |
|
|
} else if (trialx[i] < (lowerb[i] + verysmall)) {
|
748 : |
|
|
lbound[i]++;
|
749 : |
|
|
trialx[i] = lowerb[i];
|
750 : |
|
|
if (!trapped[i]) {
|
751 : |
|
|
initialstep[i] = delta[i];
|
752 : |
|
|
trapped[i] = 1;
|
753 : |
|
|
}
|
754 : |
|
|
/* if it has hit the bounds 2 times then increase the stepsize */
|
755 : |
|
|
if (lbound[i] >= 2)
|
756 : |
|
|
delta[i] /= rho;
|
757 : |
|
|
|
758 : |
|
|
} else if (trialx[i] > (upperb[i] - verysmall)) {
|
759 : |
|
|
rbounds[i]++;
|
760 : |
|
|
trialx[i] = upperb[i];
|
761 : |
|
|
if (!trapped[i]) {
|
762 : |
|
|
initialstep[i] = delta[i];
|
763 : |
|
|
trapped[i] = 1;
|
764 : |
|
|
}
|
765 : |
|
|
/* if it has hit the bounds 2 times then increase the stepsize */
|
766 : |
|
|
if (rbounds[i] >= 2)
|
767 : |
|
|
delta[i] /= rho;
|
768 : |
|
|
}
|
769 : |
|
|
}
|
770 : |
|
|
|
771 : |
|
|
for (i = 0; i < nvars; i++) {
|
772 : |
|
|
/* firstly, arrange the sign of delta[] */
|
773 : |
|
|
if (trialx[i] < x[i])
|
774 : |
|
|
delta[i] = 0.0 - fabs(delta[i]);
|
775 : |
|
|
else
|
776 : |
|
|
delta[i] = fabs(delta[i]);
|
777 : |
|
|
|
778 : |
|
|
/* now, move further in this direction */
|
779 : |
|
|
tmp = x[i];
|
780 : |
|
|
x[i] = trialx[i];
|
781 : |
|
|
trialx[i] = trialx[i] + trialx[i] - tmp;
|
782 : |
|
|
}
|
783 : |
|
|
|
784 : |
|
|
/* only move forward if this is really an improvement */
|
785 : |
|
|
oldf = newf;
|
786 : |
|
|
newf = EcoSystem->SimulateAndUpdate(trialx);
|
787 : |
ulcessvp |
16 |
#ifdef _OPENMP
|
788 : |
ulcessvp |
11 |
iters++;
|
789 : |
|
|
#endif
|
790 : |
|
|
if ((isEqual(newf, oldf)) || (newf > oldf)) {
|
791 : |
|
|
newf = oldf; //JMB no improvement, so reset the value of newf
|
792 : |
|
|
break;
|
793 : |
|
|
}
|
794 : |
|
|
|
795 : |
|
|
/* OK, it's better, so update variables and look around */
|
796 : |
|
|
bestf = newf;
|
797 : |
|
|
for (i = 0; i < nvars; i++)
|
798 : |
|
|
x[i] = trialx[i];
|
799 : |
|
|
|
800 : |
ulcessvp |
16 |
#ifdef _OPENMP
|
801 : |
ulcessvp |
15 |
newf = this->bestNearbySpec(delta, trialx, bestf, param);
|
802 : |
ulcessvp |
11 |
if (newf == -1) {
|
803 : |
|
|
handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
|
804 : |
|
|
handle.logMessage(LOGINFO, "\nThe number of threads must be a multiple of 2\n");
|
805 : |
|
|
return;
|
806 : |
|
|
}
|
807 : |
|
|
#else
|
808 : |
|
|
newf = this->bestNearby(delta, trialx, bestf, param);
|
809 : |
|
|
#endif
|
810 : |
|
|
if (isEqual(newf, bestf))
|
811 : |
|
|
break;
|
812 : |
|
|
|
813 : |
|
|
/* if too many function evaluations occur, terminate the algorithm */
|
814 : |
ulcessvp |
16 |
#ifndef _OPENMP
|
815 : |
ulcessvp |
11 |
iters = EcoSystem->getFuncEval() - offset;
|
816 : |
|
|
#endif
|
817 : |
|
|
if (iters > hookeiter) {
|
818 : |
|
|
handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
|
819 : |
|
|
handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
|
820 : |
|
|
handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
|
821 : |
|
|
handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
|
822 : |
|
|
handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
|
823 : |
|
|
|
824 : |
|
|
score = EcoSystem->SimulateAndUpdate(trialx);
|
825 : |
|
|
handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
|
826 : |
|
|
for (i = 0; i < nvars; i++)
|
827 : |
|
|
bestx[i] = trialx[i] * init[i];
|
828 : |
|
|
EcoSystem->storeVariables(score, bestx);
|
829 : |
|
|
return;
|
830 : |
|
|
}
|
831 : |
|
|
}
|
832 : |
|
|
|
833 : |
ulcessvp |
16 |
#ifndef _OPENMP
|
834 : |
ulcessvp |
11 |
iters = EcoSystem->getFuncEval() - offset;
|
835 : |
|
|
#endif
|
836 : |
|
|
if (newf < bestf) {
|
837 : |
|
|
for (i = 0; i < nvars; i++)
|
838 : |
|
|
bestx[i] = x[i] * init[i];
|
839 : |
|
|
bestf = newf;
|
840 : |
|
|
handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
|
841 : |
|
|
handle.logMessage(LOGINFO, "The likelihood score is", bestf, "at the point");
|
842 : |
|
|
EcoSystem->storeVariables(bestf, bestx);
|
843 : |
|
|
EcoSystem->writeBestValues();
|
844 : |
|
|
|
845 : |
|
|
} else
|
846 : |
|
|
handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");
|
847 : |
|
|
|
848 : |
|
|
/* if the step length is less than hookeeps, terminate the algorithm */
|
849 : |
|
|
if (steplength < hookeeps) {
|
850 : |
|
|
handle.logMessage(LOGINFO, "\nStopping Hooke & Jeeves optimisation algorithm\n");
|
851 : |
|
|
handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
|
852 : |
|
|
handle.logMessage(LOGINFO, "The steplength was reduced to", steplength);
|
853 : |
|
|
handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");
|
854 : |
|
|
|
855 : |
|
|
converge = 1;
|
856 : |
|
|
score = bestf;
|
857 : |
|
|
handle.logMessage(LOGINFO, "\nHooke & Jeeves finished with a likelihood score of", score);
|
858 : |
|
|
EcoSystem->storeVariables(bestf, bestx);
|
859 : |
|
|
return;
|
860 : |
|
|
}
|
861 : |
|
|
|
862 : |
|
|
steplength *= rho;
|
863 : |
|
|
handle.logMessage(LOGINFO, "Reducing the steplength to", steplength);
|
864 : |
|
|
for (i = 0; i < nvars; i++)
|
865 : |
|
|
delta[i] *= rho;
|
866 : |
|
|
}
|
867 : |
|
|
}
|
868 : |
|
|
#endif
|