/* can be used as a local optimizer for difficult functions. */
/* */
/* The author can be contacted at h2zr1001@vm.cis.smu.edu */
//
/* This file is a translation of a fortran code, which is an example of the*/
/* Corana et al. simulated annealing algorithm for multimodal and robust */
/* optimization as implemented and modified by Goffe et al. */
/* parameters one should adjust to modify the runtime of the */
/* algorithm and its robustness. */
/* 5. Try constraining the algorithm with either LB or UB. */
//
/* Synopsis: */
/* This routine implements the continuous simulated annealing global */
/* optimization algorithm described in Corana et al.'s article */
/* "Simulated Annealing" Algorithm" in the September 1987 (vol. 13, */
/* no. 3, pp. 262-280) issue of the ACM Transactions on Mathematical */
/* Software. */
//
/* A very quick (perhaps too quick) overview of SA: */
/* SA tries to find the global optimum of an N dimensional function. */
/* It moves both up and downhill and as the optimization process */
/* rejections rise. Given the scheme for the selection for VM, VM falls. */
/* Thus, as T declines, VM falls and SA focuses upon the most promising */
/* area for optimization. */
//
/* The importance of the parameter T: */
/* The parameter T is crucial in using SA successfully. It influences */
/* VM, the step length over which the algorithm searches for optima. For */
/* optimizing a function, it is worthwhile to run a trial run first. Set */
/* RT = 1.5 and T = 1.0. With RT > 1.0, the temperature increases and VM */
/* rises as well. Then select the T that produces a large enough VM. */
//
/* For modifications to the algorithm and many details on its use, */
/* (particularly for econometric applications) see Goffe, Ferrier */
/* and Rogers, "Global Optimization of Statistical Functions with */
/* Dallas, TX 75275 */
/* h2zr1001 @ smuvm1 (Bitnet) */
/* h2zr1001 @ vm.cis.smu.edu (Internet) */
//
/* As far as possible, the parameters here have the same name as in */
/* the description of the algorithm on pp. 266-8 of Corana et al. */
//
/* Input Parameters: */
/* Note: The suggested values generally come from Corana et al. To */
/* drastically reduce runtime, see Goffe et al., pp. 17-8 for */
/* of all points are accepted, the input value is not very */
/* important (i.e. if the value is off, SA adjusts VM to the */
/* correct value). (DP(N)) */
//
/* Output Parameters: */
/*  xopt - The variables that optimize the function. (DP(N)) */
/*  fopt - The optimal value of the function. (DP) */
//
/* JMB this has been modified to work with the gadget object structure */
/* This means that the function has been replaced by a call to ecosystem */
/* object, and we can use the vector objects that have been defined */
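//
/* Note on the acceptance rule used throughout this file: a trial point */
/* that improves the function value is always accepted, while a worse */
/* point is accepted with probability exp((trialf - funcval) / T) (the */
/* Metropolis criterion), so uphill moves become rarer as T falls. */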
#include "errorhandler.h"
#include "ecosystem.h"
#include "global.h"
#include "seq_optimize_template.h"
#ifdef GADGET_OPENMP
#include <omp.h>
#endif

extern Ecosystem* EcoSystem;
#ifndef NO_OPENMP
extern Ecosystem** EcoSystems;
#endif
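
// EcoSystem is the model instance used by the sequential code path.
// EcoSystems is expected to hold one Ecosystem replica per OpenMP thread,
// so that each thread can call SimulateAndUpdate() on its own copy.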

/* sequential code replaced by the template in seq_optimize_template.h */
//void OptInfoSimann::OptimiseLikelihood() {
//
//  //set initial values
//  int nacc = 0;         //The number of accepted function evaluations
//  int nrej = 0;         //The number of rejected function evaluations
//  int naccmet = 0;      //The number of metropolis accepted function evaluations
//
//  double tmp, p, pp, ratio, nsdiv;
//  double fopt, funcval, trialf;
//  int a, i, j, k, l, offset, quit;
//  int rchange, rcheck, rnumber;  //Used to randomise the order of the parameters
//
//  handle.logMessage(LOGINFO, "\nStarting Simulated Annealing optimisation algorithm\n");
//  int nvars = EcoSystem->numOptVariables();
//  DoubleVector x(nvars);
//  DoubleVector init(nvars);
//  DoubleVector trialx(nvars, 0.0);
//  DoubleVector bestx(nvars);
//  DoubleVector scalex(nvars);
//  DoubleVector lowerb(nvars);
//  DoubleVector upperb(nvars);
//  DoubleVector fstar(tempcheck);
//  DoubleVector vm(nvars, vminit);
//  IntVector param(nvars, 0);
//  IntVector nacp(nvars, 0);
//
//  EcoSystem->resetVariables();  //JMB need to reset variables in case they have been scaled
//  if (scale)
//    EcoSystem->scaleVariables();
//  EcoSystem->getOptScaledValues(x);
//  EcoSystem->getOptLowerBounds(lowerb);
//  EcoSystem->getOptUpperBounds(upperb);
//  EcoSystem->getOptInitialValues(init);
//
//  for (i = 0; i < nvars; i++) {
//    bestx[i] = x[i];
//    param[i] = i;
//  }
//
//  if (scale) {
//    for (i = 0; i < nvars; i++) {
//      scalex[i] = x[i];
//      // Scaling the bounds, because the parameters are scaled
//      lowerb[i] = lowerb[i] / init[i];
//      upperb[i] = upperb[i] / init[i];
//      if (lowerb[i] > upperb[i]) {
//        tmp = lowerb[i];
//        lowerb[i] = upperb[i];
//        upperb[i] = tmp;
//      }
//    }
//  }
//
//  //funcval is the function value at x
//  funcval = EcoSystem->SimulateAndUpdate(x);
//  if (funcval != funcval) { //check for NaN
//    handle.logMessage(LOGINFO, "Error starting Simulated Annealing optimisation with f(x) = infinity");
//    converge = -1;
//    iters = 1;
//    return;
//  }
//
//  //the function is to be minimised so switch the sign of funcval (and trialf)
//  funcval = -funcval;
//  offset = EcoSystem->getFuncEval();  //number of function evaluations done before loop
//  nacc++;
//  cs /= lratio;  //JMB save processing time
//  nsdiv = 1.0 / ns;
//  fopt = funcval;
//  for (i = 0; i < tempcheck; i++)
//    fstar[i] = funcval;
//
//  //Start the main loop. Note that it terminates if
//  //(i) the algorithm successfully optimises the function or
//  //(ii) there are too many function evaluations
//  while (1) {
//    for (a = 0; a < nt; a++) {
//      //Randomize the order of the parameters once in a while, to avoid
//      //the order having an influence on which changes are accepted
//      rchange = 0;
//      while (rchange < nvars) {
//        rnumber = rand_r(&seedP) % nvars;
//        rcheck = 1;
//        for (i = 0; i < rchange; i++)
//          if (param[i] == rnumber)
//            rcheck = 0;
//        if (rcheck) {
//          param[rchange] = rnumber;
//          rchange++;
//        }
//      }
//
//      for (j = 0; j < ns; j++) {
//        for (l = 0; l < nvars; l++) {
//          //Generate trialx, the trial value of x
//          newValue(nvars, l, param, trialx, x, lowerb, upperb, vm);
////          for (i = 0; i < nvars; i++) {
////            if (i == param[l]) {
////              trialx[i] = x[i] + ((randomNumber() * 2.0) - 1.0) * vm[i];
////
////              //If trialx is out of bounds, try again until we find a point that is OK
////              if ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
////                //JMB - this used to just select a random point between the bounds
////                k = 0;
////                while ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
////                  trialx[i] = x[i] + ((randomNumber() * 2.0) - 1.0) * vm[i];
////                  k++;
////                  if (k > 10)  //we've had 10 tries to find a point neatly, so give up
////                    trialx[i] = lowerb[i] + (upperb[i] - lowerb[i]) * randomNumber();
////                }
////              }
////
////            } else
////              trialx[i] = x[i];
////          }
//
//          //Evaluate the function with the trial point trialx and return as -trialf
//          trialf = EcoSystem->SimulateAndUpdate(trialx);
//          trialf = -trialf;
//
//          //If too many function evaluations occur, terminate the algorithm
//          iters = EcoSystem->getFuncEval() - offset;
//          if (iters > simanniter) {
//            handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n");
//            handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
//            handle.logMessage(LOGINFO, "The temperature was reduced to", t);
//            handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
//            handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
//            handle.logMessage(LOGINFO, "Number of directly accepted points", nacc);
//            handle.logMessage(LOGINFO, "Number of metropolis accepted points", naccmet);
//            handle.logMessage(LOGINFO, "Number of rejected points", nrej);
//
//            score = EcoSystem->SimulateAndUpdate(bestx);
//            handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score);
//            return;
//          }
//          //Accept the new point if the new function value is better
//          if ((trialf - funcval) > verysmall) {
//            for (i = 0; i < nvars; i++)
//              x[i] = trialx[i];
//            funcval = trialf;
//            nacc++;
//            nacp[param[l]]++;  //JMB - not sure about this ...
//
//          } else {
//            //Accept according to metropolis condition
//            p = expRep((trialf - funcval) / t);
//            pp = randomNumber(&seedM);
//            if (pp < p) {
//              //Accept point
//              for (i = 0; i < nvars; i++)
//                x[i] = trialx[i];
//              funcval = trialf;
//              naccmet++;
//              nacp[param[l]]++;
//            } else {
//              //Reject point
//              nrej++;
//            }
//          }
//          // JMB added check for really silly values
//          if (isZero(trialf)) {
//            handle.logMessage(LOGINFO, "Error in Simulated Annealing optimisation after", iters, "function evaluations, f(x) = 0");
//            converge = -1;
//            return;
//          }
//
//          //If greater than any other point, record as new optimum
//          if ((trialf > fopt) && (trialf == trialf)) {
//            for (i = 0; i < nvars; i++)
//              bestx[i] = trialx[i];
//            fopt = trialf;
//
//            if (scale) {
//              for (i = 0; i < nvars; i++)
//                scalex[i] = bestx[i] * init[i];
//              EcoSystem->storeVariables(-fopt, scalex);
//            } else
//              EcoSystem->storeVariables(-fopt, bestx);
//
//            handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
//            handle.logMessage(LOGINFO, "The likelihood score is", -fopt, "at the point");
//            EcoSystem->writeBestValues();
//          }
//        }
//      }
//
//      //Adjust vm so that approximately half of all evaluations are accepted
//      for (i = 0; i < nvars; i++) {
//        ratio = nsdiv * nacp[i];
//        nacp[i] = 0;
//        if (ratio > uratio) {
//          vm[i] = vm[i] * (1.0 + cs * (ratio - uratio));
//        } else if (ratio < lratio) {
//          vm[i] = vm[i] / (1.0 + cs * (lratio - ratio));
//        }
//
//        if (vm[i] < rathersmall)
//          vm[i] = rathersmall;
//        if (vm[i] > (upperb[i] - lowerb[i]))
//          vm[i] = upperb[i] - lowerb[i];
//      }
//    }
//
//    //Check termination criteria
//    for (i = tempcheck - 1; i > 0; i--)
//      fstar[i] = fstar[i - 1];
//    fstar[0] = funcval;
//
//    quit = 0;
//    if (fabs(fopt - funcval) < simanneps) {
//      quit = 1;
//      for (i = 0; i < tempcheck - 1; i++)
//        if (fabs(fstar[i + 1] - fstar[i]) > simanneps)
//          quit = 0;
//    }
//
//    handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");
//
//    //Terminate SA if appropriate
//    if (quit) {
//      handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n");
//      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
//      handle.logMessage(LOGINFO, "The temperature was reduced to", t);
//      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");
//      handle.logMessage(LOGINFO, "Number of directly accepted points", nacc);
//      handle.logMessage(LOGINFO, "Number of metropolis accepted points", naccmet);
//      handle.logMessage(LOGINFO, "Number of rejected points", nrej);
//
//      converge = 1;
//      score = EcoSystem->SimulateAndUpdate(bestx);
//      handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score);
//      return;
//    }
//
//    //If the termination criteria are not met, prepare for another loop.
//    t *= rt;
//    if (t < rathersmall)
//      t = rathersmall;  //JMB make sure temperature doesn't get too small
//
//    handle.logMessage(LOGINFO, "Reducing the temperature to", t);
//    funcval = fopt;
//    for (i = 0; i < nvars; i++)
//      x[i] = bestx[i];
//  }
//}
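
// OptimiseLikelihoodOMP parallelises the inner loop of the algorithm above:
// each step generates numThr candidate points (each one perturbing a different
// parameter), evaluates them in parallel on the per-thread Ecosystem replicas,
// and then runs the usual accept/reject logic on the best of those candidates.
//
// A minimal usage sketch (hypothetical driver code, not part of gadget):
//   omp_set_num_threads(4);            // one Ecosystem replica per thread
//   optinfo.OptimiseLikelihoodOMP();   // assumes EcoSystem and EcoSystems[0..3]
//                                      // have already been initialised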


#ifdef GADGET_OPENMP
void OptInfoSimann::OptimiseLikelihoodOMP() {

  //set initial values
  int nacc = 0;         //The number of accepted function evaluations
  int nrej = 0;         //The number of rejected function evaluations
  int naccmet = 0;      //The number of metropolis accepted function evaluations

  double tmp, p, pp, ratio, nsdiv;
  double fopt, funcval, trialf;
  int a, i, j, k, l, quit;
  int rchange, rcheck, rnumber;  //Used to randomise the order of the parameters

  // store the info of the different threads
  struct Storage {
    DoubleVector trialx;
    double newLikelihood;
  };

  handle.logMessage(LOGINFO, "\nStarting Simulated Annealing optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);
  DoubleVector init(nvars);
  DoubleVector trialx(nvars, 0.0);
  DoubleVector bestx(nvars);
  DoubleVector scalex(nvars);
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector fstar(tempcheck);
  DoubleVector vm(nvars, vminit);
  IntVector param(nvars, 0);
  IntVector nacp(nvars, 0);

  EcoSystem->resetVariables();  //JMB need to reset variables in case they have been scaled
  if (scale) {
    EcoSystem->scaleVariables();
    int numThr = omp_get_max_threads();
    for (i = 0; i < numThr; i++)  // scale the variables for the ecosystem of every thread
      EcoSystems[i]->scaleVariables();
  }
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; ++i) {
    bestx[i] = x[i];
    param[i] = i;
  }

  if (scale) {
    for (i = 0; i < nvars; ++i) {
      scalex[i] = x[i];
      // Scaling the bounds, because the parameters are scaled
      lowerb[i] = lowerb[i] / init[i];
      upperb[i] = upperb[i] / init[i];
      if (lowerb[i] > upperb[i]) {
        tmp = lowerb[i];
        lowerb[i] = upperb[i];
        upperb[i] = tmp;
      }
    }
  }

  //funcval is the function value at x
  funcval = EcoSystem->SimulateAndUpdate(x);
  if (funcval != funcval) { //check for NaN
    handle.logMessage(LOGINFO, "Error starting Simulated Annealing optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  //the function is to be minimised so switch the sign of funcval (and trialf)
  funcval = -funcval;
  nacc++;
  cs /= lratio;  //JMB save processing time
  nsdiv = 1.0 / ns;
  fopt = funcval;
  for (i = 0; i < tempcheck; ++i)
    fstar[i] = funcval;

  int numThr = omp_get_max_threads();
  int bestId = 0;
  int ini = 0;

  Storage* storage = new Storage[numThr];

  for (i = 0; i < numThr; ++i)
    storage[i].trialx = trialx;

  int aux;  //store the number of evaluations that are not useful for the algorithm

  DoubleVector vns(nvars, 0);  //vector of ns
  int ns_ = ceil(numThr / 2.);
  double res;
  aux = 0;
  //Start the main loop. Note that it terminates if
  //(i) the algorithm successfully optimises the function or
  //(ii) there are too many function evaluations
  while (1) {
    for (a = 0; a < nt; ++a) {
      //Randomize the order of the parameters once in a while, to avoid
      //the order having an influence on which changes are accepted
      rchange = 0;
      while (rchange < nvars) {
        rnumber = rand_r(&seedP) % nvars;
        rcheck = 1;
        for (i = 0; i < rchange; ++i)
          if (param[i] == rnumber)
            rcheck = 0;
        if (rcheck) {
          param[rchange] = rnumber;
          rchange++;
        }
      }

      for (j = 0; j < (ns * ns_); ++j) {
        for (l = ini; l < nvars; l += numThr) {
          for (i = 0; i < numThr; ++i) {
            if ((l + i) < nvars)
              newValue(nvars, l + i, param, storage[i].trialx, x, lowerb, upperb, vm);
            else {
              newValue(nvars, ini, param, storage[i].trialx, x, lowerb, upperb, vm);
              ini++;
              if (ini >= nvars)
                ini = 0;
            }
          }

#pragma omp parallel private(res)
          {
            //Evaluate the function with the trial point trialx and return as -trialf
            int id = omp_get_thread_num();
            res = EcoSystems[id]->SimulateAndUpdate(storage[id].trialx);
            storage[id].newLikelihood = -res;
          }
          //best value from omp
          trialf = storage[0].newLikelihood;
          bestId = 0;
          for (i = 0; i < numThr; ++i) {
            if (storage[i].newLikelihood > trialf) {
              trialf = storage[i].newLikelihood;
              bestId = i;
            }
            k = param[(l + i) % nvars];

            if ((storage[i].newLikelihood - funcval) > verysmall) {
              nacp[k]++;
              aux++;
              vns[k]++;
            } else {
              //Accept according to metropolis condition
              p = expRep((storage[i].newLikelihood - funcval) / t);
              pp = randomNumber(&seedM);
              if (pp < p)
                aux++;
              else {
                vns[k]++;
                nrej++;
              }
            }

            if (vns[k] >= ns) {
              ratio = nsdiv * nacp[k];
              nacp[k] = 0;
              if (ratio > uratio) {
                vm[k] = vm[k] * (1.0 + cs * (ratio - uratio));
              } else if (ratio < lratio) {
                vm[k] = vm[k] / (1.0 + cs * (lratio - ratio));
              }
              if (vm[k] < rathersmall)
                vm[k] = rathersmall;
              if (vm[k] > (upperb[k] - lowerb[k]))
                vm[k] = upperb[k] - lowerb[k];
              vns[k] = 0;
            }
          }
          aux--;
          iters = (EcoSystems[bestId]->getFuncEval() * numThr) - aux;
          //If too many function evaluations occur, terminate the algorithm
          if (iters > simanniter) {
            handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n");
            handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
            handle.logMessage(LOGINFO, "The temperature was reduced to", t);
            handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
            handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
            handle.logMessage(LOGINFO, "Number of directly accepted points", nacc);
            handle.logMessage(LOGINFO, "Number of metropolis accepted points", naccmet);
            handle.logMessage(LOGINFO, "Number of rejected points", nrej);

            score = EcoSystem->SimulateAndUpdate(bestx);
            handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score);
            delete[] storage;
            return;
          }

          //Accept the new point if the new function value is better
          if ((trialf - funcval) > verysmall) {
            for (i = 0; i < nvars; ++i)
              x[i] = storage[bestId].trialx[i];
            funcval = trialf;
            nacc++;
          } else {
            //Accept according to metropolis condition
            p = expRep((trialf - funcval) / t);
            pp = randomNumber(&seedP);
            if (pp < p) {
              //Accept point
              for (i = 0; i < nvars; ++i)
                x[i] = storage[bestId].trialx[i];
              funcval = trialf;
              naccmet++;
              nacp[param[(l + bestId) % nvars]]++;
            } else {
              //Reject point
              nrej++;
            }
          }

          // JMB added check for really silly values
          if (isZero(trialf)) {
            handle.logMessage(LOGINFO, "Error in Simulated Annealing optimisation after", iters, "function evaluations, f(x) = 0");
            converge = -1;
            delete[] storage;
            return;
          }

          //If greater than any other point, record as new optimum
          if ((trialf > fopt) && (trialf == trialf)) {
            for (i = 0; i < nvars; ++i)
              bestx[i] = storage[bestId].trialx[i];
            fopt = trialf;

            if (scale) {
              for (i = 0; i < nvars; ++i)
                scalex[i] = bestx[i] * init[i];
              EcoSystem->storeVariables(-fopt, scalex);
            } else
              EcoSystem->storeVariables(-fopt, bestx);

            handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
            handle.logMessage(LOGINFO, "The likelihood score is", -fopt, "at the point");
            EcoSystem->writeBestValues();
          }
        }
      }
    }

    //Check termination criteria
    for (i = tempcheck - 1; i > 0; i--)
      fstar[i] = fstar[i - 1];
    fstar[0] = funcval;

    quit = 0;
    if (fabs(fopt - funcval) < simanneps) {
      quit = 1;
      for (i = 0; i < tempcheck - 1; ++i)
        if (fabs(fstar[i + 1] - fstar[i]) > simanneps)
          quit = 0;
    }

    handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");

    //Terminate SA if appropriate
    if (quit) {
      handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n");
      handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The temperature was reduced to", t);
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");
      handle.logMessage(LOGINFO, "Number of directly accepted points", nacc);
      handle.logMessage(LOGINFO, "Number of metropolis accepted points", naccmet);
      handle.logMessage(LOGINFO, "Number of rejected points", nrej);

      converge = 1;
      score = EcoSystem->SimulateAndUpdate(bestx);
      handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score);
      delete[] storage;
      return;
    }

    //If the termination criteria are not met, prepare for another loop.
    t *= rt;
    if (t < rathersmall)
      t = rathersmall;  //JMB make sure temperature doesn't get too small

    handle.logMessage(LOGINFO, "Reducing the temperature to", t);
    funcval = fopt;
    for (i = 0; i < nvars; ++i)
      x[i] = bestx[i];
  }
}
#endif
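
// newValue perturbs the single parameter selected by param[l] by a uniform
// step of width vm[i], retrying while the point falls outside [lowerb, upperb]
// and giving up after 10 tries in favour of a uniform draw between the bounds.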

// calculate a new point
void OptInfoSimann::newValue(int nvars, int l, IntVector& param, DoubleVector& trialx,
    DoubleVector& x, DoubleVector& lowerb, DoubleVector& upperb, DoubleVector& vm) {
  int i, k;
  for (i = 0; i < nvars; ++i) {
    if (i == param[l]) {
      trialx[i] = x[i] + ((randomNumber(&seed) * 2.0) - 1.0) * vm[i];

      //If trialx is out of bounds, try again until we find a point that is OK
      if ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
        //JMB - this used to just select a random point between the bounds
        k = 0;
        while ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
          trialx[i] = x[i] + ((randomNumber(&seed) * 2.0) - 1.0) * vm[i];
          k++;
          if (k > 10)  //we've had 10 tries to find a point neatly, so give up
            trialx[i] = lowerb[i] + (upperb[i] - lowerb[i]) * randomNumber(&seed);
        }
      }
    } else
      trialx[i] = x[i];
  }
}

/*################################################################################
 * code used in the template (seq_optimize_template.h)
 ################################################################################*/

//Generate trialx, the trial value of x
void newValue(int nvars, int l, IntVector& param, DoubleVector& trialx,
    DoubleVector& x, DoubleVector& lowerb, DoubleVector& upperb, DoubleVector& vm, unsigned* seed) {
  int i, k;
  for (i = 0; i < nvars; ++i) {
    if (i == param[l]) {
      trialx[i] = x[i] + ((randomNumber(seed) * 2.0) - 1.0) * vm[i];

      //If trialx is out of bounds, try again until we find a point that is OK
      if ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
        //JMB - this used to just select a random point between the bounds
        k = 0;
        while ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
          trialx[i] = x[i] + ((randomNumber(seed) * 2.0) - 1.0) * vm[i];
          k++;
          if (k > 10)  //we've had 10 tries to find a point neatly, so give up
            trialx[i] = lowerb[i] + (upperb[i] - lowerb[i]) * randomNumber(seed);
        }
      }
    } else
      trialx[i] = x[i];
  }
}
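
// buildNewParams_f adapts newValue() to the interface used by
// seq_optimize_template.h: the search state (current point, bounds, step
// lengths and random seed) is carried in the Siman object.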

void buildNewParams_f(Siman& seed, DoubleVector& params) {
  newValue(seed.getNvars(), seed.getL(), seed.getParam(), params, seed.getX(),
      seed.getLowerb(), seed.getUpperb(), seed.getVm(), seed.getSeed());
}

/// Represents the function that computes how good the parameters are
double evaluate_f(const DoubleVector& params) {
  double trialf;
#ifndef NO_OPENMP
  int id = omp_get_thread_num();
  trialf = EcoSystems[id]->SimulateAndUpdate(params);
#else
  trialf = EcoSystem->SimulateAndUpdate(params);
#endif
  return -trialf;
}
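
// ControlClass groups the hooks called by the ReproducibleSearch template:
// adjustVm() rescales the step lengths, temperature() cools the schedule,
// optimum() records a new best point, mustAccept() applies the Metropolis
// test, mustTerminate() checks convergence and printResult() writes the
// final report.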

struct ControlClass {

  void adjustVm(Siman& seed) {
    //Adjust vm so that approximately half of all evaluations are accepted
    int i;
    double ratio, nsdiv = seed.getNsdiv();

    DoubleVector vm = seed.getVm();
    DoubleVector upperb = seed.getUpperb();
    DoubleVector lowerb = seed.getLowerb();

    double uratio = seed.getUratio();
    double lratio = seed.getLratio();

    for (i = 0; i < seed.getNvars(); i++) {
      ratio = nsdiv * seed.getNacp(i);
      seed.setNacp(i, 0);
      if (ratio > uratio) {
        vm[i] = vm[i] * (1.0 + seed.getCs() * (ratio - seed.getUratio()));
      } else if (ratio < lratio) {
        vm[i] = vm[i] / (1.0 + seed.getCs() * (seed.getLratio() - ratio));
      }

      if (vm[i] < rathersmall)
        vm[i] = rathersmall;
      if (vm[i] > (upperb[i] - lowerb[i]))
        vm[i] = upperb[i] - lowerb[i];
    }
    seed.setVm(vm);
  }

  void temperature(Siman& seed, DoubleVector& x) {
    int i;
    double t = seed.getT();
    t *= seed.getRt();
    if (t < rathersmall)
      t = rathersmall;  //JMB make sure temperature doesn't get too small

    handle.logMessage(LOGINFO, "Reducing the temperature to", t);
    seed.setT(t);

    DoubleVector* bestx = seed.getBestx();

    for (i = 0; i < seed.getNvars(); i++)
      x[i] = (*bestx)[i];
  }

  void optimum(double trialf, double& fopt, int iters, DoubleVector trialx,
      DoubleVector init, Siman siman) {
    //If greater than any other point, record as new optimum
    int i, nvars = siman.getNvars();
    DoubleVector scalex(nvars);
    DoubleVector* bestx = siman.getBestx();

    if ((trialf > fopt) && (trialf == trialf)) {
      for (i = 0; i < nvars; i++)
        (*bestx)[i] = trialx[i];
      fopt = trialf;

      if (siman.getScale()) {
        for (i = 0; i < nvars; i++)
          scalex[i] = (*bestx)[i] * init[i];
        EcoSystem->storeVariables(-fopt, scalex);
      } else
        EcoSystem->storeVariables(-fopt, (*bestx));

      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", -fopt, "at the point");
      EcoSystem->writeBestValues();
    }
  }

  /**
   @brief Decides whether the current item evaluated must be chosen as new optimum
   @param funcval value for old optimum
   @param trialf value for current item evaluated
   */
  bool mustAccept(double funcval, double trialf, Siman& siman, int iters) {
    //Accept the new point if the new function value is better
    bool ret = true;
    int i;
    int aux = siman.getParam()[siman.getL()];
    if ((trialf - funcval) > verysmall) {
      siman.incrementNacp(aux);
      siman.incrementNacc();
      //JMB - not sure about this ...
    } else {
      double p, pp;
      //Accept according to metropolis condition
      p = expRep((trialf - funcval) / siman.getT());
      pp = randomNumber(siman.getSeedM());
      if (pp < p) {
        //Accept point
        siman.incrementNacp(aux);
        siman.incrementNaccmet();
      } else {
        //Reject point
        ret = false;
        siman.incrementNrej();
      }
    }
    // JMB added check for really silly values
    if (isZero(trialf)) {
      handle.logMessage(LOGINFO, "Error in Simulated Annealing optimisation after",
          iters, "function evaluations, f(x) = 0");
      siman.setConverge(-1);
      return false;
    }

    siman.incrementL();
    if (siman.getL() == 0)
      siman.incrementNS();
    if (siman.getNS() >= siman.getNs()) {
      siman.setNS(0);
      siman.incrementNT();
      adjustVm(siman);
      siman.randomizeParams();
    }

    return ret;
  }

  /**
   @brief Decides whether the search must stop.
   It does not take into account the number of iterations, which is already considered by the template
   @param prev old optimum
   @param funcval new/current optimum
   */
  bool mustTerminate(double prev, double& funcval, Siman& siman, int iters) {
    bool quit = false;
    if (siman.getNT() >= siman.getNt()) {
      int i;
      DoubleVector fstar = siman.getFstar();

      siman.setNT(0);

      for (i = siman.getTempcheck() - 1; i > 0; i--)
        fstar[i] = fstar[i - 1];
      fstar[0] = funcval;

      if (fabs(prev - funcval) < siman.getSimanneps()) {
        quit = true;
        for (i = 0; i < siman.getTempcheck() - 1; i++)
          if (fabs(fstar[i + 1] - fstar[i]) > siman.getSimanneps())
            quit = false;
      }

      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters,
          "function evaluations ...");

      temperature(siman, siman.getX());

      funcval = prev;
    }
    return quit;
  }

  void printResult(bool quit, Siman siman, int iters) {
    double* score = siman.getScore();
    DoubleVector* bestX = siman.getBestx();

    handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n");
    handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
    handle.logMessage(LOGINFO, "The temperature was reduced to", siman.getT());
    if (quit) {
      int* converge = siman.getConverge();
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      *converge = 1;
    } else {
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
    }
    handle.logMessage(LOGINFO, "Number of directly accepted points", siman.getNacc());
    handle.logMessage(LOGINFO, "Number of metropolis accepted points", siman.getNaccmet());
    handle.logMessage(LOGINFO, "Number of rejected points", siman.getNrej());
    *score = EcoSystem->SimulateAndUpdate(*bestX);
    handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", *score);
  }
};

// Required
std::ostream& operator<<(std::ostream& os, const DoubleVector& p) {
  os << "";
  return os;
}
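
// OptimiseLikelihood() below prepares the starting point, bounds and annealing
// parameters, packs them into a Siman state object, and hands the search over
// to the ReproducibleSearch template, which runs either the OpenMP or the
// sequential driver depending on how gadget was built.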

void OptInfoSimann::OptimiseLikelihood() {

  //set initial values
  double tmp, p, pp;
  double funcval, trialf;
  int a, i, j, k, l, quit;
  int rchange, rcheck, rnumber;  //Used to randomise the order of the parameters

  handle.logMessage(LOGINFO, "\nStarting Simulated Annealing optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);
  DoubleVector init(nvars);
  DoubleVector trialx(nvars, 0.0);
  DoubleVector bestx(nvars);
  DoubleVector scalex(nvars);
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector fstar(tempcheck);
  DoubleVector vm(nvars, vminit);
  IntVector param(nvars, 0);

  EcoSystem->resetVariables();  //JMB need to reset variables in case they have been scaled
  if (scale) {
    EcoSystem->scaleVariables();
#ifndef NO_OPENMP
    int numThr = omp_get_max_threads();
    for (i = 0; i < numThr; i++)  // scale the variables for the ecosystem of every thread
      EcoSystems[i]->scaleVariables();
#endif
  }
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    bestx[i] = x[i];
    param[i] = i;
  }

  if (scale) {
    for (i = 0; i < nvars; i++) {
      scalex[i] = x[i];
      // Scaling the bounds, because the parameters are scaled
      lowerb[i] = lowerb[i] / init[i];
      upperb[i] = upperb[i] / init[i];
      if (lowerb[i] > upperb[i]) {
        tmp = lowerb[i];
        lowerb[i] = upperb[i];
        upperb[i] = tmp;
      }
    }
  }

  //funcval is the function value at x
  funcval = EcoSystem->SimulateAndUpdate(x);
  if (funcval != funcval) { //check for NaN
    handle.logMessage(LOGINFO, "Error starting Simulated Annealing optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  //the function is to be minimised so switch the sign of funcval (and trialf)
  funcval = -funcval;
  cs /= lratio;  //JMB save processing time
  for (i = 0; i < tempcheck; i++)
    fstar[i] = funcval;

  Siman s(seed, seedM, seedP, nvars, nt, ns, param, &x, &lowerb, &upperb, vm, t, rt, (1.0 / ns),
      tempcheck, simanneps, fstar, lratio, uratio, cs, &bestx, scale, &converge, &score);

  ReproducibleSearch<Siman, DoubleVector, ControlClass, evaluate_f, buildNewParams_f>
      pa(s, x, simanniter);

#ifndef NO_OPENMP
  // OpenMP parallelization
  int numThr = omp_get_max_threads();
  pa.paral_opt_omp(funcval, numThr, numThr);
#else
  // sequential code
  pa.seq_opt(funcval);
#endif

}