13 |
/* can be used as a local optimizer for difficult functions. */ |
/* can be used as a local optimizer for difficult functions. */ |
14 |
/* */ |
/* */ |
15 |
/* The author can be contacted at h2zr1001@vm.cis.smu.edu */ |
/* The author can be contacted at h2zr1001@vm.cis.smu.edu */ |
16 |
|
// |
17 |
/* This file is a translation of a fortran code, which is an example of the*/ |
/* This file is a translation of a fortran code, which is an example of the*/ |
18 |
/* Corana et al. simulated annealing algorithm for multimodal and robust */ |
/* Corana et al. simulated annealing algorithm for multimodal and robust */ |
19 |
/* optimization as implemented and modified by Goffe et al.    */ |
/* optimization as implemented and modified by Goffe et al.    */ |
46 |
/* parameters one should adjust to modify the runtime of the */ |
/* parameters one should adjust to modify the runtime of the */ |
47 |
/* algorithm and its robustness. */ |
/* algorithm and its robustness. */ |
48 |
/* 5. Try constraining the algorithm with either LB or UB. */ |
/* 5. Try constraining the algorithm with either LB or UB. */ |
49 |
|
// |
50 |
/* Synopsis: */ |
/* Synopsis: */ |
51 |
/* This routine implements the continuous simulated annealing global */ |
/* This routine implements the continuous simulated annealing global */ |
52 |
/* optimization algorithm described in Corana et al.'s article */ |
/* optimization algorithm described in Corana et al.'s article */ |
54 |
/* "Simulated Annealing" Algorithm in the September 1987 (vol. 13,  */ |
/* "Simulated Annealing" Algorithm in the September 1987 (vol. 13,  */ |
55 |
/* no. 3, pp. 262-280) issue of the ACM Transactions on Mathematical */ |
/* no. 3, pp. 262-280) issue of the ACM Transactions on Mathematical */ |
56 |
/* Software. */ |
/* Software. */ |
57 |
|
// |
58 |
/* A very quick (perhaps too quick) overview of SA: */ |
/* A very quick (perhaps too quick) overview of SA: */ |
59 |
/* SA tries to find the global optimum of an N dimensional function. */ |
/* SA tries to find the global optimum of an N dimensional function. */ |
60 |
/* It moves both up and downhill and as the optimization process */ |
/* It moves both up and downhill and as the optimization process */ |
79 |
/* rejections rise. Given the scheme for the selection for VM, VM falls. */ |
/* rejections rise. Given the scheme for the selection for VM, VM falls. */ |
80 |
/* Thus, as T declines, VM falls and SA focuses upon the most promising */ |
/* Thus, as T declines, VM falls and SA focuses upon the most promising */ |
81 |
/* area for optimization. */ |
/* area for optimization. */ |
82 |
|
// |
83 |
/* The importance of the parameter T: */ |
/* The importance of the parameter T: */ |
84 |
/* The parameter T is crucial in using SA successfully. It influences */ |
/* The parameter T is crucial in using SA successfully. It influences */ |
85 |
/* VM, the step length over which the algorithm searches for optima. For */ |
/* VM, the step length over which the algorithm searches for optima. For */ |
93 |
/* optimizing a function, it is worthwhile to run a trial run first. Set */ |
/* optimizing a function, it is worthwhile to run a trial run first. Set */ |
94 |
/* RT = 1.5 and T = 1.0. With RT > 1.0, the temperature increases and VM */ |
/* RT = 1.5 and T = 1.0. With RT > 1.0, the temperature increases and VM */ |
95 |
/* rises as well. Then select the T that produces a large enough VM. */ |
/* rises as well. Then select the T that produces a large enough VM. */ |
96 |
|
// |
97 |
/* For modifications to the algorithm and many details on its use, */ |
/* For modifications to the algorithm and many details on its use, */ |
98 |
/* (particularly for econometric applications) see Goffe, Ferrier */ |
/* (particularly for econometric applications) see Goffe, Ferrier */ |
99 |
/* and Rogers, "Global Optimization of Statistical Functions with */ |
/* and Rogers, "Global Optimization of Statistical Functions with */ |
105 |
/* Dallas, TX 75275 */ |
/* Dallas, TX 75275 */ |
106 |
/* h2zr1001 @ smuvm1 (Bitnet) */ |
/* h2zr1001 @ smuvm1 (Bitnet) */ |
107 |
/* h2zr1001 @ vm.cis.smu.edu (Internet) */ |
/* h2zr1001 @ vm.cis.smu.edu (Internet) */ |
108 |
|
// |
109 |
/* As far as possible, the parameters here have the same name as in */ |
/* As far as possible, the parameters here have the same name as in */ |
110 |
/* the description of the algorithm on pp. 266-8 of Corana et al. */ |
/* the description of the algorithm on pp. 266-8 of Corana et al. */ |
111 |
|
// |
112 |
/* Input Parameters: */ |
/* Input Parameters: */ |
113 |
/* Note: The suggested values generally come from Corana et al. To */ |
/* Note: The suggested values generally come from Corana et al. To */ |
114 |
/* drastically reduce runtime, see Goffe et al., pp. 17-8 for */ |
/* drastically reduce runtime, see Goffe et al., pp. 17-8 for */ |
160 |
/* of all points are accepted, the input value is not very */ |
/* of all points are accepted, the input value is not very */ |
161 |
/* important (i.e. if the value is off, SA adjusts VM to the */ |
/* important (i.e. if the value is off, SA adjusts VM to the */ |
162 |
/* correct value). (DP(N)) */ |
/* correct value). (DP(N)) */ |
163 |
|
// |
164 |
/* Output Parameters: */ |
/* Output Parameters: */ |
165 |
/* xopt - The variables that optimize the function. (DP(N)) */ |
/* xopt - The variables that optimize the function. (DP(N)) */ |
166 |
/* fopt - The optimal value of the function. (DP) */ |
/* fopt - The optimal value of the function. (DP) */ |
167 |
|
// |
168 |
/* JMB this has been modified to work with the gadget object structure */ |
/* JMB this has been modified to work with the gadget object structure */ |
169 |
/* This means that the function has been replaced by a call to ecosystem */ |
/* This means that the function has been replaced by a call to ecosystem */ |
170 |
/* object, and we can use the vector objects that have been defined */ |
/* object, and we can use the vector objects that have been defined */ |
177 |
#include "errorhandler.h" |
#include "errorhandler.h" |
178 |
#include "ecosystem.h" |
#include "ecosystem.h" |
179 |
#include "global.h" |
#include "global.h" |
180 |
|
#include "seq_optimize_template.h" |
181 |
|
#ifdef SPECULATIVE |
182 |
|
#include <omp.h> |
183 |
|
#endif |
184 |
|
|
185 |
extern Ecosystem* EcoSystem; |
extern Ecosystem* EcoSystem; |
186 |
|
#ifdef _OPENMP |
187 |
|
extern Ecosystem** EcoSystems; |
188 |
void OptInfoSimann::OptimiseLikelihood() { |
#endif |
189 |
|
|
190 |
|
/*sequential code replaced at seq_optimize_template.h*/ |
191 |
|
//void OptInfoSimann::OptimiseLikelihood() { |
192 |
|
// |
193 |
|
// //set initial values |
194 |
|
// int nacc = 0; //The number of accepted function evaluations |
195 |
|
// int nrej = 0; //The number of rejected function evaluations |
196 |
|
// int naccmet = 0; //The number of metropolis accepted function evaluations |
197 |
|
// |
198 |
|
// double tmp, p, pp, ratio, nsdiv; |
199 |
|
// double fopt, funcval, trialf; |
200 |
|
// int a, i, j, k, l, offset, quit; |
201 |
|
// int rchange, rcheck, rnumber; //Used to randomise the order of the parameters |
202 |
|
// |
203 |
|
// handle.logMessage(LOGINFO, "\nStarting Simulated Annealing optimisation algorithm\n"); |
204 |
|
// int nvars = EcoSystem->numOptVariables(); |
205 |
|
// DoubleVector x(nvars); |
206 |
|
// DoubleVector init(nvars); |
207 |
|
// DoubleVector trialx(nvars, 0.0); |
208 |
|
// DoubleVector bestx(nvars); |
209 |
|
// DoubleVector scalex(nvars); |
210 |
|
// DoubleVector lowerb(nvars); |
211 |
|
// DoubleVector upperb(nvars); |
212 |
|
// DoubleVector fstar(tempcheck); |
213 |
|
// DoubleVector vm(nvars, vminit); |
214 |
|
// IntVector param(nvars, 0); |
215 |
|
// IntVector nacp(nvars, 0); |
216 |
|
// |
217 |
|
// EcoSystem->resetVariables(); //JMB need to reset variables in case they have been scaled |
218 |
|
// if (scale) |
219 |
|
// EcoSystem->scaleVariables(); |
220 |
|
// EcoSystem->getOptScaledValues(x); |
221 |
|
// EcoSystem->getOptLowerBounds(lowerb); |
222 |
|
// EcoSystem->getOptUpperBounds(upperb); |
223 |
|
// EcoSystem->getOptInitialValues(init); |
224 |
|
// |
225 |
|
// for (i = 0; i < nvars; i++) { |
226 |
|
// bestx[i] = x[i]; |
227 |
|
// param[i] = i; |
228 |
|
// } |
229 |
|
// |
230 |
|
// if (scale) { |
231 |
|
// for (i = 0; i < nvars; i++) { |
232 |
|
// scalex[i] = x[i]; |
233 |
|
// // Scaling the bounds, because the parameters are scaled |
234 |
|
// lowerb[i] = lowerb[i] / init[i]; |
235 |
|
// upperb[i] = upperb[i] / init[i]; |
236 |
|
// if (lowerb[i] > upperb[i]) { |
237 |
|
// tmp = lowerb[i]; |
238 |
|
// lowerb[i] = upperb[i]; |
239 |
|
// upperb[i] = tmp; |
240 |
|
// } |
241 |
|
// } |
242 |
|
// } |
243 |
|
// |
244 |
|
// //funcval is the function value at x |
245 |
|
// funcval = EcoSystem->SimulateAndUpdate(x); |
246 |
|
// if (funcval != funcval) { //check for NaN |
247 |
|
// handle.logMessage(LOGINFO, "Error starting Simulated Annealing optimisation with f(x) = infinity"); |
248 |
|
// converge = -1; |
249 |
|
// iters = 1; |
250 |
|
// return; |
251 |
|
// } |
252 |
|
// |
253 |
|
// //the function is to be minimised so switch the sign of funcval (and trialf) |
254 |
|
// funcval = -funcval; |
255 |
|
// offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop |
256 |
|
// nacc++; |
257 |
|
// cs /= lratio; //JMB save processing time |
258 |
|
// nsdiv = 1.0 / ns; |
259 |
|
// fopt = funcval; |
260 |
|
// for (i = 0; i < tempcheck; i++) |
261 |
|
// fstar[i] = funcval; |
262 |
|
// |
263 |
|
// //Start the main loop. Note that it terminates if |
264 |
|
// //(i) the algorithm succesfully optimises the function or |
265 |
|
// //(ii) there are too many function evaluations |
266 |
|
// while (1) { |
267 |
|
// for (a = 0; a < nt; a++) { |
268 |
|
// //Randomize the order of the parameters once in a while, to avoid |
269 |
|
// //the order having an influence on which changes are accepted |
270 |
|
// rchange = 0; |
271 |
|
// while (rchange < nvars) { |
272 |
|
// rnumber = rand_r(&seedP) % nvars; |
273 |
|
// rcheck = 1; |
274 |
|
// for (i = 0; i < rchange; i++) |
275 |
|
// if (param[i] == rnumber) |
276 |
|
// rcheck = 0; |
277 |
|
// if (rcheck) { |
278 |
|
// param[rchange] = rnumber; |
279 |
|
// rchange++; |
280 |
|
// } |
281 |
|
// } |
282 |
|
// |
283 |
|
// for (j = 0; j < ns; j++) { |
284 |
|
// for (l = 0; l < nvars; l++) { |
285 |
|
// //Generate trialx, the trial value of x |
286 |
|
// newValue(nvars, l, param, trialx, x, lowerb, upperb, vm); |
287 |
|
//// for (i = 0; i < nvars; i++) { |
288 |
|
//// if (i == param[l]) { |
289 |
|
//// trialx[i] = x[i] + ((randomNumber() * 2.0) - 1.0) * vm[i]; |
290 |
|
//// |
291 |
|
//// //If trialx is out of bounds, try again until we find a point that is OK |
292 |
|
//// if ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) { |
293 |
|
//// //JMB - this used to just select a random point between the bounds |
294 |
|
//// k = 0; |
295 |
|
//// while ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) { |
296 |
|
//// trialx[i] = x[i] + ((randomNumber() * 2.0) - 1.0) * vm[i]; |
297 |
|
//// k++; |
298 |
|
//// if (k > 10) //we've had 10 tries to find a point neatly, so give up |
299 |
|
//// trialx[i] = lowerb[i] + (upperb[i] - lowerb[i]) * randomNumber(); |
300 |
|
//// } |
301 |
|
//// } |
302 |
|
//// |
303 |
|
//// } else |
304 |
|
//// trialx[i] = x[i]; |
305 |
|
//// } |
306 |
|
// |
307 |
|
// //Evaluate the function with the trial point trialx and return as -trialf |
308 |
|
// trialf = EcoSystem->SimulateAndUpdate(trialx); |
309 |
|
// trialf = -trialf; |
310 |
|
// |
311 |
|
// //If too many function evaluations occur, terminate the algorithm |
312 |
|
// iters = EcoSystem->getFuncEval() - offset; |
313 |
|
// if (iters > simanniter) { |
314 |
|
// handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n"); |
315 |
|
// handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations"); |
316 |
|
// handle.logMessage(LOGINFO, "The temperature was reduced to", t); |
317 |
|
// handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations"); |
318 |
|
// handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run"); |
319 |
|
// handle.logMessage(LOGINFO, "Number of directly accepted points", nacc); |
320 |
|
// handle.logMessage(LOGINFO, "Number of metropolis accepted points", naccmet); |
321 |
|
// handle.logMessage(LOGINFO, "Number of rejected points", nrej); |
322 |
|
// |
323 |
|
// score = EcoSystem->SimulateAndUpdate(bestx); |
324 |
|
// handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score); |
325 |
|
// return; |
326 |
|
// } |
327 |
|
// //Accept the new point if the new function value better |
328 |
|
// if ((trialf - funcval) > verysmall) { |
329 |
|
// for (i = 0; i < nvars; i++) |
330 |
|
// x[i] = trialx[i]; |
331 |
|
// funcval = trialf; |
332 |
|
// nacc++; |
333 |
|
// nacp[param[l]]++; //JMB - not sure about this ... |
334 |
|
// |
335 |
|
// } else { |
336 |
|
// //Accept according to metropolis condition |
337 |
|
// p = expRep((trialf - funcval) / t); |
338 |
|
// pp = randomNumber(&seedM); |
339 |
|
// if (pp < p) { |
340 |
|
// //Accept point |
341 |
|
// for (i = 0; i < nvars; i++) |
342 |
|
// x[i] = trialx[i]; |
343 |
|
// funcval = trialf; |
344 |
|
// naccmet++; |
345 |
|
// nacp[param[l]]++; |
346 |
|
// } else { |
347 |
|
// //Reject point |
348 |
|
// nrej++; |
349 |
|
// } |
350 |
|
// } |
351 |
|
// // JMB added check for really silly values |
352 |
|
// if (isZero(trialf)) { |
353 |
|
// handle.logMessage(LOGINFO, "Error in Simulated Annealing optimisation after", iters, "function evaluations, f(x) = 0"); |
354 |
|
// converge = -1; |
355 |
|
// return; |
356 |
|
// } |
357 |
|
// |
358 |
|
// //If greater than any other point, record as new optimum |
359 |
|
// if ((trialf > fopt) && (trialf == trialf)) { |
360 |
|
// for (i = 0; i < nvars; i++) |
361 |
|
// bestx[i] = trialx[i]; |
362 |
|
// fopt = trialf; |
363 |
|
// |
364 |
|
// if (scale) { |
365 |
|
// for (i = 0; i < nvars; i++) |
366 |
|
// scalex[i] = bestx[i] * init[i]; |
367 |
|
// EcoSystem->storeVariables(-fopt, scalex); |
368 |
|
// } else |
369 |
|
// EcoSystem->storeVariables(-fopt, bestx); |
370 |
|
// |
371 |
|
// handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations"); |
372 |
|
// handle.logMessage(LOGINFO, "The likelihood score is", -fopt, "at the point"); |
373 |
|
// EcoSystem->writeBestValues(); |
374 |
|
// } |
375 |
|
// } |
376 |
|
// } |
377 |
|
// |
378 |
|
// //Adjust vm so that approximately half of all evaluations are accepted |
379 |
|
// for (i = 0; i < nvars; i++) { |
380 |
|
// ratio = nsdiv * nacp[i]; |
381 |
|
// nacp[i] = 0; |
382 |
|
// if (ratio > uratio) { |
383 |
|
// vm[i] = vm[i] * (1.0 + cs * (ratio - uratio)); |
384 |
|
// } else if (ratio < lratio) { |
385 |
|
// vm[i] = vm[i] / (1.0 + cs * (lratio - ratio)); |
386 |
|
// } |
387 |
|
// |
388 |
|
// if (vm[i] < rathersmall) |
389 |
|
// vm[i] = rathersmall; |
390 |
|
// if (vm[i] > (upperb[i] - lowerb[i])) |
391 |
|
// vm[i] = upperb[i] - lowerb[i]; |
392 |
|
// } |
393 |
|
// } |
394 |
|
// |
395 |
|
// //Check termination criteria |
396 |
|
// for (i = tempcheck - 1; i > 0; i--) |
397 |
|
// fstar[i] = fstar[i - 1]; |
398 |
|
// fstar[0] = funcval; |
399 |
|
// |
400 |
|
// quit = 0; |
401 |
|
// if (fabs(fopt - funcval) < simanneps) { |
402 |
|
// quit = 1; |
403 |
|
// for (i = 0; i < tempcheck - 1; i++) |
404 |
|
// if (fabs(fstar[i + 1] - fstar[i]) > simanneps) |
405 |
|
// quit = 0; |
406 |
|
// } |
407 |
|
// |
408 |
|
// handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ..."); |
409 |
|
// |
410 |
|
// //Terminate SA if appropriate |
411 |
|
// if (quit) { |
412 |
|
// handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n"); |
413 |
|
// handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations"); |
414 |
|
// handle.logMessage(LOGINFO, "The temperature was reduced to", t); |
415 |
|
// handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run"); |
416 |
|
// handle.logMessage(LOGINFO, "Number of directly accepted points", nacc); |
417 |
|
// handle.logMessage(LOGINFO, "Number of metropolis accepted points", naccmet); |
418 |
|
// handle.logMessage(LOGINFO, "Number of rejected points", nrej); |
419 |
|
// |
420 |
|
// converge = 1; |
421 |
|
// score = EcoSystem->SimulateAndUpdate(bestx); |
422 |
|
// handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score); |
423 |
|
// return; |
424 |
|
// } |
425 |
|
// |
426 |
|
// //If termination criteria is not met, prepare for another loop. |
427 |
|
// t *= rt; |
428 |
|
// if (t < rathersmall) |
429 |
|
// t = rathersmall; //JMB make sure temperature doesnt get too small |
430 |
|
// |
431 |
|
// handle.logMessage(LOGINFO, "Reducing the temperature to", t); |
432 |
|
// funcval = fopt; |
433 |
|
// for (i = 0; i < nvars; i++) |
434 |
|
// x[i] = bestx[i]; |
435 |
|
// } |
436 |
|
//} |
437 |
|
|
438 |
|
|
439 |
|
#ifdef _OPENMP |
440 |
|
//#ifdef SPECULATIVE |
441 |
|
void OptInfoSimann::OptimiseLikelihoodOMP() { |
442 |
|
|
443 |
//set initial values |
//set initial values |
444 |
int nacc = 0; //The number of accepted function evaluations |
int nacc = 0; //The number of accepted function evaluations |
447 |
|
|
448 |
double tmp, p, pp, ratio, nsdiv; |
double tmp, p, pp, ratio, nsdiv; |
449 |
double fopt, funcval, trialf; |
double fopt, funcval, trialf; |
450 |
int a, i, j, k, l, offset, quit; |
int a, i, j, k, l, quit; |
451 |
int rchange, rcheck, rnumber; //Used to randomise the order of the parameters |
int rchange, rcheck, rnumber; //Used to randomise the order of the parameters |
452 |
|
|
453 |
|
// store the info of the different threads |
454 |
|
struct Storage { |
455 |
|
DoubleVector trialx; |
456 |
|
double newLikelihood; |
457 |
|
}; |
458 |
|
|
459 |
handle.logMessage(LOGINFO, "\nStarting Simulated Annealing optimisation algorithm\n"); |
handle.logMessage(LOGINFO, "\nStarting Simulated Annealing optimisation algorithm\n"); |
460 |
int nvars = EcoSystem->numOptVariables(); |
int nvars = EcoSystem->numOptVariables(); |
461 |
DoubleVector x(nvars); |
DoubleVector x(nvars); |
471 |
IntVector nacp(nvars, 0); |
IntVector nacp(nvars, 0); |
472 |
|
|
473 |
EcoSystem->resetVariables(); //JMB need to reset variables in case they have been scaled |
EcoSystem->resetVariables(); //JMB need to reset variables in case they have been scaled |
474 |
if (scale) |
if (scale) { |
475 |
EcoSystem->scaleVariables(); |
EcoSystem->scaleVariables(); |
476 |
|
int numThr = omp_get_max_threads ( ); |
477 |
|
for(i = 0; i < numThr; i++) // scale the variables for the ecosystem of every thread |
478 |
|
EcoSystems[i]->scaleVariables(); |
479 |
|
} |
480 |
EcoSystem->getOptScaledValues(x); |
EcoSystem->getOptScaledValues(x); |
481 |
EcoSystem->getOptLowerBounds(lowerb); |
EcoSystem->getOptLowerBounds(lowerb); |
482 |
EcoSystem->getOptUpperBounds(upperb); |
EcoSystem->getOptUpperBounds(upperb); |
483 |
EcoSystem->getOptInitialValues(init); |
EcoSystem->getOptInitialValues(init); |
484 |
|
|
485 |
for (i = 0; i < nvars; i++) { |
for (i = 0; i < nvars; ++i) { |
486 |
bestx[i] = x[i]; |
bestx[i] = x[i]; |
487 |
param[i] = i; |
param[i] = i; |
488 |
} |
} |
489 |
|
|
490 |
if (scale) { |
if (scale) { |
491 |
for (i = 0; i < nvars; i++) { |
for (i = 0; i < nvars; ++i) { |
492 |
scalex[i] = x[i]; |
scalex[i] = x[i]; |
493 |
// Scaling the bounds, because the parameters are scaled |
// Scaling the bounds, because the parameters are scaled |
494 |
lowerb[i] = lowerb[i] / init[i]; |
lowerb[i] = lowerb[i] / init[i]; |
512 |
|
|
513 |
//the function is to be minimised so switch the sign of funcval (and trialf) |
//the function is to be minimised so switch the sign of funcval (and trialf) |
514 |
funcval = -funcval; |
funcval = -funcval; |
|
offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop |
|
515 |
nacc++; |
nacc++; |
516 |
cs /= lratio; //JMB save processing time |
cs /= lratio; //JMB save processing time |
517 |
nsdiv = 1.0 / ns; |
nsdiv = 1.0 / ns; |
518 |
fopt = funcval; |
fopt = funcval; |
519 |
for (i = 0; i < tempcheck; i++) |
for (i = 0; i < tempcheck; ++i) |
520 |
fstar[i] = funcval; |
fstar[i] = funcval; |
521 |
|
|
522 |
|
|
523 |
|
|
524 |
|
int numThr = omp_get_max_threads ( ); |
525 |
|
int bestId=0; |
526 |
|
int ini=0; |
527 |
|
|
528 |
|
Storage* storage = new Storage[numThr]; |
529 |
|
|
530 |
|
for (i=0; i<numThr; ++i) |
531 |
|
storage[i].trialx = trialx; |
532 |
|
|
533 |
|
|
534 |
|
int aux; //store the number of evaluations that are not useful for the algorithm |
535 |
|
|
536 |
|
DoubleVector vns(nvars, 0); //vector of ns |
537 |
|
int ns_ = ceil(numThr/2.); |
538 |
|
double res; |
539 |
|
aux=0; |
540 |
//Start the main loop. Note that it terminates if |
//Start the main loop. Note that it terminates if |
541 |
//(i) the algorithm succesfully optimises the function or |
//(i) the algorithm succesfully optimises the function or |
542 |
//(ii) there are too many function evaluations |
//(ii) there are too many function evaluations |
543 |
while (1) { |
while (1) { |
544 |
for (a = 0; a < nt; a++) { |
for (a = 0; a < nt; ++a) { |
545 |
//Randomize the order of the parameters once in a while, to avoid |
//Randomize the order of the parameters once in a while, to avoid |
546 |
//the order having an influence on which changes are accepted |
//the order having an influence on which changes are accepted |
547 |
rchange = 0; |
rchange = 0; |
548 |
while (rchange < nvars) { |
while (rchange < nvars) { |
549 |
rnumber = rand() % nvars; |
rnumber = rand_r(&seedP) % nvars; |
550 |
rcheck = 1; |
rcheck = 1; |
551 |
for (i = 0; i < rchange; i++) |
for (i = 0; i < rchange; ++i) |
552 |
if (param[i] == rnumber) |
if (param[i] == rnumber) |
553 |
rcheck = 0; |
rcheck = 0; |
554 |
if (rcheck) { |
if (rcheck) { |
557 |
} |
} |
558 |
} |
} |
559 |
|
|
|
for (j = 0; j < ns; j++) { |
|
|
for (l = 0; l < nvars; l++) { |
|
|
//Generate trialx, the trial value of x |
|
|
for (i = 0; i < nvars; i++) { |
|
|
if (i == param[l]) { |
|
|
trialx[i] = x[i] + ((randomNumber() * 2.0) - 1.0) * vm[i]; |
|
560 |
|
|
561 |
//If trialx is out of bounds, try again until we find a point that is OK |
for (j = 0; j < (ns*ns_); ++j) { |
562 |
if ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) { |
for (l = ini; l < nvars; l+=numThr) { |
563 |
//JMB - this used to just select a random point between the bounds |
for (i=0; i<numThr;++i) |
564 |
k = 0; |
{ |
565 |
while ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) { |
if ((l+i) < nvars) |
566 |
trialx[i] = x[i] + ((randomNumber() * 2.0) - 1.0) * vm[i]; |
newValue(nvars, l+i, param, storage[i].trialx, x, lowerb, upperb, vm); |
567 |
k++; |
else { |
568 |
if (k > 10) //we've had 10 tries to find a point neatly, so give up |
newValue(nvars, ini, param, storage[i].trialx, x, lowerb, upperb, vm); |
569 |
trialx[i] = lowerb[i] + (upperb[i] - lowerb[i]) * randomNumber(); |
ini++; |
570 |
} |
if (ini >= nvars) |
571 |
|
ini=0; |
572 |
} |
} |
|
|
|
|
} else |
|
|
trialx[i] = x[i]; |
|
573 |
} |
} |
574 |
|
|
575 |
|
# pragma omp parallel private(res) |
576 |
|
{ |
577 |
//Evaluate the function with the trial point trialx and return as -trialf |
//Evaluate the function with the trial point trialx and return as -trialf |
578 |
trialf = EcoSystem->SimulateAndUpdate(trialx); |
int id = omp_get_thread_num (); |
579 |
trialf = -trialf; |
res = EcoSystems[id]->SimulateAndUpdate(storage[id].trialx); |
580 |
|
storage[id].newLikelihood = -res; |
581 |
|
} |
582 |
|
//best value from omp |
583 |
|
trialf = storage[0].newLikelihood; |
584 |
|
bestId=0; |
585 |
|
for (i=0;i<numThr;++i) |
586 |
|
{ |
587 |
|
if (storage[i].newLikelihood > trialf) |
588 |
|
{ |
589 |
|
trialf=storage[i].newLikelihood; |
590 |
|
bestId=i; |
591 |
|
} |
592 |
|
k = param[(l+i)%nvars]; |
593 |
|
|
594 |
|
if ((storage[i].newLikelihood - funcval) > verysmall) |
595 |
|
{ |
596 |
|
nacp[k]++; |
597 |
|
aux++; |
598 |
|
vns[k]++; |
599 |
|
} |
600 |
|
else { |
601 |
|
//Accept according to metropolis condition |
602 |
|
p = expRep((storage[i].newLikelihood - funcval) / t); |
603 |
|
pp = randomNumber(&seedM); |
604 |
|
if (pp < p) |
605 |
|
aux++; |
606 |
|
else { |
607 |
|
vns[k]++; |
608 |
|
nrej++; |
609 |
|
} |
610 |
|
} |
611 |
|
|
612 |
//If too many function evaluations occur, terminate the algorithm |
if (vns[k] >= ns) { |
613 |
iters = EcoSystem->getFuncEval() - offset; |
ratio = nsdiv * nacp[k]; |
614 |
|
nacp[k] = 0; |
615 |
|
if (ratio > uratio) { |
616 |
|
vm[k] = vm[k] * (1.0 + cs * (ratio - uratio)); |
617 |
|
} else if (ratio < lratio) { |
618 |
|
vm[k] = vm[k] / (1.0 + cs * (lratio - ratio)); |
619 |
|
} |
620 |
|
if (vm[k] < rathersmall){ |
621 |
|
vm[k] = rathersmall; |
622 |
|
} |
623 |
|
if (vm[k] > (upperb[k] - lowerb[k])) |
624 |
|
{ |
625 |
|
vm[k] = upperb[k] - lowerb[k]; |
626 |
|
} |
627 |
|
vns[k]=0; |
628 |
|
} |
629 |
|
} |
630 |
|
aux--; |
631 |
|
iters = (EcoSystems[bestId]->getFuncEval() * numThr) -aux; |
632 |
if (iters > simanniter) { |
if (iters > simanniter) { |
633 |
handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n"); |
handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n"); |
634 |
handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations"); |
handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations"); |
641 |
|
|
642 |
score = EcoSystem->SimulateAndUpdate(bestx); |
score = EcoSystem->SimulateAndUpdate(bestx); |
643 |
handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score); |
handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score); |
644 |
|
delete[] storage; |
645 |
return; |
return; |
646 |
} |
} |
647 |
|
|
648 |
//Accept the new point if the new function value better |
//Accept the new point if the new function value better |
649 |
if ((trialf - funcval) > verysmall) { |
if ((trialf - funcval) > verysmall) { |
650 |
for (i = 0; i < nvars; i++) |
for (i = 0; i < nvars; ++i) |
651 |
x[i] = trialx[i]; |
x[i] = storage[bestId].trialx[i]; |
652 |
funcval = trialf; |
funcval = trialf; |
653 |
nacc++; |
nacc++; |
|
nacp[param[l]]++; //JMB - not sure about this ... |
|
|
|
|
654 |
} else { |
} else { |
655 |
//Accept according to metropolis condition |
//Accept according to metropolis condition |
656 |
p = expRep((trialf - funcval) / t); |
p = expRep((trialf - funcval) / t); |
657 |
pp = randomNumber(); |
pp = randomNumber(&seedP); |
658 |
if (pp < p) { |
if (pp < p) { |
659 |
//Accept point |
//Accept point |
660 |
for (i = 0; i < nvars; i++) |
for (i = 0; i < nvars; ++i) |
661 |
x[i] = trialx[i]; |
x[i] = storage[bestId].trialx[i]; |
662 |
funcval = trialf; |
funcval = trialf; |
663 |
naccmet++; |
naccmet++; |
664 |
nacp[param[l]]++; |
nacp[param[(l+bestId)%nvars]]++; |
665 |
} else { |
} else { |
666 |
//Reject point |
//Reject point |
667 |
nrej++; |
nrej++; |
668 |
} |
} |
669 |
} |
} |
|
|
|
670 |
// JMB added check for really silly values |
// JMB added check for really silly values |
671 |
if (isZero(trialf)) { |
if (isZero(trialf)) { |
672 |
handle.logMessage(LOGINFO, "Error in Simulated Annealing optimisation after", iters, "function evaluations, f(x) = 0"); |
handle.logMessage(LOGINFO, "Error in Simulated Annealing optimisation after", iters, "function evaluations, f(x) = 0"); |
673 |
converge = -1; |
converge = -1; |
674 |
|
delete[] storage; |
675 |
return; |
return; |
676 |
} |
} |
677 |
|
|
678 |
//If greater than any other point, record as new optimum |
//If greater than any other point, record as new optimum |
679 |
if ((trialf > fopt) && (trialf == trialf)) { |
if ((trialf > fopt) && (trialf == trialf)) { |
680 |
for (i = 0; i < nvars; i++) |
for (i = 0; i < nvars; ++i) |
681 |
bestx[i] = trialx[i]; |
bestx[i] = storage[bestId].trialx[i]; |
682 |
fopt = trialf; |
fopt = trialf; |
683 |
|
|
684 |
if (scale) { |
if (scale) { |
685 |
for (i = 0; i < nvars; i++) |
for (i = 0; i < nvars; ++i) |
686 |
scalex[i] = bestx[i] * init[i]; |
scalex[i] = bestx[i] * init[i]; |
687 |
EcoSystem->storeVariables(-fopt, scalex); |
EcoSystem->storeVariables(-fopt, scalex); |
688 |
} else |
} else |
694 |
} |
} |
695 |
} |
} |
696 |
} |
} |
|
|
|
|
//Adjust vm so that approximately half of all evaluations are accepted |
|
|
for (i = 0; i < nvars; i++) { |
|
|
ratio = nsdiv * nacp[i]; |
|
|
nacp[i] = 0; |
|
|
if (ratio > uratio) { |
|
|
vm[i] = vm[i] * (1.0 + cs * (ratio - uratio)); |
|
|
} else if (ratio < lratio) { |
|
|
vm[i] = vm[i] / (1.0 + cs * (lratio - ratio)); |
|
|
} |
|
|
|
|
|
if (vm[i] < rathersmall) |
|
|
vm[i] = rathersmall; |
|
|
if (vm[i] > (upperb[i] - lowerb[i])) |
|
|
vm[i] = upperb[i] - lowerb[i]; |
|
|
} |
|
697 |
} |
} |
698 |
|
|
699 |
//Check termination criteria |
//Check termination criteria |
704 |
quit = 0; |
quit = 0; |
705 |
if (fabs(fopt - funcval) < simanneps) { |
if (fabs(fopt - funcval) < simanneps) { |
706 |
quit = 1; |
quit = 1; |
707 |
for (i = 0; i < tempcheck - 1; i++) |
for (i = 0; i < tempcheck - 1; ++i) |
708 |
if (fabs(fstar[i + 1] - fstar[i]) > simanneps) |
if (fabs(fstar[i + 1] - fstar[i]) > simanneps) |
709 |
quit = 0; |
quit = 0; |
710 |
} |
} |
724 |
converge = 1; |
converge = 1; |
725 |
score = EcoSystem->SimulateAndUpdate(bestx); |
score = EcoSystem->SimulateAndUpdate(bestx); |
726 |
handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score); |
handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score); |
727 |
|
delete[] storage; |
728 |
return; |
return; |
729 |
} |
} |
730 |
|
|
735 |
|
|
736 |
handle.logMessage(LOGINFO, "Reducing the temperature to", t); |
handle.logMessage(LOGINFO, "Reducing the temperature to", t); |
737 |
funcval = fopt; |
funcval = fopt; |
738 |
for (i = 0; i < nvars; i++) |
for (i = 0; i < nvars; ++i) |
739 |
x[i] = bestx[i]; |
x[i] = bestx[i]; |
740 |
} |
} |
741 |
} |
} |
742 |
|
//#endif |
743 |
|
#endif |
744 |
|
|
745 |
|
// calcule a new point |
746 |
|
void OptInfoSimann::newValue(int nvars, int l, IntVector& param, DoubleVector& trialx, |
747 |
|
DoubleVector& x, DoubleVector& lowerb, DoubleVector& upperb, DoubleVector& vm) |
748 |
|
{ |
749 |
|
int i, k; |
750 |
|
for (i = 0; i < nvars; ++i) { |
751 |
|
if (i == param[l]) { |
752 |
|
trialx[i] = x[i] + ((randomNumber(&seed) * 2.0) - 1.0) * vm[i]; |
753 |
|
|
754 |
|
//If trialx is out of bounds, try again until we find a point that is OK |
755 |
|
if ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) { |
756 |
|
//JMB - this used to just select a random point between the bounds |
757 |
|
k = 0; |
758 |
|
while ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) { |
759 |
|
trialx[i] = x[i] + ((randomNumber(&seed) * 2.0) - 1.0) * vm[i]; |
760 |
|
k++; |
761 |
|
if (k > 10) //we've had 10 tries to find a point neatly, so give up |
762 |
|
trialx[i] = lowerb[i] + (upperb[i] - lowerb[i]) * randomNumber(&seed); |
763 |
|
} |
764 |
|
} |
765 |
|
} else |
766 |
|
trialx[i] = x[i]; |
767 |
|
} |
768 |
|
} |
769 |
|
|
770 |
|
/*################################################################################ |
771 |
|
* code use in the template (seq_optimize_template.h) |
772 |
|
################################################################################*/ |
773 |
|
|
774 |
|
|
775 |
|
//Generate trialx, the trial value of x |
776 |
|
void newValue(int nvars, int l, IntVector& param, DoubleVector& trialx, |
777 |
|
DoubleVector& x, DoubleVector& lowerb, DoubleVector& upperb, DoubleVector& vm, unsigned* seed) |
778 |
|
{ |
779 |
|
int i, k; |
780 |
|
for (i = 0; i < nvars; ++i) { |
781 |
|
if (i == param[l]) { |
782 |
|
trialx[i] = x[i] + ((randomNumber(&*seed) * 2.0) - 1.0) * vm[i]; |
783 |
|
|
784 |
|
//If trialx is out of bounds, try again until we find a point that is OK |
785 |
|
if ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) { |
786 |
|
//JMB - this used to just select a random point between the bounds |
787 |
|
k = 0; |
788 |
|
while ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) { |
789 |
|
trialx[i] = x[i] + ((randomNumber(&*seed) * 2.0) - 1.0) * vm[i]; |
790 |
|
k++; |
791 |
|
if (k > 10) //we've had 10 tries to find a point neatly, so give up |
792 |
|
trialx[i] = lowerb[i] + (upperb[i] - lowerb[i]) * randomNumber(&*seed); |
793 |
|
} |
794 |
|
} |
795 |
|
} else |
796 |
|
trialx[i] = x[i]; |
797 |
|
} |
798 |
|
} |
799 |
|
|
800 |
|
// Adapter used by the ReproducibleSearch template: unpacks the search
// state from the Siman object and forwards it to newValue() to build
// the next trial parameter vector in params.
void buildNewParams_f(Siman& seed, DoubleVector& params) {

  newValue(seed.getNvars(), seed.getL(), seed.getParam(), params, seed.getX(),
    seed.getLowerb(), seed.getUpperb(), seed.getVm(), seed.getSeed());

}
806 |
|
|
807 |
|
/// Represents the function that computes how good the parameters are.
/// Parallel (OpenMP) variant: each thread evaluates on its own EcoSystem
/// replica, indexed by the thread number. Returns the negated likelihood
/// score because the annealing code maximises while Gadget minimises.
#ifdef _OPENMP
double evaluate_par_f(const DoubleVector& params) {
  int id = omp_get_thread_num();
  return -EcoSystems[id]->SimulateAndUpdate(params);
}
#endif
816 |
|
// Sequential evaluation of the likelihood at params.
// Returns the negated score because the annealing code maximises
// while Gadget's SimulateAndUpdate returns a value to be minimised.
double evaluate_f(const DoubleVector& params) {
  return -EcoSystem->SimulateAndUpdate(params);
}
821 |
|
|
822 |
|
struct ControlClass { |
823 |
|
|
824 |
|
void adjustVm(Siman& seed) { |
825 |
|
//Adjust vm so that approximately half of all evaluations are accepted |
826 |
|
int i; |
827 |
|
double ratio, nsdiv = seed.getNsdiv(); |
828 |
|
|
829 |
|
DoubleVector vm = seed.getVm(); |
830 |
|
DoubleVector upperb = seed.getUpperb(); |
831 |
|
DoubleVector lowerb = seed.getLowerb(); |
832 |
|
|
833 |
|
double uratio = seed.getUratio(); |
834 |
|
double lratio = seed.getLratio(); |
835 |
|
|
836 |
|
for (i = 0; i < seed.getNvars(); i++) { |
837 |
|
ratio = nsdiv * seed.getNacp(i); |
838 |
|
seed.setNacp(i,0); |
839 |
|
if (ratio > uratio) { |
840 |
|
(vm)[i] = (vm)[i] * (1.0 + seed.getCs() * (ratio - seed.getUratio())); |
841 |
|
} else if (ratio < lratio) { |
842 |
|
(vm)[i] = (vm)[i] / (1.0 + seed.getCs() * (seed.getLratio() - ratio)); |
843 |
|
} |
844 |
|
|
845 |
|
if ((vm)[i] < rathersmall) |
846 |
|
(vm)[i] = rathersmall; |
847 |
|
if ((vm)[i] > (upperb[i] - lowerb[i])) |
848 |
|
(vm)[i] = upperb[i] - lowerb[i]; |
849 |
|
} |
850 |
|
seed.setVm(vm); |
851 |
|
} |
852 |
|
|
853 |
|
void temperature(Siman& seed, DoubleVector& x){ |
854 |
|
int i; |
855 |
|
double t = seed.getT(); |
856 |
|
t *= seed.getRt(); |
857 |
|
if (t < rathersmall) |
858 |
|
t = rathersmall; //JMB make sure temperature doesnt get too small |
859 |
|
|
860 |
|
handle.logMessage(LOGINFO, "Reducing the temperature to", t); |
861 |
|
seed.setT(t); |
862 |
|
|
863 |
|
DoubleVector* bestx = seed.getBestx(); |
864 |
|
|
865 |
|
for (i = 0; i < seed.getNvars(); i++) |
866 |
|
x[i] = (*bestx)[i]; |
867 |
|
} |
868 |
|
|
869 |
|
void optimum(double trialf, double &fopt, int iters, DoubleVector trialx, |
870 |
|
DoubleVector init, Siman siman){ |
871 |
|
//If greater than any other point, record as new optimum |
872 |
|
int i, nvars = siman.getNvars(); |
873 |
|
DoubleVector scalex(nvars); |
874 |
|
DoubleVector* bestx = siman.getBestx(); |
875 |
|
|
876 |
|
if ((trialf > fopt) && (trialf == trialf)) { |
877 |
|
for (i = 0; i < nvars; i++) |
878 |
|
(*bestx)[i] = trialx[i]; |
879 |
|
fopt = trialf; |
880 |
|
|
881 |
|
if (siman.getScale()) { |
882 |
|
for (i = 0; i < nvars; i++) |
883 |
|
scalex[i] = (*bestx)[i] * init[i]; |
884 |
|
EcoSystem->storeVariables(-fopt, scalex); |
885 |
|
} else |
886 |
|
EcoSystem->storeVariables(-fopt, (*bestx)); |
887 |
|
|
888 |
|
handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations"); |
889 |
|
handle.logMessage(LOGINFO, "The likelihood score is", -fopt, "at the point"); |
890 |
|
EcoSystem->writeBestValues(); |
891 |
|
} |
892 |
|
} |
893 |
|
|
894 |
|
/** |
895 |
|
@brief Decides wheter the current item evaluated must be chosen as new optimum |
896 |
|
@param funcval value for old optimum |
897 |
|
@param trialf value for current item evaluated |
898 |
|
*/ |
899 |
|
bool mustAccept(double funcval, double trialf, Siman &siman, int iters) { |
900 |
|
//Accept the new point if the new function value better |
901 |
|
bool ret = true; |
902 |
|
int i; |
903 |
|
int aux = siman.getParam()[siman.getL()]; |
904 |
|
if ((trialf - funcval) > verysmall) { |
905 |
|
siman.incrementNacp(aux); |
906 |
|
siman.incrementNacc(); |
907 |
|
//JMB - not sure about this ... |
908 |
|
} else { |
909 |
|
double p, pp; |
910 |
|
//Accept according to metropolis condition |
911 |
|
|
912 |
|
p = expRep((trialf - funcval) / siman.getT()); |
913 |
|
pp = randomNumber(siman.getSeedM()); |
914 |
|
if (pp < p) { |
915 |
|
siman.incrementNacp(aux); |
916 |
|
siman.incrementNaccmet(); |
917 |
|
// //Accept point |
918 |
|
} else { |
919 |
|
//Reject point |
920 |
|
ret = false; |
921 |
|
siman.incrementNrej(); |
922 |
|
} |
923 |
|
} |
924 |
|
// JMB added check for really silly values |
925 |
|
if (isZero(trialf)) { |
926 |
|
handle.logMessage(LOGINFO, "Error in Simulated Annealing optimisation after", |
927 |
|
iters, "function evaluations, f(x) = 0"); |
928 |
|
siman.setConverge(-1); |
929 |
|
return false; |
930 |
|
} |
931 |
|
|
932 |
|
siman.incrementL(); |
933 |
|
if (siman.getL() == 0) |
934 |
|
siman.incrementNS(); |
935 |
|
if (siman.getNS() >= siman.getNs()){ |
936 |
|
siman.setNS(0); |
937 |
|
siman.incrementNT(); |
938 |
|
adjustVm(siman); |
939 |
|
siman.randomizeParams(); |
940 |
|
} |
941 |
|
|
942 |
|
return ret; |
943 |
|
|
944 |
|
} |
945 |
|
|
946 |
|
/** |
947 |
|
@brief Decides whether the search must stop. |
948 |
|
It does not take into account the number of iterations, which is already considered by the template |
949 |
|
@param prev old optimum |
950 |
|
@param funcval new/current optimum |
951 |
|
*/ |
952 |
|
bool mustTerminate(double prev, double& funcval, Siman &siman, int iters) { |
953 |
|
bool quit = false; |
954 |
|
if (siman.getNT() >= siman.getNt()) |
955 |
|
{ |
956 |
|
int i; |
957 |
|
DoubleVector fstar = siman.getFstar(); |
958 |
|
|
959 |
|
siman.setNT(0); |
960 |
|
|
961 |
|
for (i = siman.getTempcheck() - 1; i > 0; i--) |
962 |
|
fstar[i] = fstar[i - 1]; |
963 |
|
fstar[0] = funcval; |
964 |
|
|
965 |
|
if (fabs(prev - funcval) < siman.getSimanneps()) { |
966 |
|
quit = true; |
967 |
|
for (i = 0; i < siman.getTempcheck() - 1; i++) |
968 |
|
if (fabs(fstar[i + 1] - fstar[i]) > siman.getSimanneps()) |
969 |
|
quit = false; |
970 |
|
} |
971 |
|
|
972 |
|
handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, |
973 |
|
"function evaluations ..."); |
974 |
|
|
975 |
|
temperature(siman, siman.getX()); |
976 |
|
|
977 |
|
funcval = prev; |
978 |
|
} |
979 |
|
return quit; |
980 |
|
} |
981 |
|
|
982 |
|
void printResult(bool quit, Siman siman, int iters) |
983 |
|
{ |
984 |
|
double * score = siman.getScore(); |
985 |
|
DoubleVector * bestX = siman.getBestx(); |
986 |
|
|
987 |
|
handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n"); |
988 |
|
handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations"); |
989 |
|
handle.logMessage(LOGINFO, "The temperature was reduced to", siman.getT()); |
990 |
|
if (quit) { |
991 |
|
int* converge = siman.getConverge(); |
992 |
|
handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run"); |
993 |
|
|
994 |
|
|
995 |
|
*converge = 1; |
996 |
|
} |
997 |
|
else { |
998 |
|
handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations"); |
999 |
|
handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run"); |
1000 |
|
} |
1001 |
|
handle.logMessage(LOGINFO, "Number of directly accepted points", siman.getNacc()); |
1002 |
|
handle.logMessage(LOGINFO, "Number of metropolis accepted points", siman.getNaccmet()); |
1003 |
|
handle.logMessage(LOGINFO, "Number of rejected points", siman.getNrej()); |
1004 |
|
*score = EcoSystem->SimulateAndUpdate(*bestX); |
1005 |
|
handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", *score); |
1006 |
|
} |
1007 |
|
}; |
1008 |
|
|
1009 |
|
// Required |
1010 |
|
std::ostream &operator<<(std::ostream &os, const DoubleVector &p) |
1011 |
|
{ |
1012 |
|
os << ""; |
1013 |
|
return os; |
1014 |
|
} |
1015 |
|
|
1016 |
|
|
1017 |
|
#ifdef _OPENMP
// Reproducible parallel (OpenMP) simulated annealing driver: sets up the
// scaled parameter vector, bounds and Siman state, then delegates the
// search loop to the ReproducibleSearch template using the per-thread
// evaluation function.
void OptInfoSimann::OptimiseLikelihoodREP() {

  //set initial values
  double tmp;
  double funcval;
  int i;

  handle.logMessage(LOGINFO,
    "\nStarting Simulated Annealing optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);
  DoubleVector init(nvars);
  DoubleVector trialx(nvars, 0.0);
  DoubleVector bestx(nvars);
  DoubleVector scalex(nvars);
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector fstar(tempcheck);
  DoubleVector vm(nvars, vminit);
  IntVector param(nvars, 0);

  EcoSystem->resetVariables();  //JMB need to reset variables in case they have been scaled
  if (scale) {
    EcoSystem->scaleVariables();

    int numThr = omp_get_max_threads();
    for (i = 0; i < numThr; i++)  // scale the variables for the ecosystem of every thread
      EcoSystems[i]->scaleVariables();
  }
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    bestx[i] = x[i];
    param[i] = i;
  }

  if (scale) {
    for (i = 0; i < nvars; i++) {
      scalex[i] = x[i];
      // Scaling the bounds, because the parameters are scaled
      lowerb[i] = lowerb[i] / init[i];
      upperb[i] = upperb[i] / init[i];
      if (lowerb[i] > upperb[i]) {
        // a negative initial value flips the scaled bounds - swap them back
        tmp = lowerb[i];
        lowerb[i] = upperb[i];
        upperb[i] = tmp;
      }
    }
  }

  //funcval is the function value at x
  funcval = EcoSystem->SimulateAndUpdate(x);
  if (funcval != funcval) { //check for NaN
    handle.logMessage(LOGINFO,
      "Error starting Simulated Annealing optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  //the function is to be minimised so switch the sign of funcval (and trialf)
  funcval = -funcval;
  cs /= lratio;  //JMB save processing time
  for (i = 0; i < tempcheck; i++)
    fstar[i] = funcval;

  Siman s(seed, seedM, seedP, nvars, nt, ns, param, &x, &lowerb, &upperb, vm, t, rt, (1.0 / ns),
    tempcheck, simanneps, fstar, lratio, uratio, cs, &bestx, scale, &converge, &score);

  ReproducibleSearch<Siman, DoubleVector, ControlClass, evaluate_par_f, buildNewParams_f>
    pa(s, x, simanniter);

  // OpenMP parallelization
  int numThr = omp_get_max_threads();
  pa.paral_opt_omp(funcval, numThr, numThr);
  iters = pa.iterations();

}
#endif
1102 |
|
void OptInfoSimann::OptimiseLikelihood() { |
1103 |
|
|
1104 |
|
//set initial values |
1105 |
|
|
1106 |
|
double tmp, p, pp; |
1107 |
|
double funcval, trialf; |
1108 |
|
int a, i, j, k, l, quit; |
1109 |
|
int rchange, rcheck, rnumber; //Used to randomise the order of the parameters |
1110 |
|
|
1111 |
|
handle.logMessage(LOGINFO, |
1112 |
|
"\nStarting Simulated Annealing optimisation algorithm\n"); |
1113 |
|
int nvars = EcoSystem->numOptVariables(); |
1114 |
|
DoubleVector x(nvars); |
1115 |
|
DoubleVector init(nvars); |
1116 |
|
DoubleVector trialx(nvars, 0.0); |
1117 |
|
DoubleVector bestx(nvars); |
1118 |
|
DoubleVector scalex(nvars); |
1119 |
|
DoubleVector lowerb(nvars); |
1120 |
|
DoubleVector upperb(nvars); |
1121 |
|
DoubleVector fstar(tempcheck); |
1122 |
|
DoubleVector vm(nvars, vminit); |
1123 |
|
IntVector param(nvars, 0); |
1124 |
|
|
1125 |
|
EcoSystem->resetVariables(); //JMB need to reset variables in case they have been scaled |
1126 |
|
if (scale) { |
1127 |
|
EcoSystem->scaleVariables(); |
1128 |
|
} |
1129 |
|
EcoSystem->getOptScaledValues(x); |
1130 |
|
EcoSystem->getOptLowerBounds(lowerb); |
1131 |
|
EcoSystem->getOptUpperBounds(upperb); |
1132 |
|
EcoSystem->getOptInitialValues(init); |
1133 |
|
|
1134 |
|
for (i = 0; i < nvars; i++) { |
1135 |
|
bestx[i] = x[i]; |
1136 |
|
param[i] = i; |
1137 |
|
} |
1138 |
|
|
1139 |
|
if (scale) { |
1140 |
|
for (i = 0; i < nvars; i++) { |
1141 |
|
scalex[i] = x[i]; |
1142 |
|
// Scaling the bounds, because the parameters are scaled |
1143 |
|
lowerb[i] = lowerb[i] / init[i]; |
1144 |
|
upperb[i] = upperb[i] / init[i]; |
1145 |
|
if (lowerb[i] > upperb[i]) { |
1146 |
|
tmp = lowerb[i]; |
1147 |
|
lowerb[i] = upperb[i]; |
1148 |
|
upperb[i] = tmp; |
1149 |
|
} |
1150 |
|
} |
1151 |
|
} |
1152 |
|
|
1153 |
|
//funcval is the function value at x |
1154 |
|
funcval = EcoSystem->SimulateAndUpdate(x); |
1155 |
|
if (funcval != funcval) { //check for NaN |
1156 |
|
handle.logMessage(LOGINFO, |
1157 |
|
"Error starting Simulated Annealing optimisation with f(x) = infinity"); |
1158 |
|
converge = -1; |
1159 |
|
iters = 1; |
1160 |
|
return; |
1161 |
|
} |
1162 |
|
|
1163 |
|
//the function is to be minimised so switch the sign of funcval (and trialf) |
1164 |
|
funcval = -funcval; |
1165 |
|
cs /= lratio; //JMB save processing time |
1166 |
|
for (i = 0; i < tempcheck; i++) |
1167 |
|
fstar[i] = funcval; |
1168 |
|
|
1169 |
|
Siman s(seed, seedM, seedP, nvars, nt, ns, param, &x, &lowerb, &upperb, vm, t, rt, (1.0 / ns), |
1170 |
|
tempcheck, simanneps, fstar, lratio, uratio, cs, &bestx, scale, &converge, &score); |
1171 |
|
|
1172 |
|
ReproducibleSearch<Siman, DoubleVector, ControlClass, evaluate_f, buildNewParams_f> |
1173 |
|
pa(s, x, simanniter); |
1174 |
|
|
1175 |
|
// sequential code |
1176 |
|
pa.seq_opt(funcval); |
1177 |
|
iters = pa.iterations(); |
1178 |
|
|
1179 |
|
} |
1180 |
|
|
1181 |
|
|
1182 |
|
|
1183 |
|
|
1184 |
|
|
1185 |
|
|
1186 |
|
|
1187 |
|
|