1 : |
agomez |
1 |
/* ABSTRACT: */
|
2 : |
|
|
/* Simulated annealing is a global optimization method that distinguishes */
|
3 : |
|
|
/* different local optima. Starting from an initial point, the algorithm */
|
4 : |
|
|
/* takes a step and the function is evaluated. When minimizing a function,*/
|
5 : |
|
|
/* any downhill step is accepted and the process repeats from this new */
|
6 : |
|
|
/* point. An uphill step may be accepted (thus, it can escape from local */
|
7 : |
|
|
/* optima). This uphill decision is made by the Metropolis criteria. As */
|
8 : |
|
|
/* the optimization process proceeds, the length of the steps decline and */
|
9 : |
|
|
/* the algorithm closes in on the global optimum. Since the algorithm */
|
10 : |
|
|
/* makes very few assumptions regarding the function to be optimized, it */
|
11 : |
|
|
/* is quite robust with respect to non-quadratic surfaces. The degree of */
|
12 : |
|
|
/* robustness can be adjusted by the user. In fact, simulated annealing */
|
13 : |
|
|
/* can be used as a local optimizer for difficult functions. */
|
14 : |
|
|
/* */
|
15 : |
|
|
/* The author can be contacted at h2zr1001@vm.cis.smu.edu */
|
16 : |
ulcessvp |
11 |
//
|
17 : |
agomez |
1 |
/* This file is a translation of a fortran code, which is an example of the*/
|
18 : |
|
|
/* Corana et al. simulated annealing algorithm for multimodal and robust */
|
19 : |
|
|
/* optimization as implemented and modified by Goffe et al.              */
|
20 : |
|
|
/* */
|
21 : |
|
|
/* Use the sample function from Judge with the following suggestions */
|
22 : |
|
|
/* to get a feel for how SA works. When you've done this, you should be */
|
23 : |
|
|
/* ready to use it on most any function with a fair amount of expertise. */
|
24 : |
|
|
/* 1. Run the program as is to make sure it runs okay. Take a look at */
|
25 : |
|
|
/* the intermediate output and see how it optimizes as temperature */
|
26 : |
|
|
/* (T) falls. Notice how the optimal point is reached and how */
|
27 : |
|
|
/* falling T reduces VM. */
|
28 : |
|
|
/* 2. Look through the documentation to SA so the following makes a */
|
29 : |
|
|
/* bit of sense. In line with the paper, it shouldn't be that hard */
|
30 : |
|
|
/* to figure out. The core of the algorithm is described on pp. 4-6 */
|
31 : |
|
|
/* and on pp. 28. Also see Corana et al. pp. 264-9. */
|
32 : |
|
|
/* 3. To see the importance of different temperatures, try starting */
|
33 : |
|
|
/* with a very low one (say T = 10E-5). You'll see (i) it never */
|
34 : |
|
|
/* escapes from the local optima (in annealing terminology, it */
|
35 : |
|
|
/* quenches) & (ii) the step length (VM) will be quite small. This */
|
36 : |
|
|
/* is a key part of the algorithm: as temperature (T) falls, step */
|
37 : |
|
|
/* length does too. In a minor point here, note how VM is quickly */
|
38 : |
|
|
/* reset from its initial value. Thus, the input VM is not very */
|
39 : |
|
|
/* important. This is all the more reason to examine VM once the */
|
40 : |
|
|
/* algorithm is underway. */
|
41 : |
|
|
/* 4. To see the effect of different parameters and their effect on */
|
42 : |
|
|
/* the speed of the algorithm, try RT = .95 & RT = .1. Notice the */
|
43 : |
|
|
/* vastly different speed for optimization. Also try NT = 20. Note */
|
44 : |
|
|
/* that this sample function is quite easy to optimize, so it will */
|
45 : |
|
|
/* tolerate big changes in these parameters. RT and NT are the */
|
46 : |
|
|
/* parameters one should adjust to modify the runtime of the */
|
47 : |
|
|
/* algorithm and its robustness. */
|
48 : |
|
|
/* 5. Try constraining the algorithm with either LB or UB. */
|
49 : |
ulcessvp |
11 |
//
|
50 : |
agomez |
1 |
/* Synopsis: */
|
51 : |
|
|
/* This routine implements the continuous simulated annealing global */
|
52 : |
|
|
/* optimization algorithm described in Corana et al.'s article */
|
53 : |
|
|
/* "Minimizing Multimodal Functions of Continuous Variables with the */
|
54 : |
|
|
/* "Simulated Annealing" Algorithm" in the September 1987 (vol. 13, */
|
55 : |
|
|
/* no. 3, pp. 262-280) issue of the ACM Transactions on Mathematical */
|
56 : |
|
|
/* Software. */
|
57 : |
ulcessvp |
11 |
//
|
58 : |
agomez |
1 |
/* A very quick (perhaps too quick) overview of SA: */
|
59 : |
|
|
/* SA tries to find the global optimum of an N dimensional function. */
|
60 : |
|
|
/* It moves both up and downhill and as the optimization process */
|
61 : |
|
|
/* proceeds, it focuses on the most promising area. */
|
62 : |
|
|
/* To start, it randomly chooses a trial point within the step length */
|
63 : |
|
|
/* VM (a vector of length N) of the user selected starting point. The */
|
64 : |
|
|
/* function is evaluated at this trial point and its value is compared */
|
65 : |
|
|
/* to its value at the initial point. */
|
66 : |
|
|
/* In a maximization problem, all uphill moves are accepted and the */
|
67 : |
|
|
/* algorithm continues from that trial point. Downhill moves may be */
|
68 : |
|
|
/* accepted; the decision is made by the Metropolis criteria. It uses T */
|
69 : |
|
|
/* (temperature) and the size of the downhill move in a probabilistic */
|
70 : |
|
|
/* manner. The smaller T and the size of the downhill move are, the more */
|
71 : |
|
|
/* likely that move will be accepted. If the trial is accepted, the */
|
72 : |
|
|
/* algorithm moves on from that point. If it is rejected, another point */
|
73 : |
|
|
/* is chosen instead for a trial evaluation. */
|
74 : |
|
|
/* Each element of VM is periodically adjusted so that half of all       */
|
75 : |
|
|
/* function evaluations in that direction are accepted. */
|
76 : |
|
|
/* A fall in T is imposed upon the system with the RT variable by */
|
77 : |
|
|
/* T(i+1) = RT*T(i) where i is the ith iteration. Thus, as T declines, */
|
78 : |
|
|
/* downhill moves are less likely to be accepted and the percentage of */
|
79 : |
|
|
/* rejections rise. Given the scheme for the selection for VM, VM falls. */
|
80 : |
|
|
/* Thus, as T declines, VM falls and SA focuses upon the most promising */
|
81 : |
|
|
/* area for optimization. */
|
82 : |
ulcessvp |
11 |
//
|
83 : |
agomez |
1 |
/* The importance of the parameter T: */
|
84 : |
|
|
/* The parameter T is crucial in using SA successfully. It influences */
|
85 : |
|
|
/* VM, the step length over which the algorithm searches for optima. For */
|
86 : |
|
|
/* a small initial T, the step length may be too small; thus not enough  */
|
87 : |
|
|
/* of the function might be evaluated to find the global optima. The user */
|
88 : |
|
|
/* should carefully examine VM in the intermediate output (set IPRINT = */
|
89 : |
|
|
/* 1) to make sure that VM is appropriate. The relationship between the */
|
90 : |
|
|
/* initial temperature and the resulting step length is function */
|
91 : |
|
|
/* dependent. */
|
92 : |
|
|
/* To determine the starting temperature that is consistent with */
|
93 : |
|
|
/* optimizing a function, it is worthwhile to run a trial run first. Set */
|
94 : |
|
|
/* RT = 1.5 and T = 1.0. With RT > 1.0, the temperature increases and VM */
|
95 : |
|
|
/* rises as well. Then select the T that produces a large enough VM. */
|
96 : |
ulcessvp |
11 |
//
|
97 : |
agomez |
1 |
/* For modifications to the algorithm and many details on its use, */
|
98 : |
|
|
/* (particularly for econometric applications) see Goffe, Ferrier */
|
99 : |
|
|
/* and Rogers, "Global Optimization of Statistical Functions with */
|
100 : |
|
|
/* the Simulated Annealing," Journal of Econometrics (forthcoming) */
|
101 : |
|
|
/* For a pre-publication copy, contact */
|
102 : |
|
|
/* Bill Goffe */
|
103 : |
|
|
/* Department of Economics */
|
104 : |
|
|
/* Southern Methodist University */
|
105 : |
|
|
/* Dallas, TX 75275 */
|
106 : |
|
|
/* h2zr1001 @ smuvm1 (Bitnet) */
|
107 : |
|
|
/* h2zr1001 @ vm.cis.smu.edu (Internet) */
|
108 : |
ulcessvp |
11 |
//
|
109 : |
agomez |
1 |
/* As far as possible, the parameters here have the same name as in */
|
110 : |
|
|
/* the description of the algorithm on pp. 266-8 of Corana et al. */
|
111 : |
ulcessvp |
11 |
//
|
112 : |
agomez |
1 |
/* Input Parameters: */
|
113 : |
|
|
/* Note: The suggested values generally come from Corana et al. To */
|
114 : |
|
|
/* drastically reduce runtime, see Goffe et al., pp. 17-8 for */
|
115 : |
|
|
/* suggestions on choosing the appropriate RT and NT. */
|
116 : |
|
|
/* n - Number of variables in the function to be optimized. (INT) */
|
117 : |
|
|
/* x - The starting values for the variables of the function to be */
|
118 : |
|
|
/* optimized. (DP(N)) */
|
119 : |
|
|
/* max - Denotes whether the function should be maximized or */
|
120 : |
|
|
/* minimized. A true value denotes maximization while a false */
|
121 : |
|
|
/* value denotes minimization. */
|
122 : |
|
|
/* RT - The temperature reduction factor. The value suggested by */
|
123 : |
|
|
/* Corana et al. is .85. See Goffe et al. for more advice. (DP) */
|
124 : |
|
|
/* EPS - Error tolerance for termination. If the final function */
|
125 : |
|
|
/* values from the last neps temperatures differ from the */
|
126 : |
|
|
/* corresponding value at the current temperature by less than */
|
127 : |
|
|
/* EPS and the final function value at the current temperature */
|
128 : |
|
|
/* differs from the current optimal function value by less than */
|
129 : |
|
|
/* EPS, execution terminates and IER = 0 is returned. (EP) */
|
130 : |
|
|
/* NS - Number of cycles. After NS*N function evaluations, each element */
|
131 : |
|
|
/* of VM is adjusted so that approximately half of all function */
|
132 : |
|
|
/* evaluations are accepted. The suggested value is 20. (INT) */
|
133 : |
|
|
/* nt - Number of iterations before temperature reduction. After */
|
134 : |
|
|
/* NT*NS*N function evaluations, temperature (T) is changed */
|
135 : |
|
|
/* by the factor RT. Value suggested by Corana et al. is */
|
136 : |
|
|
/* MAX(100, 5*N). See Goffe et al. for further advice. (INT) */
|
137 : |
|
|
/* NEPS - Number of final function values used to decide upon termi- */
|
138 : |
|
|
/* nation. See EPS. Suggested value is 4. (INT) */
|
139 : |
|
|
/* maxevl - The maximum number of function evaluations. If it is */
|
140 : |
|
|
/* exceeded, IER = 1. (INT) */
|
141 : |
|
|
/* lb - The lower bound for the allowable solution variables. (DP(N)) */
|
142 : |
|
|
/* ub - The upper bound for the allowable solution variables. (DP(N)) */
|
143 : |
|
|
/* If the algorithm chooses X(I) .LT. LB(I) or X(I) .GT. UB(I), */
|
144 : |
|
|
/* I = 1, N, a point from inside is randomly selected. This              */
|
145 : |
|
|
/* focuses the algorithm on the region inside UB and LB.                 */
|
146 : |
|
|
/* Unless the user wishes to concentrate the search to a par- */
|
147 : |
|
|
/* ticular region, UB and LB should be set to very large positive */
|
148 : |
|
|
/* and negative values, respectively. Note that the starting */
|
149 : |
|
|
/* vector X should be inside this region. Also note that LB and */
|
150 : |
|
|
/* UB are fixed in position, while VM is centered on the last */
|
151 : |
|
|
/* accepted trial set of variables that optimizes the function. */
|
152 : |
|
|
/* c - Vector that controls the step length adjustment. The suggested */
|
153 : |
|
|
/* value for all elements is 2.0. (DP(N)) */
|
154 : |
|
|
/* t - On input, the initial temperature. See Goffe et al. for advice. */
|
155 : |
|
|
/* On output, the final temperature. (DP) */
|
156 : |
|
|
/* vm - The step length vector. On input it should encompass the */
|
157 : |
|
|
/* region of interest given the starting value X. For point */
|
158 : |
|
|
/* X(I), the next trial point is selected from X(I) - VM(I)              */
|
159 : |
|
|
/* to X(I) + VM(I). Since VM is adjusted so that about half */
|
160 : |
|
|
/* of all points are accepted, the input value is not very */
|
161 : |
|
|
/* important (i.e. if the value is off, SA adjusts VM to the             */
|
162 : |
|
|
/* correct value). (DP(N)) */
|
163 : |
ulcessvp |
11 |
//
|
164 : |
agomez |
1 |
/* Output Parameters: */
|
165 : |
|
|
/* xopt - The variables that optimize the function. (DP(N)) */
|
166 : |
|
|
/* fopt - The optimal value of the function. (DP) */
|
167 : |
ulcessvp |
11 |
//
|
168 : |
agomez |
1 |
/* JMB this has been modified to work with the gadget object structure */
|
169 : |
|
|
/* This means that the function has been replaced by a call to ecosystem */
|
170 : |
|
|
/* object, and we can use the vector objects that have been defined */
|
171 : |
|
|
|
172 : |
|
|
#include "gadget.h" |
173 : |
|
|
#include "optinfo.h" |
174 : |
|
|
#include "mathfunc.h" |
175 : |
|
|
#include "doublevector.h" |
176 : |
|
|
#include "intvector.h" |
177 : |
|
|
#include "errorhandler.h" |
178 : |
|
|
#include "ecosystem.h" |
179 : |
|
|
#include "global.h" |
180 : |
ulcessvp |
11 |
#include "seq_optimize_template.h" |
181 : |
ulcessvp |
15 |
#ifdef SPECULATIVE
|
182 : |
ulcessvp |
11 |
#include <omp.h>
|
183 : |
|
|
#endif
|
184 : |
agomez |
1 |
|
185 : |
|
|
extern Ecosystem* EcoSystem;
|
186 : |
ulcessvp |
16 |
#ifdef _OPENMP
|
187 : |
ulcessvp |
11 |
extern Ecosystem** EcoSystems;
|
188 : |
|
|
#endif
|
189 : |
agomez |
1 |
|
190 : |
ulcessvp |
12 |
/*sequential code replaced at seq_optimize_template.h*/
|
191 : |
ulcessvp |
11 |
//void OptInfoSimann::OptimiseLikelihood() {
|
192 : |
|
|
//
|
193 : |
|
|
// //set initial values
|
194 : |
|
|
// int nacc = 0; //The number of accepted function evaluations
|
195 : |
|
|
// int nrej = 0; //The number of rejected function evaluations
|
196 : |
|
|
// int naccmet = 0; //The number of metropolis accepted function evaluations
|
197 : |
|
|
//
|
198 : |
|
|
// double tmp, p, pp, ratio, nsdiv;
|
199 : |
|
|
// double fopt, funcval, trialf;
|
200 : |
|
|
// int a, i, j, k, l, offset, quit;
|
201 : |
|
|
// int rchange, rcheck, rnumber; //Used to randomise the order of the parameters
|
202 : |
|
|
//
|
203 : |
|
|
// handle.logMessage(LOGINFO, "\nStarting Simulated Annealing optimisation algorithm\n");
|
204 : |
|
|
// int nvars = EcoSystem->numOptVariables();
|
205 : |
|
|
// DoubleVector x(nvars);
|
206 : |
|
|
// DoubleVector init(nvars);
|
207 : |
|
|
// DoubleVector trialx(nvars, 0.0);
|
208 : |
|
|
// DoubleVector bestx(nvars);
|
209 : |
|
|
// DoubleVector scalex(nvars);
|
210 : |
|
|
// DoubleVector lowerb(nvars);
|
211 : |
|
|
// DoubleVector upperb(nvars);
|
212 : |
|
|
// DoubleVector fstar(tempcheck);
|
213 : |
|
|
// DoubleVector vm(nvars, vminit);
|
214 : |
|
|
// IntVector param(nvars, 0);
|
215 : |
|
|
// IntVector nacp(nvars, 0);
|
216 : |
|
|
//
|
217 : |
|
|
// EcoSystem->resetVariables(); //JMB need to reset variables in case they have been scaled
|
218 : |
|
|
// if (scale)
|
219 : |
|
|
// EcoSystem->scaleVariables();
|
220 : |
|
|
// EcoSystem->getOptScaledValues(x);
|
221 : |
|
|
// EcoSystem->getOptLowerBounds(lowerb);
|
222 : |
|
|
// EcoSystem->getOptUpperBounds(upperb);
|
223 : |
|
|
// EcoSystem->getOptInitialValues(init);
|
224 : |
|
|
//
|
225 : |
|
|
// for (i = 0; i < nvars; i++) {
|
226 : |
|
|
// bestx[i] = x[i];
|
227 : |
|
|
// param[i] = i;
|
228 : |
|
|
// }
|
229 : |
|
|
//
|
230 : |
|
|
// if (scale) {
|
231 : |
|
|
// for (i = 0; i < nvars; i++) {
|
232 : |
|
|
// scalex[i] = x[i];
|
233 : |
|
|
// // Scaling the bounds, because the parameters are scaled
|
234 : |
|
|
// lowerb[i] = lowerb[i] / init[i];
|
235 : |
|
|
// upperb[i] = upperb[i] / init[i];
|
236 : |
|
|
// if (lowerb[i] > upperb[i]) {
|
237 : |
|
|
// tmp = lowerb[i];
|
238 : |
|
|
// lowerb[i] = upperb[i];
|
239 : |
|
|
// upperb[i] = tmp;
|
240 : |
|
|
// }
|
241 : |
|
|
// }
|
242 : |
|
|
// }
|
243 : |
|
|
//
|
244 : |
|
|
// //funcval is the function value at x
|
245 : |
|
|
// funcval = EcoSystem->SimulateAndUpdate(x);
|
246 : |
|
|
// if (funcval != funcval) { //check for NaN
|
247 : |
|
|
// handle.logMessage(LOGINFO, "Error starting Simulated Annealing optimisation with f(x) = infinity");
|
248 : |
|
|
// converge = -1;
|
249 : |
|
|
// iters = 1;
|
250 : |
|
|
// return;
|
251 : |
|
|
// }
|
252 : |
|
|
//
|
253 : |
|
|
// //the function is to be minimised so switch the sign of funcval (and trialf)
|
254 : |
|
|
// funcval = -funcval;
|
255 : |
|
|
// offset = EcoSystem->getFuncEval(); //number of function evaluations done before loop
|
256 : |
|
|
// nacc++;
|
257 : |
|
|
// cs /= lratio; //JMB save processing time
|
258 : |
|
|
// nsdiv = 1.0 / ns;
|
259 : |
|
|
// fopt = funcval;
|
260 : |
|
|
// for (i = 0; i < tempcheck; i++)
|
261 : |
|
|
// fstar[i] = funcval;
|
262 : |
|
|
//
|
263 : |
|
|
// //Start the main loop. Note that it terminates if
|
264 : |
|
|
// //(i) the algorithm succesfully optimises the function or
|
265 : |
|
|
// //(ii) there are too many function evaluations
|
266 : |
|
|
// while (1) {
|
267 : |
|
|
// for (a = 0; a < nt; a++) {
|
268 : |
|
|
// //Randomize the order of the parameters once in a while, to avoid
|
269 : |
|
|
// //the order having an influence on which changes are accepted
|
270 : |
|
|
// rchange = 0;
|
271 : |
|
|
// while (rchange < nvars) {
|
272 : |
|
|
// rnumber = rand_r(&seedP) % nvars;
|
273 : |
|
|
// rcheck = 1;
|
274 : |
|
|
// for (i = 0; i < rchange; i++)
|
275 : |
|
|
// if (param[i] == rnumber)
|
276 : |
|
|
// rcheck = 0;
|
277 : |
|
|
// if (rcheck) {
|
278 : |
|
|
// param[rchange] = rnumber;
|
279 : |
|
|
// rchange++;
|
280 : |
|
|
// }
|
281 : |
|
|
// }
|
282 : |
|
|
//
|
283 : |
|
|
// for (j = 0; j < ns; j++) {
|
284 : |
|
|
// for (l = 0; l < nvars; l++) {
|
285 : |
|
|
// //Generate trialx, the trial value of x
|
286 : |
|
|
// newValue(nvars, l, param, trialx, x, lowerb, upperb, vm);
|
287 : |
|
|
//// for (i = 0; i < nvars; i++) {
|
288 : |
|
|
//// if (i == param[l]) {
|
289 : |
|
|
//// trialx[i] = x[i] + ((randomNumber() * 2.0) - 1.0) * vm[i];
|
290 : |
|
|
////
|
291 : |
|
|
//// //If trialx is out of bounds, try again until we find a point that is OK
|
292 : |
|
|
//// if ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
|
293 : |
|
|
//// //JMB - this used to just select a random point between the bounds
|
294 : |
|
|
//// k = 0;
|
295 : |
|
|
//// while ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
|
296 : |
|
|
//// trialx[i] = x[i] + ((randomNumber() * 2.0) - 1.0) * vm[i];
|
297 : |
|
|
//// k++;
|
298 : |
|
|
//// if (k > 10) //we've had 10 tries to find a point neatly, so give up
|
299 : |
|
|
//// trialx[i] = lowerb[i] + (upperb[i] - lowerb[i]) * randomNumber();
|
300 : |
|
|
//// }
|
301 : |
|
|
//// }
|
302 : |
|
|
////
|
303 : |
|
|
//// } else
|
304 : |
|
|
//// trialx[i] = x[i];
|
305 : |
|
|
//// }
|
306 : |
|
|
//
|
307 : |
|
|
// //Evaluate the function with the trial point trialx and return as -trialf
|
308 : |
|
|
// trialf = EcoSystem->SimulateAndUpdate(trialx);
|
309 : |
|
|
// trialf = -trialf;
|
310 : |
|
|
//
|
311 : |
|
|
// //If too many function evaluations occur, terminate the algorithm
|
312 : |
|
|
// iters = EcoSystem->getFuncEval() - offset;
|
313 : |
|
|
// if (iters > simanniter) {
|
314 : |
|
|
// handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n");
|
315 : |
|
|
// handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
|
316 : |
|
|
// handle.logMessage(LOGINFO, "The temperature was reduced to", t);
|
317 : |
|
|
// handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
|
318 : |
|
|
// handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
|
319 : |
|
|
// handle.logMessage(LOGINFO, "Number of directly accepted points", nacc);
|
320 : |
|
|
// handle.logMessage(LOGINFO, "Number of metropolis accepted points", naccmet);
|
321 : |
|
|
// handle.logMessage(LOGINFO, "Number of rejected points", nrej);
|
322 : |
|
|
//
|
323 : |
|
|
// score = EcoSystem->SimulateAndUpdate(bestx);
|
324 : |
|
|
// handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score);
|
325 : |
|
|
// return;
|
326 : |
|
|
// }
|
327 : |
|
|
// //Accept the new point if the new function value better
|
328 : |
|
|
// if ((trialf - funcval) > verysmall) {
|
329 : |
|
|
// for (i = 0; i < nvars; i++)
|
330 : |
|
|
// x[i] = trialx[i];
|
331 : |
|
|
// funcval = trialf;
|
332 : |
|
|
// nacc++;
|
333 : |
|
|
// nacp[param[l]]++; //JMB - not sure about this ...
|
334 : |
|
|
//
|
335 : |
|
|
// } else {
|
336 : |
|
|
// //Accept according to metropolis condition
|
337 : |
|
|
// p = expRep((trialf - funcval) / t);
|
338 : |
|
|
// pp = randomNumber(&seedM);
|
339 : |
|
|
// if (pp < p) {
|
340 : |
|
|
// //Accept point
|
341 : |
|
|
// for (i = 0; i < nvars; i++)
|
342 : |
|
|
// x[i] = trialx[i];
|
343 : |
|
|
// funcval = trialf;
|
344 : |
|
|
// naccmet++;
|
345 : |
|
|
// nacp[param[l]]++;
|
346 : |
|
|
// } else {
|
347 : |
|
|
// //Reject point
|
348 : |
|
|
// nrej++;
|
349 : |
|
|
// }
|
350 : |
|
|
// }
|
351 : |
|
|
// // JMB added check for really silly values
|
352 : |
|
|
// if (isZero(trialf)) {
|
353 : |
|
|
// handle.logMessage(LOGINFO, "Error in Simulated Annealing optimisation after", iters, "function evaluations, f(x) = 0");
|
354 : |
|
|
// converge = -1;
|
355 : |
|
|
// return;
|
356 : |
|
|
// }
|
357 : |
|
|
//
|
358 : |
|
|
// //If greater than any other point, record as new optimum
|
359 : |
|
|
// if ((trialf > fopt) && (trialf == trialf)) {
|
360 : |
|
|
// for (i = 0; i < nvars; i++)
|
361 : |
|
|
// bestx[i] = trialx[i];
|
362 : |
|
|
// fopt = trialf;
|
363 : |
|
|
//
|
364 : |
|
|
// if (scale) {
|
365 : |
|
|
// for (i = 0; i < nvars; i++)
|
366 : |
|
|
// scalex[i] = bestx[i] * init[i];
|
367 : |
|
|
// EcoSystem->storeVariables(-fopt, scalex);
|
368 : |
|
|
// } else
|
369 : |
|
|
// EcoSystem->storeVariables(-fopt, bestx);
|
370 : |
|
|
//
|
371 : |
|
|
// handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
|
372 : |
|
|
// handle.logMessage(LOGINFO, "The likelihood score is", -fopt, "at the point");
|
373 : |
|
|
// EcoSystem->writeBestValues();
|
374 : |
|
|
// }
|
375 : |
|
|
// }
|
376 : |
|
|
// }
|
377 : |
|
|
//
|
378 : |
|
|
// //Adjust vm so that approximately half of all evaluations are accepted
|
379 : |
|
|
// for (i = 0; i < nvars; i++) {
|
380 : |
|
|
// ratio = nsdiv * nacp[i];
|
381 : |
|
|
// nacp[i] = 0;
|
382 : |
|
|
// if (ratio > uratio) {
|
383 : |
|
|
// vm[i] = vm[i] * (1.0 + cs * (ratio - uratio));
|
384 : |
|
|
// } else if (ratio < lratio) {
|
385 : |
|
|
// vm[i] = vm[i] / (1.0 + cs * (lratio - ratio));
|
386 : |
|
|
// }
|
387 : |
|
|
//
|
388 : |
|
|
// if (vm[i] < rathersmall)
|
389 : |
|
|
// vm[i] = rathersmall;
|
390 : |
|
|
// if (vm[i] > (upperb[i] - lowerb[i]))
|
391 : |
|
|
// vm[i] = upperb[i] - lowerb[i];
|
392 : |
|
|
// }
|
393 : |
|
|
// }
|
394 : |
|
|
//
|
395 : |
|
|
// //Check termination criteria
|
396 : |
|
|
// for (i = tempcheck - 1; i > 0; i--)
|
397 : |
|
|
// fstar[i] = fstar[i - 1];
|
398 : |
|
|
// fstar[0] = funcval;
|
399 : |
|
|
//
|
400 : |
|
|
// quit = 0;
|
401 : |
|
|
// if (fabs(fopt - funcval) < simanneps) {
|
402 : |
|
|
// quit = 1;
|
403 : |
|
|
// for (i = 0; i < tempcheck - 1; i++)
|
404 : |
|
|
// if (fabs(fstar[i + 1] - fstar[i]) > simanneps)
|
405 : |
|
|
// quit = 0;
|
406 : |
|
|
// }
|
407 : |
|
|
//
|
408 : |
|
|
// handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");
|
409 : |
|
|
//
|
410 : |
|
|
// //Terminate SA if appropriate
|
411 : |
|
|
// if (quit) {
|
412 : |
|
|
// handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n");
|
413 : |
|
|
// handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
|
414 : |
|
|
// handle.logMessage(LOGINFO, "The temperature was reduced to", t);
|
415 : |
|
|
// handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");
|
416 : |
|
|
// handle.logMessage(LOGINFO, "Number of directly accepted points", nacc);
|
417 : |
|
|
// handle.logMessage(LOGINFO, "Number of metropolis accepted points", naccmet);
|
418 : |
|
|
// handle.logMessage(LOGINFO, "Number of rejected points", nrej);
|
419 : |
|
|
//
|
420 : |
|
|
// converge = 1;
|
421 : |
|
|
// score = EcoSystem->SimulateAndUpdate(bestx);
|
422 : |
|
|
// handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score);
|
423 : |
|
|
// return;
|
424 : |
|
|
// }
|
425 : |
|
|
//
|
426 : |
|
|
// //If termination criteria is not met, prepare for another loop.
|
427 : |
|
|
// t *= rt;
|
428 : |
|
|
// if (t < rathersmall)
|
429 : |
|
|
// t = rathersmall; //JMB make sure temperature doesnt get too small
|
430 : |
|
|
//
|
431 : |
|
|
// handle.logMessage(LOGINFO, "Reducing the temperature to", t);
|
432 : |
|
|
// funcval = fopt;
|
433 : |
|
|
// for (i = 0; i < nvars; i++)
|
434 : |
|
|
// x[i] = bestx[i];
|
435 : |
|
|
// }
|
436 : |
|
|
//}
|
437 : |
agomez |
1 |
|
438 : |
ulcessvp |
11 |
|
439 : |
agomez |
20 |
#ifdef _OPENMP
|
440 : |
|
|
//#ifdef SPECULATIVE
|
441 : |
ulcessvp |
11 |
void OptInfoSimann::OptimiseLikelihoodOMP() {
|
442 : |
|
|
|
443 : |
agomez |
1 |
//set initial values
|
444 : |
|
|
int nacc = 0; //The number of accepted function evaluations
|
445 : |
|
|
int nrej = 0; //The number of rejected function evaluations
|
446 : |
|
|
int naccmet = 0; //The number of metropolis accepted function evaluations
|
447 : |
|
|
|
448 : |
|
|
double tmp, p, pp, ratio, nsdiv;
|
449 : |
|
|
double fopt, funcval, trialf;
|
450 : |
ulcessvp |
14 |
int a, i, j, k, l, quit;
|
451 : |
agomez |
1 |
int rchange, rcheck, rnumber; //Used to randomise the order of the parameters
|
452 : |
|
|
|
453 : |
ulcessvp |
14 |
// store the info of the different threads
|
454 : |
ulcessvp |
11 |
struct Storage {
|
455 : |
|
|
DoubleVector trialx;
|
456 : |
|
|
double newLikelihood;
|
457 : |
|
|
};
|
458 : |
|
|
|
459 : |
agomez |
1 |
handle.logMessage(LOGINFO, "\nStarting Simulated Annealing optimisation algorithm\n");
|
460 : |
|
|
int nvars = EcoSystem->numOptVariables();
|
461 : |
|
|
DoubleVector x(nvars);
|
462 : |
|
|
DoubleVector init(nvars);
|
463 : |
|
|
DoubleVector trialx(nvars, 0.0);
|
464 : |
|
|
DoubleVector bestx(nvars);
|
465 : |
|
|
DoubleVector scalex(nvars);
|
466 : |
|
|
DoubleVector lowerb(nvars);
|
467 : |
|
|
DoubleVector upperb(nvars);
|
468 : |
|
|
DoubleVector fstar(tempcheck);
|
469 : |
|
|
DoubleVector vm(nvars, vminit);
|
470 : |
|
|
IntVector param(nvars, 0);
|
471 : |
|
|
IntVector nacp(nvars, 0);
|
472 : |
|
|
|
473 : |
|
|
EcoSystem->resetVariables(); //JMB need to reset variables in case they have been scaled
|
474 : |
ulcessvp |
14 |
if (scale) {
|
475 : |
|
|
EcoSystem->scaleVariables();
|
476 : |
|
|
int numThr = omp_get_max_threads ( );
|
477 : |
|
|
for(i = 0; i < numThr; i++) // scale the variables for the ecosystem of every thread
|
478 : |
|
|
EcoSystems[i]->scaleVariables();
|
479 : |
|
|
}
|
480 : |
agomez |
1 |
EcoSystem->getOptScaledValues(x);
|
481 : |
|
|
EcoSystem->getOptLowerBounds(lowerb);
|
482 : |
|
|
EcoSystem->getOptUpperBounds(upperb);
|
483 : |
|
|
EcoSystem->getOptInitialValues(init);
|
484 : |
|
|
|
485 : |
ulcessvp |
11 |
for (i = 0; i < nvars; ++i) {
|
486 : |
agomez |
1 |
bestx[i] = x[i];
|
487 : |
|
|
param[i] = i;
|
488 : |
|
|
}
|
489 : |
|
|
|
490 : |
|
|
if (scale) {
|
491 : |
ulcessvp |
11 |
for (i = 0; i < nvars; ++i) {
|
492 : |
agomez |
1 |
scalex[i] = x[i];
|
493 : |
|
|
// Scaling the bounds, because the parameters are scaled
|
494 : |
|
|
lowerb[i] = lowerb[i] / init[i];
|
495 : |
|
|
upperb[i] = upperb[i] / init[i];
|
496 : |
|
|
if (lowerb[i] > upperb[i]) {
|
497 : |
|
|
tmp = lowerb[i];
|
498 : |
|
|
lowerb[i] = upperb[i];
|
499 : |
|
|
upperb[i] = tmp;
|
500 : |
|
|
}
|
501 : |
|
|
}
|
502 : |
|
|
}
|
503 : |
|
|
|
504 : |
|
|
//funcval is the function value at x
|
505 : |
|
|
funcval = EcoSystem->SimulateAndUpdate(x);
|
506 : |
|
|
if (funcval != funcval) { //check for NaN
|
507 : |
|
|
handle.logMessage(LOGINFO, "Error starting Simulated Annealing optimisation with f(x) = infinity");
|
508 : |
|
|
converge = -1;
|
509 : |
|
|
iters = 1;
|
510 : |
|
|
return;
|
511 : |
|
|
}
|
512 : |
|
|
|
513 : |
|
|
//the function is to be minimised so switch the sign of funcval (and trialf)
|
514 : |
|
|
funcval = -funcval;
|
515 : |
|
|
nacc++;
|
516 : |
|
|
cs /= lratio; //JMB save processing time
|
517 : |
|
|
nsdiv = 1.0 / ns;
|
518 : |
|
|
fopt = funcval;
|
519 : |
ulcessvp |
11 |
for (i = 0; i < tempcheck; ++i)
|
520 : |
agomez |
1 |
fstar[i] = funcval;
|
521 : |
|
|
|
522 : |
ulcessvp |
11 |
|
523 : |
|
|
|
524 : |
|
|
int numThr = omp_get_max_threads ( );
|
525 : |
|
|
int bestId=0;
|
526 : |
|
|
int ini=0;
|
527 : |
|
|
|
528 : |
|
|
Storage* storage = new Storage[numThr];
|
529 : |
|
|
|
530 : |
|
|
for (i=0; i<numThr; ++i)
|
531 : |
|
|
storage[i].trialx = trialx;
|
532 : |
|
|
|
533 : |
|
|
|
534 : |
ulcessvp |
14 |
int aux; //store the number of evaluations that are not useful for the algorithm
|
535 : |
|
|
|
536 : |
|
|
DoubleVector vns(nvars, 0); //vector of ns
|
537 : |
ulcessvp |
11 |
int ns_ = ceil(numThr/2.);
|
538 : |
|
|
double res;
|
539 : |
|
|
aux=0;
|
540 : |
ulcessvp |
14 |
//Start the main loop. Note that it terminates if
|
541 : |
|
|
//(i) the algorithm succesfully optimises the function or
|
542 : |
|
|
//(ii) there are too many function evaluations
|
543 : |
agomez |
1 |
while (1) {
|
544 : |
ulcessvp |
11 |
for (a = 0; a < nt; ++a) {
|
545 : |
agomez |
1 |
//Randomize the order of the parameters once in a while, to avoid
|
546 : |
|
|
//the order having an influence on which changes are accepted
|
547 : |
|
|
rchange = 0;
|
548 : |
|
|
while (rchange < nvars) {
|
549 : |
ulcessvp |
11 |
rnumber = rand_r(&seedP) % nvars;
|
550 : |
agomez |
1 |
rcheck = 1;
|
551 : |
ulcessvp |
11 |
for (i = 0; i < rchange; ++i)
|
552 : |
agomez |
1 |
if (param[i] == rnumber)
|
553 : |
|
|
rcheck = 0;
|
554 : |
|
|
if (rcheck) {
|
555 : |
|
|
param[rchange] = rnumber;
|
556 : |
|
|
rchange++;
|
557 : |
|
|
}
|
558 : |
|
|
}
|
559 : |
|
|
|
560 : |
|
|
|
561 : |
ulcessvp |
11 |
for (j = 0; j < (ns*ns_); ++j) {
|
562 : |
|
|
for (l = ini; l < nvars; l+=numThr) {
|
563 : |
|
|
for (i=0; i<numThr;++i)
|
564 : |
|
|
{
|
565 : |
|
|
if ((l+i) < nvars)
|
566 : |
|
|
newValue(nvars, l+i, param, storage[i].trialx, x, lowerb, upperb, vm);
|
567 : |
|
|
else {
|
568 : |
|
|
newValue(nvars, ini, param, storage[i].trialx, x, lowerb, upperb, vm);
|
569 : |
|
|
ini++;
|
570 : |
|
|
if (ini >= nvars)
|
571 : |
|
|
ini=0;
|
572 : |
|
|
}
|
573 : |
|
|
}
|
574 : |
agomez |
1 |
|
575 : |
ulcessvp |
11 |
# pragma omp parallel private(res)
|
576 : |
|
|
{
|
577 : |
|
|
//Evaluate the function with the trial point trialx and return as -trialf
|
578 : |
|
|
int id = omp_get_thread_num ();
|
579 : |
|
|
res = EcoSystems[id]->SimulateAndUpdate(storage[id].trialx);
|
580 : |
|
|
storage[id].newLikelihood = -res;
|
581 : |
|
|
}
|
582 : |
|
|
//best value from omp
|
583 : |
|
|
trialf = storage[0].newLikelihood;
|
584 : |
|
|
bestId=0;
|
585 : |
|
|
for (i=0;i<numThr;++i)
|
586 : |
|
|
{
|
587 : |
|
|
if (storage[i].newLikelihood > trialf)
|
588 : |
|
|
{
|
589 : |
|
|
trialf=storage[i].newLikelihood;
|
590 : |
|
|
bestId=i;
|
591 : |
|
|
}
|
592 : |
|
|
k = param[(l+i)%nvars];
|
593 : |
agomez |
1 |
|
594 : |
ulcessvp |
11 |
if ((storage[i].newLikelihood - funcval) > verysmall)
|
595 : |
|
|
{
|
596 : |
|
|
nacp[k]++;
|
597 : |
|
|
aux++;
|
598 : |
|
|
vns[k]++;
|
599 : |
|
|
}
|
600 : |
|
|
else {
|
601 : |
|
|
//Accept according to metropolis condition
|
602 : |
|
|
p = expRep((storage[i].newLikelihood - funcval) / t);
|
603 : |
|
|
pp = randomNumber(&seedM);
|
604 : |
|
|
if (pp < p)
|
605 : |
|
|
aux++;
|
606 : |
|
|
else {
|
607 : |
|
|
vns[k]++;
|
608 : |
|
|
nrej++;
|
609 : |
|
|
}
|
610 : |
|
|
}
|
611 : |
agomez |
1 |
|
612 : |
ulcessvp |
11 |
if (vns[k] >= ns) {
|
613 : |
|
|
ratio = nsdiv * nacp[k];
|
614 : |
|
|
nacp[k] = 0;
|
615 : |
|
|
if (ratio > uratio) {
|
616 : |
|
|
vm[k] = vm[k] * (1.0 + cs * (ratio - uratio));
|
617 : |
|
|
} else if (ratio < lratio) {
|
618 : |
|
|
vm[k] = vm[k] / (1.0 + cs * (lratio - ratio));
|
619 : |
|
|
}
|
620 : |
|
|
if (vm[k] < rathersmall){
|
621 : |
|
|
vm[k] = rathersmall;
|
622 : |
|
|
}
|
623 : |
|
|
if (vm[k] > (upperb[k] - lowerb[k]))
|
624 : |
|
|
{
|
625 : |
|
|
vm[k] = upperb[k] - lowerb[k];
|
626 : |
|
|
}
|
627 : |
|
|
vns[k]=0;
|
628 : |
|
|
}
|
629 : |
|
|
}
|
630 : |
|
|
aux--;
|
631 : |
|
|
iters = (EcoSystems[bestId]->getFuncEval() * numThr) -aux;
|
632 : |
|
|
if (iters > simanniter) {
|
633 : |
|
|
handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n");
|
634 : |
|
|
handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
|
635 : |
|
|
handle.logMessage(LOGINFO, "The temperature was reduced to", t);
|
636 : |
|
|
handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
|
637 : |
|
|
handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
|
638 : |
|
|
handle.logMessage(LOGINFO, "Number of directly accepted points", nacc);
|
639 : |
|
|
handle.logMessage(LOGINFO, "Number of metropolis accepted points", naccmet);
|
640 : |
|
|
handle.logMessage(LOGINFO, "Number of rejected points", nrej);
|
641 : |
agomez |
1 |
|
642 : |
ulcessvp |
11 |
score = EcoSystem->SimulateAndUpdate(bestx);
|
643 : |
|
|
handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score);
|
644 : |
|
|
delete[] storage;
|
645 : |
|
|
return;
|
646 : |
agomez |
1 |
}
|
647 : |
|
|
|
648 : |
|
|
//Accept the new point if the new function value better
|
649 : |
|
|
if ((trialf - funcval) > verysmall) {
|
650 : |
ulcessvp |
11 |
for (i = 0; i < nvars; ++i)
|
651 : |
|
|
x[i] = storage[bestId].trialx[i];
|
652 : |
agomez |
1 |
funcval = trialf;
|
653 : |
|
|
nacc++;
|
654 : |
|
|
} else {
|
655 : |
|
|
//Accept according to metropolis condition
|
656 : |
|
|
p = expRep((trialf - funcval) / t);
|
657 : |
ulcessvp |
11 |
pp = randomNumber(&seedP);
|
658 : |
agomez |
1 |
if (pp < p) {
|
659 : |
|
|
//Accept point
|
660 : |
ulcessvp |
11 |
for (i = 0; i < nvars; ++i)
|
661 : |
|
|
x[i] = storage[bestId].trialx[i];
|
662 : |
agomez |
1 |
funcval = trialf;
|
663 : |
|
|
naccmet++;
|
664 : |
ulcessvp |
11 |
nacp[param[(l+bestId)%nvars]]++;
|
665 : |
agomez |
1 |
} else {
|
666 : |
|
|
//Reject point
|
667 : |
|
|
nrej++;
|
668 : |
|
|
}
|
669 : |
|
|
}
|
670 : |
|
|
// JMB added check for really silly values
|
671 : |
|
|
if (isZero(trialf)) {
|
672 : |
|
|
handle.logMessage(LOGINFO, "Error in Simulated Annealing optimisation after", iters, "function evaluations, f(x) = 0");
|
673 : |
|
|
converge = -1;
|
674 : |
ulcessvp |
11 |
delete[] storage;
|
675 : |
agomez |
1 |
return;
|
676 : |
|
|
}
|
677 : |
|
|
|
678 : |
|
|
//If greater than any other point, record as new optimum
|
679 : |
|
|
if ((trialf > fopt) && (trialf == trialf)) {
|
680 : |
ulcessvp |
11 |
for (i = 0; i < nvars; ++i)
|
681 : |
|
|
bestx[i] = storage[bestId].trialx[i];
|
682 : |
agomez |
1 |
fopt = trialf;
|
683 : |
|
|
|
684 : |
|
|
if (scale) {
|
685 : |
ulcessvp |
11 |
for (i = 0; i < nvars; ++i)
|
686 : |
agomez |
1 |
scalex[i] = bestx[i] * init[i];
|
687 : |
|
|
EcoSystem->storeVariables(-fopt, scalex);
|
688 : |
|
|
} else
|
689 : |
ulcessvp |
11 |
EcoSystem->storeVariables(-fopt, bestx);
|
690 : |
agomez |
1 |
|
691 : |
|
|
handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
|
692 : |
|
|
handle.logMessage(LOGINFO, "The likelihood score is", -fopt, "at the point");
|
693 : |
|
|
EcoSystem->writeBestValues();
|
694 : |
|
|
}
|
695 : |
|
|
}
|
696 : |
|
|
}
|
697 : |
|
|
}
|
698 : |
|
|
|
699 : |
|
|
//Check termination criteria
|
700 : |
|
|
for (i = tempcheck - 1; i > 0; i--)
|
701 : |
|
|
fstar[i] = fstar[i - 1];
|
702 : |
|
|
fstar[0] = funcval;
|
703 : |
|
|
|
704 : |
|
|
quit = 0;
|
705 : |
|
|
if (fabs(fopt - funcval) < simanneps) {
|
706 : |
|
|
quit = 1;
|
707 : |
ulcessvp |
11 |
for (i = 0; i < tempcheck - 1; ++i)
|
708 : |
agomez |
1 |
if (fabs(fstar[i + 1] - fstar[i]) > simanneps)
|
709 : |
|
|
quit = 0;
|
710 : |
|
|
}
|
711 : |
|
|
|
712 : |
|
|
handle.logMessage(LOGINFO, "Checking convergence criteria after", iters, "function evaluations ...");
|
713 : |
|
|
|
714 : |
|
|
//Terminate SA if appropriate
|
715 : |
|
|
if (quit) {
|
716 : |
|
|
handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n");
|
717 : |
|
|
handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
|
718 : |
|
|
handle.logMessage(LOGINFO, "The temperature was reduced to", t);
|
719 : |
|
|
handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");
|
720 : |
|
|
handle.logMessage(LOGINFO, "Number of directly accepted points", nacc);
|
721 : |
|
|
handle.logMessage(LOGINFO, "Number of metropolis accepted points", naccmet);
|
722 : |
|
|
handle.logMessage(LOGINFO, "Number of rejected points", nrej);
|
723 : |
|
|
|
724 : |
|
|
converge = 1;
|
725 : |
|
|
score = EcoSystem->SimulateAndUpdate(bestx);
|
726 : |
|
|
handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", score);
|
727 : |
ulcessvp |
11 |
delete[] storage;
|
728 : |
agomez |
1 |
return;
|
729 : |
|
|
}
|
730 : |
|
|
|
731 : |
|
|
//If termination criteria is not met, prepare for another loop.
|
732 : |
|
|
t *= rt;
|
733 : |
|
|
if (t < rathersmall)
|
734 : |
|
|
t = rathersmall; //JMB make sure temperature doesnt get too small
|
735 : |
|
|
|
736 : |
|
|
handle.logMessage(LOGINFO, "Reducing the temperature to", t);
|
737 : |
|
|
funcval = fopt;
|
738 : |
ulcessvp |
11 |
for (i = 0; i < nvars; ++i)
|
739 : |
agomez |
1 |
x[i] = bestx[i];
|
740 : |
|
|
}
|
741 : |
|
|
}
|
742 : |
agomez |
20 |
//#endif
|
743 : |
ulcessvp |
11 |
#endif
|
744 : |
|
|
|
745 : |
ulcessvp |
14 |
// calcule a new point
|
746 : |
ulcessvp |
11 |
void OptInfoSimann::newValue(int nvars, int l, IntVector& param, DoubleVector& trialx,
|
747 : |
|
|
DoubleVector& x, DoubleVector& lowerb, DoubleVector& upperb, DoubleVector& vm)
|
748 : |
|
|
{
|
749 : |
|
|
int i, k;
|
750 : |
|
|
for (i = 0; i < nvars; ++i) {
|
751 : |
|
|
if (i == param[l]) {
|
752 : |
|
|
trialx[i] = x[i] + ((randomNumber(&seed) * 2.0) - 1.0) * vm[i];
|
753 : |
|
|
|
754 : |
|
|
//If trialx is out of bounds, try again until we find a point that is OK
|
755 : |
|
|
if ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
|
756 : |
|
|
//JMB - this used to just select a random point between the bounds
|
757 : |
|
|
k = 0;
|
758 : |
|
|
while ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
|
759 : |
|
|
trialx[i] = x[i] + ((randomNumber(&seed) * 2.0) - 1.0) * vm[i];
|
760 : |
|
|
k++;
|
761 : |
|
|
if (k > 10) //we've had 10 tries to find a point neatly, so give up
|
762 : |
|
|
trialx[i] = lowerb[i] + (upperb[i] - lowerb[i]) * randomNumber(&seed);
|
763 : |
|
|
}
|
764 : |
|
|
}
|
765 : |
|
|
} else
|
766 : |
|
|
trialx[i] = x[i];
|
767 : |
|
|
}
|
768 : |
|
|
}
|
769 : |
|
|
|
770 : |
ulcessvp |
14 |
/*################################################################################
|
771 : |
|
|
* code used by the template (seq_optimize_template.h)
|
772 : |
|
|
################################################################################*/
|
773 : |
ulcessvp |
11 |
|
774 : |
ulcessvp |
14 |
|
775 : |
ulcessvp |
11 |
//Generate trialx, the trial value of x
|
776 : |
|
|
void newValue(int nvars, int l, IntVector& param, DoubleVector& trialx,
|
777 : |
|
|
DoubleVector& x, DoubleVector& lowerb, DoubleVector& upperb, DoubleVector& vm, unsigned* seed)
|
778 : |
|
|
{
|
779 : |
|
|
int i, k;
|
780 : |
|
|
for (i = 0; i < nvars; ++i) {
|
781 : |
|
|
if (i == param[l]) {
|
782 : |
|
|
trialx[i] = x[i] + ((randomNumber(&*seed) * 2.0) - 1.0) * vm[i];
|
783 : |
|
|
|
784 : |
|
|
//If trialx is out of bounds, try again until we find a point that is OK
|
785 : |
|
|
if ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
|
786 : |
|
|
//JMB - this used to just select a random point between the bounds
|
787 : |
|
|
k = 0;
|
788 : |
|
|
while ((trialx[i] < lowerb[i]) || (trialx[i] > upperb[i])) {
|
789 : |
|
|
trialx[i] = x[i] + ((randomNumber(&*seed) * 2.0) - 1.0) * vm[i];
|
790 : |
|
|
k++;
|
791 : |
|
|
if (k > 10) //we've had 10 tries to find a point neatly, so give up
|
792 : |
|
|
trialx[i] = lowerb[i] + (upperb[i] - lowerb[i]) * randomNumber(&*seed);
|
793 : |
|
|
}
|
794 : |
|
|
}
|
795 : |
|
|
} else
|
796 : |
|
|
trialx[i] = x[i];
|
797 : |
|
|
}
|
798 : |
|
|
}
|
799 : |
|
|
|
800 : |
|
|
// Produce a fresh candidate vector for the search template: delegates to
// newValue(), which perturbs the single parameter currently selected by the
// annealing state held in `seed` and writes the result into `params`.
void buildNewParams_f(Siman& seed, DoubleVector& params) {
  const int total = seed.getNvars();
  const int selected = seed.getL();
  newValue(total, selected, seed.getParam(), params, seed.getX(),
      seed.getLowerb(), seed.getUpperb(), seed.getVm(), seed.getSeed());
}
|
806 : |
|
|
|
807 : |
|
|
/// Represents the function that computes how good the parameters are
#ifdef _OPENMP
// Parallel scorer: each OpenMP thread evaluates the candidate on its own
// EcoSystem replica. The likelihood is negated because the annealing code
// works with sign-flipped values (the model score is to be minimised).
double evaluate_par_f(const DoubleVector& params) {
  const int threadId = omp_get_thread_num ();
  return -EcoSystems[threadId]->SimulateAndUpdate(params);
}
#endif
|
816 : |
|
|
double evaluate_f(const DoubleVector& params) {
|
817 : |
|
|
double trialf;
|
818 : |
ulcessvp |
11 |
trialf = EcoSystem->SimulateAndUpdate(params);
|
819 : |
|
|
return -trialf;
|
820 : |
|
|
}
|
821 : |
|
|
|
822 : |
|
|
/**
 * ControlClass bundles the simulated-annealing bookkeeping callbacks used by
 * the ReproducibleSearch template (seq_optimize_template.h): step-length
 * adjustment, cooling, optimum tracking, the acceptance test and reporting.
 */
struct ControlClass {

  // Adjust the step-length vector vm so that approximately half of all
  // trial evaluations are accepted (the Corana et al. step-variation rule).
  void adjustVm(Siman& seed) {
    //Adjust vm so that approximately half of all evaluations are accepted
    int i;
    double ratio, nsdiv = seed.getNsdiv();

    // local working copies; the updated vm is written back via setVm() below
    DoubleVector vm = seed.getVm();
    DoubleVector upperb = seed.getUpperb();
    DoubleVector lowerb = seed.getLowerb();

    double uratio = seed.getUratio();
    double lratio = seed.getLratio();

    for (i = 0; i < seed.getNvars(); i++) {
      // acceptance ratio for parameter i since the last reset (nsdiv = 1/ns)
      ratio = nsdiv * seed.getNacp(i);
      seed.setNacp(i,0);
      if (ratio > uratio) {
        // too many acceptances: widen the step so more moves get rejected
        (vm)[i] = (vm)[i] * (1.0 + seed.getCs() * (ratio - seed.getUratio()));
      } else if (ratio < lratio) {
        // too few acceptances: narrow the step so more moves get accepted
        (vm)[i] = (vm)[i] / (1.0 + seed.getCs() * (seed.getLratio() - ratio));
      }

      // clamp: keep the step strictly positive and no wider than the bounds
      if ((vm)[i] < rathersmall)
        (vm)[i] = rathersmall;
      if ((vm)[i] > (upperb[i] - lowerb[i]))
        (vm)[i] = upperb[i] - lowerb[i];
    }
    seed.setVm(vm);
  }

  // Reduce the temperature by the cooling factor rt and restart the next
  // cycle from the best point found so far (copied into x).
  void temperature(Siman& seed, DoubleVector& x){
    int i;
    double t = seed.getT();
    t *= seed.getRt();
    if (t < rathersmall)
      t = rathersmall; //JMB make sure temperature doesnt get too small

    handle.logMessage(LOGINFO, "Reducing the temperature to", t);
    seed.setT(t);

    DoubleVector* bestx = seed.getBestx();

    // restart the search from the best point found so far
    for (i = 0; i < seed.getNvars(); i++)
      x[i] = (*bestx)[i];
  }

  // Record trialx as the new optimum when its (sign-flipped) score trialf
  // beats fopt; stores the best values on the global EcoSystem and logs them.
  // NOTE(review): trialx, init and siman are taken by value (copies); the
  // mutation goes through siman's bestx pointer so it still reaches the
  // shared state — presumably intentional for the template, but confirm.
  void optimum(double trialf, double &fopt, int iters, DoubleVector trialx,
      DoubleVector init, Siman siman){
    //If greater than any other point, record as new optimum
    int i, nvars = siman.getNvars();
    DoubleVector scalex(nvars);
    DoubleVector* bestx = siman.getBestx();

    // (trialf == trialf) filters out NaN scores
    if ((trialf > fopt) && (trialf == trialf)) {
      for (i = 0; i < nvars; i++)
        (*bestx)[i] = trialx[i];
      fopt = trialf;

      if (siman.getScale()) {
        // undo the parameter scaling before storing the best values
        for (i = 0; i < nvars; i++)
          scalex[i] = (*bestx)[i] * init[i];
        EcoSystem->storeVariables(-fopt, scalex);
      } else
        EcoSystem->storeVariables(-fopt, (*bestx));

      handle.logMessage(LOGINFO, "\nNew optimum found after", iters, "function evaluations");
      handle.logMessage(LOGINFO, "The likelihood score is", -fopt, "at the point");
      EcoSystem->writeBestValues();
    }
  }

  /**
  @brief Decides whether the current item evaluated must be chosen as new optimum
  @param funcval value for old optimum
  @param trialf value for current item evaluated
  @return true if the trial point is accepted (directly or by metropolis),
          false if it is rejected or if trialf is zero (error condition)
   */
  bool mustAccept(double funcval, double trialf, Siman &siman, int iters) {
    //Accept the new point if the new function value better
    bool ret = true;
    int i;
    // index of the parameter that was perturbed for this trial
    int aux = siman.getParam()[siman.getL()];
    if ((trialf - funcval) > verysmall) {
      // downhill (better) move: always accepted
      siman.incrementNacp(aux);
      siman.incrementNacc();
      //JMB - not sure about this ...
    } else {
      double p, pp;
      //Accept according to metropolis condition

      p = expRep((trialf - funcval) / siman.getT());
      pp = randomNumber(siman.getSeedM());
      if (pp < p) {
        //Accept point
        siman.incrementNacp(aux);
        siman.incrementNaccmet();
      } else {
        //Reject point
        ret = false;
        siman.incrementNrej();
      }
    }
    // JMB added check for really silly values
    if (isZero(trialf)) {
      handle.logMessage(LOGINFO, "Error in Simulated Annealing optimisation after",
          iters, "function evaluations, f(x) = 0");
      siman.setConverge(-1);
      return false;
    }

    // advance to the next parameter; getL() == 0 after incrementL()
    // apparently signals a completed sweep over all parameters — L seems to
    // wrap modulo nvars inside Siman (TODO confirm)
    siman.incrementL();
    if (siman.getL() == 0)
      siman.incrementNS();
    if (siman.getNS() >= siman.getNs()){
      // ns sweeps done: adapt step lengths and reshuffle parameter order
      siman.setNS(0);
      siman.incrementNT();
      adjustVm(siman);
      siman.randomizeParams();
    }

    return ret;

  }

  /**
  @brief Decides whether the search must stop.
  It does not take into account the number of iterations, which is already considered by the template
  @param prev old optimum
  @param funcval new/current optimum
  */
  bool mustTerminate(double prev, double& funcval, Siman &siman, int iters) {
    bool quit = false;
    // only check convergence once every nt temperature-loop passes
    if (siman.getNT() >= siman.getNt())
    {
      int i;
      // NOTE(review): this is a local copy — the shifted history below is
      // never written back to siman, so each call appears to start from the
      // stored fstar values; confirm whether a setFstar() call is missing.
      DoubleVector fstar = siman.getFstar();

      siman.setNT(0);

      // shift the history of optima and record the current one
      for (i = siman.getTempcheck() - 1; i > 0; i--)
        fstar[i] = fstar[i - 1];
      fstar[0] = funcval;

      // converged when the last tempcheck optima all agree within simanneps
      if (fabs(prev - funcval) < siman.getSimanneps()) {
        quit = true;
        for (i = 0; i < siman.getTempcheck() - 1; i++)
          if (fabs(fstar[i + 1] - fstar[i]) > siman.getSimanneps())
            quit = false;
      }

      handle.logMessage(LOGINFO, "Checking convergence criteria after", iters,
          "function evaluations ...");

      // cool down and restart from the best point so far
      temperature(siman, siman.getX());

      funcval = prev;
    }
    return quit;
  }

  // Log the final outcome of the run and re-evaluate the best point on the
  // global EcoSystem so its score/state reflect the reported optimum.
  void printResult(bool quit, Siman siman, int iters)
  {
    double * score = siman.getScore();
    DoubleVector * bestX = siman.getBestx();

    handle.logMessage(LOGINFO, "\nStopping Simulated Annealing optimisation algorithm\n");
    handle.logMessage(LOGINFO, "The optimisation stopped after", iters, "function evaluations");
    handle.logMessage(LOGINFO, "The temperature was reduced to", siman.getT());
    if (quit) {
      int* converge = siman.getConverge();
      handle.logMessage(LOGINFO, "The optimisation stopped because an optimum was found for this run");

      *converge = 1;
    }
    else {
      handle.logMessage(LOGINFO, "The optimisation stopped because the maximum number of function evaluations");
      handle.logMessage(LOGINFO, "was reached and NOT because an optimum was found for this run");
    }
    handle.logMessage(LOGINFO, "Number of directly accepted points", siman.getNacc());
    handle.logMessage(LOGINFO, "Number of metropolis accepted points", siman.getNaccmet());
    handle.logMessage(LOGINFO, "Number of rejected points", siman.getNrej());
    *score = EcoSystem->SimulateAndUpdate(*bestX);
    handle.logMessage(LOGINFO, "\nSimulated Annealing finished with a likelihood score of", *score);
  }
};
|
1008 : |
|
|
|
1009 : |
|
|
// Required
|
1010 : |
|
|
std::ostream &operator<<(std::ostream &os, const DoubleVector &p)
|
1011 : |
|
|
{
|
1012 : |
|
|
os << "";
|
1013 : |
|
|
return os;
|
1014 : |
|
|
}
|
1015 : |
|
|
|
1016 : |
|
|
|
1017 : |
agomez |
20 |
#ifdef _OPENMP
// Reproducible OpenMP variant of the simulated-annealing driver: sets up the
// search state (bounds, scaling, initial score) and delegates the annealing
// loop to the ReproducibleSearch template with the parallel evaluator.
// Fix: removed the many locals that were declared but never used here
// (p, pp, trialf, a, j, k, l, quit, rchange, rcheck, rnumber, trialx) —
// they were leftovers from the hand-rolled loop above.
void OptInfoSimann::OptimiseLikelihoodREP() {

  //set initial values
  double tmp, funcval;
  int i;

  handle.logMessage(LOGINFO,
      "\nStarting Simulated Annealing optimisation algorithm\n");
  int nvars = EcoSystem->numOptVariables();
  DoubleVector x(nvars);
  DoubleVector init(nvars);
  DoubleVector bestx(nvars);
  DoubleVector scalex(nvars);
  DoubleVector lowerb(nvars);
  DoubleVector upperb(nvars);
  DoubleVector fstar(tempcheck);
  DoubleVector vm(nvars, vminit);
  IntVector param(nvars, 0);

  EcoSystem->resetVariables(); //JMB need to reset variables in case they have been scaled
  if (scale) {
    EcoSystem->scaleVariables();

    int numThr = omp_get_max_threads ( );
    for (i = 0; i < numThr; i++) // scale the variables for the ecosystem of every thread
      EcoSystems[i]->scaleVariables();
  }
  EcoSystem->getOptScaledValues(x);
  EcoSystem->getOptLowerBounds(lowerb);
  EcoSystem->getOptUpperBounds(upperb);
  EcoSystem->getOptInitialValues(init);

  for (i = 0; i < nvars; i++) {
    bestx[i] = x[i];
    param[i] = i;
  }

  if (scale) {
    for (i = 0; i < nvars; i++) {
      scalex[i] = x[i];
      // Scaling the bounds, because the parameters are scaled
      lowerb[i] = lowerb[i] / init[i];
      upperb[i] = upperb[i] / init[i];
      if (lowerb[i] > upperb[i]) {
        // scaling by a negative initial value flips the interval
        tmp = lowerb[i];
        lowerb[i] = upperb[i];
        upperb[i] = tmp;
      }
    }
  }

  //funcval is the function value at x
  funcval = EcoSystem->SimulateAndUpdate(x);
  if (funcval != funcval) { //check for NaN
    handle.logMessage(LOGINFO,
        "Error starting Simulated Annealing optimisation with f(x) = infinity");
    converge = -1;
    iters = 1;
    return;
  }

  //the function is to be minimised so switch the sign of funcval (and trialf)
  funcval = -funcval;
  cs /= lratio; //JMB save processing time
  for (i = 0; i < tempcheck; i++)
    fstar[i] = funcval;

  // bundle the whole annealing state for the search template
  Siman s(seed, seedM, seedP, nvars, nt, ns, param, &x, &lowerb, &upperb, vm, t, rt, (1.0 / ns),
      tempcheck, simanneps, fstar, lratio, uratio, cs, &bestx, scale, &converge, &score);

  ReproducibleSearch<Siman, DoubleVector, ControlClass, evaluate_par_f, buildNewParams_f>
    pa(s, x, simanniter);

  // OpenMP parallelization
  int numThr = omp_get_max_threads ( );
  pa.paral_opt_omp(funcval, numThr, numThr);
  iters = pa.iterations();

}
#endif
|
1102 : |
|
|
void OptInfoSimann::OptimiseLikelihood() {
|
1103 : |
|
|
|
1104 : |
|
|
//set initial values
|
1105 : |
|
|
|
1106 : |
|
|
double tmp, p, pp;
|
1107 : |
|
|
double funcval, trialf;
|
1108 : |
|
|
int a, i, j, k, l, quit;
|
1109 : |
|
|
int rchange, rcheck, rnumber; //Used to randomise the order of the parameters
|
1110 : |
|
|
|
1111 : |
|
|
handle.logMessage(LOGINFO,
|
1112 : |
|
|
"\nStarting Simulated Annealing optimisation algorithm\n");
|
1113 : |
|
|
int nvars = EcoSystem->numOptVariables();
|
1114 : |
|
|
DoubleVector x(nvars);
|
1115 : |
|
|
DoubleVector init(nvars);
|
1116 : |
|
|
DoubleVector trialx(nvars, 0.0);
|
1117 : |
|
|
DoubleVector bestx(nvars);
|
1118 : |
|
|
DoubleVector scalex(nvars);
|
1119 : |
|
|
DoubleVector lowerb(nvars);
|
1120 : |
|
|
DoubleVector upperb(nvars);
|
1121 : |
|
|
DoubleVector fstar(tempcheck);
|
1122 : |
|
|
DoubleVector vm(nvars, vminit);
|
1123 : |
|
|
IntVector param(nvars, 0);
|
1124 : |
|
|
|
1125 : |
|
|
EcoSystem->resetVariables(); //JMB need to reset variables in case they have been scaled
|
1126 : |
|
|
if (scale) {
|
1127 : |
|
|
EcoSystem->scaleVariables();
|
1128 : |
|
|
}
|
1129 : |
|
|
EcoSystem->getOptScaledValues(x);
|
1130 : |
|
|
EcoSystem->getOptLowerBounds(lowerb);
|
1131 : |
|
|
EcoSystem->getOptUpperBounds(upperb);
|
1132 : |
|
|
EcoSystem->getOptInitialValues(init);
|
1133 : |
|
|
|
1134 : |
|
|
for (i = 0; i < nvars; i++) {
|
1135 : |
|
|
bestx[i] = x[i];
|
1136 : |
|
|
param[i] = i;
|
1137 : |
|
|
}
|
1138 : |
|
|
|
1139 : |
|
|
if (scale) {
|
1140 : |
|
|
for (i = 0; i < nvars; i++) {
|
1141 : |
|
|
scalex[i] = x[i];
|
1142 : |
|
|
// Scaling the bounds, because the parameters are scaled
|
1143 : |
|
|
lowerb[i] = lowerb[i] / init[i];
|
1144 : |
|
|
upperb[i] = upperb[i] / init[i];
|
1145 : |
|
|
if (lowerb[i] > upperb[i]) {
|
1146 : |
|
|
tmp = lowerb[i];
|
1147 : |
|
|
lowerb[i] = upperb[i];
|
1148 : |
|
|
upperb[i] = tmp;
|
1149 : |
|
|
}
|
1150 : |
|
|
}
|
1151 : |
|
|
}
|
1152 : |
|
|
|
1153 : |
|
|
//funcval is the function value at x
|
1154 : |
|
|
funcval = EcoSystem->SimulateAndUpdate(x);
|
1155 : |
|
|
if (funcval != funcval) { //check for NaN
|
1156 : |
|
|
handle.logMessage(LOGINFO,
|
1157 : |
|
|
"Error starting Simulated Annealing optimisation with f(x) = infinity");
|
1158 : |
|
|
converge = -1;
|
1159 : |
|
|
iters = 1;
|
1160 : |
|
|
return;
|
1161 : |
|
|
}
|
1162 : |
|
|
|
1163 : |
|
|
//the function is to be minimised so switch the sign of funcval (and trialf)
|
1164 : |
|
|
funcval = -funcval;
|
1165 : |
|
|
cs /= lratio; //JMB save processing time
|
1166 : |
|
|
for (i = 0; i < tempcheck; i++)
|
1167 : |
|
|
fstar[i] = funcval;
|
1168 : |
|
|
|
1169 : |
|
|
Siman s(seed, seedM, seedP, nvars, nt, ns, param, &x, &lowerb, &upperb, vm, t, rt, (1.0 / ns),
|
1170 : |
|
|
tempcheck, simanneps, fstar, lratio, uratio, cs, &bestx, scale, &converge, &score);
|
1171 : |
|
|
|
1172 : |
|
|
ReproducibleSearch<Siman, DoubleVector, ControlClass, evaluate_f, buildNewParams_f>
|
1173 : |
|
|
pa(s, x, simanniter);
|
1174 : |
|
|
|
1175 : |
ulcessvp |
12 |
// sequential code
|
1176 : |
|
|
pa.seq_opt(funcval);
|
1177 : |
ulcessvp |
17 |
iters = pa.iterations();
|
1178 : |
ulcessvp |
11 |
|
1179 : |
|
|
}
|
1180 : |
|
|
|
1181 : |
|
|
|
1182 : |
|
|
|
1183 : |
|
|
|
1184 : |
agomez |
20 |
|
1185 : |
|
|
|
1186 : |
|
|
|
1187 : |
|
|
|