expanded.c
1 
/*


2 
* This file is part of the GROMACS molecular simulation package.

3 
*

4 
* Copyright (c) 2012,2013,2014, by the GROMACS development team, led by

5 
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,

6 
* and including many others, as listed in the AUTHORS file in the

7 
* toplevel source directory and at http://www.gromacs.org.

8 
*

9 
* GROMACS is free software; you can redistribute it and/or

10 
* modify it under the terms of the GNU Lesser General Public License

11 
* as published by the Free Software Foundation; either version 2.1

12 
* of the License, or (at your option) any later version.

13 
*

14 
* GROMACS is distributed in the hope that it will be useful,

15 
* but WITHOUT ANY WARRANTY; without even the implied warranty of

16 
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU

17 
* Lesser General Public License for more details.

18 
*

19 
* You should have received a copy of the GNU Lesser General Public

20 
* License along with GROMACS; if not, see

21 
* http://www.gnu.org/licenses, or write to the Free Software Foundation,

22 
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

23 
*

24 
* If you want to redistribute modifications to GROMACS, please

25 
* consider that scientific software is very special. Version

26 
* control is crucial - bugs must be traceable. We will be happy to

27 
* consider code for inclusion in the official distribution, but

28 
* derived work must not be called official GROMACS. Details are found

29 
* in the README & COPYING files - if they are missing, get the

30 
* official version at http://www.gromacs.org.

31 
*

32 
* To help us fund GROMACS development, we humbly ask that you cite

33 
* the research papers on the package. Check out http://www.gromacs.org.

34 
*/

35 
#ifdef HAVE_CONFIG_H

36 
#include <config.h> 
37 
#endif

38  
39 
#include <stdio.h> 
40 
#include <math.h> 
41 
#include "typedefs.h" 
42 
#include "gromacs/utility/smalloc.h" 
43 
#include "names.h" 
44 
#include "gromacs/fileio/confio.h" 
45 
#include "txtdump.h" 
46 
#include "pbc.h" 
47 
#include "chargegroup.h" 
48 
#include "vec.h" 
49 
#include "nrnb.h" 
50 
#include "mshift.h" 
51 
#include "mdrun.h" 
52 
#include "update.h" 
53 
#include "physics.h" 
54 
#include "main.h" 
55 
#include "mdatoms.h" 
56 
#include "force.h" 
57 
#include "bondf.h" 
58 
#include "pme.h" 
59 
#include "disre.h" 
60 
#include "orires.h" 
61 
#include "network.h" 
62 
#include "calcmu.h" 
63 
#include "constr.h" 
64 
#include "xvgr.h" 
65 
#include "gromacs/random/random.h" 
66 
#include "domdec.h" 
67 
#include "macros.h" 
68  
69 
#include "gromacs/fileio/confio.h" 
70 
#include "gromacs/fileio/gmxfio.h" 
71 
#include "gromacs/fileio/trnio.h" 
72 
#include "gromacs/fileio/xtcio.h" 
73 
#include "gromacs/timing/wallcycle.h" 
74 
#include "gmx_fatal.h" 
75 
#include "gromacs/utility/gmxmpi.h" 
76  
77 
/* Seed the free-energy-weight history with the user-supplied initial lambda
 * weights from the mdp input.  Both the running weight sum and the running
 * free-energy-difference sum start from the same initial values. */
static void init_df_history_weights(df_history_t *dfhist, t_expanded *expand, int nlim)
{
    int i;
    dfhist->wl_delta = expand->init_wl_delta; /* initial Wang-Landau incrementor */
    for (i = 0; i < nlim; i++)
    {
        dfhist->sum_weights[i] = expand->init_lambda_weights[i];
        dfhist->sum_dg[i]      = expand->init_lambda_weights[i];
    }
}
87  
88 
/* Eventually should contain all the functions needed to initialize expanded ensemble

89 
before the md loop starts */

90 
extern void init_expanded_ensemble(gmx_bool bStateFromCP, t_inputrec *ir, df_history_t *dfhist) 
91 
{ 
92 
if (!bStateFromCP)

93 
{ 
94 
init_df_history_weights(dfhist, ir>expandedvals, ir>fepvals>n_lambda); 
95 
} 
96 
} 
97  
98 
/* Compute normalized Gibbs (Boltzmann) probabilities p_k[i] = exp(ene[i]) / sum
 * over states minfep..maxfep (inclusive).  The maximum energy is subtracted
 * before exponentiating to avoid overflow; *pks returns the (shifted)
 * normalization denominator. */
static void GenerateGibbsProbabilities(real *ene, double *p_k, double *pks, int minfep, int maxfep)
{
    int  i;
    real maxene;

    *pks   = 0.0;
    maxene = ene[minfep];
    /* find the maximum value */
    for (i = minfep; i <= maxfep; i++)
    {
        if (ene[i] > maxene)
        {
            maxene = ene[i];
        }
    }
    /* find the denominator */
    for (i = minfep; i <= maxfep; i++)
    {
        *pks += exp(ene[i] - maxene);
    }
    /* numerators */
    for (i = minfep; i <= maxfep; i++)
    {
        p_k[i] = exp(ene[i] - maxene) / *pks;
    }
}
125  
126 
/* Compute Gibbs probabilities where each state's energy is weighted by the
 * (log of the) number of visits nvals[i], as used by weighted Wang-Landau.
 * States never visited get log(delta) instead of log(0), keeping the argument
 * finite.  Results are returned in p_k; *pks is the normalization sum. */
static void GenerateWeightedGibbsProbabilities(real *ene, double *p_k, double *pks, int nlim, real *nvals, real delta)
{
    int   i;
    real  maxene;
    real *nene;
    *pks = 0.0;

    snew(nene, nlim);
    for (i = 0; i < nlim; i++)
    {
        if (nvals[i] == 0)
        {
            /* add the delta, since we need to make sure it's greater than zero, and
               we need a non-arbitrary number? */
            nene[i] = ene[i] + log(nvals[i]+delta);
        }
        else
        {
            nene[i] = ene[i] + log(nvals[i]);
        }
    }

    /* find the maximum value */
    maxene = nene[0];
    for (i = 0; i < nlim; i++)
    {
        if (nene[i] > maxene)
        {
            maxene = nene[i];
        }
    }

    /* subtract off the maximum, avoiding overflow */
    for (i = 0; i < nlim; i++)
    {
        nene[i] -= maxene;
    }

    /* find the denominator */
    for (i = 0; i < nlim; i++)
    {
        *pks += exp(nene[i]);
    }

    /* numerators */
    for (i = 0; i < nlim; i++)
    {
        p_k[i] = exp(nene[i]) / *pks;
    }
    sfree(nene);
}
178  
179 
/* Numerically stable log-sum-exp.
 *
 * RETURN VALUE: log(\sum_{i=0}^{N-1} exp(a_n[i]))
 *
 * The maximum argument is factored out before exponentiating so the sum
 * cannot overflow. */
real do_logsum(int N, real *a_n)
{
    real maxarg;
    real sum;
    int  i;
    real logsum;

    /* compute maximum argument to exp(.) */
    maxarg = a_n[0];
    for (i = 1; i < N; i++)
    {
        maxarg = max(maxarg, a_n[i]);
    }

    /* compute sum of exp(a_n - maxarg) */
    sum = 0.0;
    for (i = 0; i < N; i++)
    {
        sum = sum + exp(a_n[i] - maxarg);
    }

    /* compute log sum */
    logsum = log(sum) + maxarg;
    return logsum;
}
207  
208 
/* Return the index of the smallest element of min_metric[0..N-1].
 * Assumes N >= 1; ties resolve to the lowest index. */
int FindMinimum(real *min_metric, int N)
{
    real min_val;
    int  min_nval, nval;

    min_nval = 0;
    min_val  = min_metric[0];

    for (nval = 0; nval < N; nval++)
    {
        if (min_metric[nval] < min_val)
        {
            min_val  = min_metric[nval];
            min_nval = nval;
        }
    }
    return min_nval;
}
227  
228 
/* Flatness test for a histogram: TRUE if every bin lies strictly within
 * (ratio, 1/ratio) times the mean bin count.  Returns FALSE immediately when
 * the histogram is empty (no samples at all). */
static gmx_bool CheckHistogramRatios(int nhisto, real *histo, real ratio)
{
    int      i;
    real     nmean;
    gmx_bool bIfFlat;

    nmean = 0;
    for (i = 0; i < nhisto; i++)
    {
        nmean += histo[i];
    }

    if (nmean == 0)
    {
        /* no samples! is bad!*/
        bIfFlat = FALSE;
        return bIfFlat;
    }
    nmean /= (real)nhisto;

    bIfFlat = TRUE;
    for (i = 0; i < nhisto; i++)
    {
        /* make sure that all points are in the ratio < x < 1/ratio range */
        if (!((histo[i]/nmean < 1.0/ratio) && (histo[i]/nmean > ratio)))
        {
            bIfFlat = FALSE;
            break;
        }
    }
    return bIfFlat;
}
261  
262 
/* Decide whether the expanded-ensemble weights are equilibrated, according to
 * the equilibration criterion chosen in the mdp input (expand->elmceq).
 * Regardless of the criterion, equilibration is never complete while the
 * initial forced marching phase (lmc_forced_nstart) is still running. */
static gmx_bool CheckIfDoneEquilibrating(int nlim, t_expanded *expand, df_history_t *dfhist, gmx_int64_t step)
{
    int      i, totalsamples;
    gmx_bool bDoneEquilibrating = TRUE;
    gmx_bool bIfFlat;

    /* assume we have equilibrated the weights, then check to see if any of the conditions are not met */

    /* calculate the total number of samples */
    switch (expand->elmceq)
    {
        case elmceqNO:
            /* We have not equilibrated, and won't, ever. */
            return FALSE;
        case elmceqYES:
            /* we have equilibrated -- we're done */
            return TRUE;
        case elmceqSTEPS:
            /* first, check if we are equilibrating by steps, if we're still under */
            if (step < expand->equil_steps)
            {
                bDoneEquilibrating = FALSE;
            }
            break;
        case elmceqSAMPLES:
            totalsamples = 0;
            for (i = 0; i < nlim; i++)
            {
                totalsamples += dfhist->n_at_lam[i];
            }
            if (totalsamples < expand->equil_samples)
            {
                bDoneEquilibrating = FALSE;
            }
            break;
        case elmceqNUMATLAM:
            for (i = 0; i < nlim; i++)
            {
                if (dfhist->n_at_lam[i] < expand->equil_n_at_lam) /* we are still doing the initial sweep, so we're definitely not
                                                                     done equilibrating*/
                {
                    bDoneEquilibrating = FALSE;
                    break;
                }
            }
            break;
        case elmceqWLDELTA:
            if (EWL(expand->elamstats)) /* This check is in readir as well, but
                                           just to be sure */
            {
                if (dfhist->wl_delta > expand->equil_wl_delta)
                {
                    bDoneEquilibrating = FALSE;
                }
            }
            break;
        case elmceqRATIO:
            /* we can use the flatness as a judge of good weights, as long as
               we're not doing minvar, or Wang-Landau.
               But turn off for now until we figure out exactly how we do this.
             */
            if (!(EWL(expand->elamstats) || expand->elamstats == elamstatsMINVAR))
            {
                /* we want to use flatness avoiding the forced-through samples.  Plus, we need to convert to
                   floats for this histogram function. */

                real *modhisto;
                snew(modhisto, nlim);
                for (i = 0; i < nlim; i++)
                {
                    modhisto[i] = 1.0*(dfhist->n_at_lam[i] - expand->lmc_forced_nstart);
                }
                bIfFlat = CheckHistogramRatios(nlim, modhisto, expand->equil_ratio);
                sfree(modhisto);
                if (!bIfFlat)
                {
                    bDoneEquilibrating = FALSE;
                }
            }
            break; /* required: without it we fall through to default and clobber the result */
        default:
            bDoneEquilibrating = TRUE;
    }
    /* one last case to go though, if we are doing slow growth to get initial values, we haven't finished equilibrating */

    if (expand->lmc_forced_nstart > 0)
    {
        for (i = 0; i < nlim; i++)
        {
            if (dfhist->n_at_lam[i] < expand->lmc_forced_nstart) /* we are still doing the initial sweep, so we're definitely not
                                                                    done equilibrating*/
            {
                bDoneEquilibrating = FALSE;
                break;
            }
        }
    }
    return bDoneEquilibrating;
}
362  
363 
/* Update the expanded-ensemble lambda weights after a sample at fep_state.
 *
 * Handles the Wang-Landau family (WL/weighted-WL: histogram + decrementing
 * weights) and the acceptance-ratio family (Barker/Metropolis/minvar:
 * running acceptance averages over a grid of trial constants C).
 *
 * Returns TRUE exactly once, at the step where the weights are detected to
 * have equilibrated (visit counts are then zeroed so post-equilibration
 * statistics start fresh); FALSE otherwise.  No-op if already equilibrated. */
static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist,
                              int fep_state, real *scaled_lamee, real *weighted_lamee, gmx_int64_t step)
{
    real     maxdiff = 0.000000001;
    gmx_bool bSufficientSamples;
    int      i, k, n, nz, indexi, indexk, min_n, max_n, totali;
    int      n0, np1, nm1, nval, min_nvalm, min_nvalp, maxc;
    real     omega_m1_0, omega_p1_m1, omega_m1_p1, omega_p1_0, clam_osum;
    real     de, de_function, dr, denom, maxdr;
    real     min_val, cnval, zero_sum_weights;
    real    *omegam_array, *weightsm_array, *omegap_array, *weightsp_array, *varm_array, *varp_array, *dwp_array, *dwm_array;
    real     clam_varm, clam_varp, clam_weightsm, clam_weightsp, clam_minvar;
    real    *lam_weights, *lam_minvar_corr, *lam_variance, *lam_dg;
    double  *p_k;
    double   pks = 0;
    real    *numweighted_lamee, *logfrac;
    int     *nonzero;
    real     chi_m1_0, chi_p1_0, chi_m2_0, chi_p2_0, chi_p1_m1, chi_p2_m1, chi_m1_p1, chi_m2_p1;

    /* if we have equilibrated the weights, exit now */
    if (dfhist->bEquil)
    {
        return FALSE;
    }

    if (CheckIfDoneEquilibrating(nlim, expand, dfhist, step))
    {
        dfhist->bEquil = TRUE;
        /* zero out the visited states so we know how many equilibrated states we have
           from here on out.*/
        for (i = 0; i < nlim; i++)
        {
            dfhist->n_at_lam[i] = 0;
        }
        return TRUE;
    }

    /* If we reached this far, we have not equilibrated yet, keep on
       going resetting the weights */

    if (EWL(expand->elamstats))
    {
        if (expand->elamstats == elamstatsWL) /* Standard Wang-Landau */
        {
            dfhist->sum_weights[fep_state] -= dfhist->wl_delta;
            dfhist->wl_histo[fep_state]    += 1.0;
        }
        else if (expand->elamstats == elamstatsWWL) /* Weighted Wang-Landau */
        {
            snew(p_k, nlim);

            /* first increment count */
            GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, 0, nlim-1);
            for (i = 0; i < nlim; i++)
            {
                dfhist->wl_histo[i] += (real)p_k[i];
            }

            /* then increment weights (uses count) */
            pks = 0.0;
            GenerateWeightedGibbsProbabilities(weighted_lamee, p_k, &pks, nlim, dfhist->wl_histo, dfhist->wl_delta);

            for (i = 0; i < nlim; i++)
            {
                dfhist->sum_weights[i] -= dfhist->wl_delta*(real)p_k[i];
            }
            /* Alternate definition, using logarithms. Shouldn't make very much difference! */
            /*
               real di;
               for (i=0;i<nlim;i++)
               {
                di = (real)1.0 + dfhist->wl_delta*(real)p_k[i];
                dfhist->sum_weights[i] -= log(di);
               }
             */
            sfree(p_k);
        }

        /* shift weights so the first state is always zero */
        zero_sum_weights = dfhist->sum_weights[0];
        for (i = 0; i < nlim; i++)
        {
            dfhist->sum_weights[i] -= zero_sum_weights;
        }
    }

    if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMETROPOLIS || expand->elamstats == elamstatsMINVAR)
    {
        de_function = 0; /* to get rid of warnings, but this value will not be used because of the logic */
        maxc        = 2*expand->c_range+1;

        snew(lam_dg, nlim);
        snew(lam_variance, nlim);

        snew(omegap_array, maxc);
        snew(weightsp_array, maxc);
        snew(varp_array, maxc);
        snew(dwp_array, maxc);

        snew(omegam_array, maxc);
        snew(weightsm_array, maxc);
        snew(varm_array, maxc);
        snew(dwm_array, maxc);

        /* unpack the current lambdas -- we will only update 2 of these */

        for (i = 0; i < nlim-1; i++)
        {   /* only through the second to last */
            lam_dg[i]       = dfhist->sum_dg[i+1] - dfhist->sum_dg[i];
            lam_variance[i] = pow(dfhist->sum_variance[i+1], 2) - pow(dfhist->sum_variance[i], 2);
        }

        /* accumulate running averages over the grid of trial constants C */
        for (nval = 0; nval < maxc; nval++)
        {
            /* constants for later use */
            cnval = (real)(nval - expand->c_range);
            /* actually, should be able to rewrite it w/o exponential, for better numerical stability */
            if (fep_state > 0)
            {
                de = exp(cnval - (scaled_lamee[fep_state] - scaled_lamee[fep_state-1]));
                if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
                {
                    de_function = 1.0/(1.0+de);
                }
                else if (expand->elamstats == elamstatsMETROPOLIS)
                {
                    if (de < 1.0)
                    {
                        de_function = 1.0;
                    }
                    else
                    {
                        de_function = 1.0/de;
                    }
                }
                dfhist->accum_m[fep_state][nval]  += de_function;
                dfhist->accum_m2[fep_state][nval] += de_function*de_function;
            }

            if (fep_state < nlim-1)
            {
                de = exp(-cnval + (scaled_lamee[fep_state+1] - scaled_lamee[fep_state]));
                if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
                {
                    de_function = 1.0/(1.0+de);
                }
                else if (expand->elamstats == elamstatsMETROPOLIS)
                {
                    if (de < 1.0)
                    {
                        de_function = 1.0;
                    }
                    else
                    {
                        de_function = 1.0/de;
                    }
                }
                dfhist->accum_p[fep_state][nval]  += de_function;
                dfhist->accum_p2[fep_state][nval] += de_function*de_function;
            }

            /* Metropolis transition and Barker transition (unoptimized Bennett) acceptance weight determination */

            n0 = dfhist->n_at_lam[fep_state];
            if (fep_state > 0)
            {
                nm1 = dfhist->n_at_lam[fep_state-1];
            }
            else
            {
                nm1 = 0;
            }
            if (fep_state < nlim-1)
            {
                np1 = dfhist->n_at_lam[fep_state+1];
            }
            else
            {
                np1 = 0;
            }

            /* logic SHOULD keep these all set correctly whatever the logic, but apparently it can't figure it out. */
            chi_m1_0 = chi_p1_0 = chi_m2_0 = chi_p2_0 = chi_p1_m1 = chi_p2_m1 = chi_m1_p1 = chi_m2_p1 = 0;

            if (n0 > 0)
            {
                chi_m1_0 = dfhist->accum_m[fep_state][nval]/n0;
                chi_p1_0 = dfhist->accum_p[fep_state][nval]/n0;
                chi_m2_0 = dfhist->accum_m2[fep_state][nval]/n0;
                chi_p2_0 = dfhist->accum_p2[fep_state][nval]/n0;
            }

            if ((fep_state > 0 ) && (nm1 > 0))
            {
                chi_p1_m1 = dfhist->accum_p[fep_state-1][nval]/nm1;
                chi_p2_m1 = dfhist->accum_p2[fep_state-1][nval]/nm1;
            }

            if ((fep_state < nlim-1) && (np1 > 0))
            {
                chi_m1_p1 = dfhist->accum_m[fep_state+1][nval]/np1;
                chi_m2_p1 = dfhist->accum_m2[fep_state+1][nval]/np1;
            }

            omega_m1_0    = 0;
            omega_p1_0    = 0;
            clam_weightsm = 0;
            clam_weightsp = 0;
            clam_varm     = 0;
            clam_varp     = 0;

            if (fep_state > 0)
            {
                if (n0 > 0)
                {
                    omega_m1_0 = chi_m2_0/(chi_m1_0*chi_m1_0) - 1.0;
                }
                if (nm1 > 0)
                {
                    omega_p1_m1 = chi_p2_m1/(chi_p1_m1*chi_p1_m1) - 1.0;
                }
                if ((n0 > 0) && (nm1 > 0))
                {
                    clam_weightsm = (log(chi_m1_0) - log(chi_p1_m1)) + cnval;
                    clam_varm     = (1.0/n0)*(omega_m1_0) + (1.0/nm1)*(omega_p1_m1);
                }
            }

            if (fep_state < nlim-1)
            {
                if (n0 > 0)
                {
                    omega_p1_0 = chi_p2_0/(chi_p1_0*chi_p1_0) - 1.0;
                }
                if (np1 > 0)
                {
                    omega_m1_p1 = chi_m2_p1/(chi_m1_p1*chi_m1_p1) - 1.0;
                }
                if ((n0 > 0) && (np1 > 0))
                {
                    clam_weightsp = (log(chi_m1_p1) - log(chi_p1_0)) + cnval;
                    clam_varp     = (1.0/np1)*(omega_m1_p1) + (1.0/n0)*(omega_p1_0);
                }
            }

            if (n0 > 0)
            {
                omegam_array[nval] = omega_m1_0;
            }
            else
            {
                omegam_array[nval] = 0;
            }
            weightsm_array[nval] = clam_weightsm;
            varm_array[nval]     = clam_varm;
            if (nm1 > 0)
            {
                dwm_array[nval] = fabs( (cnval + log((1.0*n0)/nm1)) - lam_dg[fep_state-1] );
            }
            else
            {
                dwm_array[nval] = fabs( cnval - lam_dg[fep_state-1] );
            }

            if (n0 > 0)
            {
                omegap_array[nval] = omega_p1_0;
            }
            else
            {
                omegap_array[nval] = 0;
            }
            weightsp_array[nval] = clam_weightsp;
            varp_array[nval]     = clam_varp;
            if ((np1 > 0) && (n0 > 0))
            {
                dwp_array[nval] = fabs( (cnval + log((1.0*np1)/n0)) - lam_dg[fep_state] );
            }
            else
            {
                dwp_array[nval] = fabs( cnval - lam_dg[fep_state] );
            }
        }

        /* find the C's closest to the old weights value */

        min_nvalm     = FindMinimum(dwm_array, maxc);
        omega_m1_0    = omegam_array[min_nvalm];
        clam_weightsm = weightsm_array[min_nvalm];
        clam_varm     = varm_array[min_nvalm];

        min_nvalp     = FindMinimum(dwp_array, maxc);
        omega_p1_0    = omegap_array[min_nvalp];
        clam_weightsp = weightsp_array[min_nvalp];
        clam_varp     = varp_array[min_nvalp];

        clam_osum   = omega_m1_0 + omega_p1_0;
        clam_minvar = 0;
        if (clam_osum > 0)
        {
            clam_minvar = 0.5*log(clam_osum);
        }

        if (fep_state > 0)
        {
            lam_dg[fep_state-1]       = clam_weightsm;
            lam_variance[fep_state-1] = clam_varm;
        }

        if (fep_state < nlim-1)
        {
            lam_dg[fep_state]       = clam_weightsp;
            lam_variance[fep_state] = clam_varp;
        }

        if (expand->elamstats == elamstatsMINVAR)
        {
            bSufficientSamples = TRUE;
            /* make sure they are all past a threshold */
            for (i = 0; i < nlim; i++)
            {
                if (dfhist->n_at_lam[i] < expand->minvarmin)
                {
                    bSufficientSamples = FALSE;
                }
            }
            if (bSufficientSamples)
            {
                dfhist->sum_minvar[fep_state] = clam_minvar;
                if (fep_state == 0)
                {
                    for (i = 0; i < nlim; i++)
                    {
                        dfhist->sum_minvar[i] += (expand->minvar_const - clam_minvar);
                    }
                    expand->minvar_const          = clam_minvar;
                    dfhist->sum_minvar[fep_state] = 0.0;
                }
                else
                {
                    dfhist->sum_minvar[fep_state] -= expand->minvar_const;
                }
            }
        }

        /* we need to rezero minvar now, since it could change at fep_state = 0 */
        dfhist->sum_dg[0]       = 0.0;
        dfhist->sum_variance[0] = 0.0;
        dfhist->sum_weights[0]  = dfhist->sum_dg[0] + dfhist->sum_minvar[0]; /* should be zero */

        for (i = 1; i < nlim; i++)
        {
            dfhist->sum_dg[i]       = lam_dg[i-1] + dfhist->sum_dg[i-1];
            dfhist->sum_variance[i] = sqrt(lam_variance[i-1] + pow(dfhist->sum_variance[i-1], 2));
            dfhist->sum_weights[i]  = dfhist->sum_dg[i] + dfhist->sum_minvar[i];
        }

        sfree(lam_dg);
        sfree(lam_variance);

        sfree(omegam_array);
        sfree(weightsm_array);
        sfree(varm_array);
        sfree(dwm_array);

        sfree(omegap_array);
        sfree(weightsp_array);
        sfree(varp_array);
        sfree(dwp_array);
    }
    return FALSE;
}
737  
738  
739 
/* Choose a new lambda state with the AIM (adaptive integration) scheme and
 * update the transition matrix.  A +/-1 trial move is accepted with a
 * Metropolis criterion on the stored potential-energy difference corrected by
 * the running dH/dlambda average; the dH/dlambda running mean at the final
 * state is updated afterwards.  Returns the new fep state index.
 *
 * NOTE(review): kT is hard-coded to 300 K here (BOLTZ*300.0) rather than
 * taken from the inputrec -- confirm this is intended. */
static int AIMChooseNewLambda(int nlim, t_expanded *expand, df_history_t *dfhist, int fep_state, real *weighted_lamee,
                              gmx_int64_t seed, gmx_int64_t step, gmx_enerdata_t *enerd)
{
    /* Choose new lambda value, and update transition matrix */

    int      i, j, ifep, jfep, minfep, maxfep, lamnew, lamtrial, starting_fep_state;
    real     r1, r2, de_old, de_new, de, trialprob = 0.0, tprob = 0.0;
    real   **Tij;
    double  *propose, *accept, *remainder;
    double   pks;
    real     sum, pnorm;
    gmx_bool bRestricted;
    real     df, avg, delta = 0;
    real     kT   = BOLTZ*300.0;
    real     beta = 1.0/(double)kT;

    starting_fep_state = fep_state; /* so we can track lambda acceptance */
    lamnew             = fep_state; /* so that there is a default setting - stays the same */

    snew(propose, nlim);
    snew(accept, nlim);
    snew(remainder, nlim);

    /* remember the potential energy at the current state for later trials */
    dfhist->store_fepot[fep_state] = enerd->term[F_EPOT];
    for (i = 0; i < expand->lmc_repeats; i++)
    {
        double rnd[2];

        gmx_rng_cycle_2uniform(step, i, seed, RND_SEED_EXPANDED, rnd);

        /* use the metropolis sampler with trial +/- 1 */
        r1 = rnd[0];
        if (r1 < 0.5)
        {
            if (fep_state == 0)
            {
                lamtrial = fep_state;
            }
            else
            {
                lamtrial = fep_state-1;
            }
        }
        else
        {
            if (fep_state == nlim-1)
            {
                lamtrial = fep_state;
            }
            else
            {
                lamtrial = fep_state+1;
            }
        }

        /* AIM accept or reject step using the potential energy difference */
        /* we want to initialize the values of the potential energy at
         * every position before we can actually use the values to accept
         * or reject.
         * If the potential at lamtrial is zero, accept lamtrial and move on
         * after storing the potential at lamtrial */

        /* we want values for de so we wait for a while;
         * the possible mistake is that I am assuming the first values of
         * df equal zero */
        if (dfhist->aim_at_lam[lamtrial] > 1000)
        {
            de = (double)dfhist->store_fepot[lamtrial] - (double)dfhist->store_fepot[fep_state];
            df = 0.5*(double)(lamtrial-fep_state)*(1.0/(double)(nlim-1.0))*(dfhist->dfavg[lamtrial]+dfhist->dfavg[fep_state]);
        }
        else
        {
            de = 0.0;
            df = 0.0;
        }

        /* NOTE(review): the minus signs in this exponent were lost in the
         * corrupted source; exp(-beta*(de-df)) is the standard Metropolis
         * form with the free-energy estimate subtracted -- confirm against
         * the original. */
        trialprob = (double)exp(-beta*(de-df));

        propose[fep_state] = 0;
        propose[lamtrial]  = 1.0;
        accept[fep_state]  = 1.0;
        accept[lamtrial]   = 1.0;

        r2 = rnd[1];
        if (r2 < trialprob)
        {
            lamnew = lamtrial;
            dfhist->laccept[lamtrial]++; /* update the acceptance count at lambda new */
        }
        else
        {
            lamnew = fep_state;
        }

        for (ifep = 0; ifep < nlim; ifep++)
        {
            dfhist->Tij[fep_state][ifep]      += propose[ifep]*accept[ifep];
            dfhist->Tij[fep_state][fep_state] += propose[ifep]*(1.0-accept[ifep]);
        }

        fep_state = lamnew;
        /* update the count at fep_state */
        dfhist->aim_at_lam[fep_state]++;
    }
    dfhist->Tij_empirical[starting_fep_state][lamnew] += 1.0;

    sfree(propose);
    sfree(accept);
    sfree(remainder);

    /* calculate running average of dH/dlambda (Welford-style incremental mean) */
    delta = enerd->term[F_DVDL] - dfhist->dfavg[fep_state];
    /* update averages */
    dfhist->dfavg[fep_state] += delta/dfhist->aim_at_lam[fep_state];

    /* print out some stuff. print out the previous fep state, the current state */
    if (mod(step, 1000) == 0)
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "MD Step %d \n", (int)step);
        fprintf(stderr, "Print out status of current AIM step \n");
        fprintf(stderr, "Lambda new is %3d \n", lamnew);
        fprintf(stderr, "Lambda old was %3d \n ", starting_fep_state);
        fprintf(stderr, "Lambda AIM Count laccept de_avg df_avg \n");
        for (ifep = 0; ifep < nlim; ifep++)
        {
            fprintf(stderr, "%3d %10d %10.5f %10.5f %10.5f \n", ifep, dfhist->aim_at_lam[ifep], 100.0*(double)dfhist->laccept[ifep]/(double)dfhist->aim_at_lam[ifep], dfhist->store_fepot[ifep]/dfhist->aim_at_lam[ifep], dfhist->dfavg[ifep]);
        }
    }

    /* test difference between f_dvdl f_dvdl_vdw using a single neutral
     * atom that's changing vdw size or something like that. */
    return lamnew;
}
874  
875 
/* Choose a new lambda state with the Monte-Carlo move selected in the mdp
 * input (Gibbs, Metropolized-Gibbs, Metropolis, or Barker), updating the
 * proposal/acceptance transition matrix as it goes.  During the initial
 * forced marching phase (lmc_forced_nstart) the lambdas are simply swept in
 * order.  Returns the new fep state index. */
static int ChooseNewLambda(int nlim, t_expanded *expand, df_history_t *dfhist, int fep_state, real *weighted_lamee, double *p_k,
                           gmx_int64_t seed, gmx_int64_t step)
{
    /* Choose new lambda value, and update transition matrix */

    int      i, ifep, jfep, minfep, maxfep, lamnew, lamtrial, starting_fep_state;
    real     r1, r2, de_old, de_new, de, trialprob, tprob = 0, df = 0;
    real   **Tij;
    double  *propose, *accept, *remainder;
    double   pks;
    real     sum, pnorm;
    gmx_bool bRestricted;

    starting_fep_state = fep_state;
    lamnew             = fep_state; /* so that there is a default setting - stays the same */

    if (!EWL(expand->elamstats)) /* ignore equilibrating the weights if using WL */
    {
        if ((expand->lmc_forced_nstart > 0) && (dfhist->n_at_lam[nlim-1] <= expand->lmc_forced_nstart))
        {
            /* Use a marching method to run through the lambdas and get preliminary free energy data,
               before starting 'free' sampling.  We start free sampling when we have enough at each lambda */

            /* if we have enough at this lambda, move on to the next one */

            if (dfhist->n_at_lam[fep_state] == expand->lmc_forced_nstart)
            {
                lamnew = fep_state+1;
                if (lamnew == nlim) /* whoops, stepped too far! */
                {
                    lamnew -= 1;
                }
            }
            else
            {
                lamnew = fep_state;
            }
            return lamnew;
        }
    }

    snew(propose, nlim);
    snew(accept, nlim);
    snew(remainder, nlim);

    for (i = 0; i < expand->lmc_repeats; i++)
    {
        double rnd[2];

        gmx_rng_cycle_2uniform(step, i, seed, RND_SEED_EXPANDED, rnd);

        for (ifep = 0; ifep < nlim; ifep++)
        {
            propose[ifep] = 0;
            accept[ifep]  = 0;
        }

        if ((expand->elmcmove == elmcmoveGIBBS) || (expand->elmcmove == elmcmoveMETGIBBS))
        {
            bRestricted = TRUE;
            /* use the Gibbs sampler, with restricted range */
            if (expand->gibbsdeltalam < 0)
            {
                minfep      = 0;
                maxfep      = nlim-1;
                bRestricted = FALSE;
            }
            else
            {
                minfep = fep_state - expand->gibbsdeltalam;
                maxfep = fep_state + expand->gibbsdeltalam;
                if (minfep < 0)
                {
                    minfep = 0;
                }
                if (maxfep > nlim-1)
                {
                    maxfep = nlim-1;
                }
            }

            GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, minfep, maxfep);

            if (expand->elmcmove == elmcmoveGIBBS)
            {
                for (ifep = minfep; ifep <= maxfep; ifep++)
                {
                    propose[ifep] = p_k[ifep];
                    accept[ifep]  = 1.0;
                }
                /* Gibbs sampling: draw a state with probability p_k by
                   walking the cumulative distribution */
                r1 = rnd[0];
                for (lamnew = minfep; lamnew <= maxfep; lamnew++)
                {
                    if (r1 <= p_k[lamnew])
                    {
                        break;
                    }
                    r1 -= p_k[lamnew];
                }
            }
            else if (expand->elmcmove == elmcmoveMETGIBBS)
            {
                /* Metropolized Gibbs sampling */
                for (ifep = minfep; ifep <= maxfep; ifep++)
                {
                    remainder[ifep] = 1 - p_k[ifep];
                }

                /* find the proposal probabilities */

                if (remainder[fep_state] == 0)
                {
                    /* only the current state has any probability */
                    /* we have to stay at the current state */
                    lamnew = fep_state;
                }
                else
                {
                    for (ifep = minfep; ifep <= maxfep; ifep++)
                    {
                        if (ifep != fep_state)
                        {
                            propose[ifep] = p_k[ifep]/remainder[fep_state];
                        }
                        else
                        {
                            propose[ifep] = 0;
                        }
                    }

                    r1 = rnd[0];
                    for (lamtrial = minfep; lamtrial <= maxfep; lamtrial++)
                    {
                        pnorm = p_k[lamtrial]/remainder[fep_state];
                        if (lamtrial != fep_state)
                        {
                            if (r1 <= pnorm)
                            {
                                break;
                            }
                            r1 -= pnorm;
                        }
                    }

                    /* we have now selected lamtrial according to p(lamtrial)/1-p(fep_state) */
                    tprob = 1.0;
                    /* trial probability is min{1,\frac{1 - p(old)}{1-p(new)} MRS 1/8/2008 */
                    trialprob = (remainder[fep_state])/(remainder[lamtrial]);
                    if (trialprob < tprob)
                    {
                        tprob = trialprob;
                    }
                    r2 = rnd[1];
                    if (r2 < tprob)
                    {
                        lamnew = lamtrial;
                    }
                    else
                    {
                        lamnew = fep_state;
                    }
                }

                /* now figure out the acceptance probability for each */
                for (ifep = minfep; ifep <= maxfep; ifep++)
                {
                    tprob = 1.0;
                    if (remainder[ifep] != 0)
                    {
                        trialprob = (remainder[fep_state])/(remainder[ifep]);
                    }
                    else
                    {
                        trialprob = 1.0; /* this state is the only choice! */
                    }
                    if (trialprob < tprob)
                    {
                        tprob = trialprob;
                    }
                    /* probability for fep_state=0, but that's fine, it's never proposed! */
                    accept[ifep] = tprob;
                }
            }

            if (lamnew > maxfep)
            {
                /* it's possible some rounding is failing */
                if (gmx_within_tol(remainder[fep_state], 0, 50*GMX_DOUBLE_EPS))
                {
                    /* numerical rounding error -- no state other than the original has weight */
                    lamnew = fep_state;
                }
                else
                {
                    /* probably not a numerical issue */
                    int   loc    = 0;
                    int   nerror = 200+(maxfep-minfep+1)*60;
                    char *errorstr;
                    snew(errorstr, nerror);
                    /* if its greater than maxfep, then something went wrong -- probably underflow in the calculation
                       of sum weights. Generated detailed info for failure */
                    loc += sprintf(errorstr, "Something wrong in choosing new lambda state with a Gibbs move  probably underflow in weight determination.\nDenominator is: %3d%17.10e\n i dE numerator weights\n", 0, pks);
                    for (ifep = minfep; ifep <= maxfep; ifep++)
                    {
                        loc += sprintf(&errorstr[loc], "%3d %17.10e%17.10e%17.10e\n", ifep, weighted_lamee[ifep], p_k[ifep], dfhist->sum_weights[ifep]);
                    }
                    gmx_fatal(FARGS, errorstr);
                }
            }
        }
        else if ((expand->elmcmove == elmcmoveMETROPOLIS) || (expand->elmcmove == elmcmoveBARKER))
        {
            /* use the metropolis sampler with trial +/- 1 */
            r1 = rnd[0];
            if (r1 < 0.5)
            {
                if (fep_state == 0)
                {
                    lamtrial = fep_state;
                }
                else
                {
                    lamtrial = fep_state-1;
                }
            }
            else
            {
                if (fep_state == nlim-1)
                {
                    lamtrial = fep_state;
                }
                else
                {
                    lamtrial = fep_state+1;
                }
            }

            de = weighted_lamee[lamtrial] - weighted_lamee[fep_state];
            if (expand->elmcmove == elmcmoveMETROPOLIS)
            {
                tprob     = 1.0;
                trialprob = exp(de);
                if (trialprob < tprob)
                {
                    tprob = trialprob;
                }
                propose[fep_state] = 0;
                propose[lamtrial]  = 1.0; /* note that this overwrites the above line if fep_state = ntrial, which only occurs at the ends */
                accept[fep_state]  = 1.0; /* doesn't actually matter, never proposed unless fep_state = ntrial, in which case it's 1.0 anyway */
                accept[lamtrial]   = tprob;
            }
            else if (expand->elmcmove == elmcmoveBARKER)
            {
                /* Barker acceptance: 1/(1+exp(-de)) */
                tprob = 1.0/(1.0+exp(-de));

                propose[fep_state] = (1-tprob);
                propose[lamtrial] += tprob; /* we add, to account for the fact that at the end, they might be the same point */
                accept[fep_state]  = 1.0;
                accept[lamtrial]   = 1.0;
            }

            r2 = rnd[1];
            if (r2 < tprob)
            {
                lamnew = lamtrial;
            }
            else
            {
                lamnew = fep_state;
            }
        }

        for (ifep = 0; ifep < nlim; ifep++)
        {
            dfhist->Tij[fep_state][ifep]      += propose[ifep]*accept[ifep];
            dfhist->Tij[fep_state][fep_state] += propose[ifep]*(1.0-accept[ifep]);
        }
        fep_state = lamnew;
    }

    dfhist->Tij_empirical[starting_fep_state][lamnew] += 1.0;

    sfree(propose);
    sfree(accept);
    sfree(remainder);

    return lamnew;
}
1167  
1168 
/* print out the weights to the log, along with current state */

1169 
extern void PrintFreeEnergyInfoToFile(FILE *outfile, t_lambda *fep, t_expanded *expand, t_simtemp *simtemp, df_history_t *dfhist, 
1170 
int fep_state, int frequency, gmx_int64_t step) 
1171 
{ 
1172 
int nlim, i, ifep, jfep;

1173 
real dw, dg, dv, dm, Tprint; 
1174 
real *temps; 
1175 
const char *print_names[efptNR] = {" FEPL", "MassL", "CoulL", " VdwL", "BondL", "RestT", "Temp.(K)"}; 
1176 
gmx_bool bSimTemp = FALSE; 
1177  
1178 
nlim = fep>n_lambda; 
1179 
if (simtemp != NULL) 
1180 
{ 
1181 
bSimTemp = TRUE; 
1182 
} 
1183  
1184 
if (mod(step, frequency) == 0) 
1185 
{ 
1186 
fprintf(outfile, " MClambda information\n");

1187 
if (EWL(expand>elamstats) && (!(dfhist>bEquil)))

1188 
{ 
1189 
fprintf(outfile, " WangLandau incrementor is: %11.5g\n", dfhist>wl_delta);

1190 
} 
1191 
fprintf(outfile, " N");

1192 
for (i = 0; i < efptNR; i++) 
1193 
{ 
1194 
if (fep>separate_dvdl[i])

1195 
{ 
1196 
fprintf(outfile, "%7s", print_names[i]);

1197 
} 
1198 
else if ((i == efptTEMPERATURE) && bSimTemp) 
1199 
{ 
1200 
fprintf(outfile, "%10s", print_names[i]); /* more space for temperature formats */ 
1201 
} 
1202 
} 
1203 
fprintf(outfile, " Count ");

1204 
if (expand>elamstats == elamstatsMINVAR)

1205 
{ 
1206 
fprintf(outfile, "W(in kT) G(in kT) dG(in kT) dV(in kT)\n");

1207 
} 
1208 
else

1209 
{ 
1210 
fprintf(outfile, "G(in kT) dG(in kT) dfavg \n");

1211 
} 
1212 
for (ifep = 0; ifep < nlim; ifep++) 
1213 
{ 
1214 
if (ifep == nlim1) 
1215 
{ 
1216 
dw = 0.0; 
1217 
dg = 0.0; 
1218 
dv = 0.0; 
1219 
dm = 0.0; 
1220 
} 
1221 
else

1222 
{ 
1223 
dw = dfhist>sum_weights[ifep+1]  dfhist>sum_weights[ifep];

1224 
dg = dfhist>sum_dg[ifep+1]  dfhist>sum_dg[ifep];

1225 
dv = sqrt(pow(dfhist>sum_variance[ifep+1], 2)  pow(dfhist>sum_variance[ifep], 2)); 
1226 
dm = dfhist>sum_minvar[ifep+1]  dfhist>sum_minvar[ifep];

1227 
} 
1228 
fprintf(outfile, "%3d", (ifep+1)); 
1229 
for (i = 0; i < efptNR; i++) 
1230 
{ 
1231 
if (fep>separate_dvdl[i])

1232 
{ 
1233 
fprintf(outfile, "%7.3f", fep>all_lambda[i][ifep]);

1234 
} 
1235 
else if (i == efptTEMPERATURE && bSimTemp) 
1236 
{ 
1237 
fprintf(outfile, "%9.3f", simtemp>temperatures[ifep]);

1238 
} 
1239 
} 
1240 
if (EWL(expand>elamstats) && (!(dfhist>bEquil))) /* if performing WL and still haven't equilibrated */ 
1241 
{ 
1242 
if (expand>elamstats == elamstatsWL)

1243 
{ 
1244 
fprintf(outfile, " %8d", (int)dfhist>wl_histo[ifep]); 
1245 
} 
1246 
else

1247 
{ 
1248 
fprintf(outfile, " %8.3f", dfhist>wl_histo[ifep]);

1249 
} 
1250 
} 
1251 
else /* we have equilibrated weights */ 
1252 
{ 
1253 
fprintf(outfile, " %8d", dfhist>n_at_lam[ifep]);

1254 
} 
1255 
if (expand>elamstats == elamstatsMINVAR)

1256 
{ 
1257 
fprintf(outfile, " %10.5f %10.5f %10.5f %10.5f", dfhist>sum_weights[ifep], dfhist>sum_dg[ifep], dg, dv);

1258 
} 
1259 
else

1260 
{ 
1261 
fprintf(outfile, " %10.5f %10.5f %10.5f", dfhist>sum_weights[ifep], dw, dfhist>dfavg[ifep] );

1262 
} 
1263 
if (ifep == fep_state)

1264 
{ 
1265 
fprintf(outfile, " <<\n");

1266 
} 
1267 
else

1268 
{ 
1269 
fprintf(outfile, " \n");

1270 
} 
1271 
} 
1272 
fprintf(outfile, "\n");

1273  
1274 
if ((mod(step, expand>nstTij) == 0) && (expand>nstTij > 0) && (step > 0)) 
1275 
{ 
1276 
fprintf(outfile, " Transition Matrix\n");

1277 
for (ifep = 0; ifep < nlim; ifep++) 
1278 
{ 
1279 
fprintf(outfile, "%12d", (ifep+1)); 
1280 
} 
1281 
fprintf(outfile, "\n");

1282 
for (ifep = 0; ifep < nlim; ifep++) 
1283 
{ 
1284 
for (jfep = 0; jfep < nlim; jfep++) 
1285 
{ 
1286 
if (dfhist>n_at_lam[ifep] > 0) 
1287 
{ 
1288 
if (expand>bSymmetrizedTMatrix)

1289 
{ 
1290 
Tprint = (dfhist>Tij[ifep][jfep]+dfhist>Tij[jfep][ifep])/(dfhist>n_at_lam[ifep]+dfhist>n_at_lam[jfep]); 
1291 
} 
1292 
else

1293 
{ 
1294 
Tprint = (dfhist>Tij[ifep][jfep])/(dfhist>n_at_lam[ifep]); 
1295 
} 
1296 
} 
1297 
else

1298 
{ 
1299 
Tprint = 0.0; 
1300 
} 
1301 
fprintf(outfile, "%12.8f", Tprint);

1302 
} 
1303 
fprintf(outfile, "%3d\n", (ifep+1)); 
1304 
} 
1305  
1306 
fprintf(outfile, " Empirical Transition Matrix\n");

1307 
for (ifep = 0; ifep < nlim; ifep++) 
1308 
{ 
1309 
fprintf(outfile, "%12d", (ifep+1)); 
1310 
} 
1311 
fprintf(outfile, "\n");

1312 
for (ifep = 0; ifep < nlim; ifep++) 
1313 
{ 
1314 
for (jfep = 0; jfep < nlim; jfep++) 
1315 
{ 
1316 
if (dfhist>n_at_lam[ifep] > 0) 
1317 
{ 
1318 
if (expand>bSymmetrizedTMatrix)

1319 
{ 
1320 
Tprint = (dfhist>Tij_empirical[ifep][jfep]+dfhist>Tij_empirical[jfep][ifep])/(dfhist>n_at_lam[ifep]+dfhist>n_at_lam[jfep]); 
1321 
} 
1322 
else

1323 
{ 
1324 
Tprint = dfhist>Tij_empirical[ifep][jfep]/(dfhist>n_at_lam[ifep]); 
1325 
} 
1326 
} 
1327 
else

1328 
{ 
1329 
Tprint = 0.0; 
1330 
} 
1331 
fprintf(outfile, "%12.8f", Tprint);

1332 
} 
1333 
fprintf(outfile, "%3d\n", (ifep+1)); 
1334 
} 
1335 
} 
1336 
} 
1337 
} 
1338  
1339 
extern int ExpandedEnsembleDynamics(FILE *log, t_inputrec *ir, gmx_enerdata_t *enerd, 
1340 
t_state *state, t_extmass *MassQ, int fep_state, df_history_t *dfhist,

1341 
gmx_int64_t step, 
1342 
rvec *v, t_mdatoms *mdatoms) 
1343 
/* Note that the state variable is only needed for simulated tempering, not

1344 
Hamiltonian expanded ensemble. May be able to remove it after integrator refactoring. */

1345 
{ 
1346 
real *pfep_lamee, *scaled_lamee, *weighted_lamee; 
1347 
double *p_k;

1348 
int i, nlim, lamnew, totalsamples;

1349 
real oneovert, maxscaled = 0, maxweighted = 0; 
1350 
t_expanded *expand; 
1351 
t_simtemp *simtemp; 
1352 
double *temperature_lambdas;

1353 
gmx_bool bIfReset, bSwitchtoOneOverT, bDoneEquilibrating = FALSE; 
1354  
1355 
expand = ir>expandedvals; 
1356 
simtemp = ir>simtempvals; 
1357 
nlim = ir>fepvals>n_lambda; 
1358  
1359 
snew(scaled_lamee, nlim); 
1360 
snew(weighted_lamee, nlim); 
1361 
snew(pfep_lamee, nlim); 
1362 
snew(p_k, nlim); 
1363  
1364 
/* update the count at the current lambda*/

1365 
dfhist>n_at_lam[fep_state]++; 
1366  
1367 
/* need to calculate the PV term somewhere, but not needed here? Not until there's a lambda state that's

1368 
pressure controlled.*/

1369 
/*

1370 
pVTerm = 0;

1371 
where does this PV term go?

1372 
for (i=0;i<nlim;i++)

1373 
{

1374 
fep_lamee[i] += pVTerm;

1375 
}

1376 
*/

1377  
1378 
/* determine the minimum value to avoid overflow. Probably a better way to do this */

1379 
/* we don't need to include the pressure term, since the volume is the same between the two.

1380 
is there some term we are neglecting, however? */

1381  
1382 
if (ir>efep != efepNO)

1383 
{ 
1384 
for (i = 0; i < nlim; i++) 
1385 
{ 
1386 
if (ir>bSimTemp)

1387 
{ 
1388 
/* Note  this assumes no mass changes, since kinetic energy is not added . . . */

1389 
scaled_lamee[i] = (enerd>enerpart_lambda[i+1]enerd>enerpart_lambda[0])/(simtemp>temperatures[i]*BOLTZ) 
1390 
+ enerd>term[F_EPOT]*(1.0/(simtemp>temperatures[i]) 1.0/(simtemp>temperatures[fep_state]))/BOLTZ; 
1391 
} 
1392 
else

1393 
{ 
1394 
scaled_lamee[i] = (enerd>enerpart_lambda[i+1]enerd>enerpart_lambda[0])/(expand>mc_temp*BOLTZ); 
1395 
/* mc_temp is currently set to the system reft unless otherwise defined */

1396 
} 
1397  
1398 
/* save these energies for printing, so they don't get overwritten by the next step */

1399 
/* they aren't overwritten in the nonfree energy case, but we always print with these

1400 
for simplicity */

1401 
} 
1402 
} 
1403 
else

1404 
{ 
1405 
if (ir>bSimTemp)

1406 
{ 
1407 
for (i = 0; i < nlim; i++) 
1408 
{ 
1409 
scaled_lamee[i] = enerd>term[F_EPOT]*(1.0/simtemp>temperatures[i]  1.0/simtemp>temperatures[fep_state])/BOLTZ; 
1410 
} 
1411 
} 
1412 
} 
1413  
1414 
for (i = 0; i < nlim; i++) 
1415 
{ 
1416 
pfep_lamee[i] = scaled_lamee[i]; 
1417  
1418 
weighted_lamee[i] = dfhist>sum_weights[i]  scaled_lamee[i]; 
1419 
if (i == 0) 
1420 
{ 
1421 
maxscaled = scaled_lamee[i]; 
1422 
maxweighted = weighted_lamee[i]; 
1423 
} 
1424 
else

1425 
{ 
1426 
if (scaled_lamee[i] > maxscaled)

1427 
{ 
1428 
maxscaled = scaled_lamee[i]; 
1429 
} 
1430 
if (weighted_lamee[i] > maxweighted)

1431 
{ 
1432 
maxweighted = weighted_lamee[i]; 
1433 
} 
1434 
} 
1435 
} 
1436  
1437 
for (i = 0; i < nlim; i++) 
1438 
{ 
1439 
scaled_lamee[i] = maxscaled; 
1440 
weighted_lamee[i] = maxweighted; 
1441 
} 
1442  
1443 
/* update weights  we decide whether or not to actually do this inside */

1444  
1445 
bDoneEquilibrating = UpdateWeights(nlim, expand, dfhist, fep_state, scaled_lamee, weighted_lamee, step); 
1446 
if (bDoneEquilibrating)

1447 
{ 
1448 
if (log)

1449 
{ 
1450 
fprintf(log, "\nStep %d: Weights have equilibrated, using criteria: %s\n", (int)step, elmceq_names[expand>elmceq]); 
1451 
} 
1452 
} 
1453 

1454  
1455 
if (expand>elmcmove == elmcmoveAIM)

1456 
{ 
1457  
1458 
lamnew = AIMChooseNewLambda(nlim, expand, dfhist, fep_state, weighted_lamee, 
1459 
ir>expandedvals>lmc_seed, step, enerd); 
1460 
} 
1461 
else

1462 
{ 
1463  
1464 
lamnew = ChooseNewLambda(nlim, expand, dfhist, fep_state, weighted_lamee, p_k, 
1465 
ir>expandedvals>lmc_seed, step); 
1466 
} 
1467  
1468 
/* if using simulated tempering, we need to adjust the temperatures */

1469 
if (ir>bSimTemp && (lamnew != fep_state)) /* only need to change the temperatures if we change the state */ 
1470 
{ 
1471 
int i, j, n, d;

1472 
real *buf_ngtc; 
1473 
real told; 
1474 
int nstart, nend, gt;

1475  
1476 
snew(buf_ngtc, ir>opts.ngtc); 
1477  
1478 
for (i = 0; i < ir>opts.ngtc; i++) 
1479 
{ 
1480 
if (ir>opts.ref_t[i] > 0) 
1481 
{ 
1482 
told = ir>opts.ref_t[i]; 
1483 
ir>opts.ref_t[i] = simtemp>temperatures[lamnew]; 
1484 
buf_ngtc[i] = sqrt(ir>opts.ref_t[i]/told); /* using the buffer as temperature scaling */

1485 
} 
1486 
} 
1487  
1488 
/* we don't need to manipulate the ekind information, as it isn't due to be reset until the next step anyway */

1489  
1490 
nstart = 0;

1491 
nend = mdatoms>homenr; 
1492 
for (n = nstart; n < nend; n++)

1493 
{ 
1494 
gt = 0;

1495 
if (mdatoms>cTC)

1496 
{ 
1497 
gt = mdatoms>cTC[n]; 
1498 
} 
1499 
for (d = 0; d < DIM; d++) 
1500 
{ 
1501 
v[n][d] *= buf_ngtc[gt]; 
1502 
} 
1503 
} 
1504  
1505 
if (IR_NPT_TROTTER(ir)  IR_NPH_TROTTER(ir)  IR_NVT_TROTTER(ir))

1506 
{ 
1507 
/* we need to recalculate the masses if the temperature has changed */

1508 
init_npt_masses(ir, state, MassQ, FALSE); 
1509 
for (i = 0; i < state>nnhpres; i++) 
1510 
{ 
1511 
for (j = 0; j < ir>opts.nhchainlength; j++) 
1512 
{ 
1513 
state>nhpres_vxi[i+j] *= buf_ngtc[i]; 
1514 
} 
1515 
} 
1516 
for (i = 0; i < ir>opts.ngtc; i++) 
1517 
{ 
1518 
for (j = 0; j < ir>opts.nhchainlength; j++) 
1519 
{ 
1520 
state>nosehoover_vxi[i+j] *= buf_ngtc[i]; 
1521 
} 
1522 
} 
1523 
} 
1524 
sfree(buf_ngtc); 
1525 
} 
1526  
1527 
/* now check on the WangLandau updating critera */

1528  
1529 
if (EWL(expand>elamstats))

1530 
{ 
1531 
bSwitchtoOneOverT = FALSE; 
1532 
if (expand>bWLoneovert)

1533 
{ 
1534 
totalsamples = 0;

1535 
for (i = 0; i < nlim; i++) 
1536 
{ 
1537 
totalsamples += dfhist>n_at_lam[i]; 
1538 
} 
1539 
oneovert = (1.0*nlim)/totalsamples; 
1540 
/* oneovert has decreasd by a bit since last time, so we actually make sure its within one of this number */

1541 
/* switch to 1/t incrementing when wl_delta has decreased at least once, and wl_delta is now less than 1/t */

1542 
if ((dfhist>wl_delta <= ((totalsamples)/(totalsamples1.00001))*oneovert) && 
1543 
(dfhist>wl_delta < expand>init_wl_delta)) 
1544 
{ 
1545 
bSwitchtoOneOverT = TRUE; 
1546 
} 
1547 
} 
1548 
if (bSwitchtoOneOverT)

1549 
{ 
1550 
dfhist>wl_delta = oneovert; /* now we reduce by this each time, instead of only at flatness */

1551 
} 
1552 
else

1553 
{ 
1554 
bIfReset = CheckHistogramRatios(nlim, dfhist>wl_histo, expand>wl_ratio); 
1555 
if (bIfReset)

1556 
{ 
1557 
for (i = 0; i < nlim; i++) 
1558 
{ 
1559 
dfhist>wl_histo[i] = 0;

1560 
} 
1561 
dfhist>wl_delta *= expand>wl_scale; 
1562 
if (log)

1563 
{ 
1564 
fprintf(log, "\nStep %d: weights are now:", (int)step); 
1565 
for (i = 0; i < nlim; i++) 
1566 
{ 
1567 
fprintf(log, " %.5f", dfhist>sum_weights[i]);

1568 
} 
1569 
fprintf(log, "\n");

1570 
} 
1571 
} 
1572 
} 
1573 
} 
1574 
sfree(pfep_lamee); 
1575 
sfree(scaled_lamee); 
1576 
sfree(weighted_lamee); 
1577 
sfree(p_k); 
1578  
1579 
return lamnew;

1580 
} 