#ifndef O2SCL_ANNEAL_PARA_H
#define O2SCL_ANNEAL_PARA_H

#ifdef O2SCL_MPI
#include <mpi.h>
#endif

#include <o2scl/anneal_gsl.h>
#include <o2scl/vector.h>

#ifndef DOXYGEN_NO_O2NS
namespace o2scl {
#endif
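  /** \brief Multidimensional minimization by simulated annealing
      (OpenMP/MPI version of \ref anneal_gsl)
  */
  template<class func_t, class vec_t> class anneal_para :
    public anneal_gsl<func_t,vec_t> {

  public:

  /// The number of OpenMP threads
  size_t n_threads;

  /// Random number generators, one for each thread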
  std::vector<rng_gsl> vrng;
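  /** \brief Make a step to a new attempted minimum, using the
      random number generator for thread \c ithread
  */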
  virtual int step(vec_t &x, vec_t &sx, int nvar, size_t ithread) {
    for(int i=0;i<nvar;i++) {
      double u=vrng[ithread].random();
      // The step size in direction i, taken from the step vector in
      // the parent class (wrapping around if nvar is larger than
      // the step vector size)
      double step_i=this->step_vec[i%this->step_vec.size()];
      sx[i]=(2.0*u-1.0)*step_i+x[i];
    }
    return 0;
  }
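  /** \brief Minimize the functions in \c func, one per OpenMP
      thread, returning the minimum \c fmin at point \c x0
  */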
  virtual int mmin(size_t nv, vec_t &x0, double &fmin,
                   std::vector<func_t> &func) {

    if (nv==0) {
      O2SCL_ERR2("Tried to minimize over zero variables ",
                 "in anneal_para::mmin().",exc_einval);
    }

    if (func.size()<n_threads) {
      std::cout << "anneal_para::mmin(): Not enough functions for "
                << n_threads << " threads. Setting n_threads to "
                << func.size() << "." << std::endl;
      n_threads=func.size();
    }
    unsigned long int s=time(0);
    for(size_t it=0;it<n_threads;it++) {
      vrng[it].set_seed(s+it);
    }
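    // Allocate the per-thread storage and evaluate the initial
    // point on each thread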
#pragma omp parallel default(shared)
    {
#pragma omp for
      for(size_t it=0;it<n_threads;it++) {
        new_x[it].resize(nv);
        best_x[it].resize(nv);
        old_x[it].resize(nv);
        for(size_t j=0;j<nv;j++) x[it][j]=x0[j];
        E[it]=func[it](nv,x[it]);
        best_E[it]=E[it];
      }
    }
    // Main loop: iterate until all of the threads have converged
    bool done=false;
    while (done==false) {

#pragma omp parallel default(shared)
      {
#pragma omp for
        for(size_t it=0;it<n_threads;it++) {

          // Copy the starting point for the call to next() below
          for(size_t j=0;j<nv;j++) old_x[it][j]=x[it][j];
          old_E[it]=E[it];

          // Attempt ntrial steps at the current temperature
          for(int i=0;i<this->ntrial;++i) {
            step(x[it],new_x[it],nv,it);
            new_E[it]=func[it](nv,new_x[it]);

            // Store the best point found so far
            if (new_E[it]<=best_E[it]) {
              for(size_t j=0;j<nv;j++) best_x[it][j]=new_x[it][j];
              best_E[it]=new_E[it];
            }

            // Accept downhill moves unconditionally, and uphill
            // moves with the Boltzmann probability
            if (new_E[it]<E[it]) {
              for(size_t j=0;j<nv;j++) x[it][j]=new_x[it][j];
              E[it]=new_E[it];
            } else {
              r[it]=vrng[it].random();
              if (r[it]<exp(-(new_E[it]-E[it])/(this->boltz*T))) {
                for(size_t j=0;j<nv;j++) x[it][j]=new_x[it][j];
                E[it]=new_E[it];
              }
            }
          }
        }
      }
      // End of parallel region
      // Collect the best point over all of the threads
      fmin=best_E[0];
      for(size_t iv=0;iv<nv;iv++) {
        x0[iv]=best_x[0][iv];
      }
      for(size_t i=1;i<n_threads;i++) {
        if (best_E[i]<fmin) {
          fmin=best_E[i];
          for(size_t iv=0;iv<nv;iv++) {
            x0[iv]=best_x[i][iv];
          }
        }
      }
      if (this->verbose>0) {
        this->print_iter(nv,x0,fmin,iter,T,"anneal_gsl");
        iter++;
      }
      // Decide on each thread whether to decrease the temperature
      // or to finish
#pragma omp parallel default(shared)
      {
#pragma omp for
        for(size_t it=0;it<n_threads;it++) {
          next(nv,old_x[it],old_E[it],x[it],E[it],T,nmoves,
               x0,fmin,done_arr[it]);
        }
      }
      // Stop only when every thread has converged
      done=true;
      for(size_t it=0;it<n_threads;it++) {
        if (done_arr[it]==0) done=false;
      }
    }
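    // If MPI is used, collect the results from all ranks: the best
    // point is passed down the chain of ranks to rank 0, and the
    // final result is then passed back up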
#ifdef O2SCL_MPI
    int mpi_rank, mpi_size;
    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
    double fmin_new;
    std::vector<double> x0_new(nv);
    // Receive the result from the next higher rank and keep
    // whichever of the two points is better
    if (mpi_rank<mpi_size-1) {
      MPI_Recv(&fmin_new,1,MPI_DOUBLE,mpi_rank+1,0,MPI_COMM_WORLD,
               MPI_STATUS_IGNORE);
      MPI_Recv(&(x0_new[0]),nv,MPI_DOUBLE,mpi_rank+1,1,MPI_COMM_WORLD,
               MPI_STATUS_IGNORE);
      if (fmin_new<fmin) {
        fmin=fmin_new;
        for(size_t ik=0;ik<nv;ik++) x0[ik]=x0_new[ik];
      } else {
        for(size_t ik=0;ik<nv;ik++) x0_new[ik]=x0[ik];
      }
    }
    // Send the result to the next lower rank
    if (mpi_size>1 && mpi_rank>0) {
      for(size_t ik=0;ik<nv;ik++) x0_new[ik]=x0[ik];
      MPI_Send(&fmin,1,MPI_DOUBLE,mpi_rank-1,0,MPI_COMM_WORLD);
      MPI_Send(&(x0_new[0]),nv,MPI_DOUBLE,mpi_rank-1,1,MPI_COMM_WORLD);
    }
    // Obtain the final result from the next lower rank
    if (mpi_size>1 && mpi_rank>0) {
      MPI_Recv(&fmin_new,1,MPI_DOUBLE,mpi_rank-1,2,MPI_COMM_WORLD,
               MPI_STATUS_IGNORE);
      MPI_Recv(&(x0_new[0]),nv,MPI_DOUBLE,mpi_rank-1,3,MPI_COMM_WORLD,
               MPI_STATUS_IGNORE);
      fmin=fmin_new;
      for(size_t ik=0;ik<nv;ik++) x0[ik]=x0_new[ik];
    }
    // Pass the final result back up to the next higher rank
    if (mpi_rank<mpi_size-1) {
      MPI_Send(&fmin,1,MPI_DOUBLE,mpi_rank+1,2,MPI_COMM_WORLD);
      MPI_Send(&(x0_new[0]),nv,MPI_DOUBLE,mpi_rank+1,3,MPI_COMM_WORLD);
    }
    MPI_Barrier(MPI_COMM_WORLD);
#endif

    return 0;
  }
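  /** \brief Determine how to change the minimization for the
      next iteration
  */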
  virtual int next(size_t nvar, vec_t &x_old, double min_old,
                   vec_t &x_new, double min_new, double &T,
                   size_t n_moves, vec_t &best_x, double best_E,
                   int &finished) {

    // If the temperature has dropped below the tolerance, finish
    if (T/this->T_dec<this->tol_abs) {
      finished=1;
      return success;
    }
    // If no moves were accepted, decrease the step sizes
    // (following the step-size update in the parent anneal_gsl)
    if (n_moves==0) {
      for(size_t i=0;i<nvar;i++) {
        this->step_vec[i%this->step_vec.size()]/=this->step_dec;
      }
    }
    // Decrease the temperature
    T/=this->T_dec;
    return success;
  }
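  /** \brief Minimize the function \c func over \c nv variables,
      duplicating the function object for each thread
  */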
  virtual int mmin(size_t nv, vec_t &x0, double &fmin,
                   func_t &func) {
    // Make one copy of the function for each thread
    std::vector<func_t> vf(n_threads);
    for(size_t it=0;it<n_threads;it++) vf[it]=func;
    return mmin(nv,x0,fmin,vf);
  }
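  /// Return string denoting type ("anneal_para")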
  virtual const char *type() { return "anneal_para"; }

  };
#ifndef DOXYGEN_NO_O2NS
}
#endif

#endif