C
C     $Id: divcon.F,v 1.9 1999/04/06 20:41:44 arjan Exp arjan $
C
C------------------------------------------------------------------------
      subroutine sebomd_energy(
     &     iflag, iflaggr, iflagch, ntb, nat, virial, edc,
     &     boxroar, xyzroar, gradroar, nresid, presid, pmechg, symbol2,
     &     keywords, options, pdmx)
C
C     iflag    = -1 error occurred
C             .   0 initialize divcon variables, do not subset
C             .     and do not calculate energy (occurs only once)
C             .   1 subset and calculate energy
C             .   2 do not subset (use old subsetting scheme) and
C             .     calculate energy
C
C     iflaggr  = 0 return intermolecular gradient in gradroar (cheap)
C             .  1 return inter + intramolecular gradient in gradroar
C             .    (more expensive)
C
C     iflagch  = 0 do not write charges to divcon.ch
C             .  1 write charges to divcon.ch
C
C     ntb      =   0 do not use periodic boundary conditions
C             .  > 0 use periodic boundary conditions (pbc = .true.)
C
C     nat      = the number of atoms
C             .  it is IMPERATIVE that the number of atoms, the
C             .  ordering of atoms and the atom types are identical in
C             .  the divcon input and roar input files!!!
C
C     virial   = the virial (in kcal/mol)
C
C     edc      = total energy (in kcal/mol)
C
C     boxroar  = box dimensions (3 dim.)
C
C     xyzroar  = the coordinates, format xyzroar(3*nat)
C
C     gradroar = the gradient, format gradroar(3*nat) (in kcal/A)
C             .  depending on the value of iflaggr, gradroar contains
C             .  the intermolecular or the inter + intramolecular
C             .  gradient
C
C     nresid   = number of residues
C
C     presid   = residue pointers
C
C     pmechg   = starting charges used when PME is on
C
C     symbol2  = atom types of the system
C
C     keywords = keywords to run Divcon
C
C     options  = more information about how to run Divcon
C                (corresponding to the part after END_COORD)
C
C     pdmx     = 0 do not print density matrix
C              = 1 print density matrix to divcon.dmx file
C
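C     Illustrative call sequence from a hypothetical MD driver (a
C     sketch only; names such as natom, box, crd, grd, chg, types,
C     keys and opts are placeholders, not part of this file): call
C     once with iflag = 0 to initialize, then with iflag = 1 at each
C     step where subsetting and an energy evaluation are wanted.
C
c      iflag = 0
c      call sebomd_energy(iflag, 1, 0, ntb, natom, virial, edc,
c     &     box, crd, grd, nres, presid, chg, types, keys, opts, 0)
c      do istep = 1, nstep
c         iflag = 1
c         call sebomd_energy(iflag, 1, 0, ntb, natom, virial, edc,
c     &        box, crd, grd, nres, presid, chg, types, keys, opts, 0)
c         if (iflag .eq. -1) stop
c      end do
C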
      implicit double precision (a-h,o-z)
#include "dipole.h"
#include "divcon.dim"
#include "divcon.h"
#include "constants.h"
#ifdef MPI
#include "mpif.h"
#endif
      dimension xyzroar(3*nat), boxroar(3), gradroar(3*nat), virial(4)
      dimension gradpme(3*nat), pmechg(nat)
      dimension ptchg(3,maxatm/3),ptchg2(3,maxatm/3),ptchg1(3,maxatm/3)
      dimension hybptchg(3,maxatm/3),com(3,maxatm/3)
      logical first
      integer presid(maxres), pdmx
      character*4 symbol2(nat)
      character keywords*400, options(30)*80

      data first /.true./
      save first

      integer nstepd
      data nstepd /0/
      save nstepd

      nstepd = nstepd + 1

CCC-----------------------------------------------------------------CC

!     zero gradient
      do i = 1, 3*nat
         gradroar(i) = 0.0d0
      end do

c     Box size (from Sander)
      do i = 1, 3
         dbox(i) = boxroar(i)
      enddo
      boxvol = dbox(1)*dbox(2)*dbox(3)

c      write(*,*) iflag, iflaggr, iflagch, nat, maxatm, virial(4), edc
      ierror = 0
      setch = .false.

      if (iflag.eq.0) then
C     .  initialize stuff
         pole = .true.
         call info_from_sander(netcharge, nsol,
     .        ipolyn, fullscf, screen, wrtscr, prtsub,
     .        pme, pmeqm, mullewald, cmewald, chewald,
     .        stand, clust, resclust,
     .        solute,
     .        mndo, am1, pm3, rm1, am1d, pm3pddg,
     .        nncore, dcbuff1, dcbuff2)
#ifdef MPI
         call sebomd_mpi_vars(nproc, myid, commsebomd)
C JV     Override screen output for non-master nodes
         if (myid .ne. 0) then
            open(unit=6,file='/dev/null')
         endif
         if (nproc.gt.maxproc) then
            write(iout,'(" ERROR: NUMBER OF PROCESSORS IS GREATER",
     $           " THAN MAXPROC",
     $           /" INCREASE MAXPROC IN divcon.dim AND RECOMPILE")')
            ierror = 1
            goto 1000
         endif
#endif
         call etimer(t1)
         exit_pb = t1
         ierror = 0
         call setunit
!        if (myid.eq.0) then
!           call opnfil(2,ierror)
!        else
!           call opnpfil(12,ierror)
!        endif
         if (ierror.ne.0) goto 1000
#ifdef MEMORY_OVERLAP
C     .  check for right dimensions of overlap arrays
         call chkovlp(ierror)
         if (ierror.ne.0) then
            write(iout,
     &           '(/" ERROR IN OVERLAP ARRAYS. CHECK DIVCON.DIM")')
            goto 1000
         endif
#endif
C     .  read keywords
         cart = .true.
         xyzspc = .true.
!        call rdkeys(keywords,ierror)
         autosub = clust.or.resclust.or.atmsub.or.ressub
     &        .or.(index(keywrd,'GRID').ne.0)
         if (ierror.ne.0) then
c           write(*,'(/" ERROR IN READING divcon.in")')
            write(iout,'(/" ERROR IN READING DIVCON KEYWORDS")')
            goto 1000
         endif
C     .  set the following variables by default
c        inter = .true.
         gradient = .true.
!        screen = .false.
!        prtsub = .false.
         centrl = .true.
         direct = .true.

c        Data from Amber:
c        number of atoms
         if (nat.gt.maxatm) then
            write(iout,'(" ERROR: TOO MANY ATOMS:",
     $           " INCREASE MAXATM IN divcon.dim")')
            iflag = -1
            return
         endif
         natoms = nat
c        periodic boundary conditions
         if (ntb.ne.0) then
            pbc = .true.
         else
            pbc = .false.
         endif
c        charges:
         do i = 1, nat
            chgpme(i) = pmechg(i)
         enddo
c        information about residues
         nres = nresid
         if (nres .gt. maxres) then
            write(iout,
     .           '(/" MAXIMUM ALLOWED NUMBER OF RESIDUES REACHED",
     .           /" -- INCREASE MAXRES PARAMETER IN divcon.dim",
     .           " AND RECOMPILE")')
            ierror = 1
            goto 1000
         endif
         do i = 1, nres
            irpnt(i) = presid(i)
         enddo
         irpnt(nres+1) = nat + 1
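c     .  A worked example of the residue pointer convention (numbers
c     .  are illustrative only): for a hypothetical system of 5 atoms
c     .  in 2 residues, with atoms 1-3 in residue 1 and atoms 4-5 in
c     .  residue 2, presid = (1, 4), so irpnt = (1, 4, 6) and residue
c     .  i spans atoms irpnt(i) .. irpnt(i+1)-1.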
c        type of atoms
         call rdelem(symbol2, ierror)
c        initial coordinates
         j = 1
         do i = 1, natoms
            xyz(1,i) = xyzroar(j)
            xyz(2,i) = xyzroar(j+1)
            xyz(3,i) = xyzroar(j+2)
            j = j + 3
         enddo
c        options for rdtail
         do i = 1, 30
            options2(i) = options(i)
         enddo
         if (ierror.ne.0) goto 1000
#ifdef MPI
         if (myid .eq. 0) then
#endif
!           call checking("divcon.in")
#ifdef MPI
         endif
#endif
c        If problem with convergence, use a new density matrix
         if (nstepd .gt. 1) guess = .false.
         call initp(0,ierror)
      endif ! iflag = 0

      if (iflag.eq.1) then
         if ((nstepd.ge.6) .and. ipolyn) then
            call dmxinter
         endif
      endif

C     see if it is an MC calculation or a single point calculation
      if (mcsim) then
         write(iout,'(" ERROR: CANNOT DO A MC SIMULATION",
     $        " WITHIN A MD SIMULATION")')
         iflag = -1
         return
c        (the MC drivers below are unreachable after the return above)
         if (iensemb.eq.0) then
            call nvtdriver(ierror)
            if (ierror.ne.0) goto 1000
         else
            call nptdriver(ierror)
            if (ierror.ne.0) goto 1000
         endif
      else
         call edriver(iflag, iflaggr, nat, xyzroar,
     $        gradroar, ierror, gradpme, symbol2)
         do i = 1, 4
!#ifdef MPI
!           virial(i) = vir(i) / nproc
!#else
            virial(i) = vir(i)
!#endif
         enddo
         if (pmeqm) then
            do i = 1, 3
!#ifdef MPI
!              virial(i) = virial(i) - virlrqm*eV2kcal/3.0d0/nproc
!#else
               virial(i) = virial(i) - virlrqm*eV2kcal/3.0d0
!#endif
            enddo
         endif
!        edc = etot
!        GM - 08/01/2000 - change to eheat (in kcal/mol, smaller float)
!#ifdef MPI
!        edc = eheat / nproc
!#else
         edc = eheat
!#endif
         if (ierror.ne.0) then
            write(*,'(/" ERROR IN CALCULATING DIVCON ENERGY")')
            goto 1000
         endif
      endif ! mcsim

#ifdef MPI
      if (myid .eq. 0) then
#endif
c     .  if the user has specified the 'wrtpdb' option, then write out
c     .  a pdb file even if the error code is not zero.
c        if (index(keywrd,'wrtpdb').ne.0) call wrtpdb
c     .  don't write an error message for a testrun calculation.
         if (index(keywrd,'TESTRUN').ne.0) stop
         if (iflagch.eq.1.and.iflag.ge.1) then
            call se_write_charges(natoms, edc, iatnum, xyz, atchg,
     .           atchg2, atchg3)
!           GM: 2010-11-04 begin
!           comment out the computation of water molecular dipoles
c           if (pole) then
c              call dipoleh2o(ptchg,ptchg1,ptchg2,hybptchg,com)
c           endif
!           GM: 2010-11-04 end
         endif
#ifdef MPI
      endif
#endif

c      write(*,*) "iflag ", iflag
      if (iflag.gt.0) then
         if (screen) then
!           write(iout,'(/"Divcon energy details :"
!    $           /"ee=",F20.10," ec=",F20.10," et=",F20.10,
!    $           /"hf=",F20.10," vir=",F20.10)') eelect,ecore,etot,
!    $           eheat,vir(4)
!           if (pme) then
!              write(iout,'(/"pmedir=",F20.10," pmerec=",F20.10,
!    $              " pmeself=",f20.10,
!    $              /"ecoul=",F20.10," elr=",F20.10)')
!    $              pmedir,pmerec,pmeself,ecoul,elr
!           endif
!           write(iout,'("ndiag=",i6)') ndiag
         endif
      endif

 1000 if (ierror.ne.0) then
         write(iout,'(/" DIVCON ERROR")')
         iflag = -1
         return
      endif

      call etimer(t2)

      if (pdmx .eq. 1) call wrtdmx(ierror)

      if ((iflag.eq.1) .and. ipolyn) then
!        save ip1
         do ii = 1, natoms+1
            ip13(ii) = ip12(ii)
            ip12(ii) = ip11(ii)
            ip11(ii) = ip1(ii)
         end do
!        save ipair and ijmat
         do ii = 1, ip13(natoms+1)
            ipair3(ii) = ipair2(ii)
            ijmat3(ii) = ijmat2(ii)
         end do
         do ii = 1, ip12(natoms+1)
            ipair2(ii) = ipair1(ii)
            ijmat2(ii) = ijmat1(ii)
         end do
         do ii = 1, ip11(natoms+1)
            ipair1(ii) = ipair(ii)
            ijmat1(ii) = ijmat(ii)
         end do
!        save pdiag
         do ii = 1, iimat(natoms+1)
            pdiag3(ii) = pdiag2(ii)
            pdiag2(ii) = pdiag1(ii)
            pdiag1(ii) = pdiag(ii)
         enddo
!        save pdiat
         do ii = 1, ijmat3(ip13(natoms+1))
            pdiat3(ii) = pdiat2(ii)
         end do
         do ii = 1, ijmat2(ip12(natoms+1))
            pdiat2(ii) = pdiat1(ii)
         end do
         do ii = 1, ijmat1(ip11(natoms+1))
            pdiat1(ii) = pdiat(ii)
         end do
      endif ! iflag = 1 and ipolyn

#ifdef MPI_IS_ON
      call MPI_FINALIZE(ierr)
#endif
      end
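C
C     A minimal sketch (not part of the original routine) of the
C     three-level history rotation used above for the ip1/ipair/ijmat,
C     pdiag and pdiat arrays: at each accepted step the newest data is
C     pushed into slot 1 and the older slots shift back, so slots 1..3
C     always hold the three most recent steps for the polynomial
C     density matrix extrapolation (dmxinter). The subroutine name and
C     arguments below are hypothetical.
C
c      subroutine rotate_history(n, anew, a1, a2, a3)
c      implicit double precision (a-h,o-z)
c      dimension anew(n), a1(n), a2(n), a3(n)
c      do i = 1, n
c         a3(i) = a2(i)
c         a2(i) = a1(i)
c         a1(i) = anew(i)
c      end do
c      return
c      end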