!**********************************************************************
! Copyright 1998,1999,2000,2001,2002,2005,2007,2008,2009,2010         *
! Andreas Stohl, Petra Seibert, A. Frank, Gerhard Wotawa,             *
! Caroline Forster, Sabine Eckhardt, John Burkhart, Harald Sodemann   *
!                                                                     *
! This file is part of FLEXPART.                                      *
!                                                                     *
! FLEXPART is free software: you can redistribute it and/or modify    *
! it under the terms of the GNU General Public License as published by*
! the Free Software Foundation, either version 3 of the License, or   *
! (at your option) any later version.                                 *
!                                                                     *
! FLEXPART is distributed in the hope that it will be useful,         *
! but WITHOUT ANY WARRANTY; without even the implied warranty of      *
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the        *
! GNU General Public License for more details.                        *
!                                                                     *
! You should have received a copy of the GNU General Public License   *
! along with FLEXPART. If not, see <http://www.gnu.org/licenses/>.    *
!**********************************************************************

subroutine convmix(itime)
  !                     i
  !**************************************************************
  ! handles all the calculations related to convective mixing
  ! Petra Seibert, Bernd C. Krueger, Feb 2001
  ! nested grids included, Bernd C. Krueger, May 2001
  !
  ! Changes by Caroline Forster, April 2004 - February 2005:
  !   convmix called every lsynctime seconds
  ! CHANGES by A. Stohl:
  !   various run-time optimizations - February 2005
  !**************************************************************

  use par_mod, only: nxmax, maxpart, maxnests
  use com_mod, only: iflux, xresoln, xln, xrn, yln, yrn, tt2, ps, itra1, &
       nxn, nyn, xtra1, ytra1, itramem, ztra1, lage, qvh, tth, td2, qvhn, &
       tthn, td2n, tt2n, psn, path, yresoln, memind, memtime, nx, ny, numpart, &
       nageclass, nuvz, numbnests, lsynctime
  use conv_mod, only: tconv, qconv, pconv, nconvlev, psconv, &
       td2conv, tt2conv, cbaseflux, cbasefluxn, fmassfrac, nconvtop
  use random_mod, only: ran3, ran3_conv, idummy_ran3_conv
  use omp_lib, only: OMP_GET_THREAD_NUM

  implicit none

  integer :: igr, igrold, ipart, itime, ix, j, inest
  integer :: ipconv
  integer :: jy, kpart, ktop, ngrid, kz
  integer,allocatable,dimension(:) :: igrid, ipoint
  integer,allocatable,dimension(:,:) :: igridn

  ! itime [s]                 current time
  ! igrid(maxpart)            horizontal grid position of each particle
  ! igridn(maxpart,maxnests)  dto. for nested grids
  ! ipoint(maxpart)           pointer to access particles according to grid position

  logical :: lconv
  real :: x, y, xtn, ytn, ztold, delt
  real :: dt1, dt2, dtt
  integer :: mind1, mind2
  ! dt1,dt2,dtt,mind1,mind2   variables used for time interpolation
  integer :: itage, nage
  real,parameter :: eps=nxmax/3.e5
  integer, allocatable, dimension(:) :: frst
  integer :: cnt, stat, kk

  ! for debugging, remove again
  ! character(len=255) :: fn
  ! integer :: thread
  ! integer :: sumconv

  ! allocate local variables on the heap
  allocate(igrid(maxpart),stat=stat)
  allocate(ipoint(maxpart),stat=stat)
  allocate(igridn(maxpart,maxnests),stat=stat)

  !monitoring variables
  !real sumconv,sumall

  ! Calculate auxiliary variables for time interpolation
  !*****************************************************

  dt1=real(itime-memtime(1))
  dt2=real(memtime(2)-itime)
  dtt=1./(dt1+dt2)
  mind1=memind(1)
  mind2=memind(2)
  delt=real(abs(lsynctime))

  lconv = .false.

  ! if no particles are present return after initialization
  !********************************************************

  if (numpart.le.0) return
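
  ! Horizontal positions are encoded as linear column indices
  ! (igrid = 1 + jy*nx + ix on the mother grid, analogously for nests) so
  ! that particles can later be sorted and grouped by grid column;
  ! igrid = -1 marks particles that are inactive at this time step or are
  ! handled by a nest.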

  ! Assign igrid and igridn, which are pseudo grid numbers indicating particles
  ! that are outside the part of the grid under consideration
  ! (e.g. particles near the poles or particles in other nests).
  ! Do this for all nests but use only the innermost nest; for all others
  ! igrid shall be -1
  ! Also, initialize index vector ipoint
  !************************************************************************

!$OMP PARALLEL DEFAULT(none) &
!$OMP PRIVATE(ipart, x, y, j, ngrid, xtn, ytn, ix, jy) &
!$OMP SHARED(numpart, igrid, igridn, ipoint, itra1, itime, numbnests, &
!$OMP   xln, yln, xrn, yrn, xresoln, yresoln, xtra1, ytra1, nxn, nx)
#if (defined STATIC_SCHED)
!$OMP DO SCHEDULE(static)
#else
!$OMP DO SCHEDULE(dynamic, max(1,numpart/1000))
#endif
  do ipart=1,numpart
    igrid(ipart)=-1
    do j=numbnests,1,-1
      igridn(ipart,j)=-1
    end do
    ipoint(ipart)=ipart
    ! do not consider particles that are (yet) not part of simulation
    if (itra1(ipart).ne.itime) cycle
    x = real(xtra1(ipart))
    y = real(ytra1(ipart))

    ! Determine which nesting level is to be used
    !**********************************************************
    ngrid=0
    do j=numbnests,1,-1
      if ( x.gt.xln(j)+eps .and. x.lt.xrn(j)-eps .and. &
           y.gt.yln(j)+eps .and. y.lt.yrn(j)-eps ) then
        ngrid=j
        exit
      endif
    end do

    ! Determine nested grid coordinates
    !**********************************
    if (ngrid.gt.0) then
      ! nested grids
      xtn=(x-xln(ngrid))*xresoln(ngrid)
      ytn=(y-yln(ngrid))*yresoln(ngrid)
      ix=nint(xtn)
      jy=nint(ytn)
      igridn(ipart,ngrid) = 1 + jy*nxn(ngrid) + ix
    else if(ngrid.eq.0) then
      ! mother grid
      ix=nint(x)
      jy=nint(y)
      igrid(ipart) = 1 + jy*nx + ix
    endif
  end do
!$OMP END DO
!$OMP END PARALLEL

  !sumall = 0.
  !sumconv = 0.

  !*****************************************************************************
  ! 1. Now, do everything for the mother domain and, later, for all of the
  !    nested domains. While all particles have to be considered for
  !    redistribution, the Emanuel convection scheme only needs to be called
  !    once for every grid column where particles are present. Therefore,
  !    particles are sorted according to their grid position. Whenever a new
  !    grid cell is encountered by looping through the sorted particles, the
  !    convection scheme is called.
  !*****************************************************************************

  ! sort particles according to horizontal position and calculate index vector IPOINT

  call sort2(numpart,igrid,ipoint)

  ! determine the index where a new grid cell starts;
  ! this also selects the grid cells where particles are present

  allocate(frst(nx*ny), stat=stat)
  if (stat.ne.0) write(*,*) "ERROR: could not allocate frst"

  cnt = 1
  igrold = -1
  do kpart=1,numpart
    if (igrold.ne.igrid(kpart)) then
      frst(cnt) = kpart
      igrold=igrid(kpart)
      cnt=cnt+1
    endif
  enddo
  frst(cnt) = numpart+1  ! if all particles are in nested grids cnt will be 1, otherwise >1

  do kpart=1, numpart
    ! get random number outside parallel region
    ! (not necessary to do again for nested domains, because each particle
    !  is only treated once)
    if (itra1(kpart).eq.itime) then
      ran3_conv(kpart) = ran3(idummy_ran3_conv)
    endif
  end do

  ! Loop over grid columns with particles

  if (cnt.gt.1) then ! any particles in non-nested areas?

!$OMP PARALLEL DEFAULT(none) &
!$OMP PRIVATE(kk, jy, ix, kz, ktop, lconv, kpart, ipart, ztold, &
!$OMP   nage, ipconv, itage) &
!$OMP SHARED(cnt, cbaseflux, frst, igrid, nx, mind1, mind2, ps, &
!$OMP   tt2, td2, tth, qvh, dt1, dt2, dtt, nuvz, delt, ipoint, &
!$OMP   iflux, itra1, itramem, nageclass, lage, xtra1, ytra1, ztra1)

! #if (defined _OPENMP)
!     thread = OMP_GET_THREAD_NUM()
! #endif

#if (defined STATIC_SCHED)
!$OMP DO SCHEDULE(static)
#else
!$OMP DO SCHEDULE(dynamic) ! using default chunk sizes
#endif
    do kk=1,cnt-1 ! loop through grid columns
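
      ! Each iteration treats one occupied grid column: the column profiles
      ! are interpolated to the current time, calcmatrix builds the convective
      ! redistribution matrix, and, if convection is active (lconv), redist
      ! assigns new vertical positions to all particles of the column.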

      ! column of particles that are inactive or lie within a nested domain
      ! (igrid=-1): don't calculate convection on the mother grid
      if (igrid(frst(kk)) .eq. -1) cycle

      jy = (igrid(frst(kk))-1)/nx
      ix = igrid(frst(kk)) - jy*nx - 1

      ! Interpolate all meteorological data needed for the convection scheme
      psconv=(ps(ix,jy,1,mind1)*dt2+ps(ix,jy,1,mind2)*dt1)*dtt
      tt2conv=(tt2(ix,jy,1,mind1)*dt2+tt2(ix,jy,1,mind2)*dt1)*dtt
      td2conv=(td2(ix,jy,1,mind1)*dt2+td2(ix,jy,1,mind2)*dt1)*dtt

!!$      do kz=1,nconvlev+1    !old
      do kz=1,nuvz-1           !bugfix
        tconv(kz)=(tth(ix,jy,kz+1,mind1)*dt2+ &
             tth(ix,jy,kz+1,mind2)*dt1)*dtt
        qconv(kz)=(qvh(ix,jy,kz+1,mind1)*dt2+ &
             qvh(ix,jy,kz+1,mind2)*dt1)*dtt
      end do

      ! Calculate translocation matrix
      call calcmatrix(lconv,delt,cbaseflux(ix,jy))

      ! treat column only if column has convection
      if (lconv) then
        ktop = 0
        ! sumconv = 0
        ! assign new vertical position to particle
        do kpart=frst(kk),frst(kk+1)-1
          ipart = ipoint(kpart)
          ztold=ztra1(ipart)
          call redist(ipart,ktop,ipconv)
          ! if (ipconv.le.0) sumconv = sumconv+1

          ! Calculate the gross fluxes across layer interfaces
          !***************************************************
          if (iflux.eq.1) then
            itage=abs(itra1(ipart)-itramem(ipart))
            do nage=1,nageclass
              if (itage.lt.lage(nage)) exit
            end do

            if (nage.le.nageclass) then
              call calcfluxes(nage,ipart,real(xtra1(ipart)), &
                   real(ytra1(ipart)),ztold)
            end if
          end if ! flux calculation
        end do ! particles in column

        ! write(*,*) "convmix:", thread, igrid(frst(kk)), cbaseflux(ix,jy), sumconv
      end if ! convection present
    end do ! grid cell loop
!$OMP END DO
!$OMP END PARALLEL
  end if ! particles in non-nested areas

  deallocate(frst)

  ! write(fn, '(A,A,I6.6)') trim(path(2)), 'cbaseflux_', itime
  ! call dump_field(fn, cbaseflux(0:nx-1, 0:ny-1), nx, ny, 1)

  !*****************************************************************************
  ! 2. Nested domains
  !*****************************************************************************

  nestloop : do inest=1,numbnests

    ! sort particles according to horizontal position and calculate index vector IPOINT

    do ipart=1,numpart
      ipoint(ipart)=ipart
      igrid(ipart) = igridn(ipart,inest)
    enddo

    call sort2(numpart,igrid,ipoint)

    ! determine the particle index where a new grid cell starts;
    ! this also selects the grid cells where particles are present

    allocate(frst(nxn(inest)*nyn(inest)), stat=stat)
    if (stat.ne.0) write(*,*) "ERROR: could not allocate frst"

    cnt = 1
    igrold = -1
    do kpart=1,numpart
      if (igrold.ne.igrid(kpart)) then
        frst(cnt) = kpart
        igrold=igrid(kpart)
        cnt=cnt+1
      endif
    enddo
    frst(cnt) = numpart+1  ! if no particles are in this nest cnt will remain 1, otherwise >1

    if (cnt.gt.1) then ! any particles in nested areas?

!$OMP PARALLEL DEFAULT(none) &
!$OMP PRIVATE(kk, jy, ix, kz, ktop, lconv, kpart, ipart, ztold, &
!$OMP   nage, ipconv, itage) &
!$OMP SHARED(cnt, cbasefluxn, frst, igrid, nxn, mind1, mind2, psn, &
!$OMP   tt2n, td2n, tthn, qvhn, dt1, dt2, dtt, nuvz, delt, ipoint, &
!$OMP   iflux, itra1, itramem, nageclass, lage, xtra1, ytra1, ztra1, &
!$OMP   inest)
#if (defined STATIC_SCHED)
!$OMP DO SCHEDULE(static)
#else
!$OMP DO SCHEDULE(dynamic) ! using default chunk sizes
#endif
      do kk=1,cnt-1 ! loop through grid columns
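
        ! Recover the nested grid indices (ix,jy) of this column from the
        ! linear index stored in igrid (igrid = 1 + jy*nxn(inest) + ix)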
        jy = (igrid(frst(kk))-1)/nxn(inest)
        ix = igrid(frst(kk)) - jy*nxn(inest) - 1

        ! Interpolate all meteorological data needed for the convection scheme
        psconv=(psn(ix,jy,1,mind1,inest)*dt2+ &
             psn(ix,jy,1,mind2,inest)*dt1)*dtt
        tt2conv=(tt2n(ix,jy,1,mind1,inest)*dt2+ &
             tt2n(ix,jy,1,mind2,inest)*dt1)*dtt
        td2conv=(td2n(ix,jy,1,mind1,inest)*dt2+ &
             td2n(ix,jy,1,mind2,inest)*dt1)*dtt

!!$        do kz=1,nconvlev+1    !old
        do kz=1,nuvz-1           !bugfix
          tconv(kz)=(tthn(ix,jy,kz+1,mind1,inest)*dt2+ &
               tthn(ix,jy,kz+1,mind2,inest)*dt1)*dtt
          qconv(kz)=(qvhn(ix,jy,kz+1,mind1,inest)*dt2+ &
               qvhn(ix,jy,kz+1,mind2,inest)*dt1)*dtt
        end do

        ! calculate translocation matrix
        !*******************************
        call calcmatrix(lconv,delt,cbasefluxn(ix,jy,inest))

        ! treat particles only if column has convection
        if (lconv) then
          ktop = 0
          do kpart=frst(kk), frst(kk+1)-1
            ! assign new vertical position to particle
            ipart = ipoint(kpart)
            ztold=ztra1(ipart)
            call redist(ipart,ktop,ipconv)
            ! if (ipconv.le.0) sumconv = sumconv+1

            ! Calculate the gross fluxes across layer interfaces
            !***************************************************
            if (iflux.eq.1) then
              itage=abs(itra1(ipart)-itramem(ipart))
              do nage=1,nageclass
                if (itage.lt.lage(nage)) exit
              end do

              if (nage.le.nageclass) then
                call calcfluxes(nage,ipart,real(xtra1(ipart)), &
                     real(ytra1(ipart)),ztold)
              end if
            end if ! flux calculation
          end do ! particles in column
        end if ! (lconv .eqv. .true.)
      end do ! grid cell loop
!$OMP END DO
!$OMP END PARALLEL
    end if ! any particles in nest

    deallocate(frst)

  end do nestloop

  !--------------------------------------------------------------------------

  !write(*,*)'############################################'
  !write(*,*)'TIME=',
  !    &  itime
  !write(*,*)'fraction of particles under convection',
  !    &  sumconv/(sumall+0.001)
  !write(*,*)'total number of particles',
  !    &  sumall
  !write(*,*)'number of particles under convection',
  !    &  sumconv
  !write(*,*)'############################################'

  deallocate(igrid)
  deallocate(ipoint)
  deallocate(igridn)

  return
end subroutine convmix
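
! Usage note (illustrative sketch, not part of this routine): convmix is
! called once per synchronisation interval from FLEXPART's time loop, as the
! header comment ("convmix called every lsynctime seconds") indicates. The
! variables ideltas and lconvection below are assumed driver-side settings
! shown only for illustration; exact guards around the call depend on the
! FLEXPART version:
!
!   do itime=0,ideltas,lsynctime
!     ...
!     if (lconvection.eq.1) call convmix(itime)
!     ...
!   end do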