source: branches/flexpart91_hasod/src_parallel/convmix.f90 @ 8

Last change on this file: revision 8, checked in by hasod, 11 years ago

Added parallel version of Flexpart91

!**********************************************************************
! Copyright 1998,1999,2000,2001,2002,2005,2007,2008,2009,2010         *
! Andreas Stohl, Petra Seibert, A. Frank, Gerhard Wotawa,             *
! Caroline Forster, Sabine Eckhardt, John Burkhart, Harald Sodemann   *
!                                                                     *
! This file is part of FLEXPART.                                      *
!                                                                     *
! FLEXPART is free software: you can redistribute it and/or modify    *
! it under the terms of the GNU General Public License as published by*
! the Free Software Foundation, either version 3 of the License, or   *
! (at your option) any later version.                                 *
!                                                                     *
! FLEXPART is distributed in the hope that it will be useful,         *
! but WITHOUT ANY WARRANTY; without even the implied warranty of      *
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the       *
! GNU General Public License for more details.                        *
!                                                                     *
! You should have received a copy of the GNU General Public License   *
! along with FLEXPART.  If not, see <http://www.gnu.org/licenses/>.   *
!**********************************************************************

subroutine convmix(itime)
  !                     i
  !**************************************************************
  ! Handles all the calculations related to convective mixing
  !Petra Seibert, Bernd C. Krueger, Feb 2001
  !nested grids included, Bernd C. Krueger, May 2001
  !
  !Changes by Caroline Forster, April 2004 - February 2005:
  !  convmix called every lsynctime seconds
  !CHANGES by A. Stohl:
  !  various run-time optimizations - February 2005
  !**************************************************************

  use par_mod, only: nxmax, maxpart, maxnests

  use com_mod, only: iflux, xresoln, xln, xrn, yln, yrn, tt2, ps, itra1, &
        nxn, nyn, xtra1, ytra1, itramem, ztra1, lage, qvh, tth, td2, qvhn, &
        tthn, td2n, tt2n, psn, path, yresoln, memind, memtime, nx, ny, numpart, &
        nageclass, nuvz, numbnests, lsynctime

  use conv_mod, only: tconv, qconv, pconv, nconvlev, psconv, &
        td2conv, tt2conv, cbaseflux, cbasefluxn, fmassfrac, nconvtop

  use random_mod, only: ran3, ran3_conv, idummy_ran3_conv

  use omp_lib, only: OMP_GET_THREAD_NUM

  implicit none

  integer :: igr,igrold, ipart, itime, ix, j, inest
  integer :: ipconv
  integer :: jy, kpart, ktop, ngrid,kz
  integer,allocatable,dimension(:) :: igrid, ipoint
  integer,allocatable,dimension(:,:) :: igridn
  ! itime [s]                 current time
  ! igrid(maxpart)            horizontal grid position of each particle
  ! igridn(maxpart,maxnests)  the same, for nested grids
  ! ipoint(maxpart)           pointer to access particles according to grid position

  logical :: lconv
  real :: x, y, xtn,ytn, ztold, delt
  real :: dt1,dt2,dtt
  integer :: mind1,mind2
  ! dt1,dt2,dtt,mind1,mind2       variables used for time interpolation
  integer :: itage,nage
  real,parameter :: eps=nxmax/3.e5
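  ! eps                       small margin (in grid units) by which a particle must
  !                           lie inside a nest before that nesting level is used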
  integer, allocatable, dimension(:)  :: frst
  integer :: cnt, stat, kk
! for debugging, remove again
!  character(len=255)   :: fn
!  integer :: thread
!  integer :: sumconv

  ! allocate local variables on the heap
  allocate(igrid(maxpart),stat=stat)
  allocate(ipoint(maxpart),stat=stat)
  allocate(igridn(maxpart,maxnests),stat=stat)
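  ! (explicit allocation instead of automatic arrays, presumably to avoid
  !  stack limits for maxpart-sized work arrays in the threaded version)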

  !monitoring variables
  !real sumconv,sumall


  ! Calculate auxiliary variables for time interpolation
  !*****************************************************

  dt1=real(itime-memtime(1))
  dt2=real(memtime(2)-itime)
  dtt=1./(dt1+dt2)
  mind1=memind(1)
  mind2=memind(2)
  delt=real(abs(lsynctime))
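  ! Fields stored at memtime(1) and memtime(2) are combined with the weights
  ! dt2*dtt and dt1*dtt, i.e. linearly interpolated to the current time itime;
  ! delt is the convective time step (one lsynctime interval).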


  lconv = .false.

  ! if no particles are present return after initialization
  !********************************************************

  if (numpart.le.0) return

  ! Assign igrid and igridn, the pseudo grid numbers of each particle. A value
  ! of -1 marks particles that are outside the part of the grid under
  ! consideration (e.g. particles near the poles or particles in other nests).
  ! Do this for all nests, but assign a particle only to its innermost nest;
  ! all other entries remain -1.
  ! Also, initialize the index vector ipoint.
  !************************************************************************
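  ! (the pseudo grid number encodes the horizontal column as 1 + jy*nx + ix on
  !  the mother grid and as 1 + jy*nxn(j) + ix on nest j)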

!$OMP PARALLEL DEFAULT(none) &
!$OMP PRIVATE(ipart, x, y, j, ngrid, xtn, ytn, ix, jy) &
!$OMP SHARED(numpart, igrid, igridn, ipoint, itra1, itime, numbnests, &
!$OMP  xln, yln, xrn, yrn, xresoln, yresoln, xtra1, ytra1, nxn, nx)

#if (defined STATIC_SCHED)
!$OMP DO SCHEDULE(static)
#else
!$OMP DO SCHEDULE(dynamic, max(1,numpart/1000))
#endif
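! Note: the dynamic schedule with chunk size max(1,numpart/1000) is meant to
! balance threads when the per-particle work varies; building with STATIC_SCHED
! selects a plain static distribution of iterations instead.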

  do ipart=1,numpart
    igrid(ipart)=-1
    do j=numbnests,1,-1
      igridn(ipart,j)=-1
    end do
    ipoint(ipart)=ipart
  ! do not consider particles that are not (yet) part of the simulation
    if (itra1(ipart).ne.itime) cycle

    x = real(xtra1(ipart))
    y = real(ytra1(ipart))

  ! Determine which nesting level is to be used
  !**********************************************************

    ngrid=0
    do j=numbnests,1,-1
      if ( x.gt.xln(j)+eps .and. x.lt.xrn(j)-eps .and. &
           y.gt.yln(j)+eps .and. y.lt.yrn(j)-eps ) then
        ngrid=j
        exit
      endif
    end do

  ! Determine nested grid coordinates
  !**********************************

    if (ngrid.gt.0) then
  ! nested grids
      xtn=(x-xln(ngrid))*xresoln(ngrid)
      ytn=(y-yln(ngrid))*yresoln(ngrid)
      ix=nint(xtn)
      jy=nint(ytn)
      igridn(ipart,ngrid) = 1 + jy*nxn(ngrid) + ix
    else if(ngrid.eq.0) then
  ! mother grid
      ix=nint(x)
      jy=nint(y)
      igrid(ipart) = 1 + jy*nx + ix
    endif

  end do
!$OMP END DO
!$OMP END PARALLEL

  !sumall = 0.
  !sumconv = 0.
  !*****************************************************************************
  ! 1. Now, do everything for the mother domain and, later, for all of the nested domains
  ! While all particles have to be considered for redistribution, the Emanuel convection
  ! scheme only needs to be called once for every grid column where particles are present.
  ! Therefore, particles are sorted according to their grid position. Whenever a new grid
  ! cell is encountered by looping through the sorted particles, the convection scheme is called.
  !*****************************************************************************

  ! sort particles according to horizontal position and calculate index vector IPOINT

  call sort2(numpart,igrid,ipoint)
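  ! after sort2, igrid is in ascending order and ipoint(kpart) holds the original
  ! particle index of the kpart-th entry of the sorted list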

! determine the particle index at which each new grid cell starts in the sorted list;
! this also selects the grid cells in which particles are present
  allocate(frst(nx*ny), stat=stat)
  if (stat.ne.0) write(*,*) "ERROR: could not allocate frst"

  cnt = 1
  igrold = -1
  do kpart=1,numpart

    if (igrold.ne.igrid(kpart)) then
      frst(cnt) = kpart
      igrold=igrid(kpart)
      cnt=cnt+1
    endif
  enddo
  frst(cnt) = numpart+1
! if all particles are in nested grids, cnt will be 1; otherwise it is >1
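! particles kpart = frst(kk), ..., frst(kk+1)-1 of the sorted list now all lie
! in the same grid column, namely column igrid(frst(kk))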

  do kpart=1, numpart
!   get random number outside parallel region
!   (not necessary to do again for nested domains, because each particle is only treated once)
    if (itra1(kpart).eq.itime) then
      ran3_conv(kpart) = ran3(idummy_ran3_conv)
    endif
  end do
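! (ran3 keeps internal state in idummy_ran3_conv and is presumably not thread
!  safe; drawing one number per particle here lets redist use a pre-drawn
!  random number inside the parallel regions below)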

!       Loop over grid columns with particles
  if (cnt.gt.1) then ! any particles in non-nested areas?


!$OMP PARALLEL DEFAULT (none) &
!$OMP PRIVATE(kk,jy,ix,kz, ktop, lconv, kpart, ipart, ztold, &
!$OMP   nage, ipconv, itage) &
!$OMP SHARED(cnt, cbaseflux, frst, igrid, nx, mind1, mind2, ps, &
!$OMP   tt2, td2, tth, qvh, dt1, dt2, dtt, nuvz, delt, ipoint, &
!$OMP   iflux, itra1, itramem, nageclass, lage, xtra1, ytra1, ztra1)
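! (the conv_mod column variables psconv, tt2conv, td2conv, tconv, qconv set
!  below are not listed here; with DEFAULT(none) this compiles only if they are
!  THREADPRIVATE in conv_mod, i.e. each thread presumably has its own copy)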

! #if (defined _OPENMP)
!    thread = OMP_GET_THREAD_NUM()
! #endif

#if (defined STATIC_SCHED)
!$OMP DO SCHEDULE(static)
#else
!$OMP DO SCHEDULE(dynamic)  ! using default chunk size
#endif
    do kk=1,cnt-1  ! loop through grid columns

      ! skip the igrid = -1 group (particles inside a nested domain or not
      ! active at this time); convection for nested particles is treated below
      if (igrid(frst(kk)) .eq. -1) cycle

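      ! recover the column indices by inverting igrid = 1 + jy*nx + ix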
      jy = (igrid(frst(kk))-1)/nx
      ix = igrid(frst(kk)) - jy*nx - 1

  ! Interpolate all meteorological data needed for the convection scheme
      psconv=(ps(ix,jy,1,mind1)*dt2+ps(ix,jy,1,mind2)*dt1)*dtt
      tt2conv=(tt2(ix,jy,1,mind1)*dt2+tt2(ix,jy,1,mind2)*dt1)*dtt
      td2conv=(td2(ix,jy,1,mind1)*dt2+td2(ix,jy,1,mind2)*dt1)*dtt
!!$      do kz=1,nconvlev+1      !old
      do kz=1,nuvz-1           !bugfix
        tconv(kz)=(tth(ix,jy,kz+1,mind1)*dt2+ &
             tth(ix,jy,kz+1,mind2)*dt1)*dtt
        qconv(kz)=(qvh(ix,jy,kz+1,mind1)*dt2+ &
             qvh(ix,jy,kz+1,mind2)*dt1)*dtt
      end do

  ! Calculate translocation matrix
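  ! (calcmatrix returns lconv = .true. if the column is convective, together
  !  with the convective base flux of the column, and fills the redistribution
  !  matrix fmassfrac used by redist below)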
      call calcmatrix(lconv,delt,cbaseflux(ix,jy))

  ! treat the column only if it has convection
      if (lconv) then
        ktop = 0
!        sumconv = 0
  ! assign new vertical position to particle
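  ! (redist moves each particle vertically according to the convective
  !  redistribution matrix, presumably consuming the random number drawn above;
  !  ztold keeps the old height for the flux calculation)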
        do kpart=frst(kk),frst(kk+1)-1
          ipart = ipoint(kpart)
          ztold=ztra1(ipart)
          call redist(ipart,ktop,ipconv)

!         if (ipconv.le.0) sumconv = sumconv+1

  ! Calculate the gross fluxes across layer interfaces
  !***************************************************

          if (iflux.eq.1) then
            itage=abs(itra1(ipart)-itramem(ipart))
            do nage=1,nageclass
              if (itage.lt.lage(nage)) exit
            end do
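            ! nage is now the first age class whose limit exceeds the particle
            ! age; particles older than all age classes (nage = nageclass+1)
            ! are skipped below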

            if (nage.le.nageclass) then
              call calcfluxes(nage,ipart,real(xtra1(ipart)), &
                real(ytra1(ipart)),ztold)
            end if
          end if ! flux calculation
        end do ! particles in column
!        write(*,*) "convmix:", thread, igrid(frst(kk)), cbaseflux(ix,jy), sumconv
      end if ! convection present
    end do ! grid cell loop
!$OMP END DO
!$OMP END PARALLEL
  end if ! particles in non-nested areas

  deallocate(frst)

!  write(fn, '(A,A,I6.6)') trim(path(2)), 'cbaseflux_', itime
!  call dump_field(fn, cbaseflux(0:nx-1, 0:ny-1), nx, ny, 1)


  !*****************************************************************************
  ! 2. Nested domains
  !*****************************************************************************

  nestloop : do inest=1,numbnests
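  ! Same procedure as for the mother grid, repeated for every nest using the
  ! nested fields (psn, tt2n, td2n, tthn, qvhn) and the nest dimensions nxn, nyn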

  ! sort particles according to horizontal position and calculate index vector IPOINT
    do ipart=1,numpart
      ipoint(ipart)=ipart
      igrid(ipart) = igridn(ipart,inest)
    enddo
    call sort2(numpart,igrid,ipoint)

! determine the particle index at which each new grid cell starts in the sorted list;
! this also selects the grid cells in which particles are present
    allocate(frst(nxn(inest)*nyn(inest)), stat=stat)
    if (stat.ne.0) write(*,*) "ERROR: could not allocate frst"

    cnt = 1
    igrold = -1
    do kpart=1,numpart
      if (igrold.ne.igrid(kpart)) then
        frst(cnt) = kpart
        igrold=igrid(kpart)
        cnt=cnt+1
      endif
    enddo
    frst(cnt) = numpart+1
! if no particles are in this nest, cnt will be 1; otherwise it is >1

    if (cnt.gt.1) then ! any particles in nested areas?

!$OMP PARALLEL DEFAULT(none)   &
!$OMP PRIVATE(kk,jy,ix,kz, ktop, lconv, kpart, ipart, ztold, &
!$OMP   nage, ipconv, itage) &
!$OMP SHARED(cnt, cbasefluxn, frst, igrid, nxn, mind1, mind2, psn, &
!$OMP   tt2n, td2n, tthn, qvhn, dt1, dt2, dtt, nuvz, delt, ipoint, &
!$OMP   iflux, itra1, itramem, nageclass, lage, xtra1, ytra1, ztra1, &
!$OMP   inest)

#if (defined STATIC_SCHED)
!$OMP DO SCHEDULE(static)
#else
!$OMP DO SCHEDULE(dynamic)  ! using default chunk size
#endif
      do kk=1,cnt-1  ! loop through grid columns

        jy = (igrid(frst(kk))-1)/nxn(inest)
        ix = igrid(frst(kk)) - jy*nxn(inest) - 1

  ! Interpolate all meteorological data needed for the convection scheme
        psconv=(psn(ix,jy,1,mind1,inest)*dt2+ &
             psn(ix,jy,1,mind2,inest)*dt1)*dtt
        tt2conv=(tt2n(ix,jy,1,mind1,inest)*dt2+ &
             tt2n(ix,jy,1,mind2,inest)*dt1)*dtt
        td2conv=(td2n(ix,jy,1,mind1,inest)*dt2+ &
             td2n(ix,jy,1,mind2,inest)*dt1)*dtt
!!$        do kz=1,nconvlev+1    !old
        do kz=1,nuvz-1           !bugfix
          tconv(kz)=(tthn(ix,jy,kz+1,mind1,inest)*dt2+ &
               tthn(ix,jy,kz+1,mind2,inest)*dt1)*dtt
          qconv(kz)=(qvhn(ix,jy,kz+1,mind1,inest)*dt2+ &
               qvhn(ix,jy,kz+1,mind2,inest)*dt1)*dtt
        end do

  ! calculate translocation matrix
  !*******************************
        call calcmatrix(lconv,delt,cbasefluxn(ix,jy,inest))

  ! treat particles only if the column has convection
        if (lconv) then
          ktop = 0
          do kpart=frst(kk), frst(kk+1)-1
  ! assign new vertical position to particle
            ipart = ipoint(kpart)
            ztold=ztra1(ipart)
            call redist(ipart,ktop,ipconv)
  !      if (ipconv.le.0) sumconv = sumconv+1

  ! Calculate the gross fluxes across layer interfaces
  !***************************************************

            if (iflux.eq.1) then
              itage=abs(itra1(ipart)-itramem(ipart))
              do nage=1,nageclass
                if (itage.lt.lage(nage)) exit
              end do

              if (nage.le.nageclass) then
                call calcfluxes(nage,ipart,real(xtra1(ipart)), &
                  real(ytra1(ipart)),ztold)
              end if
            end if ! flux calculation

          end do  ! particles in column
        end if !(lconv .eqv. .true.)
      end do ! grid cell loop
!$OMP END DO
!$OMP END PARALLEL
    end if  ! any particles in nest
    deallocate(frst)
  end do nestloop

  !--------------------------------------------------------------------------
  !write(*,*)'############################################'
  !write(*,*)'TIME=',
  !    &  itime
  !write(*,*)'fraction of particles under convection',
  !    &  sumconv/(sumall+0.001)
  !write(*,*)'total number of particles',
  !    &  sumall
  !write(*,*)'number of particles under convection',
  !    &  sumconv
  !write(*,*)'############################################'

  deallocate(igrid)
  deallocate(ipoint)
  deallocate(igridn)

  return
end subroutine convmix