source: flexpart.git/src/mpi_mod.f90

Last change on this file was 92fab65, checked in by Ignacio Pisso <ip@…>, 4 years ago

add SPDX-License-Identifier to all .f90 files

1! SPDX-FileCopyrightText: FLEXPART 1998-2019, see flexpart_license.txt
2! SPDX-License-Identifier: GPL-3.0-or-later
3
4module mpi_mod
5
6!*****************************************************************************
7!                                                                            *
8!  DESCRIPTION                                                               *
9!    This module contains subroutines and common variables used for the      *
10!    MPI parallelization of FLEXPART.                                        *
11!                                                                            *
12!  NOTE                                                                      *
13!    Depending on the MPI library installed on your system (e.g. mpich2,     *
14!    OpenMPI) you may need to choose below in this file between              *
15!      use mpi                                                               *
16!    (if the MPI library comes with the file 'mpi.mod'); or                  *
17!      include 'mpif.h'                                                      *
18!                                                                            *
19!                                                                            *
20!*****************************************************************************
21!                                                                            *
22! Variables:                                                                 *
23!                                                                            *
24! mp_ierr                 MPI error code                                     *
25! mp_np                   Number of MPI processes                            *
26! mp_pid                  Process ID of each MPI process                     *
27! mp_seed                 Parameter for random number seed                   *
28! read_grp_min            Minimum number of processes at which one will be   *
29!                         used as reader                                     *
30! numpart_mpi,            Number of particles per node                       *
31! maxpart_mpi                                                                *
32! mp_partid               MPI process ID for particle calculation            *
33! mp_partgroup_           Refers to the subset of processors performing      *
34!                         loops over particles. Will be all processes        *
35!                         unless a dedicated process runs getfields/readwind *
36! lmp_sync                If .false., use asynchronous MPI                   *
37! mp_cp                   Real precision to use for deposition fields        *
38!                                                                            *
39!                                                                            *
40!                                                                            *
41!                                                                            *
42!*****************************************************************************
43!                                                                            *
44! Modification by DJM, 2017-05-09 - added #ifdef USE_MPIINPLACE cpp          *
45! directive to mpif_tm_reduce_grid() to ensure that MPI_IN_PLACE is          *
46! used in the MPI_Reduce() only if specifically compiled with that           *
47! directive.                                                                 *
48!                                                                            *
49!*****************************************************************************
50
51  use mpi
52  use par_mod, only: dp,sp
53  use com_mod, only: lroot
54
55  implicit none
56
57!  include 'mpif.h'
58
59  public
60
61! Set aside a process for reading windfields if using at least this many processes
62!==================================================
63  integer, parameter, private :: read_grp_min=4
64!==================================================
65
66! Variables for each MPI process in the world group
67  integer :: mp_ierr, mp_np, mp_pid, mp_partid
68  integer, private :: world_group_id
69
70! Variables for MPI processes in the 'particle' group
71  integer, allocatable, dimension(:) :: mp_partgroup_rank
72  integer, allocatable, dimension(:) :: npart_per_process
73  integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
74
75  integer :: mp_seed=0
76  integer, parameter :: mp_sp=MPI_REAL4, mp_dp=MPI_REAL8
77  integer :: mp_cp
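! mp_cp is resolved at run time from dep_prec in par_mod (see mpif_init below):
!   dep_prec == sp  ->  mp_cp = MPI_REAL4 (single precision deposition fields)
!   dep_prec == dp  ->  mp_cp = MPI_REAL8 (double precision deposition fields)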
78  integer, parameter :: id_root=0 ! master process
79
80! MPI tags/requests for send/receive operation
81  integer :: tm1
82  integer, parameter :: nvar_async=26
83!integer, dimension(:), allocatable :: tags
84  integer, dimension(:), allocatable :: reqs
85
86! Status array used for certain MPI operations (MPI_RECV)
87  integer, dimension(MPI_STATUS_SIZE) :: mp_status
88
89
90  integer :: id_read   ! readwind/getfield process
91  integer :: numpart_mpi,maxpart_mpi ! number of particles per node
92  integer :: tot_numpart=0
93  integer :: mp_comm_used ! global or subgroup communicator
94
95  logical :: lmpreader=.false. ! is set to true for reading process(es) only.
96  logical :: lmp_use_reader=.false. ! true if separate readwind process is used
97
98! .true. if only using synchronous MPI send/recv (default)
99! If setting this to .false., numwfmem must be set to 3
100!===============================================================================
101  logical :: lmp_sync=.true.
102!===============================================================================
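! Example of consistent settings (illustrative only; numwfmem comes from
! par_mod and is checked in mpif_init):
!   lmp_sync=.true.  + numwfmem=2                        -> all processes call getfields
!   lmp_sync=.false. + numwfmem=3 + mp_np>=read_grp_min  -> last process is a dedicated reader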
103
104! mp_dbg_mode       Used for debugging MPI.
105! mp_dev_mode       various checks related to debugging the parallel code
106! mp_dbg_out        write some arrays to data file for debugging
107! mp_measure_time   Measure cpu/wall time, write out at end of run
108! mp_time_barrier   Measure MPI barrier time
109! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
110!                   to standard output (this does not otherwise affect the simulation)
111  logical, parameter :: mp_dbg_mode = .false.
112  logical, parameter :: mp_dev_mode = .false.
113  logical, parameter :: mp_dbg_out = .false.
114  logical, parameter :: mp_time_barrier=.true.
115  logical, parameter :: mp_measure_time=.false.
116  logical, parameter :: mp_exact_numpart=.true.
117
118! for measuring CPU/Wall time
119  real(sp),private :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.
120  real(dp),private :: mp_comm_wtime_beg, mp_comm_wtime_end, mp_comm_wtime_total=0.
121  real(sp),private :: mp_root_time_beg, mp_root_time_end, mp_root_time_total=0.
122  real(dp),private :: mp_root_wtime_beg, mp_root_wtime_end, mp_root_wtime_total=0.
123  real(sp),private :: mp_barrier_time_beg, mp_barrier_time_end, mp_barrier_time_total=0.
124  real(dp),private :: mp_barrier_wtime_beg, mp_barrier_wtime_end, mp_barrier_wtime_total=0.
125  real(sp),private :: tm_nploop_beg, tm_nploop_end, tm_nploop_total=0.
126  real(sp),private :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
127  real(dp),private :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
128  real(sp),private :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
129  real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
130  real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
131  real(dp),private :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
132  real(sp),private :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
133  real(dp),private :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.
134  real(sp),private :: mp_wetdepo_time_beg, mp_wetdepo_time_end, mp_wetdepo_time_total=0.
135  real(dp),private :: mp_advance_wtime_beg, mp_advance_wtime_end, mp_advance_wtime_total=0.
136  real(dp),private :: mp_conccalc_time_beg, mp_conccalc_time_end, mp_conccalc_time_total=0.
137  real(dp),private :: mp_total_wtime_beg, mp_total_wtime_end, mp_total_wtime_total=0.
138  real(dp),private :: mp_vt_wtime_beg, mp_vt_wtime_end, mp_vt_wtime_total
139  real(sp),private :: mp_vt_time_beg, mp_vt_time_end, mp_vt_time_total
140
141! dat_lun           logical unit number for i/o
142  integer, private :: dat_lun
143
144! Temporary arrays for particles (allocated and deallocated as needed)
145  integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, &
146       & itramem_tmp, itrasplit_tmp
147  real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp
148  real, allocatable, dimension(:) :: ztra1_tmp
149  real, allocatable, dimension(:,:) :: xmass1_tmp
150
151! mp_redist_fract        Exchange particles between processes if relative difference
152!                        is larger. A good value is between 0.0 and 0.5
153! mp_maxpart_factor      Allocate more memory per process than strictly needed
154!                        (safety factor). Recommended value between 1.5 and 2.5
155! mp_min_redist          Do not redistribute particles if below this limit
156  real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5
157  integer,parameter :: mp_min_redist=100000
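! Example: with the defaults above, an imbalance of 30000 vs 10000 particles
! (relative difference 0.67) triggers no exchange, because the most loaded
! process is still below mp_min_redist; see mpif_calculate_part_redist.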
158
159
160contains
161
162  subroutine mpif_init
163!***********************************************************************
164! DESCRIPTION
165!   Initialize MPI.
166!   
167!   Create the global communicator MPI_COMM_WORLD
168!   If using a separate MPI process for getfields/readwind, a subgroup
169!   is created for the other processes.
170!
171! VARIABLES
172!   mpi_mode    default 0, set to 2/3 if running MPI version
173!   mp_np       number of running processes, decided at run-time
174!***********************************************************************
175    use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
176    use com_mod, only: mpi_mode, verbosity, creceptor0
177
178    implicit none
179
180    integer :: i,j,s,addmaxpart=0
181
182! Each process gets an ID (mp_pid) in the range 0,..,mp_np-1
183    call MPI_INIT(mp_ierr)
184    if (mp_ierr /= 0) goto 100
185    call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)
186    if (mp_ierr /= 0) goto 100
187    call MPI_COMM_SIZE(MPI_COMM_WORLD, mp_np, mp_ierr)
188    if (mp_ierr /= 0) goto 100
189
190
191! Variable mpi_mode is used to handle subroutines common to parallel/serial version
192    if (lmp_sync) then
193      mpi_mode=2 ! hold 2 windfields in memory
194    else
195      mpi_mode=3 ! hold 3 windfields in memory
196    end if
197
198    if (mp_pid.ne.0) then
199      lroot = .false.
200    end if
201
202! Set MPI precision to use for transferring deposition fields
203!************************************************************
204    if (dep_prec==dp) then
205      mp_cp = MPI_REAL8
206! TODO: write info message for serial version as well
207      if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields'
208    else if (dep_prec==sp) then
209      mp_cp = MPI_REAL4
210      if (lroot.and.verbosity>0) write(*,*) 'Using single precision for deposition fields'
211    else
212      write(*,*) 'ERROR: something went wrong setting MPI real precision'
213      stop
214    end if
215
216! Check for sensible combination of parameters
217!*********************************************
218
219    if (.not.lmp_sync.and.numwfmem.ne.3) then
220      if (lroot) then
221        write(*,FMT='(80("#"))')
222        write(*,*) '#### mpi_mod::mpif_init> ERROR: ', &
223         & 'numwfmem must be set to 3 for asynchronous reading ####'
224        write(*,FMT='(80("#"))')
225      end if
226      call MPI_FINALIZE(mp_ierr)
227      stop
228    else if (lmp_sync.and.numwfmem.ne.2.and.lroot) then
229      write(*,FMT='(80("#"))')
230      write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
231           & 'numwfmem should be set to 2 for synchronous'
232      write(*,*) ' reading. Results will still be valid, but unnecessary memory &
233           &is allocated.'
234      write(*,FMT='(80("#"))')
235! Force "synchronized" version if all processes will call getfields
236    else if ((.not.lmp_sync.and.mp_np.lt.read_grp_min).or.(mp_np.eq.1)) then
237      if (lroot) then
238        write(*,FMT='(80("#"))')
239        write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
240             & 'all procs call getfields. Setting lmp_sync=.true.'
241        write(*,FMT='(80("#"))')
242      end if
243      lmp_sync=.true.
244    end if
245
246! TODO: Add more warnings for unimplemented flexpart features
247
248! Set ID of process that calls getfield/readwind.
249! Using the last process in the group ensures statistically identical results
250! to running with one process fewer and no separate read process
251!**********************************************************************
252
253!    id_read = min(mp_np-1, 1)
254    id_read = mp_np-1
255
256    if (mp_pid.eq.id_read) lmpreader=.true.
257
258    call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)
259
260! Create a MPI group/communicator that will calculate trajectories.
261! Skip this step if the program is run with only a few processes
262!************************************************************************
263    allocate(mp_partgroup_rank(0:mp_np-2))
264
265! This allows checking for allocation of arrays when no subgroup is used
266    mp_partgroup_pid=0
267
268    if (read_grp_min.lt.2) then
269      write(*,*) '#### mpi_mod::mpif_init> ERROR ####', &
270           & 'read_grp_min should be at least 2. Exiting'
271      stop
272    end if
273
274    if (mp_np.ge.read_grp_min) then
275      lmp_use_reader = .true.
276
277! Map the subgroup IDs 0,1,2,...,mp_np-2 to world ranks, skipping the reader process
278      j=-1
279      do i=0, mp_np-2 !loop over all (subgroup) IDs
280        j=j+1
281        if (i.eq.id_read) then
282          j=j+1
283        end if
284        mp_partgroup_rank(i) = j
285      end do
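! Example of the mapping above: with mp_np=4 the reader is process 3
! (id_read=mp_np-1), so mp_partgroup_rank = (/0,1,2/), i.e. subgroup ranks
! 0..2 map directly onto world ranks 0..2. The 'if (i.eq.id_read)' branch
! would only skip a rank if id_read were chosen in the interior of the group.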
286
287      call MPI_Group_incl (world_group_id, mp_np-1, mp_partgroup_rank, &
288           &mp_partgroup_pid, mp_ierr)
289      if (mp_ierr /= 0) goto 100
290      call MPI_Comm_create (MPI_COMM_WORLD, mp_partgroup_pid, mp_partgroup_comm, mp_ierr)
291      if (mp_ierr /= 0) goto 100
292
293      if (mp_pid.ne.id_read) then
294        call MPI_Comm_rank (mp_partgroup_comm, mp_partgroup_pid, mp_ierr)
295        if (mp_ierr /= 0) goto 100
296
297! Get size of new group (=mp_np-1)
298        call MPI_COMM_SIZE(mp_partgroup_comm, mp_partgroup_np, mp_ierr)
299        if (mp_ierr /= 0) goto 100
300        if (mp_partgroup_np.ne.mp_np-1) then
301          write(*,*)  '#### mpi_mod:: mpif_init> ERROR ####&
302               & mp_partgroup_np.ne.mp_np-1'
303          stop
304        endif
305
306      else
307        mp_partgroup_pid = -1
308      end if
309    end if
310
311    if (lmp_use_reader) then
312      mp_comm_used = mp_partgroup_comm
313      mp_partid = mp_partgroup_pid
314      mp_partgroup_np=mp_np-1
315    else
316      mp_comm_used = MPI_COMM_WORLD
317      mp_partgroup_np = mp_np
318      mp_partid = mp_pid
319    end if
320
321! Set maxpart per process
322! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
323    maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
324    if (mp_np == 1) maxpart_mpi = maxpart
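! Worked example (maxpart itself is set in par_mod; value here is illustrative):
! with maxpart=10000000, mp_partgroup_np=4 and mp_maxpart_factor=1.5,
!   maxpart_mpi = int(1.5*10000000/4) = 3750000
! i.e. each particle process reserves 50% headroom above an even split.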
325
326! Do not allocate particle data arrays for readwind process
327    if (lmpreader.and.lmp_use_reader) then
328      maxpart_mpi=0
329    end if
330
331! Set random seed for each non-root process
332    if (mp_pid.gt.0) then
333      s = 244
334      mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
335    end if
336    if (mp_dbg_mode) then
337      mp_seed=0
338      if (lroot) write(*,*) 'MPI: setting seed=0'
339    end if
340
341! Allocate request array for asynchronous MPI
342    if (.not.lmp_sync) then
343      allocate(reqs(0:nvar_async*mp_np-1))
344      reqs(:)=MPI_REQUEST_NULL
345    else
346      allocate(reqs(0:1))
347      reqs(:)=MPI_REQUEST_NULL
348    end if
349
350! Allocate array for number of particles per process   
351    allocate(npart_per_process(0:mp_partgroup_np-1))
352
353! Write whether MPI_IN_PLACE is used or not
354#ifdef USE_MPIINPLACE
355    if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
356#else
357    if (lroot) allocate(creceptor0(maxreceptor,maxspec))
358    if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
359#endif
360    goto 101
361
362100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
363    stop
364
365101 end subroutine mpif_init
366
367
368  subroutine mpif_mpi_barrier
369!***********************************************************************
370! Set MPI barrier (all processes are synchronized here), with the
371! possible exception of a separate 'readwind' process.
372! Optionally measure cpu/wall time.
373!
374!***********************************************************************
375    implicit none
376
377    if (mp_time_barrier) then
378      call cpu_time(mp_barrier_time_beg)
379      mp_barrier_wtime_beg = mpi_wtime()
380    endif
381
382    call MPI_BARRIER(mp_comm_used, mp_ierr)
383    if (mp_ierr /= 0) goto 100
384
385    if (mp_time_barrier) then
386      call cpu_time(mp_barrier_time_end)
387      mp_barrier_wtime_end = mpi_wtime()
388
389      mp_barrier_time_total=mp_barrier_time_total+&
390           &(mp_barrier_time_end-mp_barrier_time_beg)
391      mp_barrier_wtime_total=mp_barrier_wtime_total+&
392           &(mp_barrier_wtime_end-mp_barrier_wtime_beg)
393    end if
394
395    goto 101
396
397100 write(*,*) '#### mpi_mod::mpif_mpi_barrier> ERROR ####', mp_ierr
398    stop
399
400101 end subroutine mpif_mpi_barrier
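! Illustrative call pattern for the two routines above (a sketch only; the
! actual driver logic lives in timemanager_mpi.f90 and related files):
!
!   call mpif_init               ! sets mp_pid, mp_np, lroot, lmpreader, mp_comm_used, ...
!   if (lmpreader.and.lmp_use_reader) then
!     ! this process only runs getfields/readwind
!   else
!     ! this process loops over particles, communicating via mp_comm_used
!   end if
!   call mpif_mpi_barrier        ! synchronize the particle group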
401
402
403  subroutine mpif_com_mod_allocate
404!*******************************************************************************   
405! Dynamic allocation of arrays (in serial code these have size maxpart)
406!
407! Not in use anymore, moved to com_mod for interoperability with serial version
408!
409! TODO: remove
410!*******************************************************************************
411    use com_mod
412
413    implicit none
414
415    integer :: array_size
416
417    array_size = maxpart_mpi
418
419    allocate(itra1(array_size),npoint(array_size),&
420         & nclass(array_size),idt(array_size),&
421         & itramem(array_size),itrasplit(array_size),&
422         & xtra1(array_size),ytra1(array_size),&
423         & ztra1(array_size),xmass1(array_size, maxspec))
424
425    allocate(uap(array_size),ucp(array_size),&
426         & uzp(array_size),us(array_size),&
427         & vs(array_size),&
428         & ws(array_size),cbt(array_size))
429
430
431  end subroutine mpif_com_mod_allocate
432
433
434  subroutine mpif_tm_send_dims
435!***********************************************************************
436! Distribute array dimensions from pid0 to all processes.
437! numpart_mpi/numpart is sent to allow dynamic allocation
438!
439! Currently not in use.
440!
441! Variables of similar type (integer, real) are placed in an array
442! and sent collectively, to avoid the overhead associated with individual
443! MPI send/receives
444!
445!
446!***********************************************************************
447    use com_mod
448
449    implicit none
450
451    integer :: add_part=0
452
453    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
454
455! MPI subgroup does the particle-loop
456    if (lmp_use_reader) then
457      if (mod(numpart,mp_partgroup_np).ne.0) add_part=1
458      numpart_mpi=int(numpart/mp_partgroup_np)+add_part
459    else
460      if (mod(numpart,mp_np).ne.0) add_part=1
461      numpart_mpi=int(numpart/mp_np)+add_part
462    end if
463
464
465! redefine numpart as 'numpart per process' throughout the code
466!**************************************************************
467    numpart = numpart_mpi
468
469  end subroutine mpif_tm_send_dims
470
471
472  subroutine mpif_send_part_properties(num_part)
473!***********************************************************************
474! Distribute particle properties from root to all processes.
475
476! Used by init_domainfill_mpi
477!
478! Variables:
479!
480! num_part        input, number of particles per process (rounded up)
481!
482!***********************************************************************
483    use com_mod
484
485    implicit none
486
487    integer,intent(in) :: num_part
488    integer :: i,jj, addone
489
490! Exit if too many particles
491    if (num_part.gt.maxpart_mpi) then
492      write(*,*) '#####################################################'
493      write(*,*) '#### ERROR - TOTAL NUMBER OF PARTICLES REQUIRED  ####'
494      write(*,*) '#### EXCEEDS THE MAXIMUM ALLOWED NUMBER. REDUCE  ####'
495      write(*,*) '#### EITHER NUMBER OF PARTICLES PER RELEASE POINT####'
496      write(*,*) '#### OR INCREASE MAXPART.                        ####'
497      write(*,*) '#####################################################'
498!      call MPI_FINALIZE(mp_ierr)
499      stop
500    end if
501
502
503! Time for MPI communications
504!****************************
505    if (mp_measure_time) call mpif_mtime('commtime',0)
506
507! Distribute variables, send from pid 0 to other processes (including itself)
508!****************************************************************************
509
510    call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,&
511         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
512    if (mp_ierr /= 0) goto 600
513    call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,&
514         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
515    if (mp_ierr /= 0) goto 600
516    call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,&
517         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
518    if (mp_ierr /= 0) goto 600
519    call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,&
520         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
521    if (mp_ierr /= 0) goto 600
522    call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,&
523         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
524    if (mp_ierr /= 0) goto 600
525    call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,&
526         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
527    if (mp_ierr /= 0) goto 600
528    call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,&
529         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
530    if (mp_ierr /= 0) goto 600
531    call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,&
532         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
533    if (mp_ierr /= 0) goto 600
534    call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,&
535         &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
536    if (mp_ierr /= 0) goto 600
537    do i=1,nspec
538      call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),&
539           &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
540      if (mp_ierr /= 0) goto 600
541    end do
542
543    if (mp_measure_time) call mpif_mtime('commtime',1)
544
545    goto 601
546
547600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
548    stop
549
550! After the transfer of particles it is possible that the value of
551! "numpart" is larger than the number actually in use, so we reduce it if there are
552! invalid particles at the end of the arrays
553
554601 do i=numpart, 1, -1
555      if (itra1(i).eq.-999999999) then
556        numpart=numpart-1
557      else
558        exit
559      end if
560    end do
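! Example: if the last two entries of itra1 still hold the 'terminated'
! marker value -999999999 after the scatter, the loop above reduces numpart
! by 2 and stops at the first valid particle.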
561
562
563  end subroutine mpif_send_part_properties
564
565
566  subroutine mpif_calculate_part_redist(itime)
567!***********************************************************************
568! Calculate number of particles to redistribute between processes. This routine
569! can be called at regular time intervals to keep the number of particles
570! on each process approximately equal (load balancing).
571!
572! First, all processes report their local "numpart" to each other; the values
573! are stored in the array "npart_per_process(0:np-1)". This array is sorted from
574! low to high, along with a corresponding process ID array "idx_arr(0:np-1)".
575! If the relative difference between the highest and lowest particle count
576! is above a threshold, particles are transferred from process idx_arr(np-1)
577! to process idx_arr(0). This is repeated for the pairs idx_arr(np-1-i) and
578! idx_arr(i) for all valid i.
579! Note: If np is an odd number, the process with the median
580! number of particles is left unchanged
581!
582! VARIABLES
583! itime        input, current time
584!***********************************************************************
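! Worked example (illustrative counts), mp_partgroup_np=4:
!   npart_per_process = (/1200000, 800000, 1000000, 600000/)
!   after sorting:  idx_arr = (/3, 1, 2, 0/)   (ascending particle count)
!   pair (idx_arr(3)=0, idx_arr(0)=3): 600000/1200000 = 0.5 > mp_redist_fract
!       -> process 0 sends 300000 particles to process 3
!   pair (idx_arr(2)=2, idx_arr(1)=1): 200000/1000000 = 0.2, not above the
!       threshold -> no transfer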
585    use com_mod
586
587    implicit none
588   
589    integer, intent(in) :: itime
590    real :: pmin,z
591    integer :: i,jj,nn, num_part=1,m,imin, num_trans
592    logical :: first_iter
593    integer,allocatable,dimension(:) :: idx_arr
594    real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this
595
596! Exit if running with only 1 process
597!************************************************************************
598    if (mp_np.eq.1) return
599
600! All processes exchange information on number of particles
601!****************************************************************************
602    allocate( idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
603
604    call MPI_Allgather(numpart, 1, MPI_INTEGER, npart_per_process, &
605         & 1, MPI_INTEGER, mp_comm_used, mp_ierr)
606
607
608! Sort from lowest to highest
609! Initial guess: correct order
610    sorted(:) = npart_per_process(:)
611    do i=0, mp_partgroup_np-1
612      idx_arr(i) = i
613    end do
614
615! Do not rebalance particles for ipout=3   
616    if (ipout.eq.3) return
617
618! For each successive element in index array, see if a lower value exists
619    do i=0, mp_partgroup_np-2
620      pmin=sorted(i)
621      imin=idx_arr(i)
622      m=i+1
623      do jj=m, mp_partgroup_np-1
624        if (pmin.le.sorted(jj)) cycle
625        z=pmin
626        pmin=sorted(jj)
627        sorted(jj)=z
628
629        nn=imin
630        imin=idx_arr(jj)
631        idx_arr(jj)=nn
632      end do
633      sorted(i)=pmin
634      idx_arr(i)=imin
635    end do
636
637! For each pair of processes, transfer particles if the difference is above a
638! limit and numpart at the sending process is large enough
639
640    m=mp_partgroup_np-1 ! index for last sorted process (most particles)
641    do i=0,mp_partgroup_np/2-1
642      num_trans = npart_per_process(idx_arr(m)) - npart_per_process(idx_arr(i))
643      if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
644        if ( npart_per_process(idx_arr(m)).gt.mp_min_redist.and.&
645             & real(num_trans)/real(npart_per_process(idx_arr(m))).gt.mp_redist_fract) then
646! DBG
647          ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process', &
648          !      &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process
649! DBG
650          call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
651        end if
652      end if
653      m=m-1
654    end do
655
656    deallocate(idx_arr, sorted)
657
658  end subroutine mpif_calculate_part_redist
659
660
661  subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans)
662!***********************************************************************
663! Transfer particle properties between two arbitrary processes.
664!
665! VARIABLES
666!
667! itime           input, current time
668! src_proc        input, ID of source process
669! dest_proc       input, ID of destination process
670! num_trans       input, number of particles to transfer
671!
672!************************************************************************
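! Message-tag layout used below: a base tag mtag=2000 is offset per array
! (mtag+1 for nclass, ..., mtag+9 for ztra1) and by mtag+9+j for species j of
! xmass1, so each matched send/receive pair uses a unique tag.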
673    use com_mod !TODO:  ,only: nclass etc
674
675    implicit none
676
677    integer, intent(in) :: itime, src_proc, dest_proc, num_trans
678    integer :: ll, ul ! lower and upper indices in arrays
679    integer :: arr_sz ! size of temporary arrays
680    integer :: mtag   ! MPI message tag
681    integer :: i, j, minpart, ipart, maxnumpart
682 
683! Check for invalid input arguments
684!**********************************
685    if (src_proc.eq.dest_proc) then
686      write(*,*) '<mpi_mod::mpif_redist_part>: Error: &
687           &src_proc.eq.dest_proc'
688      stop
689    end if
690
691! Measure time for MPI communications
692!************************************
693    if (mp_measure_time) call mpif_mtime('commtime',0)
694
695! Send particles
696!***************
697    if (mp_partid.eq.src_proc) then
698      mtag = 2000
699
700! Array indices for the transferred particles
701      ll=numpart-num_trans+1
702      ul=numpart
703
704      if (mp_dev_mode) then
705        write(*,FMT='(72("#"))')
706        write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc
707        write(*,*) "numpart before: ", numpart
708      end if
709
710      call MPI_SEND(nclass(ll:ul),num_trans,&
711           & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr)
712
713      call MPI_SEND(npoint(ll:ul),num_trans,&
714           & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr)
715
716      call MPI_SEND(itra1(ll:ul),num_trans, &
717           & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr)
718
719      call MPI_SEND(idt(ll:ul),num_trans, &
720           & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr)
721
722      call MPI_SEND(itramem(ll:ul),num_trans, &
723           & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr)
724
725      call MPI_SEND(itrasplit(ll:ul),num_trans,&
726           & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr)
727
728      call MPI_SEND(xtra1(ll:ul),num_trans, &
729           & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr)
730
731      call MPI_SEND(ytra1(ll:ul),num_trans,&
732           & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr)
733
734      call MPI_SEND(ztra1(ll:ul),num_trans,&
735           & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr)
736
737      do j=1,nspec
738        call MPI_SEND(xmass1(ll:ul,j),num_trans,&
739             & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr)
740      end do
741
742! Terminate transferred particles, update numpart
743      itra1(ll:ul) = -999999999
744
745      numpart = numpart-num_trans
746
747      if (mp_dev_mode) then
748        write(*,*) "numpart after: ", numpart
749        write(*,FMT='(72("#"))')
750      end if
751
752    else if (mp_partid.eq.dest_proc) then
753
754! Receive particles
755!******************
756      mtag = 2000
757      ! Array indices for the transferred particles
758      ll=numpart+1
759      ul=numpart+num_trans
760
761      if (mp_dev_mode) then
762        write(*,FMT='(72("#"))')
763        write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc
764        write(*,*) "numpart before: ", numpart
765      end if
766
767! We could receive the data directly at nclass(ll:ul) etc., but this leaves
768! vacant spaces at lower indices. Using temporary arrays instead.
769      arr_sz = ul-ll+1
770      allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),&
771           & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),&
772           & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),&
773           & xmass1_tmp(arr_sz, maxspec))
774     
775      call MPI_RECV(nclass_tmp,num_trans,&
776           & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr)
777
778      call MPI_RECV(npoint_tmp,num_trans,&
779           & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr)
780
781      call MPI_RECV(itra1_tmp,num_trans, &
782           & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr)
783
784      call MPI_RECV(idt_tmp,num_trans, &
785           & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr)
786
787      call MPI_RECV(itramem_tmp,num_trans, &
788           & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr)
789
790      call MPI_RECV(itrasplit_tmp,num_trans,&
791           & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr)
792
793      call MPI_RECV(xtra1_tmp,num_trans, &
794           & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr)
795
796      call MPI_RECV(ytra1_tmp,num_trans,&
797           & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr)
798
799      call MPI_RECV(ztra1_tmp,num_trans,&
800           & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr)
801
802      do j=1,nspec
803        call MPI_RECV(xmass1_tmp(:,j),num_trans,&
804             & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr)
805      end do
806
807! This is the maximum value numpart can possibly have after the transfer
808      maxnumpart=numpart+num_trans
809
810! Search for vacant space and copy from temporary storage
811!********************************************************
812      minpart=1
813      do i=1, num_trans
814! Take into account that we may have transferred invalid particles
815        if (itra1_tmp(i).ne.itime) cycle
816        do ipart=minpart,maxnumpart
817          if (itra1(ipart).ne.itime) then
818            itra1(ipart) = itra1_tmp(i)
819            npoint(ipart) = npoint_tmp(i)
820            nclass(ipart) = nclass_tmp(i)
821            idt(ipart) = idt_tmp(i)
822            itramem(ipart) = itramem_tmp(i)
823            itrasplit(ipart) = itrasplit_tmp(i)
824            xtra1(ipart) = xtra1_tmp(i)
825            ytra1(ipart) = ytra1_tmp(i)
826            ztra1(ipart) = ztra1_tmp(i)
827            xmass1(ipart,:) = xmass1_tmp(i,:)
828            goto 200 ! Storage space has been found, stop searching
829          end if
830! :TODO: add a check for the case where too many particles are required
831        end do
832200     minpart=ipart+1
833      end do
834! Increase numpart, if necessary
835      numpart=max(numpart,ipart)
836
837      deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
838           &itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
839
840      if (mp_dev_mode) then
841        write(*,*) "numpart after: ", numpart
842        write(*,FMT='(72("#"))')
843      end if
844
845    else
846! This routine should only be called by the two participating processes
847      write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part"
848      stop
849      return
850    end if
851
852! Measure time for MPI communications
853!************************************
854    if (mp_measure_time) call mpif_mtime('commtime',1)
855
856  end subroutine mpif_redist_part
857
858
859  subroutine mpif_tm_send_vars
860!***********************************************************************
861! Distribute particle variables from pid0 to all processes.
862! Called from timemanager
863! *NOT IN USE* at the moment, but can be useful for debugging
864!
865!***********************************************************************
866    use com_mod
867
868    implicit none
869
870    integer :: i
871
872! Time for MPI communications
873!****************************
874    if (mp_measure_time) call mpif_mtime('commtime',0)
875
876! Distribute variables, send from pid 0 to other processes
877!**********************************************************************
878
879! integers
880    if (lroot) then
881      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
882           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
883      if (mp_ierr /= 0) goto 600
884      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
885           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
886      if (mp_ierr /= 0) goto 600
887      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
888           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
889      if (mp_ierr /= 0) goto 600
890      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
891           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
892      if (mp_ierr /= 0) goto 600
893      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
894           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
895      if (mp_ierr /= 0) goto 600
896
897! int2
898      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
899           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
900      if (mp_ierr /= 0) goto 600
901
902! real
903      call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,&
904           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
905      if (mp_ierr /= 0) goto 600
906      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
907           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
908      if (mp_ierr /= 0) goto 600
909      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
910           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
911      if (mp_ierr /= 0) goto 600
912
913      call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,&
914           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
915      if (mp_ierr /= 0) goto 600
916      call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,&
917           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
918      if (mp_ierr /= 0) goto 600
919      call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,&
920           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
921      if (mp_ierr /= 0) goto 600
922
923      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
924           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
925      if (mp_ierr /= 0) goto 600
926      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
927           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
928      if (mp_ierr /= 0) goto 600
929      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,&
930           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
931      if (mp_ierr /= 0) goto 600
932
933      do i=1,nspec
934        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,&
935             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
936        if (mp_ierr /= 0) goto 600
937      end do
938
939    else ! (mp_pid >= 1)
940! integers
941      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
942           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
943      if (mp_ierr /= 0) goto 600
944      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
945           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
946      if (mp_ierr /= 0) goto 600
947      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
948           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
949      if (mp_ierr /= 0) goto 600
950      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
951           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
952      if (mp_ierr /= 0) goto 600
953      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
954           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
955      if (mp_ierr /= 0) goto 600
956
957! int2
958      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
959           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
960      if (mp_ierr /= 0) goto 600
961
962! reals
963      call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,&
964           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
965      if (mp_ierr /= 0) goto 600
966      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,&
967           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
968      if (mp_ierr /= 0) goto 600
969      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,&
970           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
971      if (mp_ierr /= 0) goto 600
972
973      call MPI_SCATTER(us,numpart_mpi,mp_sp,us,&
974           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
975      if (mp_ierr /= 0) goto 600
976      call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,&
977           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
978      if (mp_ierr /= 0) goto 600
979      call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,&
980           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
981      if (mp_ierr /= 0) goto 600
982
983      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
984           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
985      if (mp_ierr /= 0) goto 600
986      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
987           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
988      if (mp_ierr /= 0) goto 600
989      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,&
990           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
991      if (mp_ierr /= 0) goto 600
992
993      do i=1,nspec
994        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),&
995             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
996        if (mp_ierr /= 0) goto 600
997      end do
998
999    end if !(lroot)
1000
1001    if (mp_measure_time) call mpif_mtime('commtime',1)
1002
1003    goto 601
1004
1005600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
1006    stop
1007601 end subroutine mpif_tm_send_vars
1008
1009
1010  subroutine mpif_tm_collect_vars
1011!*******************************************************************************
1012! Collect particle data to PID 0 from all processes.                           *
1013!                                                                              *
1014! This can be called from timemanager_mpi, before calculating                  *
1015! concentrations/writing output grids.                                         *
1016!                                                                              *
1017! Currently *not in use*, as each process calculates concentrations            *
1018! separately, but can be useful for debugging                                  *
1019!                                                                              *
1020! To use this routine (together with mpif_tm_send_vars) to transfer data       *
1021! to the MPI root process (useful for debugging), insert code like this        *
1022! (in timemanager_mpi.f90)                                                     *
1023!                                                                              *
1024!      if (lroot) tot_numpart=numpart ! root stores total numpart              *
1025!      call mpif_tm_send_dims                                                  *
1026!      if (numpart>1) then                                                     *
1027!        call mpif_tm_send_vars                                                *
1028!      end if                                                                  *
1029!                                                                              *
1030!    To collect data from all processes to root process after a parallel       *
1031!    region:                                                                   *
1032!                                                                              *
1033!      if (numpart>0) then                                                     *
1034!          if (lroot) numpart = tot_numpart                                    *
1035!       call mpif_tm_collect_vars                                              *
1036!      end if                                                                  *
1037!                                                                              *
1038! Then a section that begins with "if (lroot) ..." will behave like            *
1039! serial flexpart                                                              *
1040!                                                                              *
1041!*******************************************************************************
1042    use com_mod !, only: numpart, nspec, itra1, npoint, nclass
1043
1044    implicit none
1045
1046    integer :: i
1047
1048    logical :: use_mp_vars = .false.! use mp_... type allocated vars
1049    logical :: use_in_place = .true.! use MPI_IN_PLACE to collect vars.
1050
1051
1052! Time for MPI communications
1053    if (mp_measure_time) call mpif_mtime('commtime',0)
1054
1055! Collect particle variables from all processes to pid 0 (root)
1056!**********************************************************************
1057
1058    if (.not. use_mp_vars.and..not.use_in_place) then
1059
1060! Integers:
1061      call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
1062           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1063      if (mp_ierr /= 0) goto 600
1064      call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
1065           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1066      if (mp_ierr /= 0) goto 600
1067      call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
1068           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1069      if (mp_ierr /= 0) goto 600
1070      call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
1071           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1072      if (mp_ierr /= 0) goto 600
1073      call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
1074           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1075      if (mp_ierr /= 0) goto 600
1076
1077! int2
1078      call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
1079           &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1080      if (mp_ierr /= 0) goto 600
1081
1082! Reals:
1083      call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
1084           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1085      if (mp_ierr /= 0) goto 600
1086      call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
1087           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1088      if (mp_ierr /= 0) goto 600
1089      call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
1090           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1091      if (mp_ierr /= 0) goto 600
1092
1093      call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
1094           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1095      if (mp_ierr /= 0) goto 600
1096      call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
1097           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1098      if (mp_ierr /= 0) goto 600
1099      call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
1100           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1101      if (mp_ierr /= 0) goto 600
1102
1103
1104      call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
1105           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1106      if (mp_ierr /= 0) goto 600
1107      call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
1108           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1109      if (mp_ierr /= 0) goto 600
1110      call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
1111           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1112      if (mp_ierr /= 0) goto 600
1113
1114      do i=1, nspec
1115        call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
1116             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1117        if (mp_ierr /= 0) goto 600
1118
1119      end do
1120
1121! Use variables npoint etc directly for communications
1122!***********************************************************************
1123    else if (use_in_place.and..not.use_mp_vars) then
1124      if (lroot) then
1125! Integers:
1126        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, npoint, &
1127             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1128        if (mp_ierr /= 0) goto 600
1129        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, idt, &
1130             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1131        if (mp_ierr /= 0) goto 600
1132        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itra1, &
1133             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1134        if (mp_ierr /= 0) goto 600
1135        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, nclass, &
1136             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1137        if (mp_ierr /= 0) goto 600
1138        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itramem, &
1139             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1140        if (mp_ierr /= 0) goto 600
1141
1142! int2
1143        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER2, cbt, &
1144             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1145        if (mp_ierr /= 0) goto 600
1146
1147! Reals:
1148        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uap, &
1149             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1150        if (mp_ierr /= 0) goto 600
1151        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ucp, &
1152             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1153        if (mp_ierr /= 0) goto 600
1154        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uzp, &
1155             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1156        if (mp_ierr /= 0) goto 600
1157
1158        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, us, &
1159             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1160        if (mp_ierr /= 0) goto 600
1161        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, vs, &
1162             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1163        if (mp_ierr /= 0) goto 600
1164        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ws, &
1165             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1166        if (mp_ierr /= 0) goto 600
1167
1168
1169        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, xtra1, &
1170             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1171        if (mp_ierr /= 0) goto 600
1172        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, ytra1, &
1173             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1174        if (mp_ierr /= 0) goto 600
1175        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ztra1, &
1176             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1177        if (mp_ierr /= 0) goto 600
1178
1179        do i=1, nspec
1180          call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, xmass1(:,i), &
1181               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1182          if (mp_ierr /= 0) goto 600
1183        end do
1184
1185      else ! (for mp_pid >= 1)
1186
1187! Integers:
1188        call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
1189             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1190        if (mp_ierr /= 0) goto 600
1191        call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
1192             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1193        if (mp_ierr /= 0) goto 600
1194        call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
1195             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1196        if (mp_ierr /= 0) goto 600
1197        call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
1198             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1199        if (mp_ierr /= 0) goto 600
1200        call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
1201             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1202        if (mp_ierr /= 0) goto 600
1203
1204! int2
1205        call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
1206             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1207        if (mp_ierr /= 0) goto 600
1208
1209! Reals:
1210        call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
1211             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1212        if (mp_ierr /= 0) goto 600
1213        call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
1214             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1215        if (mp_ierr /= 0) goto 600
1216        call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
1217             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1218        if (mp_ierr /= 0) goto 600
1219
1220        call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
1221             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1222        if (mp_ierr /= 0) goto 600
1223        call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
1224             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1225        if (mp_ierr /= 0) goto 600
1226        call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
1227             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1228        if (mp_ierr /= 0) goto 600
1229
1230
1231        call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
1232             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1233        if (mp_ierr /= 0) goto 600
1234        call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
1235             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1236        if (mp_ierr /= 0) goto 600
1237        call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
1238             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1239        if (mp_ierr /= 0) goto 600
1240
1241        do i=1, nspec
1242          call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
1243               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1244          if (mp_ierr /= 0) goto 600
1245        end do
1246      end if ! (lroot)
1247    end if !  (use_in_place)
1248
1249    if (mp_measure_time) call mpif_mtime('commtime',1)
1250
1251    goto 601
1252
1253600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
1254    stop
1255601 end subroutine mpif_tm_collect_vars
1256
1257
1258  subroutine mpif_gf_send_vars(memstat)
1259!*******************************************************************************
1260! DESCRIPTION
1261!   Distribute 'getfield' variables from reader process
1262!
1263!   Called from timemanager
1264!
1265! NOTE
1266!   This subroutine distributes windfields read by the reader process to
1267!   all other processes. Usually one set of fields is transferred, but at the
1268!   first timestep, when there are no fields in memory, both are transferred.
1269!   MPI_Bcast is used, so implicitly all processes are synchronized at this
1270!   step
1271!
1272!
1273!*******************************************************************************
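! memstat values accepted by this routine:
!   memstat >= 32          -> two fields are broadcast, into memory indices 1 and 2
!   memstat = 17, 18 or 19 -> one field is broadcast, into index 1, 2 or 3
! Any other value stops the run with an error message.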
1274    use com_mod
1275    use par_mod,only: numwfmem
1276
1277    implicit none
1278
1279    integer, intent(in) :: memstat
1280
1281! Common array sizes used for communications
1282    integer, parameter :: d3_size1 = nxmax*nymax*nzmax
1283    integer, parameter :: d3_size2 = nxmax*nymax*nuvzmax
1284    integer, parameter :: d2_size1 = nxmax*nymax
1285    integer, parameter :: d2_size2 = nxmax*nymax*maxspec
1286    integer, parameter :: d2_size3 = nxmax*nymax
1287    integer, parameter :: d1_size1 = maxwf
1288
1289    integer :: d3s1,d3s2,d2s1,d2s2
1290
1291! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1292    integer :: li,ui
1293
1294! The first time the routine is called, the static (unchanging) fields are transferred
1295    logical :: first_call=.true.
1296
1297!**********************************************************************
1298
1299! Sizes of arrays transferred
1300    d3s1=d3_size1
1301    d3s2=d3_size2
1302    d2s1=d2_size1
1303    d2s2=d2_size2
1304
1305! Decide which field(s) need to be transferred
1306    if (memstat.ge.32) then ! distribute 2 fields, to 1st/2nd indices
1307      li=1; ui=2
1308      d3s1=2*d3_size1
1309      d3s2=2*d3_size2
1310      d2s1=2*d2_size1
1311      d2s2=2*d2_size2
1312    else if (memstat.eq.17) then ! distribute 1 field, place on 1st index
1313      li=1; ui=1
1314    else if (memstat.eq.18) then ! distribute 1 field, place on 2nd index
1315      li=2; ui=2
1316    else if (memstat.eq.19) then ! distribute 1 field, place on 3rd index
1317      li=3; ui=3
1318    else
1319      write(*,*) '#### mpi_mod::mpif_gf_send_vars> ERROR: ', &
1320           & 'wrong value of memstat, exiting ####', memstat
1321      stop
1322    end if
1323
1324
1325! Time for MPI communications
1326    if (mp_measure_time) call mpif_mtime('commtime',0)
1327
1328! Send variables from getfield process (id_read) to other processes
1329!**********************************************************************
1330
1331! The non-reader processes need to know if cloud water was read.
1332    call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
1333    if (mp_ierr /= 0) goto 600
1334
1335! Static fields/variables sent only at startup
1336    if (first_call) then
1337      call MPI_Bcast(oro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1338      if (mp_ierr /= 0) goto 600
1339      call MPI_Bcast(excessoro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1340      if (mp_ierr /= 0) goto 600
1341      call MPI_Bcast(lsm(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1342      if (mp_ierr /= 0) goto 600
1343      call MPI_Bcast(xlanduse(:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1344      if (mp_ierr /= 0) goto 600
1345      call MPI_Bcast(wftime,d1_size1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1346      if (mp_ierr /= 0) goto 600
1347      call MPI_Bcast(numbwf,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1348      if (mp_ierr /= 0) goto 600
1349      call MPI_Bcast(nmixz,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1350      if (mp_ierr /= 0) goto 600
1351      call MPI_Bcast(height,nzmax,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1352      if (mp_ierr /= 0) goto 600
1353
1354      first_call=.false.
1355    endif
1356
1357    call MPI_Bcast(uu(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1358    if (mp_ierr /= 0) goto 600
1359    call MPI_Bcast(vv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1360    if (mp_ierr /= 0) goto 600
1361    call MPI_Bcast(uupol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1362    if (mp_ierr /= 0) goto 600
1363    call MPI_Bcast(vvpol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1364    if (mp_ierr /= 0) goto 600
1365    call MPI_Bcast(ww(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1366    if (mp_ierr /= 0) goto 600
1367    call MPI_Bcast(tt(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1368    if (mp_ierr /= 0) goto 600
1369    call MPI_Bcast(rho(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1370    if (mp_ierr /= 0) goto 600
1371    call MPI_Bcast(drhodz(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1372    if (mp_ierr /= 0) goto 600
1373    call MPI_Bcast(tth(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1374    if (mp_ierr /= 0) goto 600
1375    call MPI_Bcast(qvh(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1376    if (mp_ierr /= 0) goto 600
1377    call MPI_Bcast(qv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1378    if (mp_ierr /= 0) goto 600
1379    call MPI_Bcast(pv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1380    if (mp_ierr /= 0) goto 600
1381    call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1382    if (mp_ierr /= 0) goto 600
1383
1384! cloud water/ice:
1385    if (readclouds) then
1386      call MPI_Bcast(ctwc(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1387      if (mp_ierr /= 0) goto 600
1388    end if
1389
1390! 2D fields
1391    call MPI_Bcast(cloudsh(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1392    if (mp_ierr /= 0) goto 600
1393    call MPI_Bcast(vdep(:,:,:,li:ui),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1394    if (mp_ierr /= 0) goto 600
1395    call MPI_Bcast(ps(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1396    if (mp_ierr /= 0) goto 600
1397    call MPI_Bcast(sd(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1398    if (mp_ierr /= 0) goto 600
1399    call MPI_Bcast(tcc(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1400    if (mp_ierr /= 0) goto 600
1401    call MPI_Bcast(tt2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1402    if (mp_ierr /= 0) goto 600
1403    call MPI_Bcast(td2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1404    if (mp_ierr /= 0) goto 600
1405    call MPI_Bcast(lsprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1406    if (mp_ierr /= 0) goto 600
1407    call MPI_Bcast(convprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1408    if (mp_ierr /= 0) goto 600
1409    call MPI_Bcast(ustar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1410    if (mp_ierr /= 0) goto 600
1411    call MPI_Bcast(wstar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1412    if (mp_ierr /= 0) goto 600
1413    call MPI_Bcast(hmix(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1414    if (mp_ierr /= 0) goto 600
1415    call MPI_Bcast(tropopause(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1416    if (mp_ierr /= 0) goto 600
1417    call MPI_Bcast(oli(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1418    if (mp_ierr /= 0) goto 600
1419
1420    if (mp_measure_time) call mpif_mtime('commtime',1)
1421
1422    goto 601
1423
1424600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
1425    stop
1426
1427601 end subroutine mpif_gf_send_vars
1428
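! Hypothetical sketch of how the synchronous distribution is driven (the actual
! guard logic lives in timemanager and may differ): every process, reader and
! non-reader alike, enters the same collective broadcasts.
!
!   if (memstat.gt.0) then
!     call mpif_gf_send_vars(memstat)
!     if (numbnests.gt.0) call mpif_gf_send_vars_nest(memstat)
!   end if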
1429
1430  subroutine mpif_gf_send_vars_nest(memstat)
1431!***********************************************************************
1432! DESCRIPTION
1433!   Distribute 'getfield' variables from reader process to all processes.
1434!   For nested fields
1435!
1436!   Called from timemanager
1437!
1438! NOTE
1439!   This subroutine distributes nested windfields from the reader process to
1440!   all other processes. Usually one set of fields is transferred, but at the
1441!   first timestep, when there are no fields in memory, both are transferred.
1442!   MPI_Bcast is used, so implicitly all processes are synchronized at this
1443!   step
1444!
1445!
1446!***********************************************************************
1447    use com_mod
1448    use par_mod,only: numwfmem
1449
1450    implicit none
1451
1452    integer, intent(in) :: memstat
1453    integer :: i
1454! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1455    integer :: li,ui
1456
1457! Common array sizes used for communications
1458    integer :: d3_size1 = nxmaxn*nymaxn*nzmax
1459    integer :: d3_size2 = nxmaxn*nymaxn*nuvzmax
1460    integer :: d2_size1 = nxmaxn*nymaxn
1461    integer :: d2_size2 = nxmaxn*nymaxn*maxspec
1462    integer :: d2_size3 = nxmaxn*nymaxn
1463
1464    integer :: d3s1,d3s2,d2s1,d2s2
1465
1466! The first time the routine is called, the static (unchangeable) fields are transferred
1467    logical :: first_call=.true.
1468
1469!**********************************************************************
1470
1471! Sizes of arrays transferred
1472    d3s1=d3_size1
1473    d3s2=d3_size2
1474    d2s1=d2_size1
1475    d2s2=d2_size2
1476
1477! Decide which field(s) need to be transfered
1478    if (memstat.ge.32) then ! distribute 2 fields
1479      li=1; ui=2
1480      d3s1=2*d3_size1
1481      d3s2=2*d3_size2
1482      d2s1=2*d2_size1
1483      d2s2=2*d2_size2
1484    else if (memstat.eq.17) then ! distribute 1 field, on 1st index
1485      li=1; ui=1
1486    else if (memstat.eq.18) then ! distribute 1 field, on 2nd index
1487      li=2; ui=2
1488    else if (memstat.eq.19) then ! distribute 1 field, on 3rd index
1489      li=3; ui=3
1490    else
1491      write(*,*) '#### mpi_mod::mpif_gf_send_vars_nest> ERROR: ', &
1492           & 'wrong value of memstat, exiting ####', memstat
1493      stop
1494    end if
1495
1496
1497! Time for MPI communications
1498    if (mp_measure_time) call mpif_mtime('commtime',0)
1499
1500! Distribute variables, send from getfield process (id_read) to other
1501! processes
1502!**********************************************************************
1503
1504! The non-reader processes need to know if cloud water was read.
1505    call MPI_Bcast(readclouds_nest,maxnests,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
1506    if (mp_ierr /= 0) goto 600
1507
1508! Static fields/variables sent only at startup
1509    if (first_call) then
1510      call MPI_Bcast(oron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1511      if (mp_ierr /= 0) goto 600
1512      call MPI_Bcast(excessoron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1513      if (mp_ierr /= 0) goto 600
1514      call MPI_Bcast(lsmn(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1515      if (mp_ierr /= 0) goto 600
1516      call MPI_Bcast(xlandusen(:,:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1517      if (mp_ierr /= 0) goto 600
1518      first_call=.false.
1519    end if
1520
1521! MPI prefers contiguous arrays for sending (else a buffer is created),
1522! hence the loop over nests
1523!**********************************************************************
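! For a fixed nest index i, uun(:,:,:,li:ui,i) is one contiguous block in
! memory (column-major order, nest index outermost), so MPI can send it
! directly.  A single broadcast over all nests, e.g.
!   call MPI_Bcast(uun(:,:,:,li:ui,:),numbnests*d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
! would pass a non-contiguous section whenever li:ui does not span the full
! time dimension, forcing the library to pack it into a temporary buffer.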
1524    do i=1, numbnests
1525! 3D fields
1526      call MPI_Bcast(uun(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1527      if (mp_ierr /= 0) goto 600
1528      call MPI_Bcast(vvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1529      if (mp_ierr /= 0) goto 600
1530      call MPI_Bcast(wwn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1531      if (mp_ierr /= 0) goto 600
1532      call MPI_Bcast(ttn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1533      if (mp_ierr /= 0) goto 600
1534      call MPI_Bcast(rhon(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1535      if (mp_ierr /= 0) goto 600
1536      call MPI_Bcast(drhodzn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1537      if (mp_ierr /= 0) goto 600
1538      call MPI_Bcast(tthn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1539      if (mp_ierr /= 0) goto 600
1540      call MPI_Bcast(qvhn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1541      if (mp_ierr /= 0) goto 600
1542      call MPI_Bcast(qvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1543      if (mp_ierr /= 0) goto 600
1544      call MPI_Bcast(pvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1545      if (mp_ierr /= 0) goto 600
1546      call MPI_Bcast(cloudsn(:,:,:,li:ui,i),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1547      if (mp_ierr /= 0) goto 600
1548
1549! cloud water/ice:
1550      if (readclouds_nest(i)) then
1551! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1552! if (mp_ierr /= 0) goto 600
1553        call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1554        if (mp_ierr /= 0) goto 600
1555      end if
1556
1557! 2D fields
1558      call MPI_Bcast(cloudshn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1559      if (mp_ierr /= 0) goto 600
1560      call MPI_Bcast(vdepn(:,:,:,li:ui,i),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1561      if (mp_ierr /= 0) goto 600
1562      call MPI_Bcast(psn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1563      if (mp_ierr /= 0) goto 600
1564      call MPI_Bcast(sdn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1565      if (mp_ierr /= 0) goto 600
1566      call MPI_Bcast(tccn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1567      if (mp_ierr /= 0) goto 600
1568      call MPI_Bcast(tt2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1569      if (mp_ierr /= 0) goto 600
1570      call MPI_Bcast(td2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1571      if (mp_ierr /= 0) goto 600
1572      call MPI_Bcast(lsprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1573      if (mp_ierr /= 0) goto 600
1574      call MPI_Bcast(convprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1575      if (mp_ierr /= 0) goto 600
1576      call MPI_Bcast(ustarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1577      if (mp_ierr /= 0) goto 600
1578      call MPI_Bcast(wstarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1579      if (mp_ierr /= 0) goto 600
1580      call MPI_Bcast(olin(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1581      if (mp_ierr /= 0) goto 600
1582      call MPI_Bcast(hmixn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1583      if (mp_ierr /= 0) goto 600
1584      call MPI_Bcast(tropopausen(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1585      if (mp_ierr /= 0) goto 600
1586    end do
1587
1588    if (mp_measure_time) call mpif_mtime('commtime',1)
1589
1590    goto 601
1591
1592600 write(*,*) "mpi_mod> mp_ierr /= 0",mp_ierr
1593    stop
1594
1595601 end subroutine mpif_gf_send_vars_nest
1596
1597
1598  subroutine mpif_gf_send_vars_async(memstat)
1599!*******************************************************************************
1600! DESCRIPTION
1601!   Distribute 'getfield' variables from reader process to all processes.
1602!   Called from timemanager by reader process only
1603!
1604! NOTE
1605!   This version uses asynchronous sends. The newest fields are sent in the
1606!   background, so calculations can continue while
1607!   MPI communications are performed.
1608!
1609!   The way the MPI tags/requests are sequenced means that this routine must
1610!   carefully match routine 'mpif_gf_recv_vars_async'
1611!
1612! VARIABLES
1613!   memstat -- input, for resolving pointer to windfield index being read
1614!   mind    -- index where to place new fields
1615!
1616!
1617!*******************************************************************************
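! Request bookkeeping (as used below): each destination rank owns a block of
! nvar_async slots in the shared request array 'reqs', starting at
! dest*nvar_async; the matching receiver posts its MPI_Irecv calls into the
! block starting at mp_pid*nvar_async, and mpif_gf_request later completes all
! nvar_async*mp_np requests with a single MPI_Waitall.  Sketch:
!   i=dest*nvar_async
!   call MPI_Isend(uu(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
!   i=i+1
!   ...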
1618    use com_mod
1619
1620    implicit none
1621
1622    integer, intent(in) :: memstat
1623    integer :: mind
1624    integer :: dest,i
1625
1626! Common array sizes used for communications
1627    integer :: d3s1 = nxmax*nymax*nzmax
1628    integer :: d3s2 = nxmax*nymax*nuvzmax
1629    integer :: d2s1 = nxmax*nymax
1630    integer :: d2s2 = nxmax*nymax*maxspec
1631!    integer :: d1s1 = maxwf
1632
1633!*******************************************************************************
1634
1635! At the time the send is posted, the reader process is one step further
1636! in the permutation of memstat compared with the receiving processes
1637
1638    if (memstat.ge.32) then
1639! last read was synchronous, to indices 1 and 2, 3 is free
1640      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1641           & memstat>=32 should never happen here."
1642      stop
1643    else if (memstat.eq.17) then
1644! old fields on 1,2, send 3
1645      mind=3
1646    else if (memstat.eq.18) then
1647! old fields on 2,3, send 1
1648      mind=1
1649    else if (memstat.eq.19) then
1650! old fields on 3,1, send 2
1651      mind=2
1652    else
1653      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1654           & invalid memstat"
1655      mind=-1
1656      stop
1657    end if
1658
1659    if (mp_dev_mode) then
1660      if (mp_pid.ne.id_read) then
1661        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_async'
1662      end if
1663    end if
1664
1665    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat
1666
1667! Time for MPI communications
1668    if (mp_measure_time) call mpif_mtime('commtime',0)
1669
1670! Loop over receiving processes, initiate data sending
1671!*****************************************************
1672
1673    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
1674! TODO: use mp_partgroup_np here
1675      if (dest.eq.id_read) cycle
1676      i=dest*nvar_async
1677      call MPI_Isend(uu(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1678      if (mp_ierr /= 0) goto 600
1679      i=i+1
1680      call MPI_Isend(vv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1681      if (mp_ierr /= 0) goto 600
1682      i=i+1
1683      call MPI_Isend(uupol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1684      if (mp_ierr /= 0) goto 600
1685      i=i+1
1686      call MPI_Isend(vvpol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1687      if (mp_ierr /= 0) goto 600
1688      i=i+1
1689      call MPI_Isend(ww(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1690      if (mp_ierr /= 0) goto 600
1691      i=i+1
1692      call MPI_Isend(tt(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1693      if (mp_ierr /= 0) goto 600
1694      i=i+1
1695      call MPI_Isend(rho(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1696      if (mp_ierr /= 0) goto 600
1697      i=i+1
1698      call MPI_Isend(drhodz(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1699      if (mp_ierr /= 0) goto 600
1700      i=i+1
1701      call MPI_Isend(tth(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1702      if (mp_ierr /= 0) goto 600
1703      i=i+1
1704      call MPI_Isend(qvh(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1705      if (mp_ierr /= 0) goto 600
1706      i=i+1
1707      call MPI_Isend(qv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1708      if (mp_ierr /= 0) goto 600
1709      i=i+1
1710      call MPI_Isend(pv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1711      if (mp_ierr /= 0) goto 600
1712      i=i+1
1713      call MPI_Isend(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1714      i=i+1
1715      if (mp_ierr /= 0) goto 600
1716      call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1717      if (mp_ierr /= 0) goto 600
1718      i=i+1
1719      call MPI_Isend(vdep(:,:,:,mind),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1720      if (mp_ierr /= 0) goto 600
1721      i=i+1
1722! 15
1723      call MPI_Isend(ps(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1724      if (mp_ierr /= 0) goto 600
1725      i=i+1
1726      call MPI_Isend(sd(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1727      if (mp_ierr /= 0) goto 600
1728      i=i+1
1729      call MPI_Isend(tcc(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1730      if (mp_ierr /= 0) goto 600
1731      i=i+1
1732      call MPI_Isend(tt2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1733      if (mp_ierr /= 0) goto 600
1734      i=i+1
1735      call MPI_Isend(td2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1736      if (mp_ierr /= 0) goto 600
1737      i=i+1
1738      call MPI_Isend(lsprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1739      if (mp_ierr /= 0) goto 600
1740      i=i+1
1741      call MPI_Isend(convprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1742      if (mp_ierr /= 0) goto 600
1743      i=i+1
1744      call MPI_Isend(ustar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1745      if (mp_ierr /= 0) goto 600
1746      i=i+1
1747      call MPI_Isend(wstar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1748      if (mp_ierr /= 0) goto 600
1749      i=i+1
1750      call MPI_Isend(hmix(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1751      if (mp_ierr /= 0) goto 600
1752      i=i+1
1753! 25
1754      call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1755      if (mp_ierr /= 0) goto 600
1756      i=i+1
1757      call MPI_Isend(oli(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1758      if (mp_ierr /= 0) goto 600
1759
1760! Send cloud water if it exists. Increment counter always (as on receiving end)
1761      if (readclouds) then
1762        i=i+1
1763        call MPI_Isend(ctwc(:,:,mind),d2s1,mp_sp,dest,tm1,&
1764             &MPI_COMM_WORLD,reqs(i),mp_ierr)
1765        if (mp_ierr /= 0) goto 600
1766      end if
1767    end do
1768
1769    if (mp_measure_time) call mpif_mtime('commtime',1)
1770
1771    goto 601
1772
1773600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> mp_ierr /= 0", mp_ierr
1774    stop
1775
1776601 end subroutine mpif_gf_send_vars_async
1777
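! Note: MPI_Isend only initiates the transfers; the reader must not overwrite
! time slice 'mind' of the wind-field arrays until the requests posted above
! have completed (MPI_Waitall in mpif_gf_request).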
1778
1779  subroutine mpif_gf_recv_vars_async(memstat)
1780!*******************************************************************************
1781! DESCRIPTION
1782!   Receive 'getfield' variables from reader process.
1783!   Called from timemanager by all processes except reader
1784!
1785! NOTE
1786!   This version uses asynchronous communications.
1787!
1788! VARIABLES
1789!   memstat -- input, used to resolve windfield index being received
1790!
1791!
1792!*******************************************************************************
1793    use com_mod
1794
1795    implicit none
1796
1797    integer, intent(in) :: memstat
1798    integer :: mind,j
1799
1800! Common array sizes used for communications
1801    integer :: d3s1 = nxmax*nymax*nzmax
1802    integer :: d3s2 = nxmax*nymax*nuvzmax
1803    integer :: d2s1 = nxmax*nymax
1804    integer :: d2s2 = nxmax*nymax*maxspec
1805!integer :: d1_size1 = maxwf
1806
1807!    integer :: d3s1,d3s2,d2s1,d2s2
1808!*******************************************************************************
1809
1810! At the time this immediate receive is posted, memstat is the state of
1811! windfield indices at the previous/current time. From this, the future
1812! state is deduced.
1813    if (memstat.eq.32) then
1814! last read was synchronous to indices 1 and 2, 3 is free
1815      mind=3
1816    else if (memstat.eq.17) then
1817! last read was asynchronous to index 3, 1 is free
1818      mind=1
1819    else if (memstat.eq.18) then
1820! last read was asynchronous to index 1, 2 is free
1821      mind=2
1822    else if (memstat.eq.19) then
1823! last read was asynchronous to index 2, 3 is free
1824      mind=3
1825    else
1826! illegal state
1827      write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
1828      stop
1829    end if
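! The three memory slots are thus used cyclically (... -> 1 -> 2 -> 3 -> 1 -> ...);
! 'mind' is always the slot that is currently free and about to be filled.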
1830
1831    if (mp_dev_mode) then
1832      if (mp_pid.eq.id_read) then
1833        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async'
1834      end if
1835    end if
1836
1837! Time for MPI communications
1838    if (mp_measure_time) call mpif_mtime('commtime',0)
1839
1840    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_async, memstat:', memstat
1841
1842! Initiate receiving of data
1843!***************************
1844
1845! Get MPI tags/requests for communications
1846    j=mp_pid*nvar_async
1847    call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1848         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1849    if (mp_ierr /= 0) goto 600
1850    j=j+1
1851    call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1852         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1853    if (mp_ierr /= 0) goto 600
1854    j=j+1
1855    call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1856         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1857    if (mp_ierr /= 0) goto 600
1858    j=j+1
1859    call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1860         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1861    if (mp_ierr /= 0) goto 600
1862    j=j+1
1863    call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1864         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1865    if (mp_ierr /= 0) goto 600
1866    j=j+1
1867    call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1868         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1869    if (mp_ierr /= 0) goto 600
1870    j=j+1
1871    call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1872         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1873    if (mp_ierr /= 0) goto 600
1874    j=j+1
1875    call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1876         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1877    if (mp_ierr /= 0) goto 600
1878    j=j+1
1879    call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
1880         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1881    if (mp_ierr /= 0) goto 600
1882    j=j+1
1883    call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
1884         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1885    if (mp_ierr /= 0) goto 600
1886    j=j+1
1887    call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1888         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1889    if (mp_ierr /= 0) goto 600
1890    j=j+1
1891    call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1892         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1893    if (mp_ierr /= 0) goto 600
1894    j=j+1
1895    call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
1896         &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1897    if (mp_ierr /= 0) goto 600
1898    j=j+1
1899    call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1900         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1901    if (mp_ierr /= 0) goto 600
1902    j=j+1
1903    call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
1904         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1905    if (mp_ierr /= 0) goto 600
1906    j=j+1
1907    call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1908         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1909    if (mp_ierr /= 0) goto 600
1910    j=j+1
1911    call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1912         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1913    if (mp_ierr /= 0) goto 600
1914    j=j+1
1915    call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1916         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1917    if (mp_ierr /= 0) goto 600
1918    j=j+1
1919    call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1920         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1921    if (mp_ierr /= 0) goto 600
1922    j=j+1
1923    call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1924         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1925    if (mp_ierr /= 0) goto 600
1926    j=j+1
1927    call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1928         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1929    if (mp_ierr /= 0) goto 600
1930    j=j+1
1931    call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1932         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1933    if (mp_ierr /= 0) goto 600
        j=j+1
1934    call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1935         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1936    if (mp_ierr /= 0) goto 600
1937    j=j+1
1938    call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1939         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1940    if (mp_ierr /= 0) goto 600
1941    j=j+1
1942    call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1943         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1944    if (mp_ierr /= 0) goto 600
1945    j=j+1
1946    call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1947         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1948    if (mp_ierr /= 0) goto 600
1949    j=j+1
1950    call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1951         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1952    if (mp_ierr /= 0) goto 600
1953
1954! Post request for clwc. These data are possibly not sent; the request must then be cancelled.
1955! For now, assume that the data at all time steps either all have or all lack cloud water.
1956    if (readclouds) then
1957      j=j+1
1958      call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
1959           &MPI_COMM_WORLD,reqs(j),mp_ierr)
1960      if (mp_ierr /= 0) goto 600
1961    end if
1962
1963
1964    if (mp_measure_time) call mpif_mtime('commtime',1)
1965
1966    goto 601
1967
1968600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> MPI ERROR ", mp_ierr
1969    stop
1970
1971601 end subroutine mpif_gf_recv_vars_async
1972
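! Hypothetical usage sketch of the asynchronous exchange (the precise
! sequencing is implemented in timemanager):
!
!   if (mp_pid.eq.id_read) then
!     call mpif_gf_send_vars_async(memstat)   ! reader posts MPI_Isend
!   else
!     call mpif_gf_recv_vars_async(memstat)   ! other processes post MPI_Irecv
!   end if
!   ... particle transport using the fields already in memory ...
!   call mpif_gf_request                      ! MPI_Waitall completes the transfers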
1973
1974  subroutine mpif_gf_send_vars_nest_async(memstat)
1975!*******************************************************************************
1976! DESCRIPTION
1977!   Distribute 'getfield' variables from reader process to all processes.
1978!   Called from timemanager by reader process only.
1979!   Version for nested wind fields
1980!
1981! NOTE
1982!   This version uses asynchronous sends. The newest fields are sent in the
1983!   background, so calculations can continue while
1984!   MPI communications are performed.
1985!
1986!   The way the MPI tags/requests are sequenced means that this routine must
1987!   carefully match routine 'mpif_gf_recv_vars_nest_async'
1988!
1989! VARIABLES
1990!   memstat -- input, for resolving pointer to windfield index being read
1991!   mind    -- index where to place new fields
1992!
1993! TODO
1994!   Some unused arrays are currently sent (uupoln,..)
1995!*******************************************************************************
1996    use com_mod
1997
1998    implicit none
1999
2000    integer, intent(in) :: memstat
2001    integer :: mind
2002    integer :: dest,i,k
2003
2004! Common array sizes used for communications
2005    integer :: d3s1 = nxmaxn*nymaxn*nzmax
2006    integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
2007    integer :: d2s1 = nxmaxn*nymaxn
2008    integer :: d2s2 = nxmaxn*nymaxn*maxspec
2009
2010!*******************************************************************************
2011
2012! At the time the send is posted, the reader process is one step further
2013! in the permutation of memstat compared with the receiving processes
2014
2015    if (memstat.ge.32) then
2016! last read was synchronous, to indices 1 and 2, 3 is free
2017      write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
2018           & memstat>=32 should never happen here."
2019      stop
2020    else if (memstat.eq.17) then
2021! old fields on 1,2, send 3
2022      mind=3
2023    else if (memstat.eq.18) then
2024! old fields on 2,3, send 1
2025      mind=1
2026    else if (memstat.eq.19) then
2027! old fields on 3,1, send 2
2028      mind=2
2029    else
2030      write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
2031           & invalid memstat"
2032      mind=-1
2033      stop
2034    end if
2035
2036    if (mp_dev_mode) then
2037      if (mp_pid.ne.id_read) then
2038        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_nest_async'
2039      end if
2040    end if
2041
2042    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_nest_async, memstat:', memstat
2043
2044! Time for MPI communications
2045    if (mp_measure_time) call mpif_mtime('commtime',0)
2046
2047! Loop over receiving processes, initiate data sending
2048!*****************************************************
2049
2050    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
2051! TODO: use mp_partgroup_np here
2052      if (dest.eq.id_read) cycle
2053      do k=1, numbnests
2054        i=dest*nvar_async
2055        call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2056        if (mp_ierr /= 0) goto 600
2057        i=i+1
2058        call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2059        if (mp_ierr /= 0) goto 600
2060        i=i+1
2061        call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2062        if (mp_ierr /= 0) goto 600
2063        i=i+1
2064        call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2065        if (mp_ierr /= 0) goto 600
2066        i=i+1
2067        call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2068        if (mp_ierr /= 0) goto 600
2069        i=i+1
2070        call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2071        if (mp_ierr /= 0) goto 600
2072        i=i+1
2073        call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2074        if (mp_ierr /= 0) goto 600
2075        i=i+1
2076        call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2077        if (mp_ierr /= 0) goto 600
2078        i=i+1
2079        call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2080        if (mp_ierr /= 0) goto 600
2081        i=i+1
2082        call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2083        if (mp_ierr /= 0) goto 600
2084        i=i+1
2085        call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2086        i=i+1
2087        if (mp_ierr /= 0) goto 600
2088        call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2089        if (mp_ierr /= 0) goto 600
2090        i=i+1
2091        call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2092        if (mp_ierr /= 0) goto 600
2093        i=i+1
2094        call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2095        if (mp_ierr /= 0) goto 600
2096        i=i+1
2097        call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2098        if (mp_ierr /= 0) goto 600
2099        i=i+1
2100! 15
2101        call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2102        if (mp_ierr /= 0) goto 600
2103        i=i+1
2104        call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2105        if (mp_ierr /= 0) goto 600
2106        i=i+1
2107        call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2108        if (mp_ierr /= 0) goto 600
2109        i=i+1
2110        call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2111        if (mp_ierr /= 0) goto 600
2112        i=i+1
2113        call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2114        if (mp_ierr /= 0) goto 600
2115        i=i+1
2116        call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2117        if (mp_ierr /= 0) goto 600
2118        i=i+1
2119        call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2120        if (mp_ierr /= 0) goto 600
2121        i=i+1
2122        call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2123        if (mp_ierr /= 0) goto 600
2124        i=i+1
2125        call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2126        if (mp_ierr /= 0) goto 600
2127        i=i+1
2128        call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2129        if (mp_ierr /= 0) goto 600
2130! 25
2131
2132! Send cloud water if it exists. Increment counter always (as on receiving end)
2133        if (readclouds) then
2134          i=i+1
2135          call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,&
2136               &MPI_COMM_WORLD,reqs(i),mp_ierr)
2137          if (mp_ierr /= 0) goto 600
2138        end if
2139      end do
2140    end do
2141
2142    if (mp_measure_time) call mpif_mtime('commtime',1)
2143
2144    goto 601
2145
2146600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> mp_ierr /= 0", mp_ierr
2147    stop
2148
2149601 end subroutine mpif_gf_send_vars_nest_async
2150
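! Note: the request-slot base i=dest*nvar_async is reset for every nest k, so
! with more than one nest the handles stored for earlier nests are overwritten;
! see the TODO in mpif_gf_request about separate request arrays for nested fields.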
2151
2152  subroutine mpif_gf_recv_vars_nest_async(memstat)
2153!*******************************************************************************
2154! DESCRIPTION
2155!   Receive 'getfield' variables from reader process.
2156!   Called from timemanager by all processes except reader
2157!   Version for nested wind fields
2158!
2159! NOTE
2160!   This version uses asynchronous communications.
2161!
2162! VARIABLES
2163!   memstat -- input, used to resolve windfield index being received
2164!
2165!
2166!*******************************************************************************
2167    use com_mod
2168
2169    implicit none
2170
2171    integer, intent(in) :: memstat
2172    integer :: mind,j,k
2173
2174! Common array sizes used for communications
2175    integer :: d3s1 = nxmaxn*nymaxn*nzmax
2176    integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
2177    integer :: d2s1 = nxmaxn*nymaxn
2178    integer :: d2s2 = nxmaxn*nymaxn*maxspec
2179
2180!*******************************************************************************
2181
2182! At the time this immediate receive is posted, memstat is the state of
2183! windfield indices at the previous/current time. From this, the future
2184! state is deduced.
2185    if (memstat.eq.32) then
2186! last read was synchronous to indices 1 and 2, 3 is free
2187      mind=3
2188    else if (memstat.eq.17) then
2189! last read was asynchronous to index 3, 1 is free
2190      mind=1
2191    else if (memstat.eq.18) then
2192! last read was asynchronous to index 1, 2 is free
2193      mind=2
2194    else if (memstat.eq.19) then
2195! last read was asynchronous to index 2, 3 is free
2196      mind=3
2197    else
2198! illegal state
2199      write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
2200      stop
2201    end if
2202
2203    if (mp_dev_mode) then
2204      if (mp_pid.eq.id_read) then
2205        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_nest_async'
2206      end if
2207    end if
2208
2209! Time for MPI communications
2210    if (mp_measure_time) call mpif_mtime('commtime',0)
2211
2212    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_nest_async, memstat:', memstat
2213
2214! Initiate receiving of data
2215!***************************
2216
2217    do k=1, numbnests
2218! Get MPI tags/requests for communications
2219      j=mp_pid*nvar_async
2220      call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2221           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2222      if (mp_ierr /= 0) goto 600
2223      j=j+1
2224      call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2225           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2226      if (mp_ierr /= 0) goto 600
2227      j=j+1
2228      call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2229           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2230      if (mp_ierr /= 0) goto 600
2231      j=j+1
2232      call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2233           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2234      if (mp_ierr /= 0) goto 600
2235      j=j+1
2236      call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2237           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2238      if (mp_ierr /= 0) goto 600
2239      j=j+1
2240      call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2241           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2242      if (mp_ierr /= 0) goto 600
2243      j=j+1
2244      call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
2245           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2246      if (mp_ierr /= 0) goto 600
2247      j=j+1
2248      call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
2249           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2250      if (mp_ierr /= 0) goto 600
2251      j=j+1
2252      call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2253           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2254      if (mp_ierr /= 0) goto 600
2255      j=j+1
2256      call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2257           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2258      if (mp_ierr /= 0) goto 600
2259      j=j+1
2260      call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
2261           &MPI_COMM_WORLD,reqs(j),mp_ierr)   
2262      if (mp_ierr /= 0) goto 600
2263      j=j+1
2264      call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2265           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2266      if (mp_ierr /= 0) goto 600
2267      j=j+1
2268      call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
2269           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2270      if (mp_ierr /= 0) goto 600
2271      j=j+1
2272      call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2273           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2274      if (mp_ierr /= 0) goto 600
2275      j=j+1
2276      call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2277           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2278      if (mp_ierr /= 0) goto 600
2279      j=j+1
2280      call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2281           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2282      if (mp_ierr /= 0) goto 600
2283      j=j+1
2284      call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2285           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2286      if (mp_ierr /= 0) goto 600
2287      j=j+1
2288      call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2289           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2290      if (mp_ierr /= 0) goto 600
2291      j=j+1
2292      call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2293           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2294      if (mp_ierr /= 0) goto 600
2295      j=j+1
2296      call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2297           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2298      if (mp_ierr /= 0) goto 600
          j=j+1
2299      call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2300           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2301      if (mp_ierr /= 0) goto 600
2302      j=j+1
2303      call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2304           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2305      if (mp_ierr /= 0) goto 600
2306      j=j+1
2307      call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2308           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2309      if (mp_ierr /= 0) goto 600
2310      j=j+1
2311      call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2312           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2313      if (mp_ierr /= 0) goto 600
2314      j=j+1
2315      call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2316           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2317      if (mp_ierr /= 0) goto 600
2318
2319! Post request for clwc. These data are possibly not sent; the request must then be cancelled.
2320! For now, assume that the data at all time steps either all have or all lack cloud water.
2321      if (readclouds) then
2322        j=j+1
2323        call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
2324             &MPI_COMM_WORLD,reqs(j),mp_ierr)
2325        if (mp_ierr /= 0) goto 600
2326      end if
2327    end do
2328
2329    if (mp_measure_time) call mpif_mtime('commtime',1)
2330
2331    goto 601
2332
2333600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_nest_async> MPI ERROR ", mp_ierr
2334    stop
2335
2336601 end subroutine mpif_gf_recv_vars_nest_async
2337
2338
2339  subroutine mpif_gf_request
2340!*******************************************************************************
2341! DESCRIPTION
2342!   Check for completion of MPI_Isend/MPI_Irecv data transfer.
2343!   
2344!   
2345! NOTE
2346!   implicit synchronisation between all processes takes place here
2347!
2348! TODO
2349!   NB: take into account nested wind fields by using a separate
2350!   subroutine mpif_gf_request_nest (and separate request arrays for the
2351!   nested variables)
2352!
2353!*******************************************************************************
2354!    use com_mod,only: readclouds
2355
2356    implicit none
2357
2358
2359    integer :: n_req !,i
2360
2361!***********************************************************************
2362
2363    n_req=nvar_async*mp_np
2364
2365    if (mp_measure_time) call mpif_mtime('commtime',0)
2366
2367!    call MPI_Wait(rm1,MPI_STATUS_IGNORE,mp_ierr)
2368
2369! TODO: cancel recv request if readclouds=.false.
2370!    if (readclouds) then
2371    call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
2372!    endif
2373! else
2374!   do i = 0, nvar_async*mp_np-1
2375!     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
2376!       call MPI_Cancel(reqs(i),mp_ierr)
2377!       cycle
2378!     end if
2379!     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
2380!   end do
2381! end if
2382
2383    if (mp_ierr /= 0) goto 600
2384
2385    if (mp_measure_time) call mpif_mtime('commtime',1)
2386
2387    goto 601
2388
2389600 write(*,*) "#### mpi_mod::mpif_gf_request> MPI ERROR ", mp_ierr
2390    stop
2391
2392601 end subroutine mpif_gf_request
2393
2394
2395  subroutine mpif_tm_reduce_grid
2396!***********************************************************************
2397! Collect grid variable to PID 0, adding from all processes.
2398!
2399! NOTE
2400!   - Older MPI implementations may lack the MPI_IN_PLACE operation.
2401!     If so use 1) below and change unc_mod
2402!
2403!***********************************************************************
2404    use com_mod
2405    use unc_mod
2406    use par_mod
2407
2408    implicit none
2409
2410    integer :: grid_size2d,grid_size3d
2411    integer, parameter :: rcpt_size=maxreceptor*maxspec
2412
2413!**********************************************************************
2414    grid_size2d=numxgrid*numygrid*maxspec* &
2415         & maxpointspec_act*nclassunc*maxageclass
2416    grid_size3d=numxgrid*numygrid*numzgrid*maxspec* &
2417         & maxpointspec_act*nclassunc*maxageclass
2418
2419
2420! Time for MPI communications
2421    if (mp_measure_time) call mpif_mtime('commtime',0)
2422
2423! 1) Using a separate grid (gridunc0) for received values
2424! call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2425!      & mp_comm_used, mp_ierr)
2426! if (mp_ierr /= 0) goto 600
2427
2428! 2) Using in-place reduction
2429
2430!!!--------------------------------------------------------------------
2431!!! DJM - 2017-05-09 change - MPI_IN_PLACE option for MPI_Reduce() causes
2432!!! severe numerical problems in some cases.  I'm guessing there are memory
2433!!! overrun issues in this complex code, but have so far failed to identify
2434!!! a specific problem.  And/or, when one searches the Internet for this
2435!!! issue, there is "some" hint that the implementation may be buggy. 
2436!!!
2437!!! At this point, with the following CPP USE_MPIINPLACE directive, the
2438!!! default behaviour will be to not use the MPI_IN_PLACE option.
2439!!! Users will have to compile with -DUSE_MPIINPLACE if they want that option.
2440!!! Introduction of the CPP directives also requires that the code be compiled
2441!!! with the "-x f95-cpp-input" option.
2442!!!
2443!!! Modification of this section requires the addition of array gridunc0, which
2444!!! requires an allocation in outgrid_init.f90 and initial declaration in
2445!!! unc_mod.f90.
2446!!!--------------------------------------------------------------------
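! Example compile line (hypothetical; adapt the compiler wrapper and flags to
! your system) enabling the in-place reduction together with the required
! preprocessing:
!   mpifort -x f95-cpp-input -DUSE_MPIINPLACE -c mpi_mod.f90
! Without -DUSE_MPIINPLACE the code below reduces into the separate arrays
! gridunc0/creceptor0 instead.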
2447
2448#ifdef USE_MPIINPLACE
2449
2450    if (lroot) then
2451      call MPI_Reduce(MPI_IN_PLACE, gridunc, grid_size3d, mp_sp, MPI_SUM, id_root, &
2452           & mp_comm_used, mp_ierr)
2453      if (mp_ierr /= 0) goto 600
2454    else
2455      call MPI_Reduce(gridunc, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2456           & mp_comm_used, mp_ierr)
2457    end if
2458
2459  ! Receptor concentrations   
2460    if (lroot) then
2461      call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
2462           & mp_comm_used,mp_ierr)
2463      if (mp_ierr /= 0) goto 600
2464    else
2465      call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
2466           & mp_comm_used,mp_ierr)
2467    end if
2468
2469#else
2470
2471      call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2472           & mp_comm_used, mp_ierr)
2473      if (mp_ierr /= 0) goto 600
2474      if (lroot) gridunc = gridunc0
2475
2476      call MPI_Reduce(creceptor, creceptor0,rcpt_size,mp_sp,MPI_SUM,id_root, &
2477           & mp_comm_used,mp_ierr)
2478      if (mp_ierr /= 0) goto 600
2479      if (lroot) creceptor = creceptor0
2480
2481#endif
2482
2483    if ((WETDEP).and.(ldirect.gt.0)) then
2484      call MPI_Reduce(wetgridunc, wetgridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2485           & mp_comm_used, mp_ierr)
2486      if (mp_ierr /= 0) goto 600
2487    end if
2488
2489    if ((DRYDEP).and.(ldirect.gt.0)) then
2490      call MPI_Reduce(drygridunc, drygridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2491           & mp_comm_used, mp_ierr)
2492      if (mp_ierr /= 0) goto 600
2493    end if
2494
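! After these reductions only the root process (lroot) holds the summed
! grids; the local partial sums on the other processes are left unchanged.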
2495
2496    if (mp_measure_time) call mpif_mtime('commtime',1)
2497
2498    goto 601
2499
2500600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
2501    stop
2502
2503601 end subroutine mpif_tm_reduce_grid
2504
2505
2506  subroutine mpif_tm_reduce_grid_nest
2507!***********************************************************************
2508! Collect nested grids to PID 0, adding from all processes.
2509!
2510! NOTE
2511!   - Compiling with 'fcheck=all' (gfortran) will cause a run-time error, as
2512!     wetgriduncn0 is only allocated on the root process. Arguably valid MPI
2513!     (the receive buffer is significant only on root), but rejected by gfortran's run-time checks.
2514!
2515!***********************************************************************
2516    use com_mod
2517    use unc_mod
2518    use par_mod
2519
2520    implicit none
2521
2522    integer :: grid_size2d,grid_size3d
2523
2524!**********************************************************************
2525
2526    grid_size3d=numxgridn*numygridn*numzgrid*maxspec* &
2527         & maxpointspec_act*nclassunc*maxageclass
2528    grid_size2d=numxgridn*numygridn*maxspec* &
2529         & maxpointspec_act*nclassunc*maxageclass
2530
2531
2532! Time for MPI communications
2533    if (mp_measure_time) call mpif_mtime('commtime',0)
2534
2535! Using a separate grid (gridunc0) for received values, for debugging
2536! call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2537!      & mp_comm_used, mp_ierr)
2538! if (mp_ierr /= 0) goto 600
2539
2540#ifdef USE_MPIINPLACE
2541! Using in-place reduction
2542    if (lroot) then
2543      call MPI_Reduce(MPI_IN_PLACE, griduncn, grid_size3d, mp_sp, MPI_SUM, id_root, &
2544           & mp_comm_used, mp_ierr)
2545      if (mp_ierr /= 0) goto 600
2546    else
2547      call MPI_Reduce(griduncn, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2548           & mp_comm_used, mp_ierr)
2549      if (mp_ierr /= 0) goto 600
2550    end if
2551
2552#else
2553    call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2554         & mp_comm_used, mp_ierr)
2555    if (mp_ierr /= 0) goto 600
2556    if (lroot) griduncn = griduncn0
2557
2558#endif
2559
2560    if ((WETDEP).and.(ldirect.gt.0)) then
2561      call MPI_Reduce(wetgriduncn, wetgriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2562           & mp_comm_used, mp_ierr)
2563      if (mp_ierr /= 0) goto 600
2564    end if
2565
2566    if ((DRYDEP).and.(ldirect.gt.0)) then
2567      call MPI_Reduce(drygriduncn, drygriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2568           & mp_comm_used, mp_ierr)
2569      if (mp_ierr /= 0) goto 600
2570    end if
2571
2572    if (mp_measure_time) call mpif_mtime('commtime',1)
2573
2574    goto 601
2575
2576600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
2577    stop
2578
2579601 end subroutine mpif_tm_reduce_grid_nest
2580
2581
2582  subroutine mpif_mtime(ident,imode)
2583!***********************************************************************
2584! Measure CPU/Wall time in various parts of the code
2585!
2586! VARIABLES
2587!   ident        character, identifies routine to measure
2588!   imode        integer, 0:start clock(s)  1: stop clock(s)
2589!
2590!***********************************************************************
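! Usage sketch (this is the pattern used throughout this module):
!   if (mp_measure_time) call mpif_mtime('commtime',0)   ! start clock(s)
!   ... timed section ...
!   if (mp_measure_time) call mpif_mtime('commtime',1)   ! stop and accumulate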
2591    implicit none
2592
2593    character(LEN=*), intent(in) :: ident
2594    integer, intent(in) :: imode
2595
2596!***********************************************************************
2597
2598    select case(ident)
2599
2600    case ('timemanager')
2601      if (imode.eq.0) then
2602        call cpu_time(tm_tot_beg)
2603      else
2604        call cpu_time(tm_tot_end)
2605        tm_tot_total = tm_tot_total + (tm_tot_end-tm_tot_beg)
2606      end if
2607
2608    case ('wetdepo')
2609      if (imode.eq.0) then
2610        mp_wetdepo_wtime_beg = mpi_wtime()
2611        call cpu_time(mp_wetdepo_time_beg)
2612      else
2613        mp_wetdepo_wtime_end = mpi_wtime()
2614        call cpu_time(mp_wetdepo_time_end)
2615
2616        mp_wetdepo_wtime_total = mp_wetdepo_wtime_total + &
2617             &(mp_wetdepo_wtime_end - mp_wetdepo_wtime_beg)
2618        mp_wetdepo_time_total = mp_wetdepo_time_total + &
2619             &(mp_wetdepo_time_end - mp_wetdepo_time_beg)
2620      end if
2621
2622    case ('advance')
2623      if (imode.eq.0) then
2624        mp_advance_wtime_beg = mpi_wtime()
2625      else
2626        mp_advance_wtime_end = mpi_wtime()
2627
2628        mp_advance_wtime_total = mp_advance_wtime_total + &
2629             &(mp_advance_wtime_end - mp_advance_wtime_beg)
2630      end if
2631
2632    case ('getfields')
2633      if (imode.eq.0) then
2634        mp_getfields_wtime_beg = mpi_wtime()
2635        call cpu_time(mp_getfields_time_beg)
2636      else
2637        mp_getfields_wtime_end = mpi_wtime()
2638        call cpu_time(mp_getfields_time_end)
2639
2640        mp_getfields_wtime_total = mp_getfields_wtime_total + &
2641             &(mp_getfields_wtime_end - mp_getfields_wtime_beg)
2642        mp_getfields_time_total = mp_getfields_time_total + &
2643             &(mp_getfields_time_end - mp_getfields_time_beg)
2644      end if
2645
2646    case ('partloop1')
2647      if (imode.eq.0) then
2648        call cpu_time(tm_nploop_beg)
2649      else
2650        call cpu_time(tm_nploop_end)
2651        tm_nploop_total = tm_nploop_total + (tm_nploop_end - tm_nploop_beg)
2652      end if
2653
2654    case ('conccalc')
2655      if (imode.eq.0) then
2656        call cpu_time(mp_conccalc_time_beg)
2657      else
2658        call cpu_time(mp_conccalc_time_end)
2659        mp_conccalc_time_total = mp_conccalc_time_total + mp_conccalc_time_end - &
2660             &mp_conccalc_time_beg
2661      end if
2662
2663    case ('rootonly')
2664      if (imode.eq.0) then
2665        call cpu_time(mp_root_time_beg)
2666        mp_root_wtime_beg = mpi_wtime()
2667      else
2668        call cpu_time(mp_root_time_end)
2669        mp_root_wtime_end = mpi_wtime()
2670        mp_root_time_total = mp_root_time_total + &
2671             &(mp_root_time_end - mp_root_time_beg)
2672        mp_root_wtime_total = mp_root_wtime_total + &
2673             &(mp_root_wtime_end - mp_root_wtime_beg)
2674      end if
2675
2676    case ('iotime')
2677      if (imode.eq.0) then
2678        mp_io_wtime_beg = mpi_wtime()
2679        call cpu_time(mp_io_time_beg)
2680      else
2681        mp_io_wtime_end = mpi_wtime()
2682        call cpu_time(mp_io_time_end)
2683
2684        mp_io_wtime_total = mp_io_wtime_total + (mp_io_wtime_end - &
2685             & mp_io_wtime_beg)
2686        mp_io_time_total = mp_io_time_total + (mp_io_time_end - &
2687             & mp_io_time_beg)
2688      end if
2689
2690    case ('verttransform')
2691      if (imode.eq.0) then
2692        mp_vt_wtime_beg = mpi_wtime()
2693        call cpu_time(mp_vt_time_beg)
2694      else
2695        mp_vt_wtime_end = mpi_wtime()
2696        call cpu_time(mp_vt_time_end)
2697
2698        mp_vt_wtime_total = mp_vt_wtime_total + (mp_vt_wtime_end - &
2699             & mp_vt_wtime_beg)
2700        mp_vt_time_total = mp_vt_time_total + (mp_vt_time_end - &
2701             & mp_vt_time_beg)
2702      end if
2703
2704    case ('readwind')
2705      if (imode.eq.0) then
2706        call cpu_time(mp_readwind_time_beg)
2707        mp_readwind_wtime_beg = mpi_wtime()
2708      else
2709        call cpu_time(mp_readwind_time_end)
2710        mp_readwind_wtime_end = mpi_wtime()
2711
2712        mp_readwind_time_total = mp_readwind_time_total + &
2713             &(mp_readwind_time_end - mp_readwind_time_beg)
2714        mp_readwind_wtime_total = mp_readwind_wtime_total + &
2715             &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
2716      end if
2717
2718    case ('commtime')
2719      if (imode.eq.0) then
2720        call cpu_time(mp_comm_time_beg)
2721        mp_comm_wtime_beg = mpi_wtime()
2722      else
2723        call cpu_time(mp_comm_time_end)
2724        mp_comm_wtime_end = mpi_wtime()
2725        mp_comm_time_total = mp_comm_time_total + &
2726             &(mp_comm_time_end - mp_comm_time_beg)
2727        mp_comm_wtime_total = mp_comm_wtime_total + &
2728             &(mp_comm_wtime_end - mp_comm_wtime_beg)
2729      end if
2730
2731    case ('flexpart')
2732      if (imode.eq.0) then
2733        mp_total_wtime_beg=mpi_wtime()
2734      else
2735        mp_total_wtime_end=mpi_wtime()
2736        mp_total_wtime_total = mp_total_wtime_end-mp_total_wtime_beg
2737      end if
2738
2739    case default
2740      write(*,*) 'mpi_mod::mpif_mtime> WARNING: unknown case identifier'
2741
2742    end select
2743
2744
2745  end subroutine mpif_mtime
2746
2747
2748  subroutine mpif_finalize
2749!***********************************************************************
2750! Finalize MPI
2751! Optionally print detailed time diagnostics
2752!***********************************************************************
2753    implicit none
2754
2755    integer :: ip !,j,r
2756
2757!***********************************************************************
2758
2759    IF (mp_measure_time) THEN
2760      do ip=0, mp_np-1
2761        call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
2762
2763        if (ip == mp_pid) then
2764          write(*,FMT='(72("#"))')
2765          write(*,FMT='(A60,I3)') 'STATISTICS FOR MPI PROCESS:', mp_pid
2766          if (ip == 0) then
2767            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR ROOT-ONLY (PID 0) &
2768                 &SECTIONS:', mp_root_time_total
2769            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ROOT-ONLY (PID 0) &
2770                 &SECTIONS:', mp_root_wtime_total
2771          end if
2772          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FLEXPART:',&
2773               & mp_total_wtime_total
2774          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI COMMUNICATIONS:',&
2775               & mp_comm_time_total
2776          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI COMMUNICATIONS:',&
2777               & mp_comm_wtime_total
2778          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI BARRIERS:',&
2779               & mp_barrier_time_total
2780          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI BARRIERS:',&
2781               & mp_barrier_wtime_total
2782          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER:',&
2783               & tm_tot_total
2784          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER/NUMPART LOOP:',&
2785               & tm_nploop_total
2786          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ADVANCE:',&
2787               & mp_advance_wtime_total
2788          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR GETFIELDS:',&
2789               & mp_getfields_wtime_total
2790          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
2791               & mp_getfields_time_total
2792!          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
2793!               & mp_readwind_wtime_total
2794!          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
2795!               & mp_readwind_time_total
2796          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
2797               & mp_io_wtime_total
2798          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR FILE IO:',&
2799               & mp_io_time_total
2800          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR WETDEPO:',&
2801               & mp_wetdepo_wtime_total
2802          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR WETDEPO:',&
2803               & mp_wetdepo_time_total
2804          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR CONCCALC:',&
2805               & mp_conccalc_time_total
2806! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
2807!      & mp_vt_wtime_total
2808! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
2809!      & mp_vt_time_total
2810! NB: the 'flush' function is possibly a gfortran-specific extension,
2811! comment out if it gives problems
2812!          call flush()
2813        end if
2814      end do
2815    end if
2816
2817! This barrier is only here so that the per-process statistics above are
2817! printed before the final note below (output formatting)
2818    call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
2819
2820    if (lroot.and.mp_measure_time) then
2821      write(*,FMT='(72("#"))')
2822      write(*,*) "To turn off output of time measurements, set "
2823      write(*,*) "    mp_measure_time=.false."
2824      write(*,*) "in file mpi_mod.f90"
2825      write(*,FMT='(72("#"))')
2826    end if
2827
2828! j=mp_pid*nvar_async
2829! In the implementation with 3 fields, the processes may have posted
2830! MPI_Irecv requests that should be cancelled here
2831! if (.not.lmp_sync) then
2832!   r=mp_pid*nvar_async
2833!   do j=r,r+nvar_async-1
2834!     call MPI_Cancel(j,mp_ierr)
2835!     if (mp_ierr /= 0) write(*,*) '#### mpif_finalize::MPI_Cancel> ERROR ####'
2836!   end do
2837! end if
2838
2839    call MPI_FINALIZE(mp_ierr)
2840    if (mp_ierr /= 0) then
2841      write(*,*) '#### mpif_finalize::MPI_FINALIZE> MPI ERROR ', mp_ierr, ' ####'
2842      stop
2843    end if
2844
2845
2846  end subroutine mpif_finalize
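! Shutdown sketch (comments only; the exact call site in the main program is
! an assumption): the total wall-clock timer is stopped before the statistics
! are printed and MPI is shut down, e.g.
!
!   call mpif_mtime('flexpart',1)
!   call mpif_finalize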
2847
2848
2849  subroutine get_lun(my_lun)
2850!***********************************************************************
2851! get_lun:
2852!   Starting from 100, get next free logical unit number
2853!***********************************************************************
2854
2855    implicit none
2856
2857    integer, intent(inout) :: my_lun
2858    integer, save :: free_lun=100
2859    logical :: exists, iopen
2860
2861!***********************************************************************
2862
2863    loop1: do
2864      inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
2865      if (exists .and. .not.iopen) exit loop1
2866      free_lun = free_lun+1
2867    end do loop1
2868    my_lun = free_lun
2869
2870  end subroutine get_lun
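! Usage sketch (comments only; 'iunit' and the file name are hypothetical):
!
!   integer :: iunit
!   call get_lun(iunit)
!   open(UNIT=iunit, FILE='debug.dat', FORM='UNFORMATTED', STATUS='REPLACE')
!
! On compilers supporting Fortran 2008, open(NEWUNIT=iunit, ...) provides the
! same functionality without this helper.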
2871
2872
2873  subroutine write_data_dbg(array_in, array_name, tstep, ident)
2874!***********************************************************************
2875! Write one-dimensional arrays to file (for debugging purposes)
2876!***********************************************************************
2877    implicit none
2878
2879    real, intent(in), dimension(:) :: array_in
2880    integer, intent(in) :: tstep
2881    integer :: lios
2882    character(LEN=*), intent(in) :: ident, array_name
2883
2884    character(LEN=8) :: c_ts
2885    character(LEN=40) :: fn_1, fn_2
2886
2887!***********************************************************************
2888
2889    write(c_ts, FMT='(I8.8,BZ)') tstep
2890    fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
2891    write(c_ts, FMT='(I2.2,BZ)') mp_np
2892    fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'
2893
2894    call get_lun(dat_lun)
2895    open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
2896         FORM='UNFORMATTED', STATUS='REPLACE')
2897    write(UNIT=dat_lun, IOSTAT=lios) array_in
2898    close(UNIT=dat_lun)
2899
2900  end subroutine write_data_dbg
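! Example call (comments only; the array slice, time step variable and
! identifier string are hypothetical):
!
!   call write_data_dbg(uu(:,1,1,1), 'uu_slice', itime, 'before_advance')
!
! This writes one unformatted record to a file named
! <array_name>-<tstep>-<ident>-np<mp_np>.dat, e.g.
! 'uu_slice-00003600-before_advance-np04.dat'.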
2901
2902
2903  subroutine set_fields_synthetic()
2904!*******************************************************************************
2905! DESCRIPTION
2906!   Set all meteorological fields to synthetic (usually constant/homogeneous)
2907!   values.
2908!   Used for validation and error-checking
2909!
2910! NOTE
2911!   This version uses asynchronous communications.
2912!
2913! VARIABLES
2914!   
2915!
2916!
2917!*******************************************************************************
2918    use com_mod
2919
2920    implicit none
2921
2922    integer :: li=1, ui=2 ! wfmem indices (i.e., operate on entire field)
2923
2924    if (.not.lmp_sync) ui=3
2925
2926
2927! Variables transferred at initialization only
2928!*********************************************
2929!    readclouds=readclouds_
2930    oro(:,:)=0.0
2931    excessoro(:,:)=0.0
2932    lsm(:,:)=0.0
2933    xlanduse(:,:,:)=0.0
2934!    wftime
2935!    numbwf
2936!    nmixz
2937!    height
2938
2939! Time-varying fields:
2940    uu(:,:,:,li:ui) = 10.0
2941    vv(:,:,:,li:ui) = 0.0
2942    uupol(:,:,:,li:ui) = 10.0
2943    vvpol(:,:,:,li:ui) = 0.0
2944    ww(:,:,:,li:ui) = 0.0
2945    tt(:,:,:,li:ui) = 300.0
2946    rho(:,:,:,li:ui) = 1.3
2947    drhodz(:,:,:,li:ui) = 0.0
2948    tth(:,:,:,li:ui) = 0.0
2949    qvh(:,:,:,li:ui) = 1.0
2950    qv(:,:,:,li:ui) = 1.0
2951
2952    pv(:,:,:,li:ui) = 1.0
2953    clouds(:,:,:,li:ui) = 0
2954
2955    clwc(:,:,:,li:ui) = 0.0
2956    ciwc(:,:,:,li:ui) = 0.0
2957
2958! 2D fields
2959
2960    cloudsh(:,:,li:ui) = 0
2961    vdep(:,:,:,li:ui) = 0.0
2962    ps(:,:,:,li:ui) = 1.0e5
2963    sd(:,:,:,li:ui) = 0.0
2964    tcc(:,:,:,li:ui) = 0.0
2965    tt2(:,:,:,li:ui) = 300.0
2966    td2(:,:,:,li:ui) = 250.0
2967    lsprec(:,:,:,li:ui) = 0.0
2968    convprec(:,:,:,li:ui) = 0.0
2969    ustar(:,:,:,li:ui) = 1.0
2970    wstar(:,:,:,li:ui) = 1.0
2971    hmix(:,:,:,li:ui) = 10000.0
2972    tropopause(:,:,:,li:ui) = 10000.0
2973    oli(:,:,:,li:ui) = 0.01
2974
2975  end subroutine set_fields_synthetic
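! Illustrative use (comments only; replacing the call to getfields in the time
! loop this way is an assumption, intended for verification runs):
!
!   call set_fields_synthetic
!
! after which particles are advected in a known, homogeneous flow
! (uu=10 m/s, vv=ww=0).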
2976
2977end module mpi_mod