source: flexpart.git/src/mpi_mod.f90 @ 1228ef7

Last change on this file since 1228ef7 was 1228ef7, checked in by Espen Sollum <eso@…>, 3 years ago

MPI: fix for mquasilag output

1! SPDX-FileCopyrightText: FLEXPART 1998-2019, see flexpart_license.txt
2! SPDX-License-Identifier: GPL-3.0-or-later
3
4module mpi_mod
5
6!*****************************************************************************
7!                                                                            *
8!  DESCRIPTION                                                               *
9!    This module contains subroutines and common variables used for the      *
10!    MPI parallelization of FLEXPART.                                        *
11!                                                                            *
12!  NOTE                                                                      *
13!    Depending on the MPI library installed on your system (e.g. mpich2,     *
14!    OpenMPI) you may need to choose below in this file between              *
15!      use mpi                                                               *
16!    (if the MPI library comes with the file 'mpi.mod'); or                  *
17!      include 'mpif.h'                                                      *
18!                                                                            *
19!                                                                            *
20!*****************************************************************************
21!                                                                            *
22! Variables:                                                                 *
23!                                                                            *
24! mp_ierr                 MPI error code                                     *
25! mp_np                   Number of MPI processes                            *
26! mp_pid                  Process ID of each MPI process                     *
27! mp_seed                 Parameter for random number seed                   *
28! read_grp_min            Minimum number of processes at which one will be   *
29!                         used as reader                                     *
30! numpart_mpi,            Number of particles per node                       *
31! maxpart_mpi                                                                *
32! mp_partid               MPI process ID for particle calculation            *
33! mp_partgroup_           Refers to the subset of processors performing      *
34!                         loops over particles. Will be all processes        *
35!                         unless a dedicated process runs getfields/readwind *
36! lmp_sync                If .false., use asynchronous MPI                   *
37! mp_cp                   Real precision to use for deposition fields        *
38!                                                                            *
39!                                                                            *
40!                                                                            *
41!                                                                            *
42!*****************************************************************************
43!                                                                            *
44! Modification by DJM, 2017-05-09 - added #ifdef USE_MPIINPLACE cpp          *
45! directive to mpif_tm_reduce_grid() to ensure that MPI_IN_PLACE is          *
46! used in the MPI_Reduce() only if specifically compiled with that           *
47! directive.                                                                 *
48!                                                                            *
49!*****************************************************************************
50
51  use mpi
52  use par_mod, only: dp,sp
53  use com_mod, only: lroot
54
55  implicit none
56
57!  include 'mpif.h'
58
59  public
60
61! Set aside a process for reading windfields if using at least this many processes
62!==================================================
63  integer, parameter, private :: read_grp_min=4
64!==================================================
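! Illustrative example (hypothetical run): with read_grp_min=4 and an MPI job of
! mp_np=4 processes, the last rank (id_read=mp_np-1, set in mpif_init below) is
! dedicated to getfields/readwind while ranks 0-2 form the particle subgroup;
! with mp_np=3 no reader is set aside and lmp_sync is forced to .true. in mpif_init.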
65
66! Variables for each MPI process in the world group
67  integer :: mp_ierr, mp_np, mp_pid, mp_partid
68  integer, private :: world_group_id
69
70! Variables for MPI processes in the 'particle' group
71  integer, allocatable, dimension(:) :: mp_partgroup_rank
72  integer, allocatable, dimension(:) :: npart_per_process
73  integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
74
75  integer :: mp_seed=0
76  integer, parameter :: mp_sp=MPI_REAL4, mp_dp=MPI_REAL8
77  integer :: mp_cp
78  integer, parameter :: id_root=0 ! master process
79
80! MPI tags/requests for send/receive operation
81  integer :: tm1
82  integer, parameter :: nvar_async=26
83!integer, dimension(:), allocatable :: tags
84  integer, dimension(:), allocatable :: reqs
85
86! Status array used for certain MPI operations (MPI_RECV)
87  integer, dimension(MPI_STATUS_SIZE) :: mp_status
88
89
90  integer :: id_read   ! readwind/getfield process
91  integer :: numpart_mpi,maxpart_mpi ! number of particles per node
92  integer :: tot_numpart=0
93  integer :: mp_comm_used ! global or subgroup communicator
94
95  logical :: lmpreader=.false. ! is set to true for reading process(es) only.
96  logical :: lmp_use_reader=.false. ! true if separate readwind process is used
97
98! .true. if only using synchronous MPI send/recv (default)
99! If setting this to .false., numwfmem must be set to 3
100!===============================================================================
101  logical :: lmp_sync=.true.
102!===============================================================================
103
104! mp_dbg_mode       Used for debugging MPI.
105! mp_dev_mode       various checks related to debugging the parallel code
106! mp_dbg_out        write some arrays to data file for debugging
107! mp_measure_time   Measure cpu/wall time, write out at end of run
108! mp_time_barrier   Measure MPI barrier time
109! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
110!                   to standard output (this does not otherwise affect the simulation)
111  logical, parameter :: mp_dbg_mode = .false.
112  logical, parameter :: mp_dev_mode = .false.
113  logical, parameter :: mp_dbg_out = .false.
114  logical, parameter :: mp_time_barrier=.true.
115  logical, parameter :: mp_measure_time=.false.
116  logical, parameter :: mp_exact_numpart=.true.
117
118! for measuring CPU/Wall time
119  real(sp),private :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.
120  real(dp),private :: mp_comm_wtime_beg, mp_comm_wtime_end, mp_comm_wtime_total=0.
121  real(sp),private :: mp_root_time_beg, mp_root_time_end, mp_root_time_total=0.
122  real(dp),private :: mp_root_wtime_beg, mp_root_wtime_end, mp_root_wtime_total=0.
123  real(sp),private :: mp_barrier_time_beg, mp_barrier_time_end, mp_barrier_time_total=0.
124  real(dp),private :: mp_barrier_wtime_beg, mp_barrier_wtime_end, mp_barrier_wtime_total=0.
125  real(sp),private :: tm_nploop_beg, tm_nploop_end, tm_nploop_total=0.
126  real(sp),private :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
127  real(dp),private :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
128  real(sp),private :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
129  real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
130  real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
131  real(dp),private :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
132  real(sp),private :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
133  real(dp),private :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.
134  real(sp),private :: mp_wetdepo_time_beg, mp_wetdepo_time_end, mp_wetdepo_time_total=0.
135  real(dp),private :: mp_advance_wtime_beg, mp_advance_wtime_end, mp_advance_wtime_total=0.
136  real(dp),private :: mp_conccalc_time_beg, mp_conccalc_time_end, mp_conccalc_time_total=0.
137  real(dp),private :: mp_total_wtime_beg, mp_total_wtime_end, mp_total_wtime_total=0.
138  real(dp),private :: mp_vt_wtime_beg, mp_vt_wtime_end, mp_vt_wtime_total=0.
139  real(sp),private :: mp_vt_time_beg, mp_vt_time_end, mp_vt_time_total=0.
140
141! dat_lun           logical unit number for i/o
142  integer, private :: dat_lun
143
144! Temporary arrays for particles (allocated and deallocated as needed)
145  integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, &
146       & itramem_tmp, itrasplit_tmp
147  real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp
148  real, allocatable, dimension(:) :: ztra1_tmp
149  real, allocatable, dimension(:,:) :: xmass1_tmp
150
151! mp_redist_fract        Exchange particles between processes if relative difference
152!                        is larger. A good value is between 0.0 and 0.5
153! mp_maxpart_factor      Allocate more memory per process than strictly needed
154!                        (safety factor). Recommended value between 1.5 and 2.5
155! mp_min_redist          Do not redistribute particles if below this limit
156  real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5
157  integer,parameter :: mp_min_redist=100000
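! Illustrative example (hypothetical counts): with the defaults above, two
! processes holding 500000 and 350000 particles trigger a transfer, since
! 500000 > mp_min_redist and (500000-350000)/500000 = 0.3 > mp_redist_fract;
! half the difference (75000 particles) is then moved from the fuller process
! (see mpif_calculate_part_redist below).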
158
159
160contains
161
162  subroutine mpif_init
163!***********************************************************************
164! DESCRIPTION
165!   Initialize MPI.
166!   
167!   Create the global communicator MPI_COMM_WORLD
168!   If using a separate MPI process for getfields/readwind, a subgroup
169!   is created for the other processes.
170!
171! VARIABLES
172!   mpi_mode    default 0, set to 2/3 if running MPI version
173!   mp_np       number of running processes, decided at run-time
174!***********************************************************************
175    use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
176    use com_mod, only: mpi_mode, verbosity, creceptor0
177
178    implicit none
179
180    integer :: i,j,s
181
182! Each process gets an ID (mp_pid) in the range 0,..,mp_np-1
183    call MPI_INIT(mp_ierr)
184    if (mp_ierr /= 0) goto 100
185    call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)
186    if (mp_ierr /= 0) goto 100
187    call MPI_COMM_SIZE(MPI_COMM_WORLD, mp_np, mp_ierr)
188    if (mp_ierr /= 0) goto 100
189
190
191! Variable mpi_mode is used to handle subroutines common to parallel/serial version
192    if (lmp_sync) then
193      mpi_mode=2 ! hold 2 windfields in memory
194    else
195      mpi_mode=3 ! hold 3 windfields in memory
196    end if
197
198    if (mp_pid.ne.0) then
199      lroot = .false.
200    end if
201
202! Set MPI precision to use for transferring deposition fields
203!************************************************************
204    if (dep_prec==dp) then
205      mp_cp = MPI_REAL8
206! TODO: write info message for serial version as well
207      if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields'
208    else if (dep_prec==sp) then
209      mp_cp = MPI_REAL4
210      if (lroot.and.verbosity>0) write(*,*) 'Using single precision for deposition fields'
211    else
212      write(*,*) 'ERROR: something went wrong setting MPI real precision'
213      stop
214    end if
215
216! Check for sensible combination of parameters
217!*********************************************
218
219    if (.not.lmp_sync.and.numwfmem.ne.3) then
220      if (lroot) then
221        write(*,FMT='(80("#"))')
222        write(*,*) '#### mpi_mod::mpif_init> ERROR: ', &
223             & 'numwfmem must be set to 3 for asynchronous reading ####'
224        write(*,FMT='(80("#"))')
225      end if
226      call MPI_FINALIZE(mp_ierr)
227      stop
228    else if (lmp_sync.and.numwfmem.ne.2.and.lroot) then
229      write(*,FMT='(80("#"))')
230      write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
231           & 'numwfmem should be set to 2 for synchronous'
232      write(*,*) ' reading. Results will still be valid, but unnecessary memory &
233           &is allocated.'
234      write(*,FMT='(80("#"))')
235! Force "synchronized" version if all processes will call getfields
236    else if ((.not.lmp_sync.and.mp_np.lt.read_grp_min).or.(mp_np.eq.1)) then
237      if (lroot) then
238        write(*,FMT='(80("#"))')
239        write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
240             & 'all procs call getfields. Setting lmp_sync=.true.'
241        write(*,FMT='(80("#"))')
242      end if
243      lmp_sync=.true.
244    end if
245
246! TODO: Add more warnings for unimplemented flexpart features
247
248! Set ID of process that calls getfield/readwind.
249! Using the last process in the group ensures statistically identical results
250! to running with one process less but without a separate read process
251!**********************************************************************
252
253!    id_read = min(mp_np-1, 1)
254    id_read = mp_np-1
255
256    if (mp_pid.eq.id_read) lmpreader=.true.
257
258    call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)
259
260! Create a MPI group/communicator that will calculate trajectories.
261! Skip this step if program is run with only a few processes
262!************************************************************************
263    allocate(mp_partgroup_rank(0:mp_np-2))
264
265! This allows checking for allocation of arrays when no subgroup is used
266    mp_partgroup_pid=0
267
268    if (read_grp_min.lt.2) then
269      write(*,*) '#### mpi_mod::mpif_init> ERROR ####', &
270           & 'read_grp_min should be at least 2. Exiting'
271      stop
272    end if
273
274    if (mp_np.ge.read_grp_min) then
275      lmp_use_reader = .true.
276
277! Map the subgroup IDs = 0,1,2,...,mp_np-2 onto world ranks, skipping the reader process
278      j=-1
279      do i=0, mp_np-2 !loop over all (subgroup) IDs
280        j=j+1
281        if (i.eq.id_read) then
282          j=j+1
283        end if
284        mp_partgroup_rank(i) = j
285      end do
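! Illustrative example (hypothetical values): with the default id_read=mp_np-1 the
! mapping above is the identity, e.g. mp_np=4 gives mp_partgroup_rank=(/0,1,2/).
! If id_read were instead 1 (cf. the commented-out line above), that rank would be
! skipped and the mapping would be mp_partgroup_rank=(/0,2,3/).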
286
287      call MPI_Group_incl (world_group_id, mp_np-1, mp_partgroup_rank, &
288           &mp_partgroup_pid, mp_ierr)
289      if (mp_ierr /= 0) goto 100
290      call MPI_Comm_create (MPI_COMM_WORLD, mp_partgroup_pid, mp_partgroup_comm, mp_ierr)
291      if (mp_ierr /= 0) goto 100
292
293      if (mp_pid.ne.id_read) then
294        call MPI_Comm_rank (mp_partgroup_comm, mp_partgroup_pid, mp_ierr)
295        if (mp_ierr /= 0) goto 100
296
297! Get size of new group (=mp_np-1)
298        call MPI_COMM_SIZE(mp_partgroup_comm, mp_partgroup_np, mp_ierr)
299        if (mp_ierr /= 0) goto 100
300        if (mp_partgroup_np.ne.mp_np-1) then
301          write(*,*)  '#### mpi_mod:: mpif_init> ERROR ####&
302               & mp_partgroup_np.ne.mp_np-1'
303          stop
304        endif
305
306      else
307        mp_partgroup_pid = -1
308      end if
309    end if
310
311    if (lmp_use_reader) then
312      mp_comm_used = mp_partgroup_comm
313      mp_partid = mp_partgroup_pid
314      mp_partgroup_np=mp_np-1
315    else
316      mp_comm_used = MPI_COMM_WORLD
317      mp_partgroup_np = mp_np
318      mp_partid = mp_pid
319    end if
320
321! Set maxpart per process
322! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
323    maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
324    if (mp_np == 1) maxpart_mpi = maxpart
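! Illustrative example (hypothetical values): maxpart=10000000 and mp_partgroup_np=4
! give maxpart_mpi=int(1.5*10000000/4)=3750000, i.e. each compute process reserves
! 50% more particle slots than a perfectly even distribution would need.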
325
326! Do not allocate particle data arrays for readwind process
327    if (lmpreader.and.lmp_use_reader) then
328      maxpart_mpi=0
329    end if
330
331! Set random seed for each non-root process
332    if (mp_pid.gt.0) then
333      s = 244
334      mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
335    end if
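! Note: the expression above gives each non-root process a process-dependent
! negative seed with magnitude below the modulus 104729, while the root process
! keeps its default mp_seed=0; this assumes the random-number routines elsewhere
! in FLEXPART expect one independent seed per process.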
336    if (mp_dbg_mode) then
337      mp_seed=0
338      if (lroot) write(*,*) 'MPI: setting seed=0'
339    end if
340
341! Allocate request array for asynchronous MPI
342    if (.not.lmp_sync) then
343      allocate(reqs(0:nvar_async*mp_np-1))
344      reqs(:)=MPI_REQUEST_NULL
345    else
346      allocate(reqs(0:1))
347      reqs(:)=MPI_REQUEST_NULL
348    end if
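! Illustrative example (hypothetical values): in asynchronous mode with mp_np=8,
! nvar_async*mp_np = 26*8 = 208 request handles are reserved, i.e. one handle per
! asynchronously transferred field per process, so the corresponding non-blocking
! sends/receives can be completed later with a wait call.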
349
350! Allocate array for number of particles per process   
351    allocate(npart_per_process(0:mp_partgroup_np-1))
352
353! Write whether MPI_IN_PLACE is used or not
354#ifdef USE_MPIINPLACE
355    if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
356#else
357    if (lroot) allocate(creceptor0(maxreceptor,maxspec))
358    if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
359#endif
360    goto 101
361
362100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
363    stop
364
365101 end subroutine mpif_init
366
367
368  subroutine mpif_mpi_barrier
369!***********************************************************************
370! Set MPI barrier (all processes are synchronized here), with the
371! possible exception of a separate 'readwind' process.
372! Optionally measure cpu/wall time.
373!
374!***********************************************************************
375    implicit none
376
377    if (mp_time_barrier) then
378      call cpu_time(mp_barrier_time_beg)
379      mp_barrier_wtime_beg = mpi_wtime()
380    endif
381
382    call MPI_BARRIER(mp_comm_used, mp_ierr)
383    if (mp_ierr /= 0) goto 100
384
385    if (mp_time_barrier) then
386      call cpu_time(mp_barrier_time_end)
387      mp_barrier_wtime_end = mpi_wtime()
388
389      mp_barrier_time_total=mp_barrier_time_total+&
390           &(mp_barrier_time_end-mp_barrier_time_beg)
391      mp_barrier_wtime_total=mp_barrier_wtime_total+&
392           &(mp_barrier_wtime_end-mp_barrier_wtime_beg)
393    end if
394
395    goto 101
396
397100 write(*,*) '#### mpi_mod::mpif_mpi_barrier> ERROR ####', mp_ierr
398    stop
399
400101 end subroutine mpif_mpi_barrier
401
402
403  subroutine mpif_com_mod_allocate
404!*******************************************************************************   
405! Dynamic allocation of arrays (in serial code these have size maxpart)
406!
407! Not in use anymore, moved to com_mod for interoperability with serial version
408!
409! TODO: remove
410!*******************************************************************************
411    use com_mod
412
413    implicit none
414
415    integer :: array_size
416
417    array_size = maxpart_mpi
418
419    allocate(itra1(array_size),npoint(array_size),&
420         & nclass(array_size),idt(array_size),&
421         & itramem(array_size),itrasplit(array_size),&
422         & xtra1(array_size),ytra1(array_size),&
423         & ztra1(array_size),xmass1(array_size, maxspec))
424
425    allocate(uap(array_size),ucp(array_size),&
426         & uzp(array_size),us(array_size),&
427         & vs(array_size),&
428         & ws(array_size),cbt(array_size))
429
430
431  end subroutine mpif_com_mod_allocate
432
433
434  subroutine mpif_tm_send_dims
435!***********************************************************************
436! Distribute array dimensions from pid0 to all processes.
437! numpart_mpi/numpart is sent to allow dynamic allocation
438!
439! Currently not in use.
440!
441! Variables of similar type (integer, real) are placed in an array
442! and sent collectively, to avoid the overhead associated with individual
443! MPI send/receives
444!
445!
446!***********************************************************************
447    use com_mod
448
449    implicit none
450
451    integer :: add_part=0
452
453    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
454
455! MPI subgroup does the particle-loop
456    if (lmp_use_reader) then
457      if (mod(numpart,mp_partgroup_np).ne.0) add_part=1
458      numpart_mpi=int(numpart/mp_partgroup_np)+add_part
459    else
460      if (mod(numpart,mp_np).ne.0) add_part=1
461      numpart_mpi=int(numpart/mp_np)+add_part
462    end if
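! Illustrative example (hypothetical values): numpart=1000003 shared among
! mp_partgroup_np=4 compute processes gives add_part=1 and
! numpart_mpi = int(1000003/4)+1 = 250001, so 4*250001 >= 1000003 and no
! particle is lost to the integer division.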
463
464
465! redefine numpart as 'numpart per process' throughout the code
466!**************************************************************
467    numpart = numpart_mpi
468
469  end subroutine mpif_tm_send_dims
470
471
472  subroutine mpif_send_part_properties(num_part)
473!***********************************************************************
474! Distribute particle properties from root to all processes.
475
476! Used by init_domainfill_mpi
477!
478! Variables:
479!
480! num_part        input, number of particles per process (rounded up)
481!
482!***********************************************************************
483    use com_mod
484
485    implicit none
486
487    integer,intent(in) :: num_part
488    integer :: i
489
490! Exit if too many particles
491    if (num_part.gt.maxpart_mpi) then
492      write(*,*) '#####################################################'
493      write(*,*) '#### ERROR - TOTAL NUMBER OF PARTICLES REQUIRED  ####'
494      write(*,*) '#### EXCEEDS THE MAXIMUM ALLOWED NUMBER. REDUCE  ####'
495      write(*,*) '#### EITHER NUMBER OF PARTICLES PER RELEASE POINT####'
496      write(*,*) '#### OR INCREASE MAXPART.                        ####'
497      write(*,*) '#####################################################'
498!      call MPI_FINALIZE(mp_ierr)
499      stop
500    end if
501
502
503! Time for MPI communications
504!****************************
505    if (mp_measure_time) call mpif_mtime('commtime',0)
506
507! Distribute variables, send from pid 0 to other processes (including itself)
508!****************************************************************************
509
510    call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,&
511         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
512    if (mp_ierr /= 0) goto 600
513    call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,&
514         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
515    if (mp_ierr /= 0) goto 600
516    call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,&
517         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
518    if (mp_ierr /= 0) goto 600
519    call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,&
520         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
521    if (mp_ierr /= 0) goto 600
522    call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,&
523         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
524    if (mp_ierr /= 0) goto 600
525    call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,&
526         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
527    if (mp_ierr /= 0) goto 600
528    call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,&
529         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
530    if (mp_ierr /= 0) goto 600
531    call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,&
532         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
533    if (mp_ierr /= 0) goto 600
534    call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,&
535         &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
536    if (mp_ierr /= 0) goto 600
537    do i=1,nspec
538      call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),&
539           &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
540      if (mp_ierr /= 0) goto 600
541    end do
542
543    if (mp_measure_time) call mpif_mtime('commtime',1)
544
545    goto 601
546
547600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
548    stop
549
550! After the transfer of particles it is possible that the value of
551! "numpart" is larger than the number actually used, so we reduce it if there are
552! invalid particles at the end of the arrays
553
554601 do i=numpart, 1, -1
555      if (itra1(i).eq.-999999999) then
556        numpart=numpart-1
557      else
558        exit
559      end if
560    end do
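! Illustrative example: if the last two slots scattered to this process carry the
! 'invalid particle' marker itra1 = -999999999 (also used in mpif_redist_part
! below to terminate transferred particles), the loop above reduces numpart by 2
! so that subsequent particle loops skip them.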
561
562
563  end subroutine mpif_send_part_properties
564
565
566  subroutine mpif_calculate_part_redist(itime)
567!***********************************************************************
568! Calculate number of particles to redistribute between processes. This routine
569! can be called at regular time intervals to keep a balanced number of
570! particles on each process.
571!
572! First, all processes report their local "numpart" to each other, which is
573! stored in the array "npart_per_process". This array is sorted from low to
574! high values, along with a corresponding process ID array "idx_arr(np)".
575! If the relative difference between the highest and lowest "numpart_mpi" value
576! is above a threshold, particles are transferred from process idx_arr(np-1)
577! to process idx_arr(0). Repeat for processes idx_arr(np-i) and idx_arr(i)
578! for all valid i.
579! Note: If np is an odd number, the process with the median
580! number of particles is left unchanged
581!
582! VARIABLES
583! itime        input, current time
584!***********************************************************************
585    use com_mod
586
587    implicit none
588   
589    integer, intent(in) :: itime
590    real :: pmin,z
591    integer :: i,jj,nn,m,imin, num_trans
592    integer,allocatable,dimension(:) :: idx_arr
593    real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this
594
595! Exit if running with only 1 process
596!************************************************************************
597    if (mp_np.eq.1) return
598
599! All processes exchange information on number of particles
600!****************************************************************************
601    allocate( idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
602
603    call MPI_Allgather(numpart, 1, MPI_INTEGER, npart_per_process, &
604         & 1, MPI_INTEGER, mp_comm_used, mp_ierr)
605
606
607! Sort from lowest to highest
608! Initial guess: correct order
609    sorted(:) = npart_per_process(:)
610    do i=0, mp_partgroup_np-1
611      idx_arr(i) = i
612    end do
613
614! Do not rebalance particles for ipout=3 or mquasilag=1
615    if ((ipout.eq.3).or.(mquasilag.eq.1)) return
616
617! For each successive element in index array, see if a lower value exists
618    do i=0, mp_partgroup_np-2
619      pmin=sorted(i)
620      imin=idx_arr(i)
621      m=i+1
622      do jj=m, mp_partgroup_np-1
623        if (pmin.le.sorted(jj)) cycle
624        z=pmin
625        pmin=sorted(jj)
626        sorted(jj)=z
627
628        nn=imin
629        imin=idx_arr(jj)
630        idx_arr(jj)=nn
631      end do
632      sorted(i)=pmin
633      idx_arr(i)=imin
634    end do
635
636! For each pair of processes, transfer particles if the difference is above a
637! limit and numpart at the sending process is large enough
638
639    m=mp_partgroup_np-1 ! index for last sorted process (most particles)
640    do i=0,mp_partgroup_np/2-1
641      num_trans = npart_per_process(idx_arr(m)) - npart_per_process(idx_arr(i))
642      if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
643        if ( npart_per_process(idx_arr(m)).gt.mp_min_redist.and.&
644             & real(num_trans)/real(npart_per_process(idx_arr(m))).gt.mp_redist_fract) then
645! DBG
646          ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process', &
647          !      &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process
648! DBG
649          call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
650        end if
651      end if
652      m=m-1
653    end do
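! Illustrative example (hypothetical counts): npart_per_process=(/300000,100000,200000/)
! sorts to idx_arr=(/1,2,0/). The first pairing compares process 0 (most particles)
! with process 1 (fewest): num_trans=200000, and since 300000 > mp_min_redist and
! 200000/300000 = 0.67 > mp_redist_fract, 100000 particles are sent from process 0
! to process 1, while the middle process is left unchanged.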
654
655    deallocate(idx_arr, sorted)
656
657  end subroutine mpif_calculate_part_redist
658
659
660  subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans)
661!***********************************************************************
662! Transfer particle properties between two arbitrary processes.
663!
664! VARIABLES
665!
666! itime           input, current time
667! src_proc        input, ID of source process
668! dest_proc       input, ID of destination process
669! num_trans       input, number of particles to transfer
670!
671!************************************************************************
672    use com_mod !TODO:  ,only: nclass etc
673
674    implicit none
675
676    integer, intent(in) :: itime, src_proc, dest_proc, num_trans
677    integer :: ll, ul ! lower and upper indices in arrays
678    integer :: arr_sz ! size of temporary arrays
679    integer :: mtag   ! MPI message tag
680    integer :: i, j, minpart, ipart, maxnumpart
681 
682! Check for invalid input arguments
683!**********************************
684    if (src_proc.eq.dest_proc) then
685      write(*,*) '<mpi_mod::mpif_redist_part>: Error: &
686           &src_proc.eq.dest_proc'
687      stop
688    end if
689
690! Measure time for MPI communications
691!************************************
692    if (mp_measure_time) call mpif_mtime('commtime',0)
693
694! Send particles
695!***************
696    if (mp_partid.eq.src_proc) then
697      mtag = 2000
698
699! Array indices for the transferred particles
700      ll=numpart-num_trans+1
701      ul=numpart
702
703      if (mp_dev_mode) then
704        write(*,FMT='(72("#"))')
705        write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc
706        write(*,*) "numpart before: ", numpart
707      end if
708
709      call MPI_SEND(nclass(ll:ul),num_trans,&
710           & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr)
711
712      call MPI_SEND(npoint(ll:ul),num_trans,&
713           & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr)
714
715      call MPI_SEND(itra1(ll:ul),num_trans, &
716           & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr)
717
718      call MPI_SEND(idt(ll:ul),num_trans, &
719           & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr)
720
721      call MPI_SEND(itramem(ll:ul),num_trans, &
722           & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr)
723
724      call MPI_SEND(itrasplit(ll:ul),num_trans,&
725           & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr)
726
727      call MPI_SEND(xtra1(ll:ul),num_trans, &
728           & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr)
729
730      call MPI_SEND(ytra1(ll:ul),num_trans,&
731           & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr)
732
733      call MPI_SEND(ztra1(ll:ul),num_trans,&
734           & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr)
735
736      do j=1,nspec
737        call MPI_SEND(xmass1(ll:ul,j),num_trans,&
738             & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr)
739      end do
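! Note on the tag scheme: mtag+1 ... mtag+9 label the fixed per-particle variables
! and mtag+9+j labels species j, e.g. with nspec=2 the tags 2001-2011 are used, so
! each receive posted below matches exactly one of the sends above.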
740
741! Terminate transferred particles, update numpart
742      itra1(ll:ul) = -999999999
743
744      numpart = numpart-num_trans
745
746      if (mp_dev_mode) then
747        write(*,*) "numpart after: ", numpart
748        write(*,FMT='(72("#"))')
749      end if
750
751    else if (mp_partid.eq.dest_proc) then
752
753! Receive particles
754!******************
755      mtag = 2000
756      ! Array indices for the transferred particles
757      ll=numpart+1
758      ul=numpart+num_trans
759
760      if (mp_dev_mode) then
761        write(*,FMT='(72("#"))')
762        write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc
763        write(*,*) "numpart before: ", numpart
764      end if
765
766! We could receive the data directly at nclass(ll:ul) etc., but this leaves
767! vacant spaces at lower indices. Using temporary arrays instead.
768      arr_sz = ul-ll+1
769      allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),&
770           & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),&
771           & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),&
772           & xmass1_tmp(arr_sz, maxspec))
773     
774      call MPI_RECV(nclass_tmp,num_trans,&
775           & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr)
776
777      call MPI_RECV(npoint_tmp,num_trans,&
778           & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr)
779
780      call MPI_RECV(itra1_tmp,num_trans, &
781           & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr)
782
783      call MPI_RECV(idt_tmp,num_trans, &
784           & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr)
785
786      call MPI_RECV(itramem_tmp,num_trans, &
787           & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr)
788
789      call MPI_RECV(itrasplit_tmp,num_trans,&
790           & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr)
791
792      call MPI_RECV(xtra1_tmp,num_trans, &
793           & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr)
794
795      call MPI_RECV(ytra1_tmp,num_trans,&
796           & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr)
797
798      call MPI_RECV(ztra1_tmp,num_trans,&
799           & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr)
800
801      do j=1,nspec
802        call MPI_RECV(xmass1_tmp(:,j),num_trans,&
803             & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr)
804      end do
805
806! This is the maximum value numpart can possibly have after the transfer
807      maxnumpart=numpart+num_trans
808
809! Search for vacant space and copy from temporary storage
810!********************************************************
811      minpart=1
812      do i=1, num_trans
813! Take into account that we may have transferred invalid particles
814        if (itra1_tmp(i).ne.itime) cycle
815        do ipart=minpart,maxnumpart
816          if (itra1(ipart).ne.itime) then
817            itra1(ipart) = itra1_tmp(i)
818            npoint(ipart) = npoint_tmp(i)
819            nclass(ipart) = nclass_tmp(i)
820            idt(ipart) = idt_tmp(i)
821            itramem(ipart) = itramem_tmp(i)
822            itrasplit(ipart) = itrasplit_tmp(i)
823            xtra1(ipart) = xtra1_tmp(i)
824            ytra1(ipart) = ytra1_tmp(i)
825            ztra1(ipart) = ztra1_tmp(i)
826            xmass1(ipart,:) = xmass1_tmp(i,:)
827            goto 200 ! Storage space has been found, stop searching
828          end if
829! :TODO: add check for whether too many particles are required
830        end do
831200     minpart=ipart+1
832      end do
833! Increase numpart, if necessary
834      numpart=max(numpart,ipart)
835
836      deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
837           &itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
838
839      if (mp_dev_mode) then
840        write(*,*) "numpart after: ", numpart
841        write(*,FMT='(72("#"))')
842      end if
843
844    else
845! This routine should only be called by the two participating processes
846      write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part"
847      stop
848      return
849    end if
850
851! Measure time for MPI communications
852!************************************
853    if (mp_measure_time) call mpif_mtime('commtime',1)
854
855  end subroutine mpif_redist_part
856
857
858  subroutine mpif_tm_send_vars
859!***********************************************************************
860! Distribute particle variables from pid0 to all processes.
861! Called from timemanager
862! *NOT IN USE* at the moment, but can be useful for debugging
863!
864!***********************************************************************
865    use com_mod
866
867    implicit none
868
869    integer :: i
870
871! Time for MPI communications
872!****************************
873    if (mp_measure_time) call mpif_mtime('commtime',0)
874
875! Distribute variables, send from pid 0 to other processes
876!**********************************************************************
877
878! integers
879    if (lroot) then
880      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
881           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
882      if (mp_ierr /= 0) goto 600
883      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
884           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
885      if (mp_ierr /= 0) goto 600
886      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
887           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
888      if (mp_ierr /= 0) goto 600
889      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
890           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
891      if (mp_ierr /= 0) goto 600
892      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
893           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
894      if (mp_ierr /= 0) goto 600
895
896! int2
897      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
898           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
899      if (mp_ierr /= 0) goto 600
900
901! real
902      call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,&
903           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
904      if (mp_ierr /= 0) goto 600
905      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
906           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
907      if (mp_ierr /= 0) goto 600
908      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
909           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
910      if (mp_ierr /= 0) goto 600
911
912      call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,&
913           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
914      if (mp_ierr /= 0) goto 600
915      call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,&
916           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
917      if (mp_ierr /= 0) goto 600
918      call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,&
919           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
920      if (mp_ierr /= 0) goto 600
921
922      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
923           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
924      if (mp_ierr /= 0) goto 600
925      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
926           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
927      if (mp_ierr /= 0) goto 600
928      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,&
929           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
930      if (mp_ierr /= 0) goto 600
931
932      do i=1,nspec
933        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,&
934             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
935        if (mp_ierr /= 0) goto 600
936      end do
937
938    else ! (mp_pid >= 1)
939! integers
940      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
941           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
942      if (mp_ierr /= 0) goto 600
943      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
944           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
945      if (mp_ierr /= 0) goto 600
946      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
947           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
948      if (mp_ierr /= 0) goto 600
949      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
950           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
951      if (mp_ierr /= 0) goto 600
952      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
953           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
954      if (mp_ierr /= 0) goto 600
955
956! int2
957      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
958           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
959      if (mp_ierr /= 0) goto 600
960
961! reals
962      call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,&
963           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
964      if (mp_ierr /= 0) goto 600
965      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,&
966           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
967      if (mp_ierr /= 0) goto 600
968      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,&
969           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
970      if (mp_ierr /= 0) goto 600
971
972      call MPI_SCATTER(us,numpart_mpi,mp_sp,us,&
973           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
974      if (mp_ierr /= 0) goto 600
975      call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,&
976           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
977      if (mp_ierr /= 0) goto 600
978      call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,&
979           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
980      if (mp_ierr /= 0) goto 600
981
982      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
983           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
984      if (mp_ierr /= 0) goto 600
985      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
986           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
987      if (mp_ierr /= 0) goto 600
988      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,&
989           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
990      if (mp_ierr /= 0) goto 600
991
992      do i=1,nspec
993        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),&
994             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
995        if (mp_ierr /= 0) goto 600
996      end do
997
998    end if !(lroot)
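! Note on the pattern above: for MPI_SCATTER the root passes MPI_IN_PLACE as its
! receive buffer, so the root's own segment of each send array is left untouched,
! while the other ranks pass the same array as both send and receive buffer (the
! send-buffer arguments are ignored on non-root ranks).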
999
1000    if (mp_measure_time) call mpif_mtime('commtime',1)
1001
1002    goto 601
1003
1004600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
1005    stop
1006601 end subroutine mpif_tm_send_vars
1007
1008
1009  subroutine mpif_tm_collect_vars
1010!*******************************************************************************
1011! Collect particle data to PID 0 from all processes.                           *
1012!                                                                              *
1013! This can be called from timemanager_mpi, before calculating                  *
1014! concentrations/writing output grids.                                         *
1015!                                                                              *
1016! Currently *not in use*, as each process calculates concentrations            *
1017! separately, but can be useful for debugging                                  *
1018!                                                                              *
1019! To use this routine (together with mpif_tm_send_vars) to transfer data       *
1020! to the MPI root process (useful for debugging), insert code like this        *
1021! (in timemanager_mpi.f90)                                                     *
1022!                                                                              *
1023!      if (lroot) tot_numpart=numpart ! root stores total numpart              *
1024!      call mpif_tm_send_dims                                                  *
1025!      if (numpart>1) then                                                     *
1026!        call mpif_tm_send_vars                                                *
1027!      end if                                                                  *
1028!                                                                              *
1029!    To collect data from all processes to root process after a parallel       *
1030!    region:                                                                   *
1031!                                                                              *
1032!      if (numpart>0) then                                                     *
1033!          if (lroot) numpart = tot_numpart                                    *
1034!       call mpif_tm_collect_vars                                              *
1035!      end if                                                                  *
1036!                                                                              *
1037! Then a section that begins with "if (lroot) ..." will behave like            *
1038! serial flexpart                                                              *
1039!                                                                              *
1040!*******************************************************************************
1041    use com_mod !, only: numpart, nspec, itra1, npoint, nclass
1042
1043    implicit none
1044
1045    integer :: i
1046
1047    logical :: use_mp_vars = .false.! use mp_... type allocated vars
1048    logical :: use_in_place = .true.! use MPI_IN_PLACE to collect vars.
1049
1050
1051! Time for MPI communications
1052    if (mp_measure_time) call mpif_mtime('commtime',0)
1053
1054! Distribute variables, send from pid 0 to other processes
1055!**********************************************************************
1056
1057    if (.not. use_mp_vars.and..not.use_in_place) then
1058
1059! Integers:
1060      call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
1061           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1062      if (mp_ierr /= 0) goto 600
1063      call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
1064           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1065      if (mp_ierr /= 0) goto 600
1066      call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
1067           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1068      if (mp_ierr /= 0) goto 600
1069      call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
1070           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1071      if (mp_ierr /= 0) goto 600
1072      call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
1073           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1074      if (mp_ierr /= 0) goto 600
1075
1076! int2
1077      call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
1078           &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1079      if (mp_ierr /= 0) goto 600
1080
1081! Reals:
1082      call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
1083           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1084      if (mp_ierr /= 0) goto 600
1085      call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
1086           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1087      if (mp_ierr /= 0) goto 600
1088      call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
1089           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1090      if (mp_ierr /= 0) goto 600
1091
1092      call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
1093           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1094      if (mp_ierr /= 0) goto 600
1095      call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
1096           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1097      if (mp_ierr /= 0) goto 600
1098      call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
1099           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1100      if (mp_ierr /= 0) goto 600
1101
1102
1103      call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
1104           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1105      if (mp_ierr /= 0) goto 600
1106      call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
1107           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1108      if (mp_ierr /= 0) goto 600
1109      call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
1110           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1111      if (mp_ierr /= 0) goto 600
1112
1113      do i=1, nspec
1114        call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
1115             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1116        if (mp_ierr /= 0) goto 600
1117
1118      end do
1119
1120! Use variables npoint etc directly for communications
1121!***********************************************************************
1122    else if (use_in_place.and..not.use_mp_vars) then
1123      if (lroot) then
1124! Integers:
1125        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, npoint, &
1126             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1127        if (mp_ierr /= 0) goto 600
1128        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, idt, &
1129             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1130        if (mp_ierr /= 0) goto 600
1131        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itra1, &
1132             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1133        if (mp_ierr /= 0) goto 600
1134        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, nclass, &
1135             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1136        if (mp_ierr /= 0) goto 600
1137        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itramem, &
1138             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1139        if (mp_ierr /= 0) goto 600
1140
1141! int2
1142        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER2, cbt, &
1143             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1144        if (mp_ierr /= 0) goto 600
1145
1146! Reals:
1147        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uap, &
1148             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1149        if (mp_ierr /= 0) goto 600
1150        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ucp, &
1151             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1152        if (mp_ierr /= 0) goto 600
1153        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uzp, &
1154             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1155        if (mp_ierr /= 0) goto 600
1156
1157        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, us, &
1158             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1159        if (mp_ierr /= 0) goto 600
1160        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, vs, &
1161             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1162        if (mp_ierr /= 0) goto 600
1163        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ws, &
1164             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1165        if (mp_ierr /= 0) goto 600
1166
1167
1168        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, xtra1, &
1169             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1170        if (mp_ierr /= 0) goto 600
1171        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, ytra1, &
1172             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1173        if (mp_ierr /= 0) goto 600
1174        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ztra1, &
1175             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1176        if (mp_ierr /= 0) goto 600
1177
1178        do i=1, nspec
1179          call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, xmass1(:,i), &
1180               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1181          if (mp_ierr /= 0) goto 600
1182        end do
1183
1184      else ! (for mp_pid >= 1)
1185
1186! Integers:
1187        call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
1188             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1189        if (mp_ierr /= 0) goto 600
1190        call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
1191             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1192        if (mp_ierr /= 0) goto 600
1193        call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
1194             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1195        if (mp_ierr /= 0) goto 600
1196        call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
1197             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1198        if (mp_ierr /= 0) goto 600
1199        call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
1200             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1201        if (mp_ierr /= 0) goto 600
1202
1203! int2
1204        call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
1205             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1206        if (mp_ierr /= 0) goto 600
1207
1208! Reals:
1209        call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
1210             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1211        if (mp_ierr /= 0) goto 600
1212        call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
1213             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1214        if (mp_ierr /= 0) goto 600
1215        call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
1216             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1217        if (mp_ierr /= 0) goto 600
1218
1219        call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
1220             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1221        if (mp_ierr /= 0) goto 600
1222        call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
1223             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1224        if (mp_ierr /= 0) goto 600
1225        call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
1226             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1227        if (mp_ierr /= 0) goto 600
1228
1229
1230        call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
1231             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1232        if (mp_ierr /= 0) goto 600
1233        call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
1234             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1235        if (mp_ierr /= 0) goto 600
1236        call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
1237             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1238        if (mp_ierr /= 0) goto 600
1239
1240        do i=1, nspec
1241          call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
1242               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1243          if (mp_ierr /= 0) goto 600
1244        end do
1245      end if ! (lroot)
1246    end if !  (use_in_place)
1247
1248    if (mp_measure_time) call mpif_mtime('commtime',1)
1249
1250    goto 601
1251
1252600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
1253    stop
1254601 end subroutine mpif_tm_collect_vars
1255
1256
1257  subroutine mpif_gf_send_vars(memstat)
1258!*******************************************************************************
1259! DESCRIPTION
1260!   Distribute 'getfield' variables from reader process
1261!
1262!   Called from timemanager
1263!
1264! NOTE
1265!   This subroutine distributes windfields read from the reader process to
1266!   all other processes. Usually one set of fields is transferred, but at the
1267!   first timestep, when there are no fields in memory, both are transferred.
1268!   MPI_Bcast is used, so implicitly all processes are synchronized at this
1269!   step
1270!
1271!
1272!*******************************************************************************
1273    use com_mod
1274    use par_mod,only: numwfmem
1275
1276    implicit none
1277
1278    integer, intent(in) :: memstat
1279
1280! Common array sizes used for communications
1281    integer, parameter :: d3_size1 = nxmax*nymax*nzmax
1282    integer, parameter :: d3_size2 = nxmax*nymax*nuvzmax
1283    integer, parameter :: d2_size1 = nxmax*nymax
1284    integer, parameter :: d2_size2 = nxmax*nymax*maxspec
1285    integer, parameter :: d2_size3 = nxmax*nymax
1286    integer, parameter :: d1_size1 = maxwf
1287
1288    integer :: d3s1,d3s2,d2s1,d2s2
1289
1290! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1291    integer :: li,ui
1292
1293! The first time the routine is called, the unchangeable fields will be transferred
1294    logical :: first_call=.true.
1295
1296!**********************************************************************
1297
1298! Sizes of arrays transferred
1299    d3s1=d3_size1
1300    d3s2=d3_size2
1301    d2s1=d2_size1
1302    d2s2=d2_size2
1303
1304! Decide which field(s) need to be transfered
1305    if (memstat.ge.32) then ! distribute 2 fields, to 1st/2nd indices
1306      li=1; ui=2
1307      d3s1=2*d3_size1
1308      d3s2=2*d3_size2
1309      d2s1=2*d2_size1
1310      d2s2=2*d2_size2
1311    else if (memstat.eq.17) then ! distribute 1 field, place on 1st index
1312      li=1; ui=1
1313    else if (memstat.eq.18) then ! distribute 1 field, place on 2nd index
1314      li=2; ui=2
1315    else if (memstat.eq.19) then ! distribute 1 field, place on 3rd index
1316      li=3; ui=3
1317    else
1318      write(*,*) '#### mpi_mod::mpif_gf_send_vars> ERROR: ', &
1319           & 'wrong value of memstat, exiting ####', memstat
1320      stop
1321    end if
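! Illustrative example (hypothetical value): memstat=32 distributes two fields into
! time slots 1 and 2, so li=1, ui=2 and every broadcast below moves two fields at
! once (the d3s*/d2s* sizes were doubled above); memstat=19 moves a single field
! into the third slot, which is used by the asynchronous (numwfmem=3) configuration.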
1322
1323
1324! Time for MPI communications
1325    if (mp_measure_time) call mpif_mtime('commtime',0)
1326
1327! Send variables from getfield process (id_read) to other processes
1328!**********************************************************************
1329
1330! The non-reader processes need to know if cloud water was read.
1331    call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
1332    if (mp_ierr /= 0) goto 600
1333
1334! Static fields/variables sent only at startup
1335    if (first_call) then
1336      call MPI_Bcast(oro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1337      if (mp_ierr /= 0) goto 600
1338      call MPI_Bcast(excessoro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1339      if (mp_ierr /= 0) goto 600
1340      call MPI_Bcast(lsm(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1341      if (mp_ierr /= 0) goto 600
1342      call MPI_Bcast(xlanduse(:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1343      if (mp_ierr /= 0) goto 600
1344      call MPI_Bcast(wftime,d1_size1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1345      if (mp_ierr /= 0) goto 600
1346      call MPI_Bcast(numbwf,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1347      if (mp_ierr /= 0) goto 600
1348      call MPI_Bcast(nmixz,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1349      if (mp_ierr /= 0) goto 600
1350      call MPI_Bcast(height,nzmax,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1351      if (mp_ierr /= 0) goto 600
1352
1353      first_call=.false.
1354    endif
1355
1356    call MPI_Bcast(uu(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1357    if (mp_ierr /= 0) goto 600
1358    call MPI_Bcast(vv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1359    if (mp_ierr /= 0) goto 600
1360    call MPI_Bcast(uupol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1361    if (mp_ierr /= 0) goto 600
1362    call MPI_Bcast(vvpol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1363    if (mp_ierr /= 0) goto 600
1364    call MPI_Bcast(ww(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1365    if (mp_ierr /= 0) goto 600
1366    call MPI_Bcast(tt(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1367    if (mp_ierr /= 0) goto 600
1368    call MPI_Bcast(rho(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1369    if (mp_ierr /= 0) goto 600
1370    call MPI_Bcast(drhodz(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1371    if (mp_ierr /= 0) goto 600
1372    call MPI_Bcast(tth(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1373    if (mp_ierr /= 0) goto 600
1374    call MPI_Bcast(qvh(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1375    if (mp_ierr /= 0) goto 600
1376    call MPI_Bcast(qv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1377    if (mp_ierr /= 0) goto 600
1378    call MPI_Bcast(pv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1379    if (mp_ierr /= 0) goto 600
1380    call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1381    if (mp_ierr /= 0) goto 600
1382
1383! cloud water/ice:
1384    if (readclouds) then
1385      call MPI_Bcast(ctwc(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1386      if (mp_ierr /= 0) goto 600
1387    end if
1388
1389! 2D fields
1390    call MPI_Bcast(cloudsh(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1391    if (mp_ierr /= 0) goto 600
1392    call MPI_Bcast(vdep(:,:,:,li:ui),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1393    if (mp_ierr /= 0) goto 600
1394    call MPI_Bcast(ps(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1395    if (mp_ierr /= 0) goto 600
1396    call MPI_Bcast(sd(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1397    if (mp_ierr /= 0) goto 600
1398    call MPI_Bcast(tcc(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1399    if (mp_ierr /= 0) goto 600
1400    call MPI_Bcast(tt2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1401    if (mp_ierr /= 0) goto 600
1402    call MPI_Bcast(td2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1403    if (mp_ierr /= 0) goto 600
1404    call MPI_Bcast(lsprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1405    if (mp_ierr /= 0) goto 600
1406    call MPI_Bcast(convprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1407    if (mp_ierr /= 0) goto 600
1408    call MPI_Bcast(ustar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1409    if (mp_ierr /= 0) goto 600
1410    call MPI_Bcast(wstar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1411    if (mp_ierr /= 0) goto 600
1412    call MPI_Bcast(hmix(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1413    if (mp_ierr /= 0) goto 600
1414    call MPI_Bcast(tropopause(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1415    if (mp_ierr /= 0) goto 600
1416    call MPI_Bcast(oli(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1417    if (mp_ierr /= 0) goto 600
1418
1419    if (mp_measure_time) call mpif_mtime('commtime',1)
1420
1421    goto 601
1422
1423600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
1424    stop
1425
1426601 end subroutine mpif_gf_send_vars
1427
1428
1429  subroutine mpif_gf_send_vars_nest(memstat)
1430!***********************************************************************
1431! DESCRIPTION
1432!   Distribute 'getfield' variables from reader process to all processes.
1433!   For nested fields
1434!
1435!   Called from timemanager
1436!
1437! NOTE
1438!   This subroutine distributes nested windfields from the reader process to
1439!   all other processes. Usually one set of fields is transferred, but at the
1440!   first timestep, when there are no fields in memory, both are transferred.
1441!   MPI_Bcast is used, so all processes are implicitly synchronized at this
1442!   step.
1443!
1444!
1445!***********************************************************************
1446    use com_mod
1447    use par_mod,only: numwfmem
1448
1449    implicit none
1450
1451    integer, intent(in) :: memstat
1452    integer :: i
1453! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1454    integer :: li,ui
1455
1456! Common array sizes used for communications
1457    integer :: d3_size1 = nxmaxn*nymaxn*nzmax
1458    integer :: d3_size2 = nxmaxn*nymaxn*nuvzmax
1459    integer :: d2_size1 = nxmaxn*nymaxn
1460    integer :: d2_size2 = nxmaxn*nymaxn*maxspec
1461    integer :: d2_size3 = nxmaxn*nymaxn
1462
1463    integer :: d3s1,d3s2,d2s1,d2s2
1464
1465! The first time the routine is called, the unchangeable fields are transferred
1466    logical :: first_call=.true.
1467
1468!**********************************************************************
1469
1470! Sizes of the arrays transferred
1471    d3s1=d3_size1
1472    d3s2=d3_size2
1473    d2s1=d2_size1
1474    d2s2=d2_size2
1475
1476! Decide which field(s) need to be transfered
1477    if (memstat.ge.32) then ! distribute 2 fields
1478      li=1; ui=2
1479      d3s1=2*d3_size1
1480      d3s2=2*d3_size2
1481      d2s1=2*d2_size1
1482      d2s2=2*d2_size2
1483    else if (memstat.eq.17) then ! distribute 1 field, on 1st index
1484      li=1; ui=1
1485    else if (memstat.eq.18) then ! distribute 1 field, on 2nd index
1486      li=2; ui=2
1487    else if (memstat.eq.19) then ! distribute 1 field, on 3rd index
1488      li=3; ui=3
1489    else
1490      write(*,*) '#### mpi_mod::mpif_gf_send_vars_nest> ERROR: ', &
1491           & 'wrong value of memstat, exiting ####', memstat
1492      stop
1493    end if
1494
1495
1496! Time for MPI communications
1497    if (mp_measure_time) call mpif_mtime('commtime',0)
1498
1499! Distribute variables, send from getfield process (id_read) to other
1500! processes
1501!**********************************************************************
1502
1503! The non-reader processes need to know if cloud water was read.
1504    call MPI_Bcast(readclouds_nest,maxnests,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
1505    if (mp_ierr /= 0) goto 600
1506
1507! Static fields/variables sent only at startup
1508    if (first_call) then
1509      call MPI_Bcast(oron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1510      if (mp_ierr /= 0) goto 600
1511      call MPI_Bcast(excessoron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1512      if (mp_ierr /= 0) goto 600
1513      call MPI_Bcast(lsmn(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1514      if (mp_ierr /= 0) goto 600
1515      call MPI_Bcast(xlandusen(:,:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1516      if (mp_ierr /= 0) goto 600
1517      first_call=.false.
1518    end if
1519
1520! MPI prefers contiguous arrays for sending (else a buffer is created),
1521! hence the loop over nests
1522!**********************************************************************
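! For example, uun(:,:,:,li:ui,i) with a fixed nest index i is one contiguous
! block of memory (the leading dimensions are taken in full), whereas a slice
! over all nests, uun(:,:,:,li:ui,:), generally is not and would force the
! compiler to create a temporary copy for the MPI call.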
1523    do i=1, numbnests
1524! 3D fields
1525      call MPI_Bcast(uun(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1526      if (mp_ierr /= 0) goto 600
1527      call MPI_Bcast(vvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1528      if (mp_ierr /= 0) goto 600
1529      call MPI_Bcast(wwn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1530      if (mp_ierr /= 0) goto 600
1531      call MPI_Bcast(ttn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1532      if (mp_ierr /= 0) goto 600
1533      call MPI_Bcast(rhon(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1534      if (mp_ierr /= 0) goto 600
1535      call MPI_Bcast(drhodzn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1536      if (mp_ierr /= 0) goto 600
1537      call MPI_Bcast(tthn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1538      if (mp_ierr /= 0) goto 600
1539      call MPI_Bcast(qvhn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1540      if (mp_ierr /= 0) goto 600
1541      call MPI_Bcast(qvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1542      if (mp_ierr /= 0) goto 600
1543      call MPI_Bcast(pvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1544      if (mp_ierr /= 0) goto 600
1545      call MPI_Bcast(cloudsn(:,:,:,li:ui,i),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1546      if (mp_ierr /= 0) goto 600
1547
1548! cloud water/ice:
1549      if (readclouds_nest(i)) then
1550! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1551! if (mp_ierr /= 0) goto 600
1552        call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1553        if (mp_ierr /= 0) goto 600
1554      end if
1555
1556! 2D fields
1557      call MPI_Bcast(cloudshn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1558      if (mp_ierr /= 0) goto 600
1559      call MPI_Bcast(vdepn(:,:,:,li:ui,i),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1560      if (mp_ierr /= 0) goto 600
1561      call MPI_Bcast(psn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1562      if (mp_ierr /= 0) goto 600
1563      call MPI_Bcast(sdn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1564      if (mp_ierr /= 0) goto 600
1565      call MPI_Bcast(tccn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1566      if (mp_ierr /= 0) goto 600
1567      call MPI_Bcast(tt2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1568      if (mp_ierr /= 0) goto 600
1569      call MPI_Bcast(td2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1570      if (mp_ierr /= 0) goto 600
1571      call MPI_Bcast(lsprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1572      if (mp_ierr /= 0) goto 600
1573      call MPI_Bcast(convprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1574      if (mp_ierr /= 0) goto 600
1575      call MPI_Bcast(ustarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1576      if (mp_ierr /= 0) goto 600
1577      call MPI_Bcast(wstarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1578      if (mp_ierr /= 0) goto 600
1579      call MPI_Bcast(olin(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1580      if (mp_ierr /= 0) goto 600
1581      call MPI_Bcast(hmixn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1582      if (mp_ierr /= 0) goto 600
1583      call MPI_Bcast(tropopausen(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1584      if (mp_ierr /= 0) goto 600
1585    end do
1586
1587    if (mp_measure_time) call mpif_mtime('commtime',1)
1588
1589    goto 601
1590
1591600 write(*,*) "mpi_mod> mp_ierr /= 0",mp_ierr
1592    stop
1593
1594601 end subroutine mpif_gf_send_vars_nest
1595
1596
1597  subroutine mpif_gf_send_vars_async(memstat)
1598!*******************************************************************************
1599! DESCRIPTION
1600!   Distribute 'getfield' variables from reader process to all processes.
1601!   Called from timemanager by reader process only
1602!
1603! NOTE
1604!   This version uses asynchronous sends. The newest fields are sent in the
1605!   background, so calculations can continue while
1606!   MPI communications are performed.
1607!
1608!   The way the MPI tags/requests are sequenced means that this routine must
1609!   carefully match routine 'mpif_gf_recv_vars_async'
1610!
1611! VARIABLES
1612!   memstat -- input, for resolving pointer to windfield index being read
1613!   mind    -- index where to place new fields
1614!
1615!
1616!*******************************************************************************
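! Request-slot bookkeeping (descriptive sketch): the shared request array
! 'reqs' holds one slot per (process, variable) pair,
!   sender side:   i = dest*nvar_async + n
!   receiver side: j = mp_pid*nvar_async + n
! where n is the position of a variable in the fixed send/receive sequence.
! Hence the number and order of the MPI_Isend calls below must match
! mpif_gf_recv_vars_async exactly, including the optional cloud-water transfer.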
1617    use com_mod
1618
1619    implicit none
1620
1621    integer, intent(in) :: memstat
1622    integer :: mind
1623    integer :: dest,i
1624
1625! Common array sizes used for communications
1626    integer :: d3s1 = nxmax*nymax*nzmax
1627    integer :: d3s2 = nxmax*nymax*nuvzmax
1628    integer :: d2s1 = nxmax*nymax
1629    integer :: d2s2 = nxmax*nymax*maxspec
1630!    integer :: d1s1 = maxwf
1631
1632!*******************************************************************************
1633
1634! At the time the send is posted, the reader process is one step further
1635! in the permutation of memstat compared with the receiving processes
1636
1637    if (memstat.ge.32) then
1638! last read was synchronous, to indices 1 and 2, 3 is free
1639      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1640           & memstat>=32 should never happen here."
1641      stop
1642    else if (memstat.eq.17) then
1643! old fields on 1,2, send 3
1644      mind=3
1645    else if (memstat.eq.18) then
1646! old fields on 2,3, send 1
1647      mind=1
1648    else if (memstat.eq.19) then
1649! old fields on 3,1, send 2
1650      mind=2
1651    else
1652      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1653           & invalid memstat"
1654      mind=-1
1655      stop
1656    end if
1657
1658    if (mp_dev_mode) then
1659      if (mp_pid.ne.id_read) then
1660        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_async'
1661      end if
1662    end if
1663
1664    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat
1665
1666! Time for MPI communications
1667    if (mp_measure_time) call mpif_mtime('commtime',0)
1668
1669! Loop over receiving processes, initiate data sending
1670!*****************************************************
1671
1672    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
1673! TODO: use mp_partgroup_np here
1674      if (dest.eq.id_read) cycle
1675      i=dest*nvar_async
1676      call MPI_Isend(uu(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1677      if (mp_ierr /= 0) goto 600
1678      i=i+1
1679      call MPI_Isend(vv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1680      if (mp_ierr /= 0) goto 600
1681      i=i+1
1682      call MPI_Isend(uupol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1683      if (mp_ierr /= 0) goto 600
1684      i=i+1
1685      call MPI_Isend(vvpol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1686      if (mp_ierr /= 0) goto 600
1687      i=i+1
1688      call MPI_Isend(ww(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1689      if (mp_ierr /= 0) goto 600
1690      i=i+1
1691      call MPI_Isend(tt(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1692      if (mp_ierr /= 0) goto 600
1693      i=i+1
1694      call MPI_Isend(rho(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1695      if (mp_ierr /= 0) goto 600
1696      i=i+1
1697      call MPI_Isend(drhodz(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1698      if (mp_ierr /= 0) goto 600
1699      i=i+1
1700      call MPI_Isend(tth(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1701      if (mp_ierr /= 0) goto 600
1702      i=i+1
1703      call MPI_Isend(qvh(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1704      if (mp_ierr /= 0) goto 600
1705      i=i+1
1706      call MPI_Isend(qv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1707      if (mp_ierr /= 0) goto 600
1708      i=i+1
1709      call MPI_Isend(pv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1710      if (mp_ierr /= 0) goto 600
1711      i=i+1
1712      call MPI_Isend(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1713      i=i+1
1714      if (mp_ierr /= 0) goto 600
1715      call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1716      if (mp_ierr /= 0) goto 600
1717      i=i+1
1718      call MPI_Isend(vdep(:,:,:,mind),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1719      if (mp_ierr /= 0) goto 600
1720      i=i+1
1721! 15
1722      call MPI_Isend(ps(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1723      if (mp_ierr /= 0) goto 600
1724      i=i+1
1725      call MPI_Isend(sd(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1726      if (mp_ierr /= 0) goto 600
1727      i=i+1
1728      call MPI_Isend(tcc(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1729      if (mp_ierr /= 0) goto 600
1730      i=i+1
1731      call MPI_Isend(tt2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1732      if (mp_ierr /= 0) goto 600
1733      i=i+1
1734      call MPI_Isend(td2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1735      if (mp_ierr /= 0) goto 600
1736      i=i+1
1737      call MPI_Isend(lsprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1738      if (mp_ierr /= 0) goto 600
1739      i=i+1
1740      call MPI_Isend(convprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1741      if (mp_ierr /= 0) goto 600
1742      i=i+1
1743      call MPI_Isend(ustar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1744      if (mp_ierr /= 0) goto 600
1745      i=i+1
1746      call MPI_Isend(wstar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1747      if (mp_ierr /= 0) goto 600
1748      i=i+1
1749      call MPI_Isend(hmix(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1750      if (mp_ierr /= 0) goto 600
1751      i=i+1
1752! 25
1753      call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1754      if (mp_ierr /= 0) goto 600
1755      i=i+1
1756      call MPI_Isend(oli(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1757      if (mp_ierr /= 0) goto 600
1758
1759! Send cloud water if it exists. Increment counter always (as on receiving end)
1760      if (readclouds) then
1761        i=i+1
1762        call MPI_Isend(ctwc(:,:,mind),d2s1,mp_sp,dest,tm1,&
1763             &MPI_COMM_WORLD,reqs(i),mp_ierr)
1764        if (mp_ierr /= 0) goto 600
1765      end if
1766    end do
1767
1768    if (mp_measure_time) call mpif_mtime('commtime',1)
1769
1770    goto 601
1771
1772600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> mp_ierr /= 0", mp_ierr
1773    stop
1774
1775601 end subroutine mpif_gf_send_vars_async
1776
1777
1778  subroutine mpif_gf_recv_vars_async(memstat)
1779!*******************************************************************************
1780! DESCRIPTION
1781!   Receive 'getfield' variables from reader process.
1782!   Called from timemanager by all processes except reader
1783!
1784! NOTE
1785!   This version uses asynchronous communications.
1786!
1787! VARIABLES
1788!   memstat -- input, used to resolve windfield index being received
1789!
1790!
1791!*******************************************************************************
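! Matching note (descriptive sketch): the receives below are posted with
! MPI_ANY_TAG, so pairing with the sends in mpif_gf_send_vars_async relies on
! MPI's per-sender message ordering; the Irecv calls must therefore be posted
! in exactly the same order as the corresponding Isend calls. The requests are
! completed later by the MPI_Waitall in mpif_gf_request.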
1792    use com_mod
1793
1794    implicit none
1795
1796    integer, intent(in) :: memstat
1797    integer :: mind,j
1798
1799! Common array sizes used for communications
1800    integer :: d3s1 = nxmax*nymax*nzmax
1801    integer :: d3s2 = nxmax*nymax*nuvzmax
1802    integer :: d2s1 = nxmax*nymax
1803    integer :: d2s2 = nxmax*nymax*maxspec
1804!integer :: d1_size1 = maxwf
1805
1806!    integer :: d3s1,d3s2,d2s1,d2s2
1807!*******************************************************************************
1808
1809! At the time this immediate receive is posted, memstat is the state of
1810! windfield indices at the previous/current time. From this, the future
1811! state is deduced.
1812    if (memstat.eq.32) then
1813! last read was synchronous to indices 1 and 2, 3 is free
1814      mind=3
1815    else if (memstat.eq.17) then
1816! last read was asynchronous to index 3, 1 is free
1817      mind=1
1818    else if (memstat.eq.18) then
1819! last read was asynchronous to index 1, 2 is free
1820      mind=2
1821    else if (memstat.eq.19) then
1822! last read was asynchronous to index 2, 3 is free
1823      mind=3
1824    else
1825! illegal state
1826      write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
1827      stop
1828    end if
1829
1830    if (mp_dev_mode) then
1831      if (mp_pid.eq.id_read) then
1832        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async'
1833      end if
1834    end if
1835
1836! Time for MPI communications
1837    if (mp_measure_time) call mpif_mtime('commtime',0)
1838
1839    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_async, memstat:', memstat
1840
1841! Initiate receiving of data
1842!***************************
1843
1844! Get MPI tags/requests for communications
1845    j=mp_pid*nvar_async
1846    call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1847         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1848    if (mp_ierr /= 0) goto 600
1849    j=j+1
1850    call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1851         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1852    if (mp_ierr /= 0) goto 600
1853    j=j+1
1854    call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1855         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1856    if (mp_ierr /= 0) goto 600
1857    j=j+1
1858    call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1859         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1860    if (mp_ierr /= 0) goto 600
1861    j=j+1
1862    call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1863         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1864    if (mp_ierr /= 0) goto 600
1865    j=j+1
1866    call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1867         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1868    if (mp_ierr /= 0) goto 600
1869    j=j+1
1870    call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1871         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1872    if (mp_ierr /= 0) goto 600
1873    j=j+1
1874    call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1875         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1876    if (mp_ierr /= 0) goto 600
1877    j=j+1
1878    call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
1879         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1880    if (mp_ierr /= 0) goto 600
1881    j=j+1
1882    call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
1883         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1884    if (mp_ierr /= 0) goto 600
1885    j=j+1
1886    call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1887         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1888    if (mp_ierr /= 0) goto 600
1889    j=j+1
1890    call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1891         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1892    if (mp_ierr /= 0) goto 600
1893    j=j+1
1894    call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
1895         &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1896    if (mp_ierr /= 0) goto 600
1897    j=j+1
1898    call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1899         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1900    if (mp_ierr /= 0) goto 600
1901    j=j+1
1902    call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
1903         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1904    if (mp_ierr /= 0) goto 600
1905    j=j+1
1906    call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1907         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1908    if (mp_ierr /= 0) goto 600
1909    j=j+1
1910    call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1911         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1912    if (mp_ierr /= 0) goto 600
1913    j=j+1
1914    call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1915         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1916    if (mp_ierr /= 0) goto 600
1917    j=j+1
1918    call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1919         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1920    if (mp_ierr /= 0) goto 600
1921    j=j+1
1922    call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1923         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1924    if (mp_ierr /= 0) goto 600
1925    j=j+1
1926    call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1927         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1928    if (mp_ierr /= 0) goto 600
1929    j=j+1
1930    call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1931         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1932    if (mp_ierr /= 0) goto 600
    j=j+1
1933    call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1934         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1935    if (mp_ierr /= 0) goto 600
1936    j=j+1
1937    call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1938         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1939    if (mp_ierr /= 0) goto 600
1940    j=j+1
1941    call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1942         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1943    if (mp_ierr /= 0) goto 600
1944    j=j+1
1945    call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1946         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1947    if (mp_ierr /= 0) goto 600
1948    j=j+1
1949    call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1950         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1951    if (mp_ierr /= 0) goto 600
1952
1953! Post the request for clwc. These data are possibly not sent; the request must then be cancelled.
1954! For now, assume that the data at all steps either have or do not have cloud water.
1955    if (readclouds) then
1956      j=j+1
1957      call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
1958           &MPI_COMM_WORLD,reqs(j),mp_ierr)
1959      if (mp_ierr /= 0) goto 600
1960    end if
1961
1962
1963    if (mp_measure_time) call mpif_mtime('commtime',1)
1964
1965    goto 601
1966
1967600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> MPI ERROR ", mp_ierr
1968    stop
1969
1970601 end subroutine mpif_gf_recv_vars_async
1971
1972
1973  subroutine mpif_gf_send_vars_nest_async(memstat)
1974!*******************************************************************************
1975! DESCRIPTION
1976!   Distribute 'getfield' variables from reader process to all processes.
1977!   Called from timemanager by reader process only.
1978!   Version for nested wind fields
1979!
1980! NOTE
1981!   This version uses asynchronous sends. The newest fields are sent in the
1982!   background, so calculations can continue while
1983!   MPI communications are performed.
1984!
1985!   The way the MPI tags/requests are sequenced means that this routine must
1986!   carefully match routine 'mpif_gf_recv_vars_async'
1987!
1988! VARIABLES
1989!   memstat -- input, for resolving pointer to windfield index being read
1990!   mind    -- index where to place new fields
1991!
1992! TODO
1993!   Some unused arrays are currently sent (uupoln,..)
1994!*******************************************************************************
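! Note (descriptive): inside the loop below the request index is reset to
! dest*nvar_async for every nest, so with more than one nest the slots in
! 'reqs' are reused and only the handles for the last nest remain for
! mpif_gf_request to wait on. The receiving routine
! (mpif_gf_recv_vars_nest_async) resets its index the same way, so the two
! sides stay consistent; see also the TODO in mpif_gf_request about separate
! request arrays for the nested fields.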
1995    use com_mod
1996
1997    implicit none
1998
1999    integer, intent(in) :: memstat
2000    integer :: mind
2001    integer :: dest,i,k
2002
2003! Common array sizes used for communications
2004    integer :: d3s1 = nxmaxn*nymaxn*nzmax
2005    integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
2006    integer :: d2s1 = nxmaxn*nymaxn
2007    integer :: d2s2 = nxmaxn*nymaxn*maxspec
2008
2009!*******************************************************************************
2010
2011! At the time the send is posted, the reader process is one step further
2012! in the permutation of memstat compared with the receiving processes
2013
2014    if (memstat.ge.32) then
2015! last read was synchronous, to indices 1 and 2, 3 is free
2016      write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
2017           & memstat>=32 should never happen here."
2018      stop
2019    else if (memstat.eq.17) then
2020! old fields on 1,2, send 3
2021      mind=3
2022    else if (memstat.eq.18) then
2023! old fields on 2,3, send 1
2024      mind=1
2025    else if (memstat.eq.19) then
2026! old fields on 3,1, send 2
2027      mind=2
2028    else
2029      write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
2030           & invalid memstat"
2031      mind=-1
2032      stop
2033    end if
2034
2035    if (mp_dev_mode) then
2036      if (mp_pid.ne.id_read) then
2037        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_nest_async'
2038      end if
2039    end if
2040
2041    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_nest_async, memstat:', memstat
2042
2043! Time for MPI communications
2044    if (mp_measure_time) call mpif_mtime('commtime',0)
2045
2046! Loop over receiving processes, initiate data sending
2047!*****************************************************
2048
2049    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
2050! TODO: use mp_partgroup_np here
2051      if (dest.eq.id_read) cycle
2052      do k=1, numbnests
2053        i=dest*nvar_async
2054        call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2055        if (mp_ierr /= 0) goto 600
2056        i=i+1
2057        call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2058        if (mp_ierr /= 0) goto 600
2059        i=i+1
2060        call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2061        if (mp_ierr /= 0) goto 600
2062        i=i+1
2063        call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2064        if (mp_ierr /= 0) goto 600
2065        i=i+1
2066        call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2067        if (mp_ierr /= 0) goto 600
2068        i=i+1
2069        call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2070        if (mp_ierr /= 0) goto 600
2071        i=i+1
2072        call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2073        if (mp_ierr /= 0) goto 600
2074        i=i+1
2075        call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2076        if (mp_ierr /= 0) goto 600
2077        i=i+1
2078        call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2079        if (mp_ierr /= 0) goto 600
2080        i=i+1
2081        call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2082        if (mp_ierr /= 0) goto 600
2083        i=i+1
2084        call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2085        i=i+1
2086        if (mp_ierr /= 0) goto 600
2087        call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2088        if (mp_ierr /= 0) goto 600
2089        i=i+1
2090        call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2091        if (mp_ierr /= 0) goto 600
2092        i=i+1
2093        call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2094        if (mp_ierr /= 0) goto 600
2095        i=i+1
2096        call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2097        if (mp_ierr /= 0) goto 600
2098        i=i+1
2099! 15
2100        call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2101        if (mp_ierr /= 0) goto 600
2102        i=i+1
2103        call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2104        if (mp_ierr /= 0) goto 600
2105        i=i+1
2106        call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2107        if (mp_ierr /= 0) goto 600
2108        i=i+1
2109        call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2110        if (mp_ierr /= 0) goto 600
2111        i=i+1
2112        call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2113        if (mp_ierr /= 0) goto 600
2114        i=i+1
2115        call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2116        if (mp_ierr /= 0) goto 600
2117        i=i+1
2118        call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2119        if (mp_ierr /= 0) goto 600
2120        i=i+1
2121        call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2122        if (mp_ierr /= 0) goto 600
2123        i=i+1
2124        call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2125        if (mp_ierr /= 0) goto 600
2126        i=i+1
2127        call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2128        if (mp_ierr /= 0) goto 600
2129! 25
2130
2131! Send cloud water if it exists. Increment counter always (as on receiving end)
2132        if (readclouds) then
2133          i=i+1
2134          call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,&
2135               &MPI_COMM_WORLD,reqs(i),mp_ierr)
2136          if (mp_ierr /= 0) goto 600
2137        end if
2138      end do
2139    end do
2140
2141    if (mp_measure_time) call mpif_mtime('commtime',1)
2142
2143    goto 601
2144
2145600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> mp_ierr /= 0", mp_ierr
2146    stop
2147
2148601 end subroutine mpif_gf_send_vars_nest_async
2149
2150
2151  subroutine mpif_gf_recv_vars_nest_async(memstat)
2152!*******************************************************************************
2153! DESCRIPTION
2154!   Receive 'getfield' variables from reader process.
2155!   Called from timemanager by all processes except reader
2156!   Version for nested wind fields
2157!
2158! NOTE
2159!   This version uses asynchronous communications.
2160!
2161! VARIABLES
2162!   memstat -- input, used to resolve windfield index being received
2163!
2164!
2165!*******************************************************************************
2166    use com_mod
2167
2168    implicit none
2169
2170    integer, intent(in) :: memstat
2171    integer :: mind,j,k
2172
2173! Common array sizes used for communications
2174    integer :: d3s1 = nxmaxn*nymaxn*nzmax
2175    integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
2176    integer :: d2s1 = nxmaxn*nymaxn
2177    integer :: d2s2 = nxmaxn*nymaxn*maxspec
2178
2179!*******************************************************************************
2180
2181! At the time this immediate receive is posted, memstat is the state of
2182! windfield indices at the previous/current time. From this, the future
2183! state is deduced.
2184    if (memstat.eq.32) then
2185! last read was synchronous to indices 1 and 2, 3 is free
2186      mind=3
2187    else if (memstat.eq.17) then
2188! last read was asynchronous to index 3, 1 is free
2189      mind=1
2190    else if (memstat.eq.18) then
2191! last read was asynchronous to index 1, 2 is free
2192      mind=2
2193    else if (memstat.eq.19) then
2194! last read was asynchronous to index 2, 3 is free
2195      mind=3
2196    else
2197! illegal state
2198      write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
2199      stop
2200    end if
2201
2202    if (mp_dev_mode) then
2203      if (mp_pid.eq.id_read) then
2204        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_nest_async'
2205      end if
2206    end if
2207
2208! Time for MPI communications
2209    if (mp_measure_time) call mpif_mtime('commtime',0)
2210
2211    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_nest_async, memstat:', memstat
2212
2213! Initiate receiving of data
2214!***************************
2215
2216    do k=1, numbnests
2217! Get MPI tags/requests for communications
2218      j=mp_pid*nvar_async
2219      call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2220           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2221      if (mp_ierr /= 0) goto 600
2222      j=j+1
2223      call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2224           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2225      if (mp_ierr /= 0) goto 600
2226      j=j+1
2227      call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2228           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2229      if (mp_ierr /= 0) goto 600
2230      j=j+1
2231      call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2232           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2233      if (mp_ierr /= 0) goto 600
2234      j=j+1
2235      call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2236           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2237      if (mp_ierr /= 0) goto 600
2238      j=j+1
2239      call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2240           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2241      if (mp_ierr /= 0) goto 600
2242      j=j+1
2243      call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
2244           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2245      if (mp_ierr /= 0) goto 600
2246      j=j+1
2247      call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
2248           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2249      if (mp_ierr /= 0) goto 600
2250      j=j+1
2251      call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2252           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2253      if (mp_ierr /= 0) goto 600
2254      j=j+1
2255      call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2256           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2257      if (mp_ierr /= 0) goto 600
2258      j=j+1
2259      call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
2260           &MPI_COMM_WORLD,reqs(j),mp_ierr)   
2261      if (mp_ierr /= 0) goto 600
2262      j=j+1
2263      call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2264           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2265      if (mp_ierr /= 0) goto 600
2266      j=j+1
2267      call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
2268           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2269      if (mp_ierr /= 0) goto 600
2270      j=j+1
2271      call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2272           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2273      if (mp_ierr /= 0) goto 600
2274      j=j+1
2275      call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2276           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2277      if (mp_ierr /= 0) goto 600
2278      j=j+1
2279      call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2280           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2281      if (mp_ierr /= 0) goto 600
2282      j=j+1
2283      call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2284           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2285      if (mp_ierr /= 0) goto 600
2286      j=j+1
2287      call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2288           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2289      if (mp_ierr /= 0) goto 600
2290      j=j+1
2291      call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2292           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2293      if (mp_ierr /= 0) goto 600
2294      j=j+1
2295      call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2296           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2297      if (mp_ierr /= 0) goto 600
      j=j+1
2298      call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2299           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2300      if (mp_ierr /= 0) goto 600
2301      j=j+1
2302      call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2303           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2304      if (mp_ierr /= 0) goto 600
2305      j=j+1
2306      call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2307           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2308      if (mp_ierr /= 0) goto 600
2309      j=j+1
2310      call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2311           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2312      if (mp_ierr /= 0) goto 600
2313      j=j+1
2314      call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2315           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2316      if (mp_ierr /= 0) goto 600
2317
2318! Post the request for clwc. These data are possibly not sent; the request must then be cancelled.
2319! For now, assume that the data at all steps either have or do not have cloud water.
2320      if (readclouds) then
2321        j=j+1
2322        call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
2323             &MPI_COMM_WORLD,reqs(j),mp_ierr)
2324        if (mp_ierr /= 0) goto 600
2325      end if
2326    end do
2327
2328    if (mp_measure_time) call mpif_mtime('commtime',1)
2329
2330    goto 601
2331
2332600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_nest_async> MPI ERROR ", mp_ierr
2333    stop
2334
2335601 end subroutine mpif_gf_recv_vars_nest_async
2336
2337
2338  subroutine mpif_gf_request
2339!*******************************************************************************
2340! DESCRIPTION
2341!   Check for completion of MPI_Isend/MPI_Irecv data transfer.
2342!   
2343!   
2344! NOTE
2345!   implicit synchronisation between all processes takes place here
2346!
2347! TODO
2348!   NB: take into account nested wind fields by using a separate
2349!   subroutine mpif_gf_request_nest (and separate request arrays for the
2350!   nested variables)
2351!
2352!*******************************************************************************
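! Descriptive note: n_req below equals nvar_async*mp_np, i.e. one request slot
! per (process, variable) pair, including slots that were never used in a given
! cycle (e.g. the reader's own block, or the cloud-water slot when
! readclouds=.false.). Waiting on those is harmless provided the unused entries
! of 'reqs' hold MPI_REQUEST_NULL, which is assumed to be their initial value.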
2353!    use com_mod,only: readclouds
2354
2355    implicit none
2356
2357
2358    integer :: n_req !,i
2359
2360!***********************************************************************
2361
2362    n_req=nvar_async*mp_np
2363
2364    if (mp_measure_time) call mpif_mtime('commtime',0)
2365
2366!    call MPI_Wait(rm1,MPI_STATUS_IGNORE,mp_ierr)
2367
2368! TODO: cancel recv request if readclouds=.false.
2369!    if (readclouds) then
2370    call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
2371!    endif
2372! else
2373!   do i = 0, nvar_async*mp_np-1
2374!     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
2375!       call MPI_Cancel(reqs(i),mp_ierr)
2376!       cycle
2377!     end if
2378!     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
2379!   end do
2380! end if
2381
2382    if (mp_ierr /= 0) goto 600
2383
2384    if (mp_measure_time) call mpif_mtime('commtime',1)
2385
2386    goto 601
2387
2388600 write(*,*) "#### mpi_mod::mpif_gf_request> MPI ERROR ", mp_ierr
2389    stop
2390
2391601 end subroutine mpif_gf_request
2392
2393
2394  subroutine mpif_tm_reduce_grid
2395!***********************************************************************
2396! Collect grid variable to PID 0, adding from all processes.
2397!
2398! NOTE
2399!   - Older MPI implementations may lack the MPI_IN_PLACE operation.
2400!     If so, use option 1) below and change unc_mod accordingly.
2401!
2402!***********************************************************************
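! Descriptive note: the reductions below use MPI_SUM rooted at id_root, so
! after this routine the root process holds the element-wise sum of the local
! grids from all processes in mp_comm_used, while the other processes' local
! grids are left unchanged.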
2403    use com_mod
2404    use unc_mod
2405    use par_mod
2406
2407    implicit none
2408
2409    integer :: grid_size2d,grid_size3d
2410    integer, parameter :: rcpt_size=maxreceptor*maxspec
2411
2412!**********************************************************************
2413    grid_size2d=numxgrid*numygrid*maxspec* &
2414         & maxpointspec_act*nclassunc*maxageclass
2415    grid_size3d=numxgrid*numygrid*numzgrid*maxspec* &
2416         & maxpointspec_act*nclassunc*maxageclass
2417
2418
2419! Time for MPI communications
2420    if (mp_measure_time) call mpif_mtime('commtime',0)
2421
2422! 1) Using a separate grid (gridunc0) for received values
2423! call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2424!      & mp_comm_used, mp_ierr)
2425! if (mp_ierr /= 0) goto 600
2426
2427! 2) Using in-place reduction
2428
2429!!!--------------------------------------------------------------------
2430!!! DJM - 2017-05-09 change - MPI_IN_PLACE option for MPI_Reduce() causes
2431!!! severe numerical problems in some cases.  I'm guessing there are memory
2432!!! overrun issues in this complex code, but have so far failed to identify
2433!!! a specific problem.  And/or, when one searches the Internet for this
2434!!! issue, there is "some" hint that the implementation may be buggy. 
2435!!!
2436!!! At this point, with the following CPP USE_MPIINPLACE directive, the
2437!!! default behaviour will be to not use the MPI_IN_PLACE option.
2438!!! Users will have to compile with -DUSE_MPIINPLACE if they want that option.
2439!!! Introduction of the CPP directives also requires that the code be compiled
2440!!! with the "-x f95-cpp-input" option.
2441!!!
2442!!! Modification of this section requires the addition of array gridunc0, which
2443!!! requires an allocation in outgrid_init.f90 and initial declaration in
2444!!! unc_mod.f90.
2445!!!--------------------------------------------------------------------
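!!! Illustrative compile line (a sketch only; the actual flags depend on the
!!! makefile in use):
!!!   mpif90 -x f95-cpp-input -DUSE_MPIINPLACE -c mpi_mod.f90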
2446
2447#ifdef USE_MPIINPLACE
2448
2449    if (lroot) then
2450      call MPI_Reduce(MPI_IN_PLACE, gridunc, grid_size3d, mp_sp, MPI_SUM, id_root, &
2451           & mp_comm_used, mp_ierr)
2452      if (mp_ierr /= 0) goto 600
2453    else
2454      call MPI_Reduce(gridunc, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2455           & mp_comm_used, mp_ierr)
2456    end if
2457
2458  ! Receptor concentrations   
2459    if (lroot) then
2460      call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
2461           & mp_comm_used,mp_ierr)
2462      if (mp_ierr /= 0) goto 600
2463    else
2464      call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
2465           & mp_comm_used,mp_ierr)
2466    end if
2467
2468#else
2469
2470      call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2471           & mp_comm_used, mp_ierr)
2472      if (mp_ierr /= 0) goto 600
2473      if (lroot) gridunc = gridunc0
2474
2475      call MPI_Reduce(creceptor, creceptor0,rcpt_size,mp_sp,MPI_SUM,id_root, &
2476           & mp_comm_used,mp_ierr)
2477      if (mp_ierr /= 0) goto 600
2478      if (lroot) creceptor = creceptor0
2479
2480#endif
2481
2482    if ((WETDEP).and.(ldirect.gt.0)) then
2483      call MPI_Reduce(wetgridunc, wetgridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2484           & mp_comm_used, mp_ierr)
2485      if (mp_ierr /= 0) goto 600
2486    end if
2487
2488    if ((DRYDEP).and.(ldirect.gt.0)) then
2489      call MPI_Reduce(drygridunc, drygridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2490           & mp_comm_used, mp_ierr)
2491      if (mp_ierr /= 0) goto 600
2492    end if
2493
2494
2495    if (mp_measure_time) call mpif_mtime('commtime',1)
2496
2497    goto 601
2498
2499600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
2500    stop
2501
2502601 end subroutine mpif_tm_reduce_grid
2503
2504
2505  subroutine mpif_tm_reduce_grid_nest
2506!***********************************************************************
2507! Collect nested grids to PID 0, adding from all processes.
2508!
2509! NOTE
2510!   - Compiling with 'fcheck=all' (gfortran) will cause a run-time error
2511!     because wetgriduncn0 is only allocated on the root process. This is
2512!     arguably valid MPI code, but gfortran's run-time checks reject it.
2513!
2514!***********************************************************************
2515    use com_mod
2516    use unc_mod
2517    use par_mod
2518
2519    implicit none
2520
2521    integer :: grid_size2d,grid_size3d
2522
2523!**********************************************************************
2524
2525    grid_size3d=numxgridn*numygridn*numzgrid*maxspec* &
2526         & maxpointspec_act*nclassunc*maxageclass
2527    grid_size2d=numxgridn*numygridn*maxspec* &
2528         & maxpointspec_act*nclassunc*maxageclass
2529
2530
2531! Time for MPI communications
2532    if (mp_measure_time) call mpif_mtime('commtime',0)
2533
2534! Using a separate grid (gridunc0) for received values, for debugging
2535! call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2536!      & mp_comm_used, mp_ierr)
2537! if (mp_ierr /= 0) goto 600
2538
2539#ifdef USE_MPIINPLACE
2540! Using in-place reduction
2541    if (lroot) then
2542      call MPI_Reduce(MPI_IN_PLACE, griduncn, grid_size3d, mp_sp, MPI_SUM, id_root, &
2543           & mp_comm_used, mp_ierr)
2544      if (mp_ierr /= 0) goto 600
2545    else
2546      call MPI_Reduce(griduncn, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2547           & mp_comm_used, mp_ierr)
2548      if (mp_ierr /= 0) goto 600
2549    end if
2550
2551#else
2552    call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2553         & mp_comm_used, mp_ierr)
2554    if (mp_ierr /= 0) goto 600
2555    if (lroot) griduncn = griduncn0
2556
2557#endif
2558
2559    if ((WETDEP).and.(ldirect.gt.0)) then
2560      call MPI_Reduce(wetgriduncn, wetgriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2561           & mp_comm_used, mp_ierr)
2562      if (mp_ierr /= 0) goto 600
2563    end if
2564
2565    if ((DRYDEP).and.(ldirect.gt.0)) then
2566      call MPI_Reduce(drygriduncn, drygriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2567           & mp_comm_used, mp_ierr)
2568      if (mp_ierr /= 0) goto 600
2569    end if
2570
2571    if (mp_measure_time) call mpif_mtime('commtime',1)
2572
2573    goto 601
2574
2575600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
2576    stop
2577
2578601 end subroutine mpif_tm_reduce_grid_nest
2579
2580
2581  subroutine mpif_tm_reduce_initcond
2582!***********************************************************************
2583! Collect init_cond to PID 0, adding from all processes.
2584!
2585!
2586!***********************************************************************
2587    use com_mod
2588    use unc_mod
2589    use par_mod
2590
2591    implicit none
2592
2593    integer :: grid_size
2594
2595!**********************************************************************
2596
2597    grid_size=numxgrid*numygrid*numzgrid*maxspec* &
2598         & maxpointspec_act
2599
2600! Time for MPI communications
2601    if (mp_measure_time) call mpif_mtime('commtime',0)
2602
2603
2604#ifdef USE_MPIINPLACE
2605! Using in-place reduction
2606    if (lroot) then
2607      call MPI_Reduce(MPI_IN_PLACE, init_cond, grid_size, mp_sp, MPI_SUM, id_root, &
2608           & mp_comm_used, mp_ierr)
2609      if (mp_ierr /= 0) goto 600
2610    else
2611      call MPI_Reduce(init_cond, 0, grid_size, mp_sp, MPI_SUM, id_root, &
2612           & mp_comm_used, mp_ierr)
2613      if (mp_ierr /= 0) goto 600
2614    end if
2615
2616#else
2617    call MPI_Reduce(init_cond, init_cond0, grid_size, mp_sp, MPI_SUM, id_root, &
2618         & mp_comm_used, mp_ierr)
2619    if (mp_ierr /= 0) goto 600
2620!    if (lroot) init_cond = init_cond0
2621#endif
2622
2623    if (mp_measure_time) call mpif_mtime('commtime',1)
2624
2625    goto 601
2626
2627600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
2628    stop
2629
2630601 end subroutine mpif_tm_reduce_initcond
2631   
2632
2633  subroutine mpif_mtime(ident,imode)
2634!***********************************************************************
2635! Measure CPU/Wall time in various parts of the code
2636!
2637! VARIABLES
2638!   ident        character, identifies routine to measure
2639!   imode        integer, 0:start clock(s)  1: stop clock(s)
2640!
2641!***********************************************************************
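! Illustrative usage (as done throughout this module):
!   call mpif_mtime('commtime',0)   ! start the communication timers
!   ... MPI calls ...
!   call mpif_mtime('commtime',1)   ! stop them and accumulate the totals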
2642    implicit none
2643
2644    character(LEN=*), intent(in) :: ident
2645    integer, intent(in) :: imode
2646
2647!***********************************************************************
2648
2649    select case(ident)
2650
2651    case ('timemanager')
2652      if (imode.eq.0) then
2653        call cpu_time(tm_tot_beg)
2654      else
2655        call cpu_time(tm_tot_end)
2656        tm_tot_total = tm_tot_total + (tm_tot_end-tm_tot_beg)
2657      end if
2658
2659    case ('wetdepo')
2660      if (imode.eq.0) then
2661        mp_wetdepo_wtime_beg = mpi_wtime()
2662        call cpu_time(mp_wetdepo_time_beg)
2663      else
2664        mp_wetdepo_wtime_end = mpi_wtime()
2665        call cpu_time(mp_wetdepo_time_end)
2666
2667        mp_wetdepo_wtime_total = mp_wetdepo_wtime_total + &
2668             &(mp_wetdepo_wtime_end - mp_wetdepo_wtime_beg)
2669        mp_wetdepo_time_total = mp_wetdepo_time_total + &
2670             &(mp_wetdepo_time_end - mp_wetdepo_time_beg)
2671      end if
2672
2673    case ('advance')
2674      if (imode.eq.0) then
2675        mp_advance_wtime_beg = mpi_wtime()
2676      else
2677        mp_advance_wtime_end = mpi_wtime()
2678
2679        mp_advance_wtime_total = mp_advance_wtime_total + &
2680             &(mp_advance_wtime_end - mp_advance_wtime_beg)
2681      end if
2682
2683    case ('getfields')
2684      if (imode.eq.0) then
2685        mp_getfields_wtime_beg = mpi_wtime()
2686        call cpu_time(mp_getfields_time_beg)
2687      else
2688        mp_getfields_wtime_end = mpi_wtime()
2689        call cpu_time(mp_getfields_time_end)
2690
2691        mp_getfields_wtime_total = mp_getfields_wtime_total + &
2692             &(mp_getfields_wtime_end - mp_getfields_wtime_beg)
2693        mp_getfields_time_total = mp_getfields_time_total + &
2694             &(mp_getfields_time_end - mp_getfields_time_beg)
2695      end if
2696
2697    case ('partloop1')
2698      if (imode.eq.0) then
2699        call cpu_time(tm_nploop_beg)
2700      else
2701        call cpu_time(tm_nploop_end)
2702        tm_nploop_total = tm_nploop_total + (tm_nploop_end - tm_nploop_beg)
2703      end if
2704
2705    case ('conccalc')
2706      if (imode.eq.0) then
2707        call cpu_time(mp_conccalc_time_beg)
2708      else
2709        call cpu_time(mp_conccalc_time_end)
2710        mp_conccalc_time_total = mp_conccalc_time_total + mp_conccalc_time_end - &
2711             &mp_conccalc_time_beg
2712      end if
2713
2714    case ('rootonly')
2715      if (imode.eq.0) then
2716        call cpu_time(mp_root_time_beg)
2717        mp_root_wtime_beg = mpi_wtime()
2718      else
2719        call cpu_time(mp_root_time_end)
2720        mp_root_wtime_end = mpi_wtime()
2721        mp_root_time_total = mp_root_time_total + &
2722             &(mp_root_time_end - mp_root_time_beg)
2723        mp_root_wtime_total = mp_root_wtime_total + &
2724             &(mp_root_wtime_end - mp_root_wtime_beg)
2725      end if
2726
2727    case ('iotime')
2728      if (imode.eq.0) then
2729        mp_io_wtime_beg = mpi_wtime()
2730        call cpu_time(mp_io_time_beg)
2731      else
2732        mp_io_wtime_end = mpi_wtime()
2733        call cpu_time(mp_io_time_end)
2734
2735        mp_io_wtime_total = mp_io_wtime_total + (mp_io_wtime_end - &
2736             & mp_io_wtime_beg)
2737        mp_io_time_total = mp_io_time_total + (mp_io_time_end - &
2738             & mp_io_time_beg)
2739      end if
2740
2741    case ('verttransform')
2742      if (imode.eq.0) then
2743        mp_vt_wtime_beg = mpi_wtime()
2744        call cpu_time(mp_vt_time_beg)
2745      else
2746        mp_vt_wtime_end = mpi_wtime()
2747        call cpu_time(mp_vt_time_end)
2748
2749        mp_vt_wtime_total = mp_vt_wtime_total + (mp_vt_wtime_end - &
2750             & mp_vt_wtime_beg)
2751        mp_vt_time_total = mp_vt_time_total + (mp_vt_time_end - &
2752             & mp_vt_time_beg)
2753      end if
2754
2755    case ('readwind')
2756      if (imode.eq.0) then
2757        call cpu_time(mp_readwind_time_beg)
2758        mp_readwind_wtime_beg = mpi_wtime()
2759      else
2760        call cpu_time(mp_readwind_time_end)
2761        mp_readwind_wtime_end = mpi_wtime()
2762
2763        mp_readwind_time_total = mp_readwind_time_total + &
2764             &(mp_readwind_time_end - mp_readwind_time_beg)
2765        mp_readwind_wtime_total = mp_readwind_wtime_total + &
2766             &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
2767      end if
2768
2769    case ('commtime')
2770      if (imode.eq.0) then
2771        call cpu_time(mp_comm_time_beg)
2772        mp_comm_wtime_beg = mpi_wtime()
2773      else
2774        call cpu_time(mp_comm_time_end)
2775        mp_comm_wtime_end = mpi_wtime()
2776        mp_comm_time_total = mp_comm_time_total + &
2777             &(mp_comm_time_end - mp_comm_time_beg)
2778        mp_comm_wtime_total = mp_comm_wtime_total + &
2779             &(mp_comm_wtime_end - mp_comm_wtime_beg)
2780      end if
2781
2782    case ('flexpart')
2783      if (imode.eq.0) then
2784        mp_total_wtime_beg=mpi_wtime()
2785      else
2786        mp_total_wtime_end=mpi_wtime()
2787        mp_total_wtime_total = mp_total_wtime_end-mp_total_wtime_beg
2788      end if
2789
2790    case default
2791      write(*,*) 'mpi_mod::mpif_mtime> WARNING: unknown case identifier'
2792
2793    end select
2794
2795
2796  end subroutine mpif_mtime
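! Usage sketch (illustrative only, assuming the (identifier, imode) argument
! order used by the callers): imode=0 starts the timer(s) for the named
! section, imode=1 stops them and adds the elapsed time to the running total,
! so a timed section is bracketed like
!
!   call mpif_mtime('getfields',0)   ! start the 'getfields' timers
!   ! ... work to be timed ...
!   call mpif_mtime('getfields',1)   ! stop the timers and accumulate totals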
2797
2798
2799  subroutine mpif_finalize
2800!***********************************************************************
2801! Finalize MPI
2802! Optionally print detailed time diagnostics
2803!***********************************************************************
2804    implicit none
2805
2806    integer :: ip !,j,r
2807
2808!***********************************************************************
2809
2810    IF (mp_measure_time) THEN
2811      do ip=0, mp_np-1
2812        call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
2813
2814        if (ip == mp_pid) then
2815          write(*,FMT='(72("#"))')
2816          write(*,FMT='(A60,I3)') 'STATISTICS FOR MPI PROCESS:', mp_pid
2817          if (ip == 0) then
2818            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR ROOT-ONLY (PID 0) &
2819                 &SECTIONS: ', mp_root_time_total
2820            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ROOT-ONLY (PID 0) &
2821                 &SECTIONS:', mp_root_wtime_total
2822          end if
2823          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FLEXPART: ' &
2824               &, mp_total_wtime_total
2825          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI COMMUNICATIONS:',&
2826               & mp_comm_time_total
2827          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI COMMUNICATIONS:',&
2828               & mp_comm_wtime_total
2829          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI BARRIERS:',&
2830               & mp_barrier_time_total
2831          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI BARRIERS:',&
2832               & mp_barrier_wtime_total
2833          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER:',&
2834               & tm_tot_total
2835          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER/NUMPART LOOP:',&
2836               & tm_nploop_total
2837          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ADVANCE:',&
2838               & mp_advance_wtime_total
2839          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR GETFIELDS:',&
2840               & mp_getfields_wtime_total
2841          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
2842               & mp_getfields_time_total
2843!          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
2844!               & mp_readwind_wtime_total
2845!          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
2846!               & mp_readwind_time_total
2847          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
2848               & mp_io_wtime_total
2849          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR FILE IO:',&
2850               & mp_io_time_total
2851          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR WETDEPO:',&
2852               & mp_wetdepo_wtime_total
2853          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR WETDEPO:',&
2854               & mp_wetdepo_time_total
2855          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',&
2856               & mp_conccalc_time_total
2857! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
2858!      & mp_vt_wtime_total
2859! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
2860!      & mp_vt_time_total
2861! NB: the 'flush' function is possibly a gfortran-specific extension,
2862! comment out if it gives problems
2863!          call flush()
2864        end if
2865      end do
2866    end if
2867
2868! This barrier keeps the per-process statistics above from interleaving with the summary printed below
2869    call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
2870
2871    if (lroot.and.mp_measure_time) then
2872      write(*,FMT='(72("#"))')
2873      WRITE(*,*) "To turn off output of time measurements, set "
2874      WRITE(*,*) "    mp_measure_time=.false."
2875      WRITE(*,*) "in file mpi_mod.f90"
2876      write(*,FMT='(72("#"))')
2877    end if
2878
2879! j=mp_pid*nvar_async
2880! In the implementation with 3 fields, the processes may have posted
2881! MPI_Irecv requests that should be cancelled here
2882! if (.not.lmp_sync) then
2883!   r=mp_pid*nvar_async
2884!   do j=r,r+nvar_async-1
2885!     call MPI_Cancel(j,mp_ierr)
2886!     if (mp_ierr /= 0) write(*,*) '#### mpif_finalize::MPI_Cancel> ERROR ####'
2887!   end do
2888! end if
2889
2890    call MPI_FINALIZE(mp_ierr)
2891    if (mp_ierr /= 0) then
2892      write(*,*) '#### mpif_finalize::MPI_FINALIZE> MPI ERROR ', mp_ierr, ' ####'
2893      stop
2894    end if
2895
2896
2897  end subroutine mpif_finalize
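! Typical shutdown sequence (sketch; assumes the caller uses the timing
! identifiers handled by mpif_mtime above):
!
!   call mpif_mtime('flexpart',1)   ! close the total wall-time measurement
!   call mpif_finalize              ! print statistics (if enabled) and finalize MPI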
2898
2899
2900  subroutine get_lun(my_lun)
2901!***********************************************************************
2902! get_lun:
2903!   Starting from 100, get next free logical unit number
2904!***********************************************************************
2905
2906    implicit none
2907
2908    integer, intent(inout) :: my_lun
2909    integer, save :: free_lun=100
2910    logical :: exists, iopen
2911
2912!***********************************************************************
2913
2914    loop1: do
2915      inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
2916      if (exists .and. .not.iopen) exit loop1
2917      free_lun = free_lun+1
2918    end do loop1
2919    my_lun = free_lun
2920
2921  end subroutine get_lun
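! Usage sketch (illustrative; the file name is hypothetical): obtain a free
! unit before opening a scratch or debug file,
!
!   call get_lun(dat_lun)
!   open(UNIT=dat_lun, FILE='debug.dat', FORM='UNFORMATTED', STATUS='REPLACE')
!
! On compilers supporting Fortran 2008, OPEN(NEWUNIT=...) is an alternative
! to managing unit numbers by hand.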
2922
2923
2924  subroutine write_data_dbg(array_in, array_name, tstep, ident)
2925!***********************************************************************
2926! Write one-dimensional arrays to file (for debugging purposes)
2927!***********************************************************************
2928    implicit none
2929
2930    real, intent(in), dimension(:) :: array_in
2931    integer, intent(in) :: tstep
2932    integer :: lios
2933    character(LEN=*), intent(in) :: ident, array_name
2934
2935    character(LEN=8) :: c_ts
2936    character(LEN=40) :: fn_1, fn_2
2937
2938!***********************************************************************
2939
2940    write(c_ts, FMT='(I8.8,BZ)') tstep
2941    fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
2942    write(c_ts, FMT='(I2.2,BZ)') mp_np
2943    fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'
2944
2945    call get_lun(dat_lun)
2946    open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
2947         FORM='UNFORMATTED', STATUS='REPLACE')
2948    write(UNIT=dat_lun, IOSTAT=lios) array_in
2949    close(UNIT=dat_lun)
2950
2951  end subroutine write_data_dbg
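! Example call (array, size and identifier names are hypothetical):
!
!   call write_data_dbg(val_array(1:n), 'val_array', itime, 'beforeloop')
!
! which writes the data unformatted to a file named
! val_array-<itime as 8 digits>-beforeloop-np<mp_np>.dat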
2952
2953
2954  subroutine set_fields_synthetic()
2955!*******************************************************************************
2956! DESCRIPTION
2957!   Set all meteorological fields to synthetic (usually constant/homogeneous)
2958!   values.
2959!   Used for validation and error-checking.
2960!
2961! NOTE
2962!   This version also handles the asynchronous communication case: when
2963!   lmp_sync is .false., all three wind-field time slots are initialized.
2964!
2965! VARIABLES
2966!
2967!
2968!*******************************************************************************
2969    use com_mod
2970
2971    implicit none
2972
2973    integer :: li=1, ui=2 ! wfmem indices (i.e., operate on entire field)
2974
2975    if (.not.lmp_sync) ui=3
2976
2977
2978! Variables transferred at initialization only
2979!*********************************************
2980!    readclouds=readclouds_
2981    oro(:,:)=0.0
2982    excessoro(:,:)=0.0
2983    lsm(:,:)=0.0
2984    xlanduse(:,:,:)=0.0
2985!    wftime
2986!    numbwf
2987!    nmixz
2988!    height
2989
2990! Time-varying fields:
2991    uu(:,:,:,li:ui)=10.0
2992    vv(:,:,:,li:ui)=0.0
2993    uupol(:,:,:,li:ui)=10.0
2994    vvpol(:,:,:,li:ui)=0.0
2995    ww(:,:,:,li:ui)=0.0
2996    tt(:,:,:,li:ui)=300.0
2997    rho(:,:,:,li:ui)=1.3
2998    drhodz(:,:,:,li:ui)=0.0
2999    tth(:,:,:,li:ui)=0.0
3000    qvh(:,:,:,li:ui)=1.0
3001    qv(:,:,:,li:ui)=1.0
3002
3003    pv(:,:,:,li:ui)=1.0
3004    clouds(:,:,:,li:ui)=0
3005
3006    clwc(:,:,:,li:ui)=0.0
3007    ciwc(:,:,:,li:ui)=0.0
3008
3009! 2D fields
3010
3011    cloudsh(:,:,li:ui)=0
3012    vdep(:,:,:,li:ui)=0.0
3013    ps(:,:,:,li:ui)=1.0e5
3014    sd(:,:,:,li:ui)=0.0
3015    tcc(:,:,:,li:ui)=0.0
3016    tt2(:,:,:,li:ui)=300.0
3017    td2(:,:,:,li:ui)=250.0
3018    lsprec(:,:,:,li:ui)=0.0
3019    convprec(:,:,:,li:ui)=0.0
3020    ustar(:,:,:,li:ui)=1.0
3021    wstar(:,:,:,li:ui)=1.0
3022    hmix(:,:,:,li:ui)=10000.0
3023    tropopause(:,:,:,li:ui)=10000.0
3024    oli(:,:,:,li:ui)=0.01
3025
3026  end subroutine set_fields_synthetic
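! Usage sketch (illustrative; the control flag is hypothetical): for a
! validation run, each process would call this once after the meteorological
! fields have been allocated, e.g.
!
!   if (l_synthetic_test) call set_fields_synthetic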
3027
3028end module mpi_mod