source: flexpart.git/src/mpi_mod.f90 @ 328fdf9

Last change on this file: commit 328fdf9, checked in by Espen Sollum ATMOS <eso@…>, 5 years ago

bugfix: particles were lost at the start of a global domain-filling run; also an error when restarting a domain-filling run from a particle dump

1!**********************************************************************
2! Copyright 1998,1999,2000,2001,2002,2005,2007,2008,2009,2010         *
3! Andreas Stohl, Petra Seibert, A. Frank, Gerhard Wotawa,             *
4! Caroline Forster, Sabine Eckhardt, John Burkhart, Harald Sodemann   *
5!                                                                     *
6! This file is part of FLEXPART.                                      *
7!                                                                     *
8! FLEXPART is free software: you can redistribute it and/or modify    *
9! it under the terms of the GNU General Public License as published by*
10! the Free Software Foundation, either version 3 of the License, or   *
11! (at your option) any later version.                                 *
12!                                                                     *
13! FLEXPART is distributed in the hope that it will be useful,         *
14! but WITHOUT ANY WARRANTY; without even the implied warranty of      *
15! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the       *
16! GNU General Public License for more details.                        *
17!                                                                     *
18! You should have received a copy of the GNU General Public License   *
19! along with FLEXPART.  If not, see <http://www.gnu.org/licenses/>.   *
20!**********************************************************************
21
22module mpi_mod
23
24!*****************************************************************************
25!                                                                            *
26!  DESCRIPTION                                                               *
27!    This module contains subroutines and common variables used for the      *
28!    MPI parallelization of FLEXPART.                                        *
29!                                                                            *
30!  NOTE                                                                      *
31!    Depending on the MPI library installed on your system (e.g. mpich2,     *
32!    OpenMPI) you may need to choose below in this file between              *
33!      use mpi                                                               *
34!    (if the MPI library comes with the file 'mpi.mod'); or                  *
35!      include 'mpif.h'                                                      *
36!                                                                            *
37!                                                                            *
38!*****************************************************************************
39!                                                                            *
40! Variables:                                                                 *
41!                                                                            *
42! mp_ierr                 MPI error code                                     *
43! mp_np                   Number of MPI processes                            *
44! mp_pid                  Process ID of each MPI process                     *
45! mp_seed                 Parameter for random number seed                   *
46! read_grp_min            Minimum number of processes at which one will be   *
47!                         used as reader                                     *
48! numpart_mpi,            Number of particles per node                       *
49! maxpart_mpi                                                                *
50! mp_partid               MPI process ID for particle calculation            *
51! mp_partgroup_           Refers to the subset of processors performing      *
52!                         loops over particles. Will be all processes        *
53!                         unless a dedicated process runs getfields/readwind *
54! lmp_sync                If .false., use asynchronous MPI                   *
55! mp_cp                   Real precision to use for deposition fields        *
56!                                                                            *
57!                                                                            *
58!                                                                            *
59!                                                                            *
60!*****************************************************************************
61!                                                                            *
62! Modification by DJM, 2017-05-09 - added #ifdef USE_MPIINPLACE cpp          *
63! directive to mpif_tm_reduce_grid() to ensure that MPI_IN_PLACE is          *
64! used in the MPI_Reduce() only if specifically compiled with that           *
65! directive.                                                                 *
66!                                                                            *
67!*****************************************************************************
68
69  use mpi
70  use par_mod, only: dp,sp
71  use com_mod, only: lroot
72
73  implicit none
74
75!  include 'mpif.h'
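! If the MPI library on your system does not provide 'mpi.mod' (cf. the NOTE
! in the module header above), comment out the 'use mpi' statement and
! uncomment the include line above instead.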
76
77  public
78
79! Set aside a process for reading windfields if using at least this many processes
80!==================================================
81  integer, parameter, private :: read_grp_min=4
82!==================================================
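! Illustrative example (process counts are arbitrary): with read_grp_min=4, a
! run on 8 MPI processes (e.g. launched as 'mpirun -np 8 <flexpart binary>';
! launcher and binary name are assumptions) uses 7 processes for the particle
! loop and dedicates the last rank (id_read=mp_np-1, set in mpif_init) to
! getfields/readwind. With fewer than 4 processes all ranks call getfields
! and lmp_sync is forced to .true.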
83
84! Variables for each MPI process in the world group
85  integer :: mp_ierr, mp_np, mp_pid, mp_partid
86  integer, private :: world_group_id
87
88! Variables for MPI processes in the 'particle' group
89  integer, allocatable, dimension(:) :: mp_partgroup_rank
90  integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
91
92  integer :: mp_seed=0
93  integer, parameter :: mp_sp=MPI_REAL4, mp_dp=MPI_REAL8
94  integer :: mp_cp
95  integer, parameter :: id_root=0 ! master process
96
97! MPI tags/requests for send/receive operation
98  integer :: tm1
99  integer, parameter :: nvar_async=26
100!integer, dimension(:), allocatable :: tags
101  integer, dimension(:), allocatable :: reqs
102
103! Status array used for certain MPI operations (MPI_RECV)
104  integer, dimension(MPI_STATUS_SIZE) :: mp_status
105
106
107  integer :: id_read   ! readwind/getfield process
108  integer :: numpart_mpi,maxpart_mpi ! number of particles per node
109  integer :: tot_numpart=0
110  integer :: mp_comm_used ! global or subgroup communicator
111
112  logical :: lmpreader=.false. ! is set to true for reading process(es) only.
113  logical :: lmp_use_reader=.false. ! true if separate readwind process is used
114
115! .true. if only using synchronous MPI send/recv (default)
116! If setting this to .false., numwfmem must be set to 3
117!===============================================================================
118  logical :: lmp_sync=.true.
119!===============================================================================
120
121! mp_dbg_mode       Used for debugging MPI.
122! mp_dev_mode       various checks related to debugging the parallel code
123! mp_dbg_out        write some arrays to data file for debugging
124! mp_measure_time   Measure cpu/wall time, write out at end of run
125! mp_time_barrier   Measure MPI barrier time
126! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
127!                   to standard output (this does *not* otherwise affect the simulation)
128! mp_rebalance      Attempt to rebalance particles between processes
129  logical, parameter :: mp_dbg_mode = .false.
130  logical, parameter :: mp_dev_mode = .false.
131  logical, parameter :: mp_dbg_out = .false.
132  logical, parameter :: mp_time_barrier=.true.
133  logical, parameter :: mp_measure_time=.false.
134  logical, parameter :: mp_exact_numpart=.true.
135  logical, parameter :: mp_rebalance=.true.
136
137! for measuring CPU/Wall time
138  real(sp),private :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.
139  real(dp),private :: mp_comm_wtime_beg, mp_comm_wtime_end, mp_comm_wtime_total=0.
140  real(sp),private :: mp_root_time_beg, mp_root_time_end, mp_root_time_total=0.
141  real(dp),private :: mp_root_wtime_beg, mp_root_wtime_end, mp_root_wtime_total=0.
142  real(sp),private :: mp_barrier_time_beg, mp_barrier_time_end, mp_barrier_time_total=0.
143  real(dp),private :: mp_barrier_wtime_beg, mp_barrier_wtime_end, mp_barrier_wtime_total=0.
144  real(sp),private :: tm_nploop_beg, tm_nploop_end, tm_nploop_total=0.
145  real(sp),private :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
146  real(dp),private :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
147  real(sp),private :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
148!  real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
149!  real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
150  real(dp),private :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
151  real(sp),private :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
152  real(dp),private :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.
153  real(sp),private :: mp_wetdepo_time_beg, mp_wetdepo_time_end, mp_wetdepo_time_total=0.
154  real(dp),private :: mp_advance_wtime_beg, mp_advance_wtime_end, mp_advance_wtime_total=0.
155  real(dp),private :: mp_conccalc_time_beg, mp_conccalc_time_end, mp_conccalc_time_total=0.
156  real(dp),private :: mp_total_wtime_beg, mp_total_wtime_end, mp_total_wtime_total=0.
157  real(dp),private :: mp_vt_wtime_beg, mp_vt_wtime_end, mp_vt_wtime_total
158  real(sp),private :: mp_vt_time_beg, mp_vt_time_end, mp_vt_time_total
159
160! dat_lun           logical unit number for i/o
161  integer, private :: dat_lun
162
163! Temporary arrays for particles (allocated and deallocated as needed)
164  integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, &
165       & itramem_tmp, itrasplit_tmp
166  real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp
167  real, allocatable, dimension(:) :: ztra1_tmp
168  real, allocatable, dimension(:,:) :: xmass1_tmp
169
170! mp_redist_fract        Exchange particles between processes if the relative difference
171!                        exceeds this value. A good value is between 0.0 and 0.5
172! mp_maxpart_factor      Allocate more memory per process than strictly needed
173!                        (safety factor). Recommended value between 1.5 and 2.5
174! mp_min_redist          Do not redistribute particles if below this limit
175  real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5
176  integer,parameter :: mp_min_redist=100000
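! Worked example (the maxpart value is hypothetical): with maxpart=40000000 in
! par_mod and 8 particle processes, mpif_init sets
!   maxpart_mpi = int(1.5*40000000/8) = 7500000
! per process. mpif_calculate_part_redist only moves particles when the most
! loaded process holds more than mp_min_redist (100000) particles AND
! (n_max-n_min)/n_max exceeds mp_redist_fract (0.2); half of the difference
! is then transferred to the least loaded process.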
177
178
179contains
180
181  subroutine mpif_init
182!***********************************************************************
183! DESCRIPTION
184!   Initialize MPI.
185!   
186!   Create the global communicator MPI_COMM_WORLD
187!   If using a separate MPI process for getfields/readwind, a subgroup
188!   is created for the other processes.
189!
190! VARIABLES
191!   mpi_mode    default 0, set to 2/3 if running MPI version
192!   mp_np       number of running processes, decided at run-time
193!***********************************************************************
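! Sketch of intended use (the surrounding program structure is illustrative,
! not the actual FLEXPART main program):
!
!   call mpif_init            ! sets mp_pid, mp_np, mp_seed, lmpreader, ...
!   if (lmpreader) then
!     ! this rank only runs getfields/readwind
!   else
!     ! this rank joins the particle loop, using communicator mp_comm_used
!     ! and rank mp_partid within the particle group
!   end if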
194    use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
195    use com_mod, only: mpi_mode, verbosity, creceptor0
196
197    implicit none
198
199    integer :: i,j,s,addmaxpart=0
200
201! Each process gets an ID (mp_pid) in the range 0,..,mp_np-1
202    call MPI_INIT(mp_ierr)
203    if (mp_ierr /= 0) goto 100
204    call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)
205    if (mp_ierr /= 0) goto 100
206    call MPI_COMM_SIZE(MPI_COMM_WORLD, mp_np, mp_ierr)
207    if (mp_ierr /= 0) goto 100
208
209
210! Variable mpi_mode is used to handle subroutines common to parallel/serial version
211    if (lmp_sync) then
212      mpi_mode=2 ! hold 2 windfields in memory
213    else
214      mpi_mode=3 ! hold 3 windfields in memory
215    end if
216
217    if (mp_pid.ne.0) then
218      lroot = .false.
219    end if
220
221! Set MPI precision to use for transferring deposition fields
222!************************************************************
223    if (dep_prec==dp) then
224      mp_cp = MPI_REAL8
225! TODO: write info message for serial version as well
226      if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields'
227    else if (dep_prec==sp) then
228      mp_cp = MPI_REAL4
229      if (lroot.and.verbosity>0) write(*,*) 'Using single precision for deposition fields'
230    else
231      write(*,*) 'ERROR: something went wrong setting MPI real precision'
232      stop
233    end if
234
235! Check for sensible combination of parameters
236!*********************************************
237
238    if (.not.lmp_sync.and.numwfmem.ne.3) then
239      if (lroot) then
240        write(*,FMT='(80("#"))')
241        write(*,*) '#### mpi_mod::mpif_init> ERROR: ', &
242             & 'numwfmem must be set to 3 for asynchronous reading ####'
243        write(*,FMT='(80("#"))')
244      end if
245      call MPI_FINALIZE(mp_ierr)
246      stop
247    else if (lmp_sync.and.numwfmem.ne.2.and.lroot) then
248      write(*,FMT='(80("#"))')
249      write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
250           & 'numwfmem should be set to 2 for synchronous'
251      write(*,*) ' reading. Results will still be valid, but unnecessary memory &
252           &is allocated.'
253      write(*,FMT='(80("#"))')
254! Force "synchronized" version if all processes will call getfields
255    else if ((.not.lmp_sync.and.mp_np.lt.read_grp_min).or.(mp_np.eq.1)) then
256      if (lroot) then
257        write(*,FMT='(80("#"))')
258        write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
259             & 'all procs call getfields. Setting lmp_sync=.true.'
260        write(*,FMT='(80("#"))')
261      end if
262      lmp_sync=.true.
263    end if
264
265! TODO: Add more warnings for unimplemented flexpart features
266
267! Set ID of process that calls getfield/readwind.
268! Using the last process in the group ensures results statistically identical
269! to running with one process fewer and no separate read process
270!**********************************************************************
271
272!    id_read = min(mp_np-1, 1)
273    id_read = mp_np-1
274
275    if (mp_pid.eq.id_read) lmpreader=.true.
276
277    call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)
278
279! Create a MPI group/communicator that will calculate trajectories.
280! Skip this step if program is run with only a few processes
281!************************************************************************
282    allocate(mp_partgroup_rank(0:mp_np-2))
283
284! This allows checking for allocation of arrays when no subgroup is used
285    mp_partgroup_pid=0
286
287    if (read_grp_min.lt.2) then
288      write(*,*) '#### mpi_mod::mpif_init> ERROR ####', &
289           & 'read_grp_min should be at least 2. Exiting'
290      stop
291    end if
292
293    if (mp_np.ge.read_grp_min) then
294      lmp_use_reader = .true.
295
296! Map the subgroup IDs 0,1,2,...,mp_np-2 to world ranks, skipping the reader process
297      j=-1
298      do i=0, mp_np-2 !loop over all (subgroup) IDs
299        j=j+1
300        if (i.eq.id_read) then
301          j=j+1
302        end if
303        mp_partgroup_rank(i) = j
304      end do
305
306      call MPI_Group_incl (world_group_id, mp_np-1, mp_partgroup_rank, &
307           &mp_partgroup_pid, mp_ierr)
308      if (mp_ierr /= 0) goto 100
309      call MPI_Comm_create (MPI_COMM_WORLD, mp_partgroup_pid, mp_partgroup_comm, mp_ierr)
310      if (mp_ierr /= 0) goto 100
311
312      if (mp_pid.ne.id_read) then
313        call MPI_Comm_rank (mp_partgroup_comm, mp_partgroup_pid, mp_ierr)
314        if (mp_ierr /= 0) goto 100
315
316! Get size of new group (=mp_np-1)
317        call MPI_COMM_SIZE(mp_partgroup_comm, mp_partgroup_np, mp_ierr)
318        if (mp_ierr /= 0) goto 100
319        if (mp_partgroup_np.ne.mp_np-1) then
320          write(*,*)  '#### mpi_mod:: mpif_init> ERROR ####&
321               & mp_partgroup_np.ne.mp_np-1'
322          stop
323        endif
324
325      else
326        mp_partgroup_pid = -1
327      end if
328    end if
329
330    if (lmp_use_reader) then
331      mp_comm_used = mp_partgroup_comm
332      mp_partid = mp_partgroup_pid
333      mp_partgroup_np=mp_np-1
334    else
335      mp_comm_used = MPI_COMM_WORLD
336      mp_partgroup_np = mp_np
337      mp_partid = mp_pid
338    end if
339
340! Set maxpart per process
341! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
342    maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
343    if (mp_np == 1) maxpart_mpi = maxpart
344
345! Do not allocate particle data arrays for readwind process
346    if (lmpreader.and.lmp_use_reader) then
347      maxpart_mpi=0
348    end if
349
350! Set random seed for each non-root process
351    if (mp_pid.gt.0) then
352      s = 244
353      mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
354    end if
355    if (mp_dbg_mode) then
356      mp_seed=0
357      if (lroot) write(*,*) 'MPI: setting seed=0'
358    end if
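! Each non-root process thus gets its own reproducible (negative) seed derived
! from mp_pid; in mp_dbg_mode all processes use seed 0 so runs can be compared
! directly.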
359
360! Allocate request array for asynchronous MPI
361    if (.not.lmp_sync) then
362      allocate(reqs(0:nvar_async*mp_np-1))
363      reqs(:)=MPI_REQUEST_NULL
364    else
365      allocate(reqs(0:1))
366      reqs(:)=MPI_REQUEST_NULL
367    end if
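! With nvar_async=26, up to nvar_async*mp_np nonblocking requests can be
! outstanding at once (presumably one slot per asynchronously transferred
! field per process); in the synchronous case only a small dummy array is
! allocated so that later code can reference reqs safely.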
368
369! Write whether MPI_IN_PLACE is used or not
370#ifdef USE_MPIINPLACE
371    if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
372#else
373    if (lroot) allocate(creceptor0(maxreceptor,maxspec))
374    if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
375#endif
376    goto 101
377
378100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
379    stop
380
381101 end subroutine mpif_init
382
383
384  subroutine mpif_mpi_barrier
385!***********************************************************************
386! Set MPI barrier (all processes are synchronized here), with the
387! possible exception of a separate 'readwind' process.
388! Optionally measure cpu/wall time.
389!
390!***********************************************************************
391    implicit none
392
393    if (mp_time_barrier) then
394      call cpu_time(mp_barrier_time_beg)
395      mp_barrier_wtime_beg = mpi_wtime()
396    endif
397
398    call MPI_BARRIER(mp_comm_used, mp_ierr)
399    if (mp_ierr /= 0) goto 100
400
401    if (mp_time_barrier) then
402      call cpu_time(mp_barrier_time_end)
403      mp_barrier_wtime_end = mpi_wtime()
404
405      mp_barrier_time_total=mp_barrier_time_total+&
406           &(mp_barrier_time_end-mp_barrier_time_beg)
407      mp_barrier_wtime_total=mp_barrier_wtime_total+&
408           &(mp_barrier_wtime_end-mp_barrier_wtime_beg)
409    end if
410
411    goto 101
412
413100 write(*,*) '#### mpi_mod::mpif_mpi_barrier> ERROR ####', mp_ierr
414    stop
415
416101 end subroutine mpif_mpi_barrier
417
418
419  subroutine mpif_com_mod_allocate
420!*******************************************************************************   
421! Dynamic allocation of arrays (in serial code these have size maxpart)
422!
423! No longer in use; moved to com_mod for interoperability with the serial version
424!
425! TODO: remove
426!*******************************************************************************
427    use com_mod
428
429    implicit none
430
431    integer :: array_size
432
433    array_size = maxpart_mpi
434
435    allocate(itra1(array_size),npoint(array_size),&
436         & nclass(array_size),idt(array_size),&
437         & itramem(array_size),itrasplit(array_size),&
438         & xtra1(array_size),ytra1(array_size),&
439         & ztra1(array_size),xmass1(array_size, maxspec))
440
441    allocate(uap(array_size),ucp(array_size),&
442         & uzp(array_size),us(array_size),&
443         & vs(array_size),&
444         & ws(array_size),cbt(array_size))
445
446
447  end subroutine mpif_com_mod_allocate
448
449
450  subroutine mpif_tm_send_dims
451!***********************************************************************
452! Distribute array dimensions from pid0 to all processes.
453! numpart_mpi/numpart is sent to allow dynamic allocation
454!
455! Currently not in use.
456!
457! Variables of similar type (integer, real) are placed in an array
458! and sent collectively, to avoid the overhead associated with individual
459! MPI send/receives
460!
461!
462!***********************************************************************
463    use com_mod
464
465    implicit none
466
467    integer :: add_part=0
468
469    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
470
471! MPI subgroup does the particle-loop
472    if (lmp_use_reader) then
473      if (mod(numpart,mp_partgroup_np).ne.0) add_part=1
474      numpart_mpi=int(numpart/mp_partgroup_np)+add_part
475    else
476      if (mod(numpart,mp_np).ne.0) add_part=1
477      numpart_mpi=int(numpart/mp_np)+add_part
478    end if
479
480
481! redefine numpart as 'numpart per process' throughout the code
482!**************************************************************
483    numpart = numpart_mpi
484
485  end subroutine mpif_tm_send_dims
486
487
488  subroutine mpif_send_part_properties(num_part)
489!***********************************************************************
490! Distribute particle properties from root to all processes.
491
492! Used by init_domainfill_mpi
493!
494! Variables:
495!
496! num_part        input, number of particles per process (rounded up)
497!
498!***********************************************************************
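! Sketch of the expected calling pattern (based on the header above; details
! of the caller are assumptions):
!
!   ! on the root process: fill nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp,
!   ! itramem_tmp, itrasplit_tmp, xtra1_tmp, ytra1_tmp, ztra1_tmp, xmass1_tmp
!   ! with all particles, padded so that every process receives num_part
!   ! entries
!   call mpif_send_part_properties(num_part)
!   ! afterwards each process holds its share in nclass, npoint, itra1, ...
!   ! and numpart is trimmed for any invalid (padding) particles at the end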
499    use com_mod
500
501    implicit none
502
503    integer,intent(in) :: num_part
504    integer :: i,jj, addone
505
506! Exit if too many particles
507    if (num_part.gt.maxpart_mpi) then
508      write(*,*) '#####################################################'
509      write(*,*) '#### ERROR - TOTAL NUMBER OF PARTICLES REQUIRED  ####'
510      write(*,*) '#### EXCEEDS THE MAXIMUM ALLOWED NUMBER. REDUCE  ####'
511      write(*,*) '#### EITHER NUMBER OF PARTICLES PER RELEASE POINT####'
512      write(*,*) '#### OR INCREASE MAXPART.                        ####'
513      write(*,*) '#####################################################'
514!      call MPI_FINALIZE(mp_ierr)
515      stop
516    end if
517
518
519! Time for MPI communications
520!****************************
521    if (mp_measure_time) call mpif_mtime('commtime',0)
522
523! Distribute variables, send from pid 0 to other processes (including itself)
524!****************************************************************************
525
526    call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,&
527         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
528    if (mp_ierr /= 0) goto 600
529    call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,&
530         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
531    if (mp_ierr /= 0) goto 600
532    call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,&
533         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
534    if (mp_ierr /= 0) goto 600
535    call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,&
536         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
537    if (mp_ierr /= 0) goto 600
538    call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,&
539         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
540    if (mp_ierr /= 0) goto 600
541    call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,&
542         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
543    if (mp_ierr /= 0) goto 600
544    call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,&
545         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
546    if (mp_ierr /= 0) goto 600
547    call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,&
548         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
549    if (mp_ierr /= 0) goto 600
550    call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,&
551         &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
552    if (mp_ierr /= 0) goto 600
553    do i=1,nspec
554      call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),&
555           &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
556      if (mp_ierr /= 0) goto 600
557    end do
558
559    if (mp_measure_time) call mpif_mtime('commtime',1)
560
561    goto 601
562
563600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
564    stop
565
566! After the transfer of particles it is possible that the value of
567! "numpart" is larger than the number actually used, so we reduce it if there are
568! invalid particles at the end of the arrays
569
570601 do i=numpart, 1, -1
571      if (itra1(i).eq.-999999999) then
572        numpart=numpart-1
573      else
574        exit
575      end if
576    end do
577
578
579  end subroutine mpif_send_part_properties
580
581
582  subroutine mpif_calculate_part_redist(itime)
583!***********************************************************************
584! Calculate number of particles to redistribute between processes. This routine
585! can be called at regular time intervals to keep the number of
586! particles on each process balanced.
587!
588! First, all processes report their local "numpart" to each other, which is
589! stored in the array "numparticles_mpi(np)". This array is sorted from low to
590! high values, along with a corresponding process ID array "idx_arr(np)".
591! If the relative difference between the highest and lowest "numparticles_mpi" value
592! is above a threshold, particles are transferred from process idx_arr(np-1)
593! to process idx_arr(0). Repeat for processes idx_arr(np-1-i) and idx_arr(i)
594! for all valid i.
595! Note: If np is an odd number, the process with the median
596! number of particles is left unchanged
597!
598! VARIABLES
599! itime        input, current time
600!***********************************************************************
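! Worked example (particle counts are illustrative): with 4 particle processes
! holding numpart = 900000, 100000, 500000, 300000 on ranks 0..3, the sorted
! order is ranks 1,3,2,0 and the pairs (0,1) and (2,3) are considered:
!   rank 0 -> rank 1: num_trans = 800000, 800000/900000 > 0.2, so 400000
!                     particles are moved (both ranks end up with 500000)
!   rank 2 -> rank 3: num_trans = 200000, 200000/500000 > 0.2, so 100000
!                     particles are moved (both ranks end up with 400000)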
601    use com_mod
602
603    implicit none
604   
605    integer, intent(in) :: itime
606    real :: pmin,z
607    integer :: i,jj,nn, num_part=1,m,imin, num_trans
608    logical :: first_iter
609    integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr
610    real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this
611
612! Exit if running with only 1 process
613!************************************************************************
614    if (mp_np.eq.1) return
615
616! All processes exchange information on number of particles
617!****************************************************************************
618    allocate(numparticles_mpi(0:mp_partgroup_np-1), &
619         &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
620
621    call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, &
622         & 1, MPI_INTEGER, mp_comm_used, mp_ierr)
623
624
625! Sort from lowest to highest
626! Initial guess: correct order
627    sorted(:) = numparticles_mpi(:)
628    do i=0, mp_partgroup_np-1
629      idx_arr(i) = i
630    end do
631
632! For each successive element in index array, see if a lower value exists
633    do i=0, mp_partgroup_np-2
634      pmin=sorted(i)
635      imin=idx_arr(i)
636      m=i+1
637      do jj=m, mp_partgroup_np-1
638        if (pmin.le.sorted(jj)) cycle
639        z=pmin
640        pmin=sorted(jj)
641        sorted(jj)=z
642
643        nn=imin
644        imin=idx_arr(jj)
645        idx_arr(jj)=nn
646      end do
647      sorted(i)=pmin
648      idx_arr(i)=imin
649    end do
650
651! For each pair of processes, transfer particles if the difference is above a
652! limit, and numpart at the sending process is large enough
653
654    m=mp_partgroup_np-1 ! index for last sorted process (most particles)
655    do i=0,mp_partgroup_np/2-1
656      num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
657      if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
658        if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&
659             & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then
660! DBG
661          ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi', &
662          !      &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi
663! DBG
664          call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
665        end if
666      end if
667      m=m-1
668    end do
669
670    deallocate(numparticles_mpi, idx_arr, sorted)
671
672  end subroutine mpif_calculate_part_redist
673
674
675  subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans)
676!***********************************************************************
677! Transfer particle properties between two arbitrary processes.
678!
679! VARIABLES
680!
681! itime           input, current time
682! src_proc        input, ID of source process
683! dest_proc       input, ID of destination process
684! num_trans       input, number of particles to transfer
685!
686!************************************************************************
687    use com_mod !TODO:  ,only: nclass etc
688
689    implicit none
690
691    integer, intent(in) :: itime, src_proc, dest_proc, num_trans
692    integer :: ll, ul ! lower and upper indices in arrays
693    integer :: arr_sz ! size of temporary arrays
694    integer :: mtag   ! MPI message tag
695    integer :: i, j, minpart, ipart, maxnumpart
696 
697! Check for invalid input arguments
698!**********************************
699    if (src_proc.eq.dest_proc) then
700      write(*,*) '<mpi_mod::mpif_redist_part>: Error: &
701           &src_proc.eq.dest_proc'
702      stop
703    end if
704
705! Measure time for MPI communications
706!************************************
707    if (mp_measure_time) call mpif_mtime('commtime',0)
708
709! Send particles
710!***************
711    if (mp_partid.eq.src_proc) then
712      mtag = 2000
713
714! Array indices for the transferred particles
715      ll=numpart-num_trans+1
716      ul=numpart
717
718      if (mp_dev_mode) then
719        write(*,FMT='(72("#"))')
720        write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc
721        write(*,*) "numpart before: ", numpart
722      end if
723
724      call MPI_SEND(nclass(ll:ul),num_trans,&
725           & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr)
726
727      call MPI_SEND(npoint(ll:ul),num_trans,&
728           & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr)
729
730      call MPI_SEND(itra1(ll:ul),num_trans, &
731           & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr)
732
733      call MPI_SEND(idt(ll:ul),num_trans, &
734           & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr)
735
736      call MPI_SEND(itramem(ll:ul),num_trans, &
737           & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr)
738
739      call MPI_SEND(itrasplit(ll:ul),num_trans,&
740           & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr)
741
742      call MPI_SEND(xtra1(ll:ul),num_trans, &
743           & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr)
744
745      call MPI_SEND(ytra1(ll:ul),num_trans,&
746           & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr)
747
748      call MPI_SEND(ztra1(ll:ul),num_trans,&
749           & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr)
750
751      do j=1,nspec
752        call MPI_SEND(xmass1(ll:ul,j),num_trans,&
753             & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr)
754      end do
755
756! Terminate transferred particles, update numpart
757      itra1(ll:ul) = -999999999
758
759      numpart = numpart-num_trans
760
761      if (mp_dev_mode) then
762        write(*,*) "numpart after: ", numpart
763        write(*,FMT='(72("#"))')
764      end if
765
766    else if (mp_partid.eq.dest_proc) then
767
768! Receive particles
769!******************
770      mtag = 2000
771      ! Array indices for the transferred particles
772      ll=numpart+1
773      ul=numpart+num_trans
774
775      if (mp_dev_mode) then
776        write(*,FMT='(72("#"))')
777        write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc
778        write(*,*) "numpart before: ", numpart
779      end if
780
781! We could receive the data directly at nclass(ll:ul) etc., but this leaves
782! vacant spaces at lower indices. Using temporary arrays instead.
783      arr_sz = ul-ll+1
784      allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),&
785           & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),&
786           & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),&
787           & xmass1_tmp(arr_sz, maxspec))
788     
789      call MPI_RECV(nclass_tmp,num_trans,&
790           & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr)
791
792      call MPI_RECV(npoint_tmp,num_trans,&
793           & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr)
794
795      call MPI_RECV(itra1_tmp,num_trans, &
796           & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr)
797
798      call MPI_RECV(idt_tmp,num_trans, &
799           & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr)
800
801      call MPI_RECV(itramem_tmp,num_trans, &
802           & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr)
803
804      call MPI_RECV(itrasplit_tmp,num_trans,&
805           & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr)
806
807      call MPI_RECV(xtra1_tmp,num_trans, &
808           & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr)
809
810      call MPI_RECV(ytra1_tmp,num_trans,&
811           & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr)
812
813      call MPI_RECV(ztra1_tmp,num_trans,&
814           & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr)
815
816      do j=1,nspec
817        call MPI_RECV(xmass1_tmp(:,j),num_trans,&
818             & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr)
819      end do
820
821! This is the maximum value numpart can possibly have after the transfer
822      maxnumpart=numpart+num_trans
823
824! Search for vacant space and copy from temporary storage
825!********************************************************
826      minpart=1
827      do i=1, num_trans
828! Take into account that we may have transferred invalid particles
829        if (itra1_tmp(i).ne.itime) cycle
830        do ipart=minpart,maxnumpart
831          if (itra1(ipart).ne.itime) then
832            itra1(ipart) = itra1_tmp(i)
833            npoint(ipart) = npoint_tmp(i)
834            nclass(ipart) = nclass_tmp(i)
835            idt(ipart) = idt_tmp(i)
836            itramem(ipart) = itramem_tmp(i)
837            itrasplit(ipart) = itrasplit_tmp(i)
838            xtra1(ipart) = xtra1_tmp(i)
839            ytra1(ipart) = ytra1_tmp(i)
840            ztra1(ipart) = ztra1_tmp(i)
841            xmass1(ipart,:) = xmass1_tmp(i,:)
842            goto 200 ! Storage space has been found, stop searching
843          end if
844! :TODO: add check for whether too many particles are required
845        end do
846200     minpart=ipart+1
847      end do
848! Increase numpart, if necessary
849      numpart=max(numpart,ipart)
850
851      deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
852           &itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
853
854      if (mp_dev_mode) then
855        write(*,*) "numpart after: ", numpart
856        write(*,FMT='(72("#"))')
857      end if
858
859    else
860! This routine should only be called by the two participating processes
861      write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part"
862      stop
863      return
864    end if
865
866! Measure time for MPI communications
867!************************************
868    if (mp_measure_time) call mpif_mtime('commtime',1)
869
870  end subroutine mpif_redist_part
871
872
873  subroutine mpif_tm_send_vars
874!***********************************************************************
875! Distribute particle variables from pid0 to all processes.
876! Called from timemanager
877! *NOT IN USE* at the moment, but can be useful for debugging
878!
879!***********************************************************************
880    use com_mod
881
882    implicit none
883
884    integer :: i
885
886! Time for MPI communications
887!****************************
888    if (mp_measure_time) call mpif_mtime('commtime',0)
889
890! Distribute variables, send from pid 0 to other processes
891!**********************************************************************
892
893! integers
894    if (lroot) then
895      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
896           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
897      if (mp_ierr /= 0) goto 600
898      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
899           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
900      if (mp_ierr /= 0) goto 600
901      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
902           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
903      if (mp_ierr /= 0) goto 600
904      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
905           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
906      if (mp_ierr /= 0) goto 600
907      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
908           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
909      if (mp_ierr /= 0) goto 600
910
911! int2
912      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
913           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
914      if (mp_ierr /= 0) goto 600
915
916! real
917      call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,&
918           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
919      if (mp_ierr /= 0) goto 600
920      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
921           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
922      if (mp_ierr /= 0) goto 600
923      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
924           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
925      if (mp_ierr /= 0) goto 600
926
927      call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,&
928           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
929      if (mp_ierr /= 0) goto 600
930      call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,&
931           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
932      if (mp_ierr /= 0) goto 600
933      call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,&
934           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
935      if (mp_ierr /= 0) goto 600
936
937      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
938           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
939      if (mp_ierr /= 0) goto 600
940      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
941           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
942      if (mp_ierr /= 0) goto 600
943      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,&
944           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
945      if (mp_ierr /= 0) goto 600
946
947      do i=1,nspec
948        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,&
949             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
950        if (mp_ierr /= 0) goto 600
951      end do
952
953    else ! (mp_pid >= 1)
954! integers
955      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
956           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
957      if (mp_ierr /= 0) goto 600
958      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
959           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
960      if (mp_ierr /= 0) goto 600
961      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
962           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
963      if (mp_ierr /= 0) goto 600
964      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
965           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
966      if (mp_ierr /= 0) goto 600
967      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
968           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
969      if (mp_ierr /= 0) goto 600
970
971! int2
972      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
973           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
974      if (mp_ierr /= 0) goto 600
975
976! reals
977      call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,&
978           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
979      if (mp_ierr /= 0) goto 600
980      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,&
981           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
982      if (mp_ierr /= 0) goto 600
983      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,&
984           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
985      if (mp_ierr /= 0) goto 600
986
987      call MPI_SCATTER(us,numpart_mpi,mp_sp,us,&
988           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
989      if (mp_ierr /= 0) goto 600
990      call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,&
991           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
992      if (mp_ierr /= 0) goto 600
993      call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,&
994           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
995      if (mp_ierr /= 0) goto 600
996
997      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
998           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
999      if (mp_ierr /= 0) goto 600
1000      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
1001           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
1002      if (mp_ierr /= 0) goto 600
1003      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,&
1004           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
1005      if (mp_ierr /= 0) goto 600
1006
1007      do i=1,nspec
1008        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),&
1009             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
1010        if (mp_ierr /= 0) goto 600
1011      end do
1012
1013    end if !(lroot)
1014
1015    if (mp_measure_time) call mpif_mtime('commtime',1)
1016
1017    goto 601
1018
1019600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
1020    stop
1021601 end subroutine mpif_tm_send_vars
1022
1023
1024  subroutine mpif_tm_collect_vars
1025!*******************************************************************************
1026! Collect particle data to PID 0 from all processes.                           *
1027!                                                                              *
1028! This can be called from timemanager_mpi, before calculating                  *
1029! concentrations/writing output grids.                                         *
1030!                                                                              *
1031! Currently *not in use*, as each process calculates concentrations            *
1032! separately, but can be useful for debugging                                  *
1033!                                                                              *
1034! To use this routine (together with mpif_tm_send_vars) to transfer data       *
1035! to the MPI root process (useful for debugging), insert code like this        *
1036! (in timemanager_mpi.f90)                                                     *
1037!                                                                              *
1038!      if (lroot) tot_numpart=numpart ! root stores total numpart              *
1039!      call mpif_tm_send_dims                                                  *
1040!      if (numpart>1) then                                                     *
1041!        call mpif_tm_send_vars                                                *
1042!      end if                                                                  *
1043!                                                                              *
1044!    To collect data from all processes to root process after a parallel       *
1045!    region:                                                                   *
1046!                                                                              *
1047!      if (numpart>0) then                                                     *
1048!          if (lroot) numpart = tot_numpart                                    *
1049!       call mpif_tm_collect_vars                                              *
1050!      end if                                                                  *
1051!                                                                              *
1052! Then a section that begins with "if (lroot) ..." will behave like            *
1053! serial flexpart                                                              *
1054!                                                                              *
1055!*******************************************************************************
1056    use com_mod !, only: numpart, nspec, itra1, npoint, nclass
1057
1058    implicit none
1059
1060    integer :: i
1061
1062    logical :: use_mp_vars = .false.! use mp_... type allocated vars
1063    logical :: use_in_place = .true.! use MPI_IN_PLACE to collect vars.
1064
1065
1066! Time for MPI communications
1067    if (mp_measure_time) call mpif_mtime('commtime',0)
1068
1069! Collect variables: send from all processes to pid 0
1070!**********************************************************************
1071
1072    if (.not. use_mp_vars.and..not.use_in_place) then
1073
1074! Integers:
1075      call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
1076           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1077      if (mp_ierr /= 0) goto 600
1078      call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
1079           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1080      if (mp_ierr /= 0) goto 600
1081      call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
1082           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1083      if (mp_ierr /= 0) goto 600
1084      call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
1085           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1086      if (mp_ierr /= 0) goto 600
1087      call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
1088           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1089      if (mp_ierr /= 0) goto 600
1090
1091! int2
1092      call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
1093           &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1094      if (mp_ierr /= 0) goto 600
1095
1096! Reals:
1097      call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
1098           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1099      if (mp_ierr /= 0) goto 600
1100      call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
1101           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1102      if (mp_ierr /= 0) goto 600
1103      call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
1104           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1105      if (mp_ierr /= 0) goto 600
1106
1107      call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
1108           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1109      if (mp_ierr /= 0) goto 600
1110      call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
1111           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1112      if (mp_ierr /= 0) goto 600
1113      call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
1114           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1115      if (mp_ierr /= 0) goto 600
1116
1117
1118      call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
1119           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1120      if (mp_ierr /= 0) goto 600
1121      call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
1122           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1123      if (mp_ierr /= 0) goto 600
1124      call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
1125           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1126      if (mp_ierr /= 0) goto 600
1127
1128      do i=1, nspec
1129        call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
1130             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1131        if (mp_ierr /= 0) goto 600
1132
1133      end do
1134
1135! Use variables npoint etc directly for communications
1136!***********************************************************************
1137    else if (use_in_place.and..not.use_mp_vars) then
1138      if (lroot) then
1139! Integers:
1140        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, npoint, &
1141             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1142        if (mp_ierr /= 0) goto 600
1143        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, idt, &
1144             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1145        if (mp_ierr /= 0) goto 600
1146        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itra1, &
1147             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1148        if (mp_ierr /= 0) goto 600
1149        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, nclass, &
1150             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1151        if (mp_ierr /= 0) goto 600
1152        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itramem, &
1153             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1154        if (mp_ierr /= 0) goto 600
1155
1156! int2
1157        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER2, cbt, &
1158             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1159        if (mp_ierr /= 0) goto 600
1160
1161! Reals:
1162        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uap, &
1163             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1164        if (mp_ierr /= 0) goto 600
1165        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ucp, &
1166             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1167        if (mp_ierr /= 0) goto 600
1168        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uzp, &
1169             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1170        if (mp_ierr /= 0) goto 600
1171
1172        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, us, &
1173             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1174        if (mp_ierr /= 0) goto 600
1175        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, vs, &
1176             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1177        if (mp_ierr /= 0) goto 600
1178        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ws, &
1179             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1180        if (mp_ierr /= 0) goto 600
1181
1182
1183        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, xtra1, &
1184             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1185        if (mp_ierr /= 0) goto 600
1186        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, ytra1, &
1187             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1188        if (mp_ierr /= 0) goto 600
1189        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ztra1, &
1190             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1191        if (mp_ierr /= 0) goto 600
1192
1193        do i=1, nspec
1194          call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, xmass1(:,i), &
1195               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1196          if (mp_ierr /= 0) goto 600
1197        end do
1198
1199      else ! (for mp_pid >= 1)
1200
1201! Integers:
1202        call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
1203             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1204        if (mp_ierr /= 0) goto 600
1205        call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
1206             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1207        if (mp_ierr /= 0) goto 600
1208        call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
1209             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1210        if (mp_ierr /= 0) goto 600
1211        call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
1212             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1213        if (mp_ierr /= 0) goto 600
1214        call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
1215             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1216        if (mp_ierr /= 0) goto 600
1217
1218! int2
1219        call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
1220             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1221        if (mp_ierr /= 0) goto 600
1222
1223! Reals:
1224        call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
1225             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1226        if (mp_ierr /= 0) goto 600
1227        call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
1228             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1229        if (mp_ierr /= 0) goto 600
1230        call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
1231             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1232        if (mp_ierr /= 0) goto 600
1233
1234        call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
1235             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1236        if (mp_ierr /= 0) goto 600
1237        call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
1238             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1239        if (mp_ierr /= 0) goto 600
1240        call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
1241             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1242        if (mp_ierr /= 0) goto 600
1243
1244
1245        call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
1246             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1247        if (mp_ierr /= 0) goto 600
1248        call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
1249             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1250        if (mp_ierr /= 0) goto 600
1251        call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
1252             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1253        if (mp_ierr /= 0) goto 600
1254
1255        do i=1, nspec
1256          call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
1257               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1258          if (mp_ierr /= 0) goto 600
1259        end do
1260      end if ! (lroot)
1261    end if !  (use_in_place)
1262
1263    if (mp_measure_time) call mpif_mtime('commtime',1)
1264
1265    goto 601
1266
1267600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
1268    stop
1269601 end subroutine mpif_tm_collect_vars
1270
1271
1272  subroutine mpif_gf_send_vars(memstat)
1273!*******************************************************************************
1274! DESCRIPTION
1275!   Distribute 'getfield' variables from reader process
1276!
1277!   Called from timemanager
1278!
1279! NOTE
1280!   This subroutine distributes windfields read from the reader process to
1281!   all other processes. Usually one set of fields is transferred, but at the
1282!   first timestep, when there are no fields in memory, both are transferred.
1283!   MPI_Bcast is used, so implicitly all processes are synchronized at this
1284!   step
1285!
1286!
1287!*******************************************************************************
1288    use com_mod
1289    use par_mod,only: numwfmem
1290
1291    implicit none
1292
1293    integer, intent(in) :: memstat
1294
1295! Common array sizes used for communications
1296    integer, parameter :: d3_size1 = nxmax*nymax*nzmax
1297    integer, parameter :: d3_size2 = nxmax*nymax*nuvzmax
1298    integer, parameter :: d2_size1 = nxmax*nymax
1299    integer, parameter :: d2_size2 = nxmax*nymax*maxspec
1300    integer, parameter :: d2_size3 = nxmax*nymax
1301    integer, parameter :: d1_size1 = maxwf
1302
1303    integer :: d3s1,d3s2,d2s1,d2s2
1304
1305! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1306    integer :: li,ui
1307
1308! The first time the routine is called, the unchangeable fields are transferred
1309    logical :: first_call=.true.
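! (the initialisation above gives first_call the SAVE attribute, so the flag
!  persists between calls and the static fields are broadcast only once)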
1310
1311!**********************************************************************
1312
1313! Sizes of arrays transfered
1314    d3s1=d3_size1
1315    d3s2=d3_size2
1316    d2s1=d2_size1
1317    d2s2=d2_size2
1318
1319! Decide which field(s) need to be transfered
1320    if (memstat.ge.32) then ! distribute 2 fields, to 1st/2nd indices
1321      li=1; ui=2
1322      d3s1=2*d3_size1
1323      d3s2=2*d3_size2
1324      d2s1=2*d2_size1
1325      d2s2=2*d2_size2
1326    else if (memstat.eq.17) then ! distribute 1 field, place on 1st index
1327      li=1; ui=1
1328    else if (memstat.eq.18) then ! distribute 1 field, place on 2nd index
1329      li=2; ui=2
    else if (memstat.eq.19) then ! distribute 1 field, place on 3rd index
1331      li=3; ui=3
1332    else
1333      write(*,*) '#### mpi_mod::mpif_gf_send_vars> ERROR: ', &
1334           & 'wrong value of memstat, exiting ####', memstat
1335      stop
1336    end if
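! (memstat encodes which windfield memory slots were just filled by the
!  reader: 17/18/19 correspond to one new field at index 1/2/3, while
!  values >= 32 mean both fields were read at once, as at the first timestep)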
1337
1338
1339! Time for MPI communications
1340    if (mp_measure_time) call mpif_mtime('commtime',0)
1341
1342! Send variables from getfield process (id_read) to other processes
1343!**********************************************************************
1344
1345! The non-reader processes need to know if cloud water was read.
1346    call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
1347    if (mp_ierr /= 0) goto 600
1348
1349! Static fields/variables sent only at startup
1350    if (first_call) then
1351      call MPI_Bcast(oro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1352      if (mp_ierr /= 0) goto 600
1353      call MPI_Bcast(excessoro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1354      if (mp_ierr /= 0) goto 600
1355      call MPI_Bcast(lsm(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1356      if (mp_ierr /= 0) goto 600
1357      call MPI_Bcast(xlanduse(:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1358      if (mp_ierr /= 0) goto 600
1359      call MPI_Bcast(wftime,d1_size1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1360      if (mp_ierr /= 0) goto 600
1361      call MPI_Bcast(numbwf,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1362      if (mp_ierr /= 0) goto 600
1363      call MPI_Bcast(nmixz,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1364      if (mp_ierr /= 0) goto 600
1365      call MPI_Bcast(height,nzmax,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1366      if (mp_ierr /= 0) goto 600
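! NOTE: height is broadcast as MPI_INTEGER above; if height is declared as a
! default real in com_mod, mp_sp would be the matching MPI datatype (the
! element size is the same, so the transfer still works byte-for-byte).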
1367
1368      first_call=.false.
1369    endif
1370
1371    call MPI_Bcast(uu(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1372    if (mp_ierr /= 0) goto 600
1373    call MPI_Bcast(vv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1374    if (mp_ierr /= 0) goto 600
1375    call MPI_Bcast(uupol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1376    if (mp_ierr /= 0) goto 600
1377    call MPI_Bcast(vvpol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1378    if (mp_ierr /= 0) goto 600
1379    call MPI_Bcast(ww(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1380    if (mp_ierr /= 0) goto 600
1381    call MPI_Bcast(tt(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1382    if (mp_ierr /= 0) goto 600
1383    call MPI_Bcast(rho(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1384    if (mp_ierr /= 0) goto 600
1385    call MPI_Bcast(drhodz(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1386    if (mp_ierr /= 0) goto 600
1387    call MPI_Bcast(tth(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1388    if (mp_ierr /= 0) goto 600
1389    call MPI_Bcast(qvh(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1390    if (mp_ierr /= 0) goto 600
1391    call MPI_Bcast(qv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1392    if (mp_ierr /= 0) goto 600
1393    call MPI_Bcast(pv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1394    if (mp_ierr /= 0) goto 600
1395    call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1396    if (mp_ierr /= 0) goto 600
1397
1398! cloud water/ice:
1399    if (readclouds) then
1400      call MPI_Bcast(ctwc(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1401      if (mp_ierr /= 0) goto 600
1402    end if
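! (this conditional is consistent on all ranks because readclouds itself
!  was broadcast from the reader process above)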
1403
1404! 2D fields
1405    call MPI_Bcast(cloudsh(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1406    if (mp_ierr /= 0) goto 600
1407    call MPI_Bcast(vdep(:,:,:,li:ui),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1408    if (mp_ierr /= 0) goto 600
1409    call MPI_Bcast(ps(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1410    if (mp_ierr /= 0) goto 600
1411    call MPI_Bcast(sd(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1412    if (mp_ierr /= 0) goto 600
1413    call MPI_Bcast(tcc(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1414    if (mp_ierr /= 0) goto 600
1415    call MPI_Bcast(tt2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1416    if (mp_ierr /= 0) goto 600
1417    call MPI_Bcast(td2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1418    if (mp_ierr /= 0) goto 600
1419    call MPI_Bcast(lsprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1420    if (mp_ierr /= 0) goto 600
1421    call MPI_Bcast(convprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1422    if (mp_ierr /= 0) goto 600
1423    call MPI_Bcast(ustar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1424    if (mp_ierr /= 0) goto 600
1425    call MPI_Bcast(wstar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1426    if (mp_ierr /= 0) goto 600
1427    call MPI_Bcast(hmix(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1428    if (mp_ierr /= 0) goto 600
1429    call MPI_Bcast(tropopause(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1430    if (mp_ierr /= 0) goto 600
1431    call MPI_Bcast(oli(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1432    if (mp_ierr /= 0) goto 600
1433
1434    if (mp_measure_time) call mpif_mtime('commtime',1)
1435
1436    goto 601
1437
600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
1439    stop
1440
1441601 end subroutine mpif_gf_send_vars
1442
1443
1444  subroutine mpif_gf_send_vars_nest(memstat)
1445!***********************************************************************
1446! DESCRIPTION
1447!   Distribute 'getfield' variables from reader process to all processes.
1448!   For nested fields
1449!
1450!   Called from timemanager
1451!
1452! NOTE
1453!   This subroutine distributes nested windfields from the reader process to
!   all other processes. Usually one set of fields is transferred, but at
!   the first timestep, when there are no fields in memory, both are
!   transferred. MPI_Bcast is used, so implicitly all processes are
!   synchronized at this step.
1458!
1459!
1460!***********************************************************************
1461    use com_mod
1462    use par_mod,only: numwfmem
1463
1464    implicit none
1465
1466    integer, intent(in) :: memstat
1467    integer :: i
! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1469    integer :: li,ui
1470
1471! Common array sizes used for communications
1472    integer :: d3_size1 = nxmaxn*nymaxn*nzmax
1473    integer :: d3_size2 = nxmaxn*nymaxn*nuvzmax
1474    integer :: d2_size1 = nxmaxn*nymaxn
1475    integer :: d2_size2 = nxmaxn*nymaxn*maxspec
1476    integer :: d2_size3 = nxmaxn*nymaxn
1477
1478    integer :: d3s1,d3s2,d2s1,d2s2
1479
! The first time this routine is called, the static (unchanging) fields are transferred
1481    logical :: first_call=.true.
1482
1483!**********************************************************************
1484
1485! Sizes of arrays transfered
1486    d3s1=d3_size1
1487    d3s2=d3_size2
1488    d2s1=d2_size1
1489    d2s2=d2_size2
1490
1491! Decide which field(s) need to be transfered
1492    if (memstat.ge.32) then ! distribute 2 fields
1493      li=1; ui=2
1494      d3s1=2*d3_size1
1495      d3s2=2*d3_size2
1496      d2s1=2*d2_size1
1497      d2s2=2*d2_size2
1498    else if (memstat.eq.17) then ! distribute 1 field, on 1st index
1499      li=1; ui=1
1500    else if (memstat.eq.18) then ! distribute 1 field, on 2nd index
1501      li=2; ui=2
    else if (memstat.eq.19) then ! distribute 1 field, on 3rd index
1503      li=3; ui=3
1504    else
1505      write(*,*) '#### mpi_mod::mpif_gf_send_vars_nest> ERROR: ', &
1506           & 'wrong value of memstat, exiting ####', memstat
1507      stop
1508    end if
1509
1510
1511! Time for MPI communications
1512    if (mp_measure_time) call mpif_mtime('commtime',0)
1513
1514! Distribute variables, send from getfield process (id_read) to other
1515! processes
1516!**********************************************************************
1517
1518! The non-reader processes need to know if cloud water was read.
1519    call MPI_Bcast(readclouds_nest,maxnests,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
1520    if (mp_ierr /= 0) goto 600
1521
1522! Static fields/variables sent only at startup
1523    if (first_call) then
1524      call MPI_Bcast(oron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1525      if (mp_ierr /= 0) goto 600
1526      call MPI_Bcast(excessoron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1527      if (mp_ierr /= 0) goto 600
1528      call MPI_Bcast(lsmn(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1529      if (mp_ierr /= 0) goto 600
1530      call MPI_Bcast(xlandusen(:,:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1531      if (mp_ierr /= 0) goto 600
1532      first_call=.false.
1533    end if
1534
1535! MPI prefers contiguous arrays for sending (else a buffer is created),
1536! hence the loop over nests
1537!**********************************************************************
1538    do i=1, numbnests
1539! 3D fields
1540      call MPI_Bcast(uun(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1541      if (mp_ierr /= 0) goto 600
1542      call MPI_Bcast(vvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1543      if (mp_ierr /= 0) goto 600
1544      call MPI_Bcast(wwn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1545      if (mp_ierr /= 0) goto 600
1546      call MPI_Bcast(ttn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1547      if (mp_ierr /= 0) goto 600
1548      call MPI_Bcast(rhon(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1549      if (mp_ierr /= 0) goto 600
1550      call MPI_Bcast(drhodzn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1551      if (mp_ierr /= 0) goto 600
1552      call MPI_Bcast(tthn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1553      if (mp_ierr /= 0) goto 600
1554      call MPI_Bcast(qvhn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1555      if (mp_ierr /= 0) goto 600
1556      call MPI_Bcast(qvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1557      if (mp_ierr /= 0) goto 600
1558      call MPI_Bcast(pvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1559      if (mp_ierr /= 0) goto 600
1560      call MPI_Bcast(cloudsn(:,:,:,li:ui,i),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1561      if (mp_ierr /= 0) goto 600
1562
1563! cloud water/ice:
1564      if (readclouds_nest(i)) then
1565! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1566! if (mp_ierr /= 0) goto 600
1567        call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1568        if (mp_ierr /= 0) goto 600
1569      end if
1570
1571! 2D fields
1572      call MPI_Bcast(cloudshn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1573      if (mp_ierr /= 0) goto 600
1574      call MPI_Bcast(vdepn(:,:,:,li:ui,i),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1575      if (mp_ierr /= 0) goto 600
1576      call MPI_Bcast(psn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1577      if (mp_ierr /= 0) goto 600
1578      call MPI_Bcast(sdn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1579      if (mp_ierr /= 0) goto 600
1580      call MPI_Bcast(tccn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1581      if (mp_ierr /= 0) goto 600
1582      call MPI_Bcast(tt2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1583      if (mp_ierr /= 0) goto 600
1584      call MPI_Bcast(td2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1585      if (mp_ierr /= 0) goto 600
1586      call MPI_Bcast(lsprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1587      if (mp_ierr /= 0) goto 600
1588      call MPI_Bcast(convprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1589      if (mp_ierr /= 0) goto 600
1590      call MPI_Bcast(ustarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1591      if (mp_ierr /= 0) goto 600
1592      call MPI_Bcast(wstarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1593      if (mp_ierr /= 0) goto 600
1594      call MPI_Bcast(olin(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1595      if (mp_ierr /= 0) goto 600
1596      call MPI_Bcast(hmixn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1597      if (mp_ierr /= 0) goto 600
1598      call MPI_Bcast(tropopausen(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1599      if (mp_ierr /= 0) goto 600
1600    end do
1601
1602    if (mp_measure_time) call mpif_mtime('commtime',1)
1603
1604    goto 601
1605
600 write(*,*) "mpi_mod> mp_ierr /= 0",mp_ierr
1607    stop
1608
1609601 end subroutine mpif_gf_send_vars_nest
1610
1611
1612  subroutine mpif_gf_send_vars_async(memstat)
1613!*******************************************************************************
1614! DESCRIPTION
1615!   Distribute 'getfield' variables from reader process to all processes.
1616!   Called from timemanager by reader process only
1617!
1618! NOTE
!   This version uses asynchronous sends. The newest fields are sent in the
1620!   background, so calculations can continue while
1621!   MPI communications are performed.
1622!
1623!   The way the MPI tags/requests are sequenced means that this routine must
1624!   carefully match routine 'mpif_gf_recv_vars_async'
1625!
1626! VARIABLES
1627!   memstat -- input, for resolving pointer to windfield index being read
1628!   mind    -- index where to place new fields
1629!
1630!
1631!*******************************************************************************
1632    use com_mod
1633
1634    implicit none
1635
1636    integer, intent(in) :: memstat
1637    integer :: mind
1638    integer :: dest,i
1639
1640! Common array sizes used for communications
1641    integer :: d3s1 = nxmax*nymax*nzmax
1642    integer :: d3s2 = nxmax*nymax*nuvzmax
1643    integer :: d2s1 = nxmax*nymax
1644    integer :: d2s2 = nxmax*nymax*maxspec
1645!    integer :: d1s1 = maxwf
1646
1647!*******************************************************************************
1648
1649! At the time the send is posted, the reader process is one step further
1650! in the permutation of memstat compared with the receiving processes
1651
1652    if (memstat.ge.32) then
1653! last read was synchronous, to indices 1 and 2, 3 is free
1654      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1655           & memstat>=32 should never happen here."
1656      stop
1657    else if (memstat.eq.17) then
1658! old fields on 1,2, send 3
1659      mind=3
1660    else if (memstat.eq.18) then
1661! old fields on 2,3, send 1
1662      mind=1
1663    else if (memstat.eq.19) then
1664! old fields on 3,1, send 2
1665      mind=2
1666    else
1667      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1668           & invalid memstat"
1669      mind=-1
1670      stop
1671    end if
1672
1673    if (mp_dev_mode) then
1674      if (mp_pid.ne.id_read) then
1675        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_async'
1676      end if
1677    end if
1678
1679    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat
1680
1681! Time for MPI communications
1682    if (mp_measure_time) call mpif_mtime('commtime',0)
1683
1684! Loop over receiving processes, initiate data sending
1685!*****************************************************
1686
1687    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
1688! TODO: use mp_partgroup_np here
1689      if (dest.eq.id_read) cycle
1690      i=dest*nvar_async
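! Each destination process owns a contiguous block of nvar_async slots in
! the request array reqs; i indexes into that block and is incremented by
! one for every variable sent, mirroring the indexing on the receive side.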
1691      call MPI_Isend(uu(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1692      if (mp_ierr /= 0) goto 600
1693      i=i+1
1694      call MPI_Isend(vv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1695      if (mp_ierr /= 0) goto 600
1696      i=i+1
1697      call MPI_Isend(uupol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1698      if (mp_ierr /= 0) goto 600
1699      i=i+1
1700      call MPI_Isend(vvpol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1701      if (mp_ierr /= 0) goto 600
1702      i=i+1
1703      call MPI_Isend(ww(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1704      if (mp_ierr /= 0) goto 600
1705      i=i+1
1706      call MPI_Isend(tt(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1707      if (mp_ierr /= 0) goto 600
1708      i=i+1
1709      call MPI_Isend(rho(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1710      if (mp_ierr /= 0) goto 600
1711      i=i+1
1712      call MPI_Isend(drhodz(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1713      if (mp_ierr /= 0) goto 600
1714      i=i+1
1715      call MPI_Isend(tth(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1716      if (mp_ierr /= 0) goto 600
1717      i=i+1
1718      call MPI_Isend(qvh(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1719      if (mp_ierr /= 0) goto 600
1720      i=i+1
1721      call MPI_Isend(qv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1722      if (mp_ierr /= 0) goto 600
1723      i=i+1
1724      call MPI_Isend(pv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1725      if (mp_ierr /= 0) goto 600
1726      i=i+1
1727      call MPI_Isend(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1728      i=i+1
1729      if (mp_ierr /= 0) goto 600
1730      call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1731      if (mp_ierr /= 0) goto 600
1732      i=i+1
1733      call MPI_Isend(vdep(:,:,:,mind),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1734      if (mp_ierr /= 0) goto 600
1735      i=i+1
1736! 15
1737      call MPI_Isend(ps(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1738      if (mp_ierr /= 0) goto 600
1739      i=i+1
1740      call MPI_Isend(sd(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1741      if (mp_ierr /= 0) goto 600
1742      i=i+1
1743      call MPI_Isend(tcc(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1744      if (mp_ierr /= 0) goto 600
1745      i=i+1
1746      call MPI_Isend(tt2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1747      if (mp_ierr /= 0) goto 600
1748      i=i+1
1749      call MPI_Isend(td2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1750      if (mp_ierr /= 0) goto 600
1751      i=i+1
1752      call MPI_Isend(lsprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1753      if (mp_ierr /= 0) goto 600
1754      i=i+1
1755      call MPI_Isend(convprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1756      if (mp_ierr /= 0) goto 600
1757      i=i+1
1758      call MPI_Isend(ustar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1759      if (mp_ierr /= 0) goto 600
1760      i=i+1
1761      call MPI_Isend(wstar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1762      if (mp_ierr /= 0) goto 600
1763      i=i+1
1764      call MPI_Isend(hmix(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1765      if (mp_ierr /= 0) goto 600
1766      i=i+1
1767! 25
1768      call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1769      if (mp_ierr /= 0) goto 600
1770      i=i+1
1771      call MPI_Isend(oli(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1772      if (mp_ierr /= 0) goto 600
1773
1774! Send cloud water if it exists. Increment counter always (as on receiving end)
1775      if (readclouds) then
1776        i=i+1
1777        call MPI_Isend(ctwc(:,:,mind),d2s1,mp_sp,dest,tm1,&
1778             &MPI_COMM_WORLD,reqs(i),mp_ierr)
1779        if (mp_ierr /= 0) goto 600
1780      end if
1781    end do
1782
1783    if (mp_measure_time) call mpif_mtime('commtime',1)
1784
1785    goto 601
1786
600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> mp_ierr /= 0", mp_ierr
1788    stop
1789
1790601 end subroutine mpif_gf_send_vars_async
1791
1792
1793  subroutine mpif_gf_recv_vars_async(memstat)
1794!*******************************************************************************
1795! DESCRIPTION
1796!   Receive 'getfield' variables from reader process.
1797!   Called from timemanager by all processes except reader
1798!
1799! NOTE
!   This version uses asynchronous communications.
1801!
1802! VARIABLES
1803!   memstat -- input, used to resolve windfield index being received
1804!
1805!
1806!*******************************************************************************
1807    use com_mod
1808
1809    implicit none
1810
1811    integer, intent(in) :: memstat
1812    integer :: mind,j
1813
1814! Common array sizes used for communications
1815    integer :: d3s1 = nxmax*nymax*nzmax
1816    integer :: d3s2 = nxmax*nymax*nuvzmax
1817    integer :: d2s1 = nxmax*nymax
1818    integer :: d2s2 = nxmax*nymax*maxspec
1819!integer :: d1_size1 = maxwf
1820
1821!    integer :: d3s1,d3s2,d2s1,d2s2
1822!*******************************************************************************
1823
1824! At the time this immediate receive is posted, memstat is the state of
1825! windfield indices at the previous/current time. From this, the future
1826! state is deduced.
1827    if (memstat.eq.32) then
1828! last read was synchronous to indices 1 and 2, 3 is free
1829      mind=3
1830    else if (memstat.eq.17) then
1831! last read was asynchronous to index 3, 1 is free
1832      mind=1
1833    else if (memstat.eq.18) then
1834! last read was asynchronous to index 1, 2 is free
1835      mind=2
1836    else if (memstat.eq.19) then
1837! last read was asynchronous to index 2, 3 is free
1838      mind=3
1839    else
1840! illegal state
1841      write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
1842      stop
1843    end if
1844
1845    if (mp_dev_mode) then
1846      if (mp_pid.eq.id_read) then
1847        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async'
1848      end if
1849    end if
1850
1851! Time for MPI communications
1852    if (mp_measure_time) call mpif_mtime('commtime',0)
1853
    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_async, memstat:', memstat
1855
1856! Initiate receiving of data
1857!***************************
1858
1859! Get MPI tags/requests for communications
1860    j=mp_pid*nvar_async
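! This process owns the block of nvar_async request slots starting at
! mp_pid*nvar_async. The receives below use MPI_ANY_TAG and are matched in
! posting order, so their sequence must mirror the Isend sequence in
! mpif_gf_send_vars_async.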
1861    call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1862         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1863    if (mp_ierr /= 0) goto 600
1864    j=j+1
1865    call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1866         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1867    if (mp_ierr /= 0) goto 600
1868    j=j+1
1869    call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1870         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1871    if (mp_ierr /= 0) goto 600
1872    j=j+1
1873    call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1874         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1875    if (mp_ierr /= 0) goto 600
1876    j=j+1
1877    call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1878         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1879    if (mp_ierr /= 0) goto 600
1880    j=j+1
1881    call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1882         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1883    if (mp_ierr /= 0) goto 600
1884    j=j+1
1885    call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1886         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1887    if (mp_ierr /= 0) goto 600
1888    j=j+1
1889    call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1890         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1891    if (mp_ierr /= 0) goto 600
1892    j=j+1
1893    call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
1894         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1895    if (mp_ierr /= 0) goto 600
1896    j=j+1
1897    call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
1898         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1899    if (mp_ierr /= 0) goto 600
1900    j=j+1
1901    call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1902         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1903    if (mp_ierr /= 0) goto 600
1904    j=j+1
1905    call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1906         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1907    if (mp_ierr /= 0) goto 600
1908    j=j+1
1909    call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
1910         &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1911    if (mp_ierr /= 0) goto 600
1912    j=j+1
1913    call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1914         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1915    if (mp_ierr /= 0) goto 600
1916    j=j+1
1917    call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
1918         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1919    if (mp_ierr /= 0) goto 600
1920    j=j+1
1921    call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1922         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1923    if (mp_ierr /= 0) goto 600
1924    j=j+1
1925    call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1926         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1927    if (mp_ierr /= 0) goto 600
1928    j=j+1
1929    call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1930         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1931    if (mp_ierr /= 0) goto 600
1932    j=j+1
1933    call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1934         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1935    if (mp_ierr /= 0) goto 600
1936    j=j+1
1937    call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1938         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1939    if (mp_ierr /= 0) goto 600
1940    j=j+1
1941    call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1942         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1943    if (mp_ierr /= 0) goto 600
1944    j=j+1
1945    call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1946         &MPI_COMM_WORLD,reqs(j),mp_ierr)
    if (mp_ierr /= 0) goto 600
    j=j+1
    call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1949         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1950    if (mp_ierr /= 0) goto 600
1951    j=j+1
1952    call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1953         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1954    if (mp_ierr /= 0) goto 600
1955    j=j+1
1956    call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1957         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1958    if (mp_ierr /= 0) goto 600
1959    j=j+1
1960    call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1961         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1962    if (mp_ierr /= 0) goto 600
1963    j=j+1
1964    call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1965         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1966    if (mp_ierr /= 0) goto 600
1967
! Post request for clwc. These data are possibly not sent; the request would then have to be cancelled.
! For now, assume that either every time step has cloud water data or none does.
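! Note: the receive below is posted with count d2s1*5, larger than the d2s1
! elements actually sent for ctwc; MPI permits this as long as the incoming
! message does not exceed the posted count.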
1970    if (readclouds) then
1971      j=j+1
1972      call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
1973           &MPI_COMM_WORLD,reqs(j),mp_ierr)
1974      if (mp_ierr /= 0) goto 600
1975    end if
1976
1977
1978    if (mp_measure_time) call mpif_mtime('commtime',1)
1979
1980    goto 601
1981
1982600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> MPI ERROR ", mp_ierr
1983    stop
1984
1985601 end subroutine mpif_gf_recv_vars_async
1986
1987
1988  subroutine mpif_gf_send_vars_nest_async(memstat)
1989!*******************************************************************************
1990! DESCRIPTION
1991!   Distribute 'getfield' variables from reader process to all processes.
1992!   Called from timemanager by reader process only.
1993!   Version for nested wind fields
1994!
1995! NOTE
!   This version uses asynchronous sends. The newest fields are sent in the
1997!   background, so calculations can continue while
1998!   MPI communications are performed.
1999!
2000!   The way the MPI tags/requests are sequenced means that this routine must
2001!   carefully match routine 'mpif_gf_recv_vars_async'
2002!
2003! VARIABLES
2004!   memstat -- input, for resolving pointer to windfield index being read
2005!   mind    -- index where to place new fields
2006!
2007! TODO
2008!   Some unused arrays are currently sent (uupoln,..)
2009!*******************************************************************************
2010    use com_mod
2011
2012    implicit none
2013
2014    integer, intent(in) :: memstat
2015    integer :: mind
2016    integer :: dest,i,k
2017
2018! Common array sizes used for communications
2019    integer :: d3s1 = nxmaxn*nymaxn*nzmax
2020    integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
2021    integer :: d2s1 = nxmaxn*nymaxn
2022    integer :: d2s2 = nxmaxn*nymaxn*maxspec
2023
2024!*******************************************************************************
2025
2026! At the time the send is posted, the reader process is one step further
2027! in the permutation of memstat compared with the receiving processes
2028
2029    if (memstat.ge.32) then
2030! last read was synchronous, to indices 1 and 2, 3 is free
2031      write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
2032           & memstat>=32 should never happen here."
2033      stop
2034    else if (memstat.eq.17) then
2035! old fields on 1,2, send 3
2036      mind=3
2037    else if (memstat.eq.18) then
2038! old fields on 2,3, send 1
2039      mind=1
2040    else if (memstat.eq.19) then
2041! old fields on 3,1, send 2
2042      mind=2
2043    else
2044      write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
2045           & invalid memstat"
2046      mind=-1
2047      stop
2048    end if
2049
2050    if (mp_dev_mode) then
2051      if (mp_pid.ne.id_read) then
2052        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_nest_async'
2053      end if
2054    end if
2055
2056    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_nest_async, memstat:', memstat
2057
2058! Time for MPI communications
2059    if (mp_measure_time) call mpif_mtime('commtime',0)
2060
2061! Loop over receiving processes, initiate data sending
2062!*****************************************************
2063
2064    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
2065! TODO: use mp_partgroup_np here
2066      if (dest.eq.id_read) cycle
2067      do k=1, numbnests
2068        i=dest*nvar_async
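! Note: i restarts at dest*nvar_async for every nest, so with more than one
! nest the request handles of earlier nests are overwritten before they are
! waited on (see also the TODO in mpif_gf_request on separate request arrays
! for the nested variables).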
2069        call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2070        if (mp_ierr /= 0) goto 600
2071        i=i+1
2072        call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2073        if (mp_ierr /= 0) goto 600
2074        i=i+1
2075        call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2076        if (mp_ierr /= 0) goto 600
2077        i=i+1
2078        call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2079        if (mp_ierr /= 0) goto 600
2080        i=i+1
2081        call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2082        if (mp_ierr /= 0) goto 600
2083        i=i+1
2084        call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2085        if (mp_ierr /= 0) goto 600
2086        i=i+1
2087        call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2088        if (mp_ierr /= 0) goto 600
2089        i=i+1
2090        call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2091        if (mp_ierr /= 0) goto 600
2092        i=i+1
2093        call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2094        if (mp_ierr /= 0) goto 600
2095        i=i+1
2096        call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2097        if (mp_ierr /= 0) goto 600
2098        i=i+1
2099        call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2100        i=i+1
2101        if (mp_ierr /= 0) goto 600
2102        call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2103        if (mp_ierr /= 0) goto 600
2104        i=i+1
2105        call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2106        if (mp_ierr /= 0) goto 600
2107        i=i+1
2108        call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2109        if (mp_ierr /= 0) goto 600
2110        i=i+1
2111        call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2112        if (mp_ierr /= 0) goto 600
2113        i=i+1
2114! 15
2115        call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2116        if (mp_ierr /= 0) goto 600
2117        i=i+1
2118        call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2119        if (mp_ierr /= 0) goto 600
2120        i=i+1
2121        call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2122        if (mp_ierr /= 0) goto 600
2123        i=i+1
2124        call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2125        if (mp_ierr /= 0) goto 600
2126        i=i+1
2127        call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2128        if (mp_ierr /= 0) goto 600
2129        i=i+1
2130        call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2131        if (mp_ierr /= 0) goto 600
2132        i=i+1
2133        call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2134        if (mp_ierr /= 0) goto 600
2135        i=i+1
2136        call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2137        if (mp_ierr /= 0) goto 600
2138        i=i+1
2139        call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2140        if (mp_ierr /= 0) goto 600
2141        i=i+1
2142        call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2143        if (mp_ierr /= 0) goto 600
2144! 25
2145
2146! Send cloud water if it exists. Increment counter always (as on receiving end)
2147        if (readclouds) then
2148          i=i+1
2149          call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,&
2150               &MPI_COMM_WORLD,reqs(i),mp_ierr)
2151          if (mp_ierr /= 0) goto 600
2152        end if
2153      end do
2154    end do
2155
2156    if (mp_measure_time) call mpif_mtime('commtime',1)
2157
2158    goto 601
2159
600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> mp_ierr /= 0", mp_ierr
2161    stop
2162
2163601 end subroutine mpif_gf_send_vars_nest_async
2164
2165
2166  subroutine mpif_gf_recv_vars_nest_async(memstat)
2167!*******************************************************************************
2168! DESCRIPTION
2169!   Receive 'getfield' variables from reader process.
2170!   Called from timemanager by all processes except reader
2171!   Version for nested wind fields
2172!
2173! NOTE
!   This version uses asynchronous communications.
2175!
2176! VARIABLES
2177!   memstat -- input, used to resolve windfield index being received
2178!
2179!
2180!*******************************************************************************
2181    use com_mod
2182
2183    implicit none
2184
2185    integer, intent(in) :: memstat
2186    integer :: mind,j,k
2187
2188! Common array sizes used for communications
2189    integer :: d3s1 = nxmaxn*nymaxn*nzmax
2190    integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
2191    integer :: d2s1 = nxmaxn*nymaxn
2192    integer :: d2s2 = nxmaxn*nymaxn*maxspec
2193
2194!*******************************************************************************
2195
2196! At the time this immediate receive is posted, memstat is the state of
2197! windfield indices at the previous/current time. From this, the future
2198! state is deduced.
2199    if (memstat.eq.32) then
2200! last read was synchronous to indices 1 and 2, 3 is free
2201      mind=3
2202    else if (memstat.eq.17) then
2203! last read was asynchronous to index 3, 1 is free
2204      mind=1
2205    else if (memstat.eq.18) then
2206! last read was asynchronous to index 1, 2 is free
2207      mind=2
2208    else if (memstat.eq.19) then
2209! last read was asynchronous to index 2, 3 is free
2210      mind=3
2211    else
2212! illegal state
2213      write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
2214      stop
2215    end if
2216
2217    if (mp_dev_mode) then
2218      if (mp_pid.eq.id_read) then
        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_nest_async'
2220      end if
2221    end if
2222
2223! Time for MPI communications
2224    if (mp_measure_time) call mpif_mtime('commtime',0)
2225
    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_nest_async, memstat:', memstat
2227
2228! Initiate receiving of data
2229!***************************
2230
2231    do k=1, numbnests
2232! Get MPI tags/requests for communications
2233      j=mp_pid*nvar_async
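! Note: as on the send side, j restarts at mp_pid*nvar_async for every nest,
! so request handles are reused when more than one nest is present.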
2234      call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2235           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2236      if (mp_ierr /= 0) goto 600
2237      j=j+1
2238      call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2239           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2240      if (mp_ierr /= 0) goto 600
2241      j=j+1
2242      call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2243           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2244      if (mp_ierr /= 0) goto 600
2245      j=j+1
2246      call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2247           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2248      if (mp_ierr /= 0) goto 600
2249      j=j+1
2250      call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2251           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2252      if (mp_ierr /= 0) goto 600
2253      j=j+1
2254      call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2255           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2256      if (mp_ierr /= 0) goto 600
2257      j=j+1
2258      call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
2259           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2260      if (mp_ierr /= 0) goto 600
2261      j=j+1
2262      call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
2263           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2264      if (mp_ierr /= 0) goto 600
2265      j=j+1
2266      call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2267           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2268      if (mp_ierr /= 0) goto 600
2269      j=j+1
2270      call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2271           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2272      if (mp_ierr /= 0) goto 600
2273      j=j+1
2274      call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
2275           &MPI_COMM_WORLD,reqs(j),mp_ierr)   
2276      if (mp_ierr /= 0) goto 600
2277      j=j+1
2278      call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2279           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2280      if (mp_ierr /= 0) goto 600
2281      j=j+1
2282      call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
2283           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2284      if (mp_ierr /= 0) goto 600
2285      j=j+1
2286      call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2287           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2288      if (mp_ierr /= 0) goto 600
2289      j=j+1
2290      call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2291           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2292      if (mp_ierr /= 0) goto 600
2293      j=j+1
2294      call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2295           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2296      if (mp_ierr /= 0) goto 600
2297      j=j+1
2298      call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2299           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2300      if (mp_ierr /= 0) goto 600
2301      j=j+1
2302      call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2303           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2304      if (mp_ierr /= 0) goto 600
2305      j=j+1
2306      call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2307           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2308      if (mp_ierr /= 0) goto 600
2309      j=j+1
2310      call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2311           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2314           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2315      if (mp_ierr /= 0) goto 600
2316      j=j+1
2317      call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2318           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2319      if (mp_ierr /= 0) goto 600
2320      j=j+1
2321      call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2322           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2323      if (mp_ierr /= 0) goto 600
2324      j=j+1
2325      call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2326           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2327      if (mp_ierr /= 0) goto 600
2328      j=j+1
2329      call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2330           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2331      if (mp_ierr /= 0) goto 600
2332
! Post request for clwc. These data are possibly not sent; the request would then have to be cancelled.
! For now, assume that either every time step has cloud water data or none does.
2335      if (readclouds) then
2336        j=j+1
2337        call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
2338             &MPI_COMM_WORLD,reqs(j),mp_ierr)
2339        if (mp_ierr /= 0) goto 600
2340      end if
2341    end do
2342
2343    if (mp_measure_time) call mpif_mtime('commtime',1)
2344
2345    goto 601
2346
2347600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_nest_async> MPI ERROR ", mp_ierr
2348    stop
2349
2350601 end subroutine mpif_gf_recv_vars_nest_async
2351
2352
2353  subroutine mpif_gf_request
2354!*******************************************************************************
2355! DESCRIPTION
2356!   Check for completion of MPI_Isend/MPI_Irecv data transfer.
2357!   
2358!   
2359! NOTE
2360!   implicit synchronisation between all processes takes place here
2361!
2362! TODO
2363!   NB: take into account nested wind fields by using a separate
2364!   subroutine mpif_gf_request_nest (and separate request arrays for the
2365!   nested variables)
2366!
2367!*******************************************************************************
2368!    use com_mod,only: readclouds
2369
2370    implicit none
2371
2372
2373    integer :: n_req !,i
2374
2375!***********************************************************************
2376
2377    n_req=nvar_async*mp_np
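! The request array holds nvar_async slots per MPI process; MPI_Waitall below
! blocks until every send and receive posted in reqs has completed.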
2378
2379    if (mp_measure_time) call mpif_mtime('commtime',0)
2380
2381!    call MPI_Wait(rm1,MPI_STATUS_IGNORE,mp_ierr)
2382
2383! TODO: cancel recv request if readclouds=.false.
2384!    if (readclouds) then
2385    call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
2386!    endif
2387! else
2388!   do i = 0, nvar_async*mp_np-1
2389!     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
2390!       call MPI_Cancel(reqs(i),mp_ierr)
2391!       cycle
2392!     end if
2393!     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
2394!   end do
2395! end if
2396
2397    if (mp_ierr /= 0) goto 600
2398
2399    if (mp_measure_time) call mpif_mtime('commtime',1)
2400
2401    goto 601
2402
2403600 write(*,*) "#### mpi_mod::mpif_gf_request> MPI ERROR ", mp_ierr
2404    stop
2405
2406601 end subroutine mpif_gf_request
2407
2408
2409  subroutine mpif_tm_reduce_grid
2410!***********************************************************************
! Collect the grid variables on PID 0, summing contributions from all processes.
2412!
2413! NOTE
2414!   - Older MPI implementations may lack the MPI_IN_PLACE operation.
2415!     If so use 1) below and change unc_mod
2416!
2417!***********************************************************************
2418    use com_mod
2419    use unc_mod
2420    use par_mod
2421
2422    implicit none
2423
2424    integer :: grid_size2d,grid_size3d
2425    integer, parameter :: rcpt_size=maxreceptor*maxspec
2426
2427!**********************************************************************
2428    grid_size2d=numxgrid*numygrid*maxspec* &
2429         & maxpointspec_act*nclassunc*maxageclass
2430    grid_size3d=numxgrid*numygrid*numzgrid*maxspec* &
2431         & maxpointspec_act*nclassunc*maxageclass
2432
2433
2434! Time for MPI communications
2435    if (mp_measure_time) call mpif_mtime('commtime',0)
2436
2437! 1) Using a separate grid (gridunc0) for received values
2438! call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2439!      & mp_comm_used, mp_ierr)
2440! if (mp_ierr /= 0) goto 600
2441
2442! 2) Using in-place reduction
2443
2444!!!--------------------------------------------------------------------
2445!!! DJM - 2017-05-09 change - MPI_IN_PLACE option for MPI_Reduce() causes
!!! severe numerical problems in some cases.  I suspect memory-overrun
!!! issues somewhere in this complex code, but have so far failed to identify
!!! a specific problem.  Reports found online also hint that some
!!! MPI_IN_PLACE implementations may themselves be buggy.
2450!!!
2451!!! At this point, with the following CPP USE_MPIINPLACE directive, the
2452!!! default behaviour will be to not use the MPI_IN_PLACE option.
2453!!! Users will have to compile with -DUSE_MPIINPLACE if they want that option.
2454!!! Introduction of the CPP directives also requires that the code be compiled
2455!!! with the "-x f95-cpp-input" option.
2456!!!
2457!!! Modification of this section requires the addition of array gridunc0, which
2458!!! requires an allocation in outgrid_init.f90 and initial declaration in
2459!!! unc_mod.f90.
2460!!!--------------------------------------------------------------------
2461
2462#ifdef USE_MPIINPLACE
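! With MPI_Reduce the receive buffer is only significant on the root rank,
! hence the dummy '0' passed as recvbuf on the non-root ranks below.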
2463
2464    if (lroot) then
2465      call MPI_Reduce(MPI_IN_PLACE, gridunc, grid_size3d, mp_sp, MPI_SUM, id_root, &
2466           & mp_comm_used, mp_ierr)
2467      if (mp_ierr /= 0) goto 600
    else
      call MPI_Reduce(gridunc, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
    end if
2472
2473  ! Receptor concentrations   
2474    if (lroot) then
2475      call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
2476           & mp_comm_used,mp_ierr)
2477      if (mp_ierr /= 0) goto 600
    else
      call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
           & mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
    end if
2482
2483#else
2484
    call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
         & mp_comm_used, mp_ierr)
    if (mp_ierr /= 0) goto 600
    if (lroot) gridunc = gridunc0

    call MPI_Reduce(creceptor, creceptor0, rcpt_size, mp_sp, MPI_SUM, id_root, &
         & mp_comm_used, mp_ierr)
    if (mp_ierr /= 0) goto 600
    if (lroot) creceptor = creceptor0
2494
2495#endif
2496
2497    if ((WETDEP).and.(ldirect.gt.0)) then
2498      call MPI_Reduce(wetgridunc, wetgridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2499           & mp_comm_used, mp_ierr)
2500      if (mp_ierr /= 0) goto 600
2501    end if
2502
2503    if ((DRYDEP).and.(ldirect.gt.0)) then
2504      call MPI_Reduce(drygridunc, drygridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2505           & mp_comm_used, mp_ierr)
2506      if (mp_ierr /= 0) goto 600
2507    end if
2508
2509
2510    if (mp_measure_time) call mpif_mtime('commtime',1)
2511
2512    goto 601
2513
600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
2515    stop
2516
2517601 end subroutine mpif_tm_reduce_grid
2518
2519
2520  subroutine mpif_tm_reduce_grid_nest
2521!***********************************************************************
! Collect the nested grids on PID 0, summing contributions from all processes.
2523!
2524! NOTE
!   - Compiling with 'fcheck=all' (gfortran) will cause a run-time error
!     as wetgriduncn0 is only allocated on the root process. The MPI calls
!     may be valid, but gfortran's runtime checks reject the unallocated
!     argument.
2528!
2529!***********************************************************************
2530    use com_mod
2531    use unc_mod
2532    use par_mod
2533
2534    implicit none
2535
2536    integer :: grid_size2d,grid_size3d
2537
2538!**********************************************************************
2539
2540    grid_size3d=numxgridn*numygridn*numzgrid*maxspec* &
2541         & maxpointspec_act*nclassunc*maxageclass
2542    grid_size2d=numxgridn*numygridn*maxspec* &
2543         & maxpointspec_act*nclassunc*maxageclass
2544
2545
2546! Time for MPI communications
2547    if (mp_measure_time) call mpif_mtime('commtime',0)
2548
2549! Using a separate grid (gridunc0) for received values, for debugging
2550! call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2551!      & mp_comm_used, mp_ierr)
2552! if (mp_ierr /= 0) goto 600
2553
2554#ifdef USE_MPIINPLACE
2555! Using in-place reduction
2556    if (lroot) then
2557      call MPI_Reduce(MPI_IN_PLACE, griduncn, grid_size3d, mp_sp, MPI_SUM, id_root, &
2558           & mp_comm_used, mp_ierr)
2559      if (mp_ierr /= 0) goto 600
2560    else
2561      call MPI_Reduce(griduncn, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2562           & mp_comm_used, mp_ierr)
2563      if (mp_ierr /= 0) goto 600
2564    end if
2565
2566#else
2567    call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2568         & mp_comm_used, mp_ierr)
2569    if (mp_ierr /= 0) goto 600
2570    if (lroot) griduncn = griduncn0
2571
2572#endif
2573
2574    if ((WETDEP).and.(ldirect.gt.0)) then
2575      call MPI_Reduce(wetgriduncn, wetgriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2576           & mp_comm_used, mp_ierr)
2577      if (mp_ierr /= 0) goto 600
2578    end if
2579
2580    if ((DRYDEP).and.(ldirect.gt.0)) then
2581      call MPI_Reduce(drygriduncn, drygriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2582           & mp_comm_used, mp_ierr)
2583      if (mp_ierr /= 0) goto 600
2584    end if
2585
2586    if (mp_measure_time) call mpif_mtime('commtime',1)
2587
2588    goto 601
2589
600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
2591    stop
2592
2593601 end subroutine mpif_tm_reduce_grid_nest
2594
2595
2596  subroutine mpif_mtime(ident,imode)
2597!***********************************************************************
2598! Measure CPU/Wall time in various parts of the code
2599!
2600! VARIABLES
2601!   ident        character, identifies routine to measure
2602!   imode        integer, 0:start clock(s)  1: stop clock(s)
2603!
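! TYPICAL USE (as elsewhere in this module)
!   call mpif_mtime('commtime',0)    ! start the communication timers
!   ... timed code ...
!   call mpif_mtime('commtime',1)    ! stop the timers and add to the totals
!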
2604!***********************************************************************
2605    implicit none
2606
2607    character(LEN=*), intent(in) :: ident
2608    integer, intent(in) :: imode
2609
2610!***********************************************************************
2611
2612    select case(ident)
2613
2614    case ('timemanager')
2615      if (imode.eq.0) then
2616        call cpu_time(tm_tot_beg)
2617      else
2618        call cpu_time(tm_tot_end)
2619        tm_tot_total = tm_tot_total + (tm_tot_end-tm_tot_beg)
2620      end if
2621
2622    case ('wetdepo')
2623      if (imode.eq.0) then
2624        mp_wetdepo_wtime_beg = mpi_wtime()
2625        call cpu_time(mp_wetdepo_time_beg)
2626      else
2627        mp_wetdepo_wtime_end = mpi_wtime()
2628        call cpu_time(mp_wetdepo_time_end)
2629
2630        mp_wetdepo_wtime_total = mp_wetdepo_wtime_total + &
2631             &(mp_wetdepo_wtime_end - mp_wetdepo_wtime_beg)
2632        mp_wetdepo_time_total = mp_wetdepo_time_total + &
2633             &(mp_wetdepo_time_end - mp_wetdepo_time_beg)
2634      end if
2635
2636    case ('advance')
2637      if (imode.eq.0) then
2638        mp_advance_wtime_beg = mpi_wtime()
2639      else
2640        mp_advance_wtime_end = mpi_wtime()
2641
2642        mp_advance_wtime_total = mp_advance_wtime_total + &
2643             &(mp_advance_wtime_end - mp_advance_wtime_beg)
2644      end if
2645
2646    case ('getfields')
2647      if (imode.eq.0) then
2648        mp_getfields_wtime_beg = mpi_wtime()
2649        call cpu_time(mp_getfields_time_beg)
2650      else
2651        mp_getfields_wtime_end = mpi_wtime()
2652        call cpu_time(mp_getfields_time_end)
2653
2654        mp_getfields_wtime_total = mp_getfields_wtime_total + &
2655             &(mp_getfields_wtime_end - mp_getfields_wtime_beg)
2656        mp_getfields_time_total = mp_getfields_time_total + &
2657             &(mp_getfields_time_end - mp_getfields_time_beg)
2658      end if
2659
2660    case ('partloop1')
2661      if (imode.eq.0) then
2662        call cpu_time(tm_nploop_beg)
2663      else
2664        call cpu_time(tm_nploop_end)
2665        tm_nploop_total = tm_nploop_total + (tm_nploop_end - tm_nploop_beg)
2666      end if
2667
2668    case ('conccalc')
2669      if (imode.eq.0) then
2670        call cpu_time(mp_conccalc_time_beg)
2671      else
2672        call cpu_time(mp_conccalc_time_end)
2673        mp_conccalc_time_total = mp_conccalc_time_total + mp_conccalc_time_end - &
2674             &mp_conccalc_time_beg
2675      end if
2676
2677    case ('rootonly')
2678      if (imode.eq.0) then
2679        call cpu_time(mp_root_time_beg)
2680        mp_root_wtime_beg = mpi_wtime()
2681      else
2682        call cpu_time(mp_root_time_end)
2683        mp_root_wtime_end = mpi_wtime()
2684        mp_root_time_total = mp_root_time_total + &
2685             &(mp_root_time_end - mp_root_time_beg)
2686        mp_root_wtime_total = mp_root_wtime_total + &
2687             &(mp_root_wtime_end - mp_root_wtime_beg)
2688      end if
2689
2690    case ('iotime')
2691      if (imode.eq.0) then
2692        mp_io_wtime_beg = mpi_wtime()
2693        call cpu_time(mp_io_time_beg)
2694      else
2695        mp_io_wtime_end = mpi_wtime()
2696        call cpu_time(mp_io_time_end)
2697
2698        mp_io_wtime_total = mp_io_wtime_total + (mp_io_wtime_end - &
2699             & mp_io_wtime_beg)
2700        mp_io_time_total = mp_io_time_total + (mp_io_time_end - &
2701             & mp_io_time_beg)
2702      end if
2703
2704    case ('verttransform')
2705      if (imode.eq.0) then
2706        mp_vt_wtime_beg = mpi_wtime()
2707        call cpu_time(mp_vt_time_beg)
2708      else
2709        mp_vt_wtime_end = mpi_wtime()
2710        call cpu_time(mp_vt_time_end)
2711
2712        mp_vt_wtime_total = mp_vt_wtime_total + (mp_vt_wtime_end - &
2713             & mp_vt_wtime_beg)
2714        mp_vt_time_total = mp_vt_time_total + (mp_vt_time_end - &
2715             & mp_vt_time_beg)
2716      end if
2717
2718!    case ('readwind')
2719!      if (imode.eq.0) then
2720!        call cpu_time(mp_readwind_time_beg)
2721!        mp_readwind_wtime_beg = mpi_wtime()
2722!      else
2723!        call cpu_time(mp_readwind_time_end)
2724!        mp_readwind_wtime_end = mpi_wtime()
2725!
2726!        mp_readwind_time_total = mp_readwind_time_total + &
2727!             &(mp_readwind_time_end - mp_readwind_time_beg)
2728!        mp_readwind_wtime_total = mp_readwind_wtime_total + &
2729!             &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
2730!      end if
2731
2732    case ('commtime')
2733      if (imode.eq.0) then
2734        call cpu_time(mp_comm_time_beg)
2735        mp_comm_wtime_beg = mpi_wtime()
2736      else
2737        call cpu_time(mp_comm_time_end)
2738        mp_comm_wtime_end = mpi_wtime()
2739        mp_comm_time_total = mp_comm_time_total + &
2740             &(mp_comm_time_end - mp_comm_time_beg)
2741        mp_comm_wtime_total = mp_comm_wtime_total + &
2742             &(mp_comm_wtime_end - mp_comm_wtime_beg)
2743      end if
2744
2745    case ('flexpart')
2746      if (imode.eq.0) then
2747        mp_total_wtime_beg=mpi_wtime()
2748      else
2749        mp_total_wtime_end=mpi_wtime()
2750        mp_total_wtime_total = mp_total_wtime_end-mp_total_wtime_beg
2751      end if
2752
2753    case default
2754      write(*,*) 'mpi_mod::mpif_mtime> WARNING: unknown case identifier'
2755
2756    end select
2757
2758
2759  end subroutine mpif_mtime
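! Usage sketch (illustrative only; the calling code shown here is hypothetical):
! a section of code is timed by calling mpif_mtime with imode=0 immediately
! before it and imode=1 immediately after it, using one of the case identifiers
! handled above, e.g.
!
!   if (mp_measure_time) call mpif_mtime('advance',0)
!   ! ... code section to be timed ...
!   if (mp_measure_time) call mpif_mtime('advance',1)
!
! The accumulated totals (e.g. mp_advance_wtime_total) are printed by
! mpif_finalize when mp_measure_time is .true.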
2760
2761
2762  subroutine mpif_finalize
2763!***********************************************************************
2764! Finalize MPI
2765! Optionally print detailed time diagnostics
2766!***********************************************************************
2767    implicit none
2768
2769    integer :: ip !,j,r
2770
2771!***********************************************************************
2772
2773    if (mp_measure_time) then
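! Print the statistics from each process in turn: loop over all ranks with a
! barrier in each iteration so that the output is roughly ordered by process ID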
2774      do ip=0, mp_np-1
2775        call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
2776
2777        if (ip == mp_pid) then
2778          write(*,FMT='(72("#"))')
2779          write(*,FMT='(A60,I3)') 'STATISTICS FOR MPI PROCESS:', mp_pid
2780          if (ip == 0) then
2781            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR ROOT-ONLY (PID 0) &
2782                 &SECTIONS: ', mp_root_time_total
2783            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ROOT-ONLY (PID 0) &
2784                 &SECTIONS:', mp_root_wtime_total
2785          end if
2786          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FLEXPART: ' &
2787               &, mp_total_wtime_total
2788          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI COMMUNICATIONS:',&
2789               & mp_comm_time_total
2790          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI COMMUNICATIONS:',&
2791               & mp_comm_wtime_total
2792          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI BARRIERS:',&
2793               & mp_barrier_time_total
2794          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI BARRIERS:',&
2795               & mp_barrier_wtime_total
2796          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER:',&
2797               & tm_tot_total
2798          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER/NUMPART LOOP:',&
2799               & tm_nploop_total
2800          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ADVANCE:',&
2801               & mp_advance_wtime_total
2802          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR GETFIELDS:',&
2803               & mp_getfields_wtime_total
2804          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
2805               & mp_getfields_time_total
2806!          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
2807!               & mp_readwind_wtime_total
2808!          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
2809!               & mp_readwind_time_total
2810          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
2811               & mp_io_wtime_total
2812          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR FILE IO:',&
2813               & mp_io_time_total
2814          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR WETDEPO:',&
2815               & mp_wetdepo_wtime_total
2816          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR WETDEPO:',&
2817               & mp_wetdepo_time_total
2818          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',&
2819               & mp_conccalc_time_total
2820! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
2821!      & mp_vt_wtime_total
2822! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
2823!      & mp_vt_time_total
2824! NB: 'call flush()' without a unit number is a GNU extension (the standard
2825! alternative is the Fortran 2003 FLUSH statement); comment out if it gives problems
2826!          call flush()
2827        end if
2828      end do
2829    end if
2830
2831! This barrier keeps the output ordered: the summary below is printed only after the per-process statistics above
2832    call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
2833
2834    if (lroot.and.mp_measure_time) then
2835      write(*,FMT='(72("#"))')
2836      write(*,*) "To turn off output of time measurements, set "
2837      write(*,*) "    mp_measure_time=.false."
2838      write(*,*) "in file mpi_mod.f90"
2839      write(*,FMT='(72("#"))')
2840    end if
2841
2842! j=mp_pid*nvar_async
2843! In the implementation with 3 fields, the processes may have posted
2844! MPI_Irecv requests that should be cancelled here
2845! if (.not.lmp_sync) then
2846!   r=mp_pid*nvar_async
2847!   do j=r,r+nvar_async-1
2848!     call MPI_Cancel(j,mp_ierr)
2849!     if (mp_ierr /= 0) write(*,*) '#### mpif_finalize::MPI_Cancel> ERROR ####'
2850!   end do
2851! end if
2852
2853    call MPI_FINALIZE(mp_ierr)
2854    if (mp_ierr /= 0) then
2855      write(*,*) '#### mpif_finalize::MPI_FINALIZE> MPI ERROR ', mp_ierr, ' ####'
2856      stop
2857    end if
2858
2859
2860  end subroutine mpif_finalize
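! Usage sketch (assumed calling sequence, for illustration only): each process
! calls mpif_finalize once at the very end of the run, after its last MPI
! operation, e.g.
!
!   if (mp_measure_time) call mpif_mtime('flexpart',1)  ! stop the total wall-clock timer
!   call mpif_finalize                                  ! print statistics and call MPI_FINALIZE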
2861
2862
2863  subroutine get_lun(my_lun)
2864!***********************************************************************
2865! get_lun:
2866!   Starting from 100, get next free logical unit number
2867!***********************************************************************
2868
2869    implicit none
2870
2871    integer, intent(inout) :: my_lun
2872    integer, save :: free_lun=100
2873    logical :: exists, iopen
2874
2875!***********************************************************************
2876
2877    loop1: do
2878      inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
2879      if (exists .and. .not.iopen) exit loop1
2880      free_lun = free_lun+1
2881    end do loop1
2882    my_lun = free_lun
2883
2884  end subroutine get_lun
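! Usage sketch (illustration; 'tmp_lun' and the file name are hypothetical):
!
!   integer :: tmp_lun
!   call get_lun(tmp_lun)
!   open(UNIT=tmp_lun, FILE='debug.dat', FORM='UNFORMATTED', STATUS='REPLACE')
!
! On compilers supporting Fortran 2008, OPEN(NEWUNIT=...) is a standard
! alternative to managing unit numbers manually.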
2885
2886
2887  subroutine write_data_dbg(array_in, array_name, tstep, ident)
2888!***********************************************************************
2889! Write one-dimensional arrays to file (for debugging purposes)
2890!***********************************************************************
2891    implicit none
2892
2893    real, intent(in), dimension(:) :: array_in
2894    integer, intent(in) :: tstep
2895    integer :: lios
2896    character(LEN=*), intent(in) :: ident, array_name
2897
2898    character(LEN=8) :: c_ts
2899    character(LEN=40) :: fn_1, fn_2
2900
2901!***********************************************************************
2902
2903    write(c_ts, FMT='(I8.8,BZ)') tstep
2904    fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
2905    write(c_ts, FMT='(I2.2,BZ)') mp_np
2906    fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'
2907
2908    call get_lun(dat_lun)
2909    open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
2910         FORM='UNFORMATTED', STATUS='REPLACE')
2911    write(UNIT=dat_lun, IOSTAT=lios) array_in
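! Warn if the unformatted write failed (lios holds the IOSTAT return value)
    if (lios.ne.0) write(*,*) '#### write_data_dbg> WARNING: IOSTAT=', lios, ' ####'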
2912    close(UNIT=dat_lun)
2913
2914  end subroutine write_data_dbg
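! Usage sketch (hypothetical call, for illustration only):
!
!   real, dimension(100) :: testdata
!   call write_data_dbg(testdata, 'testdata', itime, 'mydump')
!
! writes the unformatted file 'testdata-<itime>-mydump-np<mp_np>.dat', with
! <itime> zero-padded to 8 digits and <mp_np> to 2 digits.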
2915
2916
2917  subroutine set_fields_synthetic()
2918!*******************************************************************************
2919! DESCRIPTION
2920!   Set all meteorological fields to synthetic (usually constant/homogeneous)
2921!   values.
2922!   Used for validation and error-checking
2923!
2924! NOTE
2925!   This version uses asynchronous communications.
2926!
2927! VARIABLES
2928!   
2929!
2930!
2931!*******************************************************************************
2932    use com_mod
2933
2934    implicit none
2935
2936    integer :: li=1, ui=2 ! wfmem indices (i.e., operate on the entire field)
2937
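! With the asynchronous communication scheme (lmp_sync=.false.) three wind
! field time slots are kept in memory, so initialize all three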
2938    if (.not.lmp_sync) ui=3
2939
2940
2941! Variables transferred at initialization only
2942!*********************************************
2943!    readclouds=readclouds_
2944    oro(:,:)=0.0
2945    excessoro(:,:)=0.0
2946    lsm(:,:)=0.0
2947    xlanduse(:,:,:)=0.0
2948!    wftime
2949!    numbwf
2950!    nmixz
2951!    height
2952
2953! Time-varying fields:
2954    uu(:,:,:,li:ui)=10.0
2955    vv(:,:,:,li:ui)=0.0
2956    uupol(:,:,:,li:ui)=10.0
2957    vvpol(:,:,:,li:ui)=0.0
2958    ww(:,:,:,li:ui)=0.0
2959    tt(:,:,:,li:ui)=300.0
2960    rho(:,:,:,li:ui)=1.3
2961    drhodz(:,:,:,li:ui)=0.0
2962    tth(:,:,:,li:ui)=0.0
2963    qvh(:,:,:,li:ui)=1.0
2964    qv(:,:,:,li:ui)=1.0
2965
2966    pv(:,:,:,li:ui)=1.0
2967    clouds(:,:,:,li:ui)=0
2968
2969    clwc(:,:,:,li:ui)=0.0
2970    ciwc(:,:,:,li:ui)=0.0
2971
2972! 2D fields
2973
2974    cloudsh(:,:,li:ui)=0
2975    vdep(:,:,:,li:ui)=0.0
2976    ps(:,:,:,li:ui)=1.0e5
2977    sd(:,:,:,li:ui)=0.0
2978    tcc(:,:,:,li:ui)=0.0
2979    tt2(:,:,:,li:ui)=300.
2980    td2(:,:,:,li:ui)=250.
2981    lsprec(:,:,:,li:ui)=0.0
2982    convprec(:,:,:,li:ui)=0.0
2983    ustar(:,:,:,li:ui)=1.0
2984    wstar(:,:,:,li:ui)=1.0
2985    hmix(:,:,:,li:ui)=10000.
2986    tropopause(:,:,:,li:ui)=10000.
2987    oli(:,:,:,li:ui)=0.01
2988
2989  end subroutine set_fields_synthetic
2990
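! Usage sketch (illustration; the surrounding logic is hypothetical): for a
! validation run, the meteorological fields can be overwritten with the
! homogeneous values above after they have been allocated and read once, e.g.
!
!   call set_fields_synthetic()   ! constant 10 m/s zonal wind, T=300 K, etc.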
2991end module mpi_mod