source: flexpart.git/src/mpi_mod.f90 @ 3481cc1

Last change on this file was 3481cc1, checked in by Ignacio Pisso <ip@…>, 4 years ago

move license from headers to a different file

1module mpi_mod
2
3!*****************************************************************************
4!                                                                            *
5!  DESCRIPTION                                                               *
6!    This module contains subroutines and common variables used for the      *
7!    MPI parallelization of FLEXPART.                                        *
8!                                                                            *
9!  NOTE                                                                      *
10!    Depending on the MPI library installed on your system (e.g. mpich2,     *
11!    OpenMPI) you may need to choose below in this file between              *
12!      use mpi                                                               *
13!    (if the MPI library comes with the file 'mpi.mod'); or                  *
14!      include 'mpif.h'                                                      *
15!                                                                            *
16!                                                                            *
17!*****************************************************************************
18!                                                                            *
19! Variables:                                                                 *
20!                                                                            *
21! mp_ierr                 MPI error code                                     *
22! mp_np                   Number of MPI processes                            *
23! mp_pid                  Process ID of each MPI process                     *
24! mp_seed                 Parameter for random number seed                   *
25! read_grp_min            Minimum number of processes at which one will be   *
26!                         used as reader                                     *
27! numpart_mpi,            Number of particles per node                       *
28! maxpart_mpi                                                                *
29! mp_partid               MPI process ID for particle calculation            *
30! mp_partgroup_           Refers to the subset of processors performing      *
31!                         loops over particles. Will be all processes        *
32!                         unless a dedicated process runs getfields/readwind *
33! lmp_sync                If .false., use asynchronous MPI                   *
34! mp_cp                   Real precision to use for deposition fields        *
35!                                                                            *
36!                                                                            *
37!                                                                            *
38!                                                                            *
39!*****************************************************************************
40!                                                                            *
41! Modification by DJM, 2017-05-09 - added #ifdef USE_MPIINPLACE cpp          *
42! directive to mpif_tm_reduce_grid() to ensure that MPI_IN_PLACE is          *
43! used in the MPI_Reduce() only if specifically compiled with that           *
44! directive.                                                                 *
45!                                                                            *
46!*****************************************************************************
47
48  use mpi
49  use par_mod, only: dp,sp
50  use com_mod, only: lroot
51
52  implicit none
53
54!  include 'mpif.h'
55
56  public
57
58! Set aside a process for reading windfields if using at least this many processes
59!==================================================
60  integer, parameter, private :: read_grp_min=4
61!==================================================
62
63! Variables for each MPI process in the world group
64  integer :: mp_ierr, mp_np, mp_pid, mp_partid
65  integer, private :: world_group_id
66
67! Variables for MPI processes in the 'particle' group
68  integer, allocatable, dimension(:) :: mp_partgroup_rank
69  integer, allocatable, dimension(:) :: npart_per_process
70  integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
71
72  integer :: mp_seed=0
73  integer, parameter :: mp_sp=MPI_REAL4, mp_dp=MPI_REAL8
74  integer :: mp_cp
75  integer, parameter :: id_root=0 ! master process
76
77! MPI tags/requests for send/receive operation
78  integer :: tm1
79  integer, parameter :: nvar_async=26
80!integer, dimension(:), allocatable :: tags
81  integer, dimension(:), allocatable :: reqs
82
83! Status array used for certain MPI operations (MPI_RECV)
84  integer, dimension(MPI_STATUS_SIZE) :: mp_status
85
86
87  integer :: id_read   ! readwind/getfield process
88  integer :: numpart_mpi,maxpart_mpi ! number of particles per node
89  integer :: tot_numpart=0
90  integer :: mp_comm_used ! global or subgroup communicator
91
92  logical :: lmpreader=.false. ! is set to true for reading process(es) only.
93  logical :: lmp_use_reader=.false. ! true if separate readwind process is used
94
95! .true. if only using synchronous MPI send/recv (default)
96! If setting this to .false., numwfmem must be set to 3
97!===============================================================================
98  logical :: lmp_sync=.true.
99!===============================================================================
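! To enable asynchronous reading, set lmp_sync=.false. here, set numwfmem=3
! in par_mod and run with at least read_grp_min processes; otherwise
! mpif_init below falls back to synchronous mode.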
100
101! mp_dbg_mode       Used for debugging MPI.
102! mp_dev_mode       various checks related to debugging the parallel code
103! mp_dbg_out        write some arrays to data file for debugging
104! mp_measure_time   Measure cpu/wall time, write out at end of run
105! mp_time_barrier   Measure MPI barrier time
106! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
107!                   to standard output (this does not otherwise affect the simulation)
108  logical, parameter :: mp_dbg_mode = .false.
109  logical, parameter :: mp_dev_mode = .false.
110  logical, parameter :: mp_dbg_out = .false.
111  logical, parameter :: mp_time_barrier=.true.
112  logical, parameter :: mp_measure_time=.false.
113  logical, parameter :: mp_exact_numpart=.true.
114
115! for measuring CPU/Wall time
116  real(sp),private :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.
117  real(dp),private :: mp_comm_wtime_beg, mp_comm_wtime_end, mp_comm_wtime_total=0.
118  real(sp),private :: mp_root_time_beg, mp_root_time_end, mp_root_time_total=0.
119  real(dp),private :: mp_root_wtime_beg, mp_root_wtime_end, mp_root_wtime_total=0.
120  real(sp),private :: mp_barrier_time_beg, mp_barrier_time_end, mp_barrier_time_total=0.
121  real(dp),private :: mp_barrier_wtime_beg, mp_barrier_wtime_end, mp_barrier_wtime_total=0.
122  real(sp),private :: tm_nploop_beg, tm_nploop_end, tm_nploop_total=0.
123  real(sp),private :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
124  real(dp),private :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
125  real(sp),private :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
126  real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
127  real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
128  real(dp),private :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
129  real(sp),private :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
130  real(dp),private :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.
131  real(sp),private :: mp_wetdepo_time_beg, mp_wetdepo_time_end, mp_wetdepo_time_total=0.
132  real(dp),private :: mp_advance_wtime_beg, mp_advance_wtime_end, mp_advance_wtime_total=0.
133  real(dp),private :: mp_conccalc_time_beg, mp_conccalc_time_end, mp_conccalc_time_total=0.
134  real(dp),private :: mp_total_wtime_beg, mp_total_wtime_end, mp_total_wtime_total=0.
135  real(dp),private :: mp_vt_wtime_beg, mp_vt_wtime_end, mp_vt_wtime_total
136  real(sp),private :: mp_vt_time_beg, mp_vt_time_end, mp_vt_time_total
137
138! dat_lun           logical unit number for i/o
139  integer, private :: dat_lun
140
141! Temporary arrays for particles (allocated and deallocated as needed)
142  integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, &
143       & itramem_tmp, itrasplit_tmp
144  real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp
145  real, allocatable, dimension(:) :: ztra1_tmp
146  real, allocatable, dimension(:,:) :: xmass1_tmp
147
148! mp_redist_fract        Exchange particles between processes if the relative
149!                        difference exceeds this value. A good value is between 0.0 and 0.5
150! mp_maxpart_factor      Allocate more memory per process than strictly needed
151!                        (safety factor). Recommended value between 1.5 and 2.5
152! mp_min_redist          Do not redistribute particles if below this limit
153  real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5
154  integer,parameter :: mp_min_redist=100000
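! Example: with the values above, mpif_calculate_part_redist only moves
! particles when the most loaded process holds more than 100000 of them and
! the gap to the least loaded process exceeds 20% of its own count.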
155
156
157contains
158
159  subroutine mpif_init
160!***********************************************************************
161! DESCRIPTION
162!   Initialize MPI.
163!   
164!   Create the global communicator MPI_COMM_WORLD
165!   If using a separate MPI process for getfields/readwind, a subgroup
166!   is created for the other processes.
167!
168! VARIABLES
169!   mpi_mode    default 0, set to 2/3 if running MPI version
170!   mp_np       number of running processes, decided at run-time
171!***********************************************************************
172    use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
173    use com_mod, only: mpi_mode, verbosity, creceptor0
174
175    implicit none
176
177    integer :: i,j,s,addmaxpart=0
178
179! Each process gets an ID (mp_pid) in the range 0,..,mp_np-1
180    call MPI_INIT(mp_ierr)
181    if (mp_ierr /= 0) goto 100
182    call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)
183    if (mp_ierr /= 0) goto 100
184    call MPI_COMM_SIZE(MPI_COMM_WORLD, mp_np, mp_ierr)
185    if (mp_ierr /= 0) goto 100
186
187
188! Variable mpi_mode is used to handle subroutines common to parallel/serial version
189    if (lmp_sync) then
190      mpi_mode=2 ! hold 2 windfields in memory
191    else
192      mpi_mode=3 ! hold 3 windfields in memory
193    end if
194
195    if (mp_pid.ne.0) then
196      lroot = .false.
197    end if
198
199! Set MPI precision to use for transferring deposition fields
200!************************************************************
201    if (dep_prec==dp) then
202      mp_cp = MPI_REAL8
203! TODO: write info message for serial version as well
204      if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields'
205    else if (dep_prec==sp) then
206      mp_cp = MPI_REAL4
207      if (lroot.and.verbosity>0) write(*,*) 'Using single precision for deposition fields'
208    else
209      write(*,*) 'ERROR: something went wrong setting MPI real precision'
210      stop
211    end if
212
213! Check for sensible combination of parameters
214!*********************************************
215
216    if (.not.lmp_sync.and.numwfmem.ne.3) then
217      if (lroot) then
218        write(*,FMT='(80("#"))')
219        write(*,*) '#### mpi_mod::mpif_init> ERROR: ', &
220             & 'numwfmem must be set to 3 for asynchronous reading ####'
221        write(*,FMT='(80("#"))')
222      end if
223      call MPI_FINALIZE(mp_ierr)
224      stop
225    else if (lmp_sync.and.numwfmem.ne.2.and.lroot) then
226      write(*,FMT='(80("#"))')
227      write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
228           & 'numwfmem should be set to 2 for synchronous'
229      write(*,*) ' reading. Results will still be valid, but unnecessary memory &
230           &is allocated.'
231      write(*,FMT='(80("#"))')
232! Force "synchronized" version if all processes will call getfields
233    else if ((.not.lmp_sync.and.mp_np.lt.read_grp_min).or.(mp_np.eq.1)) then
234      if (lroot) then
235        write(*,FMT='(80("#"))')
236        write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
237             & 'all procs call getfields. Setting lmp_sync=.true.'
238        write(*,FMT='(80("#"))')
239      end if
240      lmp_sync=.true.
241    end if
242
243! TODO: Add more warnings for unimplemented flexpart features
244
245! Set ID of process that calls getfield/readwind.
246! Using the last process in the group ensures statistically identical results
247! to running with one process fewer and no separate read process
248!**********************************************************************
249
250!    id_read = min(mp_np-1, 1)
251    id_read = mp_np-1
252
253    if (mp_pid.eq.id_read) lmpreader=.true.
254
255    call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)
256
257! Create a MPI group/communicator that will calculate trajectories.
258! Skip this step if program is run with only a few processes
259!************************************************************************
260    allocate(mp_partgroup_rank(0:mp_np-2))
261
262! This allows checking for allocation of arrays when no subgroup is used
263    mp_partgroup_pid=0
264
265    if (read_grp_min.lt.2) then
266      write(*,*) '#### mpi_mod::mpif_init> ERROR ####', &
267           & 'read_grp_min should be at least 2. Exiting'
268      stop
269    end if
270
271    if (mp_np.ge.read_grp_min) then
272      lmp_use_reader = .true.
273
274! Map the subgroup IDs = 0,1,2,...,mp_np-2, skipping the reader process
275      j=-1
276      do i=0, mp_np-2 !loop over all (subgroup) IDs
277        j=j+1
278        if (i.eq.id_read) then
279          j=j+1
280        end if
281        mp_partgroup_rank(i) = j
282      end do
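! With id_read=mp_np-1 (set above) the skip branch never triggers, so
! mp_partgroup_rank simply holds world ranks 0..mp_np-2; the extra j=j+1
! would only matter if the reader were placed at a lower rank.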
283
284      call MPI_Group_incl (world_group_id, mp_np-1, mp_partgroup_rank, &
285           &mp_partgroup_pid, mp_ierr)
286      if (mp_ierr /= 0) goto 100
287      call MPI_Comm_create (MPI_COMM_WORLD, mp_partgroup_pid, mp_partgroup_comm, mp_ierr)
288      if (mp_ierr /= 0) goto 100
289
290      if (mp_pid.ne.id_read) then
291        call MPI_Comm_rank (mp_partgroup_comm, mp_partgroup_pid, mp_ierr)
292        if (mp_ierr /= 0) goto 100
293
294! Get size of new group (=mp_np-1)
295        call MPI_COMM_SIZE(mp_partgroup_comm, mp_partgroup_np, mp_ierr)
296        if (mp_ierr /= 0) goto 100
297        if (mp_partgroup_np.ne.mp_np-1) then
298          write(*,*)  '#### mpi_mod:: mpif_init> ERROR ####&
299               & mp_partgroup_np.ne.mp_np-1'
300          stop
301        endif
302
303      else
304        mp_partgroup_pid = -1
305      end if
306    end if
307
308    if (lmp_use_reader) then
309      mp_comm_used = mp_partgroup_comm
310      mp_partid = mp_partgroup_pid
311      mp_partgroup_np=mp_np-1
312    else
313      mp_comm_used = MPI_COMM_WORLD
314      mp_partgroup_np = mp_np
315      mp_partid = mp_pid
316    end if
317
318! Set maxpart per process
319! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
320    maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
321    if (mp_np == 1) maxpart_mpi = maxpart
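! Example: maxpart=10000000 spread over mp_partgroup_np=4 with
! mp_maxpart_factor=1.5 gives maxpart_mpi=3750000 on each particle process.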
322
323! Do not allocate particle data arrays for readwind process
324    if (lmpreader.and.lmp_use_reader) then
325      maxpart_mpi=0
326    end if
327
328! Set random seed for each non-root process
329    if (mp_pid.gt.0) then
330      s = 244
331      mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
332    end if
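! The expression above gives each non-root process a rank-dependent negative
! seed (taken modulo the prime 104729), presumably so that the per-process
! random number streams are decorrelated; the root process keeps mp_seed=0.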
333    if (mp_dbg_mode) then
334      mp_seed=0
335      if (lroot) write(*,*) 'MPI: setting seed=0'
336    end if
337
338! Allocate request array for asynchronous MPI
339    if (.not.lmp_sync) then
340      allocate(reqs(0:nvar_async*mp_np-1))
341      reqs(:)=MPI_REQUEST_NULL
342    else
343      allocate(reqs(0:1))
344      reqs(:)=MPI_REQUEST_NULL
345    end if
346
347! Allocate array for number of particles per process   
348    allocate(npart_per_process(0:mp_partgroup_np-1))
349
350! Write whether MPI_IN_PLACE is used or not
351#ifdef USE_MPIINPLACE
352    if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
353#else
354    if (lroot) allocate(creceptor0(maxreceptor,maxspec))
355    if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
356#endif
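! When MPI_IN_PLACE is not used, creceptor0 is allocated on the root process,
! presumably as a separate receive buffer for reducing receptor
! concentrations (hence its maxreceptor x maxspec shape).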
357    goto 101
358
359100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
360    stop
361
362101 end subroutine mpif_init
363
364
365  subroutine mpif_mpi_barrier
366!***********************************************************************
367! Set MPI barrier (all processes are synchronized here), with the
368! possible exception of a separate 'readwind' process.
369! Optionally measure cpu/wall time.
370!
371!***********************************************************************
372    implicit none
373
374    if (mp_time_barrier) then
375      call cpu_time(mp_barrier_time_beg)
376      mp_barrier_wtime_beg = mpi_wtime()
377    endif
378
379    call MPI_BARRIER(mp_comm_used, mp_ierr)
380    if (mp_ierr /= 0) goto 100
381
382    if (mp_time_barrier) then
383      call cpu_time(mp_barrier_time_end)
384      mp_barrier_wtime_end = mpi_wtime()
385
386      mp_barrier_time_total=mp_barrier_time_total+&
387           &(mp_barrier_time_end-mp_barrier_time_beg)
388      mp_barrier_wtime_total=mp_barrier_wtime_total+&
389           &(mp_barrier_wtime_end-mp_barrier_wtime_beg)
390    end if
391
392    goto 101
393
394100 write(*,*) '#### mpi_mod::mpif_mpi_barrier> ERROR ####', mp_ierr
395    stop
396
397101 end subroutine mpif_mpi_barrier
398
399
400  subroutine mpif_com_mod_allocate
401!*******************************************************************************   
402! Dynamic allocation of arrays (in serial code these have size maxpart)
403!
404! Not in use anymore, moved to com_mod for interoperability with serial version
405!
406! TODO: remove
407!*******************************************************************************
408    use com_mod
409
410    implicit none
411
412    integer :: array_size
413
414    array_size = maxpart_mpi
415
416    allocate(itra1(array_size),npoint(array_size),&
417         & nclass(array_size),idt(array_size),&
418         & itramem(array_size),itrasplit(array_size),&
419         & xtra1(array_size),ytra1(array_size),&
420         & ztra1(array_size),xmass1(array_size, maxspec))
421
422    allocate(uap(array_size),ucp(array_size),&
423         & uzp(array_size),us(array_size),&
424         & vs(array_size),&
425         & ws(array_size),cbt(array_size))
426
427
428  end subroutine mpif_com_mod_allocate
429
430
431  subroutine mpif_tm_send_dims
432!***********************************************************************
433! Distribute array dimensions from pid0 to all processes.
434! numpart_mpi/numpart is sent to allow dynamic allocation
435!
436! Currently not in use.
437!
438! Variables of similar type (integer, real) are placed in an array
439! and sent collectively, to avoid the overhead associated with individual
440! MPI send/receives
441!
442!
443!***********************************************************************
444    use com_mod
445
446    implicit none
447
448    integer :: add_part=0
449
450    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
451
452! MPI subgroup does the particle-loop
453    if (lmp_use_reader) then
454      if (mod(numpart,mp_partgroup_np).ne.0) add_part=1
455      numpart_mpi=int(numpart/mp_partgroup_np)+add_part
456    else
457      if (mod(numpart,mp_np).ne.0) add_part=1
458      numpart_mpi=int(numpart/mp_np)+add_part
459    end if
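! Example: numpart=1000003 split over mp_partgroup_np=4 gives add_part=1 and
! numpart_mpi=250001 on every process.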
460
461
462! redefine numpart as 'numpart per process' throughout the code
463!**************************************************************
464    numpart = numpart_mpi
465
466  end subroutine mpif_tm_send_dims
467
468
469  subroutine mpif_send_part_properties(num_part)
470!***********************************************************************
471! Distribute particle properties from root to all processes.
472
473! Used by init_domainfill_mpi
474!
475! Variables:
476!
477! num_part        input, number of particles per process (rounded up)
478!
479!***********************************************************************
480    use com_mod
481
482    implicit none
483
484    integer,intent(in) :: num_part
485    integer :: i,jj, addone
486
487! Exit if too many particles
488    if (num_part.gt.maxpart_mpi) then
489      write(*,*) '#####################################################'
490      write(*,*) '#### ERROR - TOTAL NUMBER OF PARTICLES REQUIRED  ####'
491      write(*,*) '#### EXCEEDS THE MAXIMUM ALLOWED NUMBER. REDUCE  ####'
492      write(*,*) '#### EITHER NUMBER OF PARTICLES PER RELEASE POINT####'
493      write(*,*) '#### OR INCREASE MAXPART.                        ####'
494      write(*,*) '#####################################################'
495!      call MPI_FINALIZE(mp_ierr)
496      stop
497    end if
498
499
500! Time for MPI communications
501!****************************
502    if (mp_measure_time) call mpif_mtime('commtime',0)
503
504! Distribute variables, send from pid 0 to other processes (including itself)
505!****************************************************************************
506
507    call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,&
508         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
509    if (mp_ierr /= 0) goto 600
510    call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,&
511         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
512    if (mp_ierr /= 0) goto 600
513    call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,&
514         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
515    if (mp_ierr /= 0) goto 600
516    call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,&
517         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
518    if (mp_ierr /= 0) goto 600
519    call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,&
520         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
521    if (mp_ierr /= 0) goto 600
522    call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,&
523         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
524    if (mp_ierr /= 0) goto 600
525    call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,&
526         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
527    if (mp_ierr /= 0) goto 600
528    call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,&
529         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
530    if (mp_ierr /= 0) goto 600
531    call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,&
532         &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
533    if (mp_ierr /= 0) goto 600
534    do i=1,nspec
535      call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),&
536           &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
537      if (mp_ierr /= 0) goto 600
538    end do
539
540    if (mp_measure_time) call mpif_mtime('commtime',1)
541
542    goto 601
543
544600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
545    stop
546
547! After the transfer of particles it is possible that the value of
548! "numpart" is larger than the number actually in use, so we reduce it if there are
549! invalid particles at the end of the arrays
550
551601 do i=numpart, 1, -1
552      if (itra1(i).eq.-999999999) then
553        numpart=numpart-1
554      else
555        exit
556      end if
557    end do
558
559
560  end subroutine mpif_send_part_properties
561
562
563  subroutine mpif_calculate_part_redist(itime)
564!***********************************************************************
565! Calculate number of particles to redistribute between processes. This routine
566! can be called at regular time intervals to keep the number of
567! particles on each process balanced.
568!
569! First, all processes report their local "numpart" to each other, which is
570! stored in array "npart_per_process(np)". This array is sorted from low to
571! high values, along with a corresponding process ID array "idx_arr(np)".
572! If the relative difference between the highest and lowest "numpart_mpi" value
573! is above a threshold, particles are transferred from process idx_arr(np-1)
574! to process idx_arr(0). Repeat for processes idx_arr(np-1-i) and idx_arr(i)
575! for all valid i.
576! Note: If np is an odd number, the process with the median
577! number of particles is left unchanged
578!
579! VARIABLES
580! itime        input, current time
581!***********************************************************************
582    use com_mod
583
584    implicit none
585   
586    integer, intent(in) :: itime
587    real :: pmin,z
588    integer :: i,jj,nn, num_part=1,m,imin, num_trans
589    logical :: first_iter
590    integer,allocatable,dimension(:) :: idx_arr
591    real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this
592
593! Exit if running with only 1 process
594!************************************************************************
595    if (mp_np.eq.1) return
596
597! All processes exchange information on number of particles
598!****************************************************************************
599    allocate( idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
600
601    call MPI_Allgather(numpart, 1, MPI_INTEGER, npart_per_process, &
602         & 1, MPI_INTEGER, mp_comm_used, mp_ierr)
603
604
605! Sort from lowest to highest
606! Initial guess: correct order
607    sorted(:) = npart_per_process(:)
608    do i=0, mp_partgroup_np-1
609      idx_arr(i) = i
610    end do
611
612! Do not rebalance particles for ipout=3   
613    if (ipout.eq.3) return
614
615! For each successive element in index array, see if a lower value exists
616    do i=0, mp_partgroup_np-2
617      pmin=sorted(i)
618      imin=idx_arr(i)
619      m=i+1
620      do jj=m, mp_partgroup_np-1
621        if (pmin.le.sorted(jj)) cycle
622        z=pmin
623        pmin=sorted(jj)
624        sorted(jj)=z
625
626        nn=imin
627        imin=idx_arr(jj)
628        idx_arr(jj)=nn
629      end do
630      sorted(i)=pmin
631      idx_arr(i)=imin
632    end do
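! (A simple selection sort: after the loop, sorted holds the particle counts
! in ascending order and idx_arr maps each sorted position back to the
! owning process ID.)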
633
634! For each pair of processes, transfer particles if the difference is above a
635! limit and numpart at the sending process is large enough
636
637    m=mp_partgroup_np-1 ! index for last sorted process (most particles)
638    do i=0,mp_partgroup_np/2-1
639      num_trans = npart_per_process(idx_arr(m)) - npart_per_process(idx_arr(i))
640      if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
641        if ( npart_per_process(idx_arr(m)).gt.mp_min_redist.and.&
642             & real(num_trans)/real(npart_per_process(idx_arr(m))).gt.mp_redist_fract) then
643! DBG
644          ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process', &
645          !      &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process
646! DBG
647          call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
648        end if
649      end if
650      m=m-1
651    end do
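! Example: with 5 particle processes the loop pairs the two most loaded with
! the two least loaded, i.e. idx_arr(4)<->idx_arr(0) and idx_arr(3)<->idx_arr(1),
! while the median process idx_arr(2) is left untouched as noted in the header.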
652
653    deallocate(idx_arr, sorted)
654
655  end subroutine mpif_calculate_part_redist
656
657
658  subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans)
659!***********************************************************************
660! Transfer particle properties between two arbitrary processes.
661!
662! VARIABLES
663!
664! itime           input, current time
665! src_proc        input, ID of source process
666! dest_proc       input, ID of destination process
667! num_trans       input, number of particles to transfer
668!
669!************************************************************************
670    use com_mod !TODO:  ,only: nclass etc
671
672    implicit none
673
674    integer, intent(in) :: itime, src_proc, dest_proc, num_trans
675    integer :: ll, ul ! lower and upper indices in arrays
676    integer :: arr_sz ! size of temporary arrays
677    integer :: mtag   ! MPI message tag
678    integer :: i, j, minpart, ipart, maxnumpart
679 
680! Check for invalid input arguments
681!**********************************
682    if (src_proc.eq.dest_proc) then
683      write(*,*) '<mpi_mod::mpif_redist_part>: Error: &
684           &src_proc.eq.dest_proc'
685      stop
686    end if
687
688! Measure time for MPI communications
689!************************************
690    if (mp_measure_time) call mpif_mtime('commtime',0)
691
692! Send particles
693!***************
694    if (mp_partid.eq.src_proc) then
695      mtag = 2000
696
697! Array indices for the transferred particles
698      ll=numpart-num_trans+1
699      ul=numpart
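! Example: numpart=100000 and num_trans=20000 give ll=80001, ul=100000; these
! slots are marked invalid (itra1=-999999999) once the sends are done.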
700
701      if (mp_dev_mode) then
702        write(*,FMT='(72("#"))')
703        write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc
704        write(*,*) "numpart before: ", numpart
705      end if
706
707      call MPI_SEND(nclass(ll:ul),num_trans,&
708           & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr)
709
710      call MPI_SEND(npoint(ll:ul),num_trans,&
711           & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr)
712
713      call MPI_SEND(itra1(ll:ul),num_trans, &
714           & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr)
715
716      call MPI_SEND(idt(ll:ul),num_trans, &
717           & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr)
718
719      call MPI_SEND(itramem(ll:ul),num_trans, &
720           & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr)
721
722      call MPI_SEND(itrasplit(ll:ul),num_trans,&
723           & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr)
724
725      call MPI_SEND(xtra1(ll:ul),num_trans, &
726           & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr)
727
728      call MPI_SEND(ytra1(ll:ul),num_trans,&
729           & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr)
730
731      call MPI_SEND(ztra1(ll:ul),num_trans,&
732           & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr)
733
734      do j=1,nspec
735        call MPI_SEND(xmass1(ll:ul,j),num_trans,&
736             & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr)
737      end do
738
739! Terminate transferred particles, update numpart
740      itra1(ll:ul) = -999999999
741
742      numpart = numpart-num_trans
743
744      if (mp_dev_mode) then
745        write(*,*) "numpart after: ", numpart
746        write(*,FMT='(72("#"))')
747      end if
748
749    else if (mp_partid.eq.dest_proc) then
750
751! Receive particles
752!******************
753      mtag = 2000
754      ! Array indices for the transferred particles
755      ll=numpart+1
756      ul=numpart+num_trans
757
758      if (mp_dev_mode) then
759        write(*,FMT='(72("#"))')
760        write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc
761        write(*,*) "numpart before: ", numpart
762      end if
763
764! We could receive the data directly at nclass(ll:ul) etc., but this leaves
765! vacant spaces at lower indices. Using temporary arrays instead.
766      arr_sz = ul-ll+1
767      allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),&
768           & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),&
769           & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),&
770           & xmass1_tmp(arr_sz, maxspec))
771     
772      call MPI_RECV(nclass_tmp,num_trans,&
773           & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr)
774
775      call MPI_RECV(npoint_tmp,num_trans,&
776           & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr)
777
778      call MPI_RECV(itra1_tmp,num_trans, &
779           & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr)
780
781      call MPI_RECV(idt_tmp,num_trans, &
782           & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr)
783
784      call MPI_RECV(itramem_tmp,num_trans, &
785           & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr)
786
787      call MPI_RECV(itrasplit_tmp,num_trans,&
788           & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr)
789
790      call MPI_RECV(xtra1_tmp,num_trans, &
791           & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr)
792
793      call MPI_RECV(ytra1_tmp,num_trans,&
794           & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr)
795
796      call MPI_RECV(ztra1_tmp,num_trans,&
797           & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr)
798
799      do j=1,nspec
800        call MPI_RECV(xmass1_tmp(:,j),num_trans,&
801             & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr)
802      end do
803
804! This is the maximum value numpart can possibly have after the transfer
805      maxnumpart=numpart+num_trans
806
807! Search for vacant space and copy from temporary storage
808!********************************************************
809      minpart=1
810      do i=1, num_trans
811! Take into account that we may have transferred invalid particles
812        if (itra1_tmp(i).ne.itime) cycle
813        do ipart=minpart,maxnumpart
814          if (itra1(ipart).ne.itime) then
815            itra1(ipart) = itra1_tmp(i)
816            npoint(ipart) = npoint_tmp(i)
817            nclass(ipart) = nclass_tmp(i)
818            idt(ipart) = idt_tmp(i)
819            itramem(ipart) = itramem_tmp(i)
820            itrasplit(ipart) = itrasplit_tmp(i)
821            xtra1(ipart) = xtra1_tmp(i)
822            ytra1(ipart) = ytra1_tmp(i)
823            ztra1(ipart) = ztra1_tmp(i)
824            xmass1(ipart,:) = xmass1_tmp(i,:)
825            goto 200 ! Storage space has been found, stop searching
826          end if
827! :TODO: add check for whether too many particles are required
828        end do
829200     minpart=ipart+1
830      end do
831! Increase numpart, if necessary
832      numpart=max(numpart,ipart)
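! (ipart holds the highest array index filled in the search above, so numpart
! only grows if received particles landed beyond the previous end of the arrays.)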
833
834      deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
835           &itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
836
837      if (mp_dev_mode) then
838        write(*,*) "numpart after: ", numpart
839        write(*,FMT='(72("#"))')
840      end if
841
842    else
843! This routine should only be called by the two participating processes
844      write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part"
845      stop
846      return
847    end if
848
849! Measure time for MPI communications
850!************************************
851    if (mp_measure_time) call mpif_mtime('commtime',1)
852
853  end subroutine mpif_redist_part
854
855
856  subroutine mpif_tm_send_vars
857!***********************************************************************
858! Distribute particle variables from pid0 to all processes.
859! Called from timemanager
860! *NOT IN USE* at the moment, but can be useful for debugging
861!
862!***********************************************************************
863    use com_mod
864
865    implicit none
866
867    integer :: i
868
869! Time for MPI communications
870!****************************
871    if (mp_measure_time) call mpif_mtime('commtime',0)
872
873! Distribute variables, send from pid 0 to other processes
874!**********************************************************************
875
876! integers
877    if (lroot) then
878      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
879           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
880      if (mp_ierr /= 0) goto 600
881      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
882           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
883      if (mp_ierr /= 0) goto 600
884      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
885           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
886      if (mp_ierr /= 0) goto 600
887      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
888           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
889      if (mp_ierr /= 0) goto 600
890      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
891           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
892      if (mp_ierr /= 0) goto 600
893
894! int2
895      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
896           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
897      if (mp_ierr /= 0) goto 600
898
899! real
900      call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,&
901           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
902      if (mp_ierr /= 0) goto 600
903      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
904           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
905      if (mp_ierr /= 0) goto 600
906      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
907           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
908      if (mp_ierr /= 0) goto 600
909
910      call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,&
911           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
912      if (mp_ierr /= 0) goto 600
913      call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,&
914           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
915      if (mp_ierr /= 0) goto 600
916      call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,&
917           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
918      if (mp_ierr /= 0) goto 600
919
920      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
921           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
922      if (mp_ierr /= 0) goto 600
923      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
924           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
925      if (mp_ierr /= 0) goto 600
926      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,&
927           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
928      if (mp_ierr /= 0) goto 600
929
930      do i=1,nspec
931        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,&
932             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
933        if (mp_ierr /= 0) goto 600
934      end do
935
936    else ! (mp_pid >= 1)
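! On non-root ranks the send-buffer arguments of MPI_SCATTER are ignored, so
! the same array can safely be passed as both send and receive buffer here.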
937! integers
938      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
939           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
940      if (mp_ierr /= 0) goto 600
941      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
942           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
943      if (mp_ierr /= 0) goto 600
944      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
945           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
946      if (mp_ierr /= 0) goto 600
947      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
948           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
949      if (mp_ierr /= 0) goto 600
950      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
951           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
952      if (mp_ierr /= 0) goto 600
953
954! int2
955      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
956           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
957      if (mp_ierr /= 0) goto 600
958
959! reals
960      call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,&
961           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
962      if (mp_ierr /= 0) goto 600
963      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,&
964           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
965      if (mp_ierr /= 0) goto 600
966      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,&
967           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
968      if (mp_ierr /= 0) goto 600
969
970      call MPI_SCATTER(us,numpart_mpi,mp_sp,us,&
971           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
972      if (mp_ierr /= 0) goto 600
973      call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,&
974           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
975      if (mp_ierr /= 0) goto 600
976      call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,&
977           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
978      if (mp_ierr /= 0) goto 600
979
980      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
981           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
982      if (mp_ierr /= 0) goto 600
983      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
984           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
985      if (mp_ierr /= 0) goto 600
986      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,&
987           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
988      if (mp_ierr /= 0) goto 600
989
990      do i=1,nspec
991        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),&
992             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
993        if (mp_ierr /= 0) goto 600
994      end do
995
996    end if !(lroot)
997
998    if (mp_measure_time) call mpif_mtime('commtime',1)
999
1000    goto 601
1001
1002600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
1003    stop
1004601 end subroutine mpif_tm_send_vars
1005
1006
1007  subroutine mpif_tm_collect_vars
1008!*******************************************************************************
1009! Collect particle data to PID 0 from all processes.                           *
1010!                                                                              *
1011! This can be called from timemanager_mpi, before calculating                  *
1012! concentrations/writing output grids.                                         *
1013!                                                                              *
1014! Currently *not in use*, as each process calculates concentrations            *
1015! separately, but can be useful for debugging                                  *
1016!                                                                              *
1017! To use this routine (together with mpif_tm_send_vars) to transfer data       *
1018! to the MPI root process (useful for debugging), insert code like this        *
1019! (in timemanager_mpi.f90)                                                     *
1020!                                                                              *
1021!      if (lroot) tot_numpart=numpart ! root stores total numpart              *
1022!      call mpif_tm_send_dims                                                  *
1023!      if (numpart>1) then                                                     *
1024!        call mpif_tm_send_vars                                                *
1025!      end if                                                                  *
1026!                                                                              *
1027!    To collect data from all processes to root process after a parallel       *
1028!    region:                                                                   *
1029!                                                                              *
1030!      if (numpart>0) then                                                     *
1031!          if (lroot) numpart = tot_numpart                                    *
1032!       call mpif_tm_collect_vars                                              *
1033!      end if                                                                  *
1034!                                                                              *
1035! Then a section that begins with "if (lroot) ..." will behave like            *
1036! serial flexpart                                                              *
1037!                                                                              *
1038!*******************************************************************************
1039    use com_mod !, only: numpart, nspec, itra1, npoint, nclass
1040
1041    implicit none
1042
1043    integer :: i
1044
1045    logical :: use_mp_vars = .false.! use mp_... type allocated vars
1046    logical :: use_in_place = .true.! use MPI_IN_PLACE to collect vars.
1047
1048
1049! Time for MPI communications
1050    if (mp_measure_time) call mpif_mtime('commtime',0)
1051
1052! Distribute variables, send from pid 0 to other processes
1053!**********************************************************************
1054
1055    if (.not. use_mp_vars.and..not.use_in_place) then
1056
1057! Integers:
1058      call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
1059           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1060      if (mp_ierr /= 0) goto 600
1061      call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
1062           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1063      if (mp_ierr /= 0) goto 600
1064      call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
1065           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1066      if (mp_ierr /= 0) goto 600
1067      call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
1068           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1069      if (mp_ierr /= 0) goto 600
1070      call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
1071           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1072      if (mp_ierr /= 0) goto 600
1073
1074! int2
1075      call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
1076           &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1077      if (mp_ierr /= 0) goto 600
1078
1079! Reals:
1080      call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
1081           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1082      if (mp_ierr /= 0) goto 600
1083      call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
1084           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1085      if (mp_ierr /= 0) goto 600
1086      call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
1087           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1088      if (mp_ierr /= 0) goto 600
1089
1090      call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
1091           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1092      if (mp_ierr /= 0) goto 600
1093      call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
1094           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1095      if (mp_ierr /= 0) goto 600
1096      call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
1097           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1098      if (mp_ierr /= 0) goto 600
1099
1100
1101      call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
1102           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1103      if (mp_ierr /= 0) goto 600
1104      call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
1105           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1106      if (mp_ierr /= 0) goto 600
1107      call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
1108           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1109      if (mp_ierr /= 0) goto 600
1110
1111      do i=1, nspec
1112        call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
1113             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1114        if (mp_ierr /= 0) goto 600
1115
1116      end do
1117
1118! Use variables npoint etc directly for communications
1119!***********************************************************************
1120    else if (use_in_place.and..not.use_mp_vars) then
1121      if (lroot) then
1122! Integers:
1123        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, npoint, &
1124             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1125        if (mp_ierr /= 0) goto 600
1126        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, idt, &
1127             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1128        if (mp_ierr /= 0) goto 600
1129        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itra1, &
1130             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1131        if (mp_ierr /= 0) goto 600
1132        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, nclass, &
1133             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1134        if (mp_ierr /= 0) goto 600
1135        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itramem, &
1136             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1137        if (mp_ierr /= 0) goto 600
1138
1139! int2
1140        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER2, cbt, &
1141             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1142        if (mp_ierr /= 0) goto 600
1143
1144! Reals:
1145        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uap, &
1146             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1147        if (mp_ierr /= 0) goto 600
1148        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ucp, &
1149             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1150        if (mp_ierr /= 0) goto 600
1151        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uzp, &
1152             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1153        if (mp_ierr /= 0) goto 600
1154
1155        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, us, &
1156             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1157        if (mp_ierr /= 0) goto 600
1158        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, vs, &
1159             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1160        if (mp_ierr /= 0) goto 600
1161        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ws, &
1162             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1163        if (mp_ierr /= 0) goto 600
1164
1165
1166        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, xtra1, &
1167             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1168        if (mp_ierr /= 0) goto 600
1169        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, ytra1, &
1170             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1171        if (mp_ierr /= 0) goto 600
1172        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ztra1, &
1173             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1174        if (mp_ierr /= 0) goto 600
1175
1176        do i=1, nspec
1177          call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, xmass1(:,i), &
1178               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1179          if (mp_ierr /= 0) goto 600
1180        end do
1181
1182      else ! (for mp_pid >= 1)
1183
1184! Integers:
1185        call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
1186             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1187        if (mp_ierr /= 0) goto 600
1188        call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
1189             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1190        if (mp_ierr /= 0) goto 600
1191        call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
1192             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1193        if (mp_ierr /= 0) goto 600
1194        call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
1195             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1196        if (mp_ierr /= 0) goto 600
1197        call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
1198             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
1199        if (mp_ierr /= 0) goto 600
1200
1201! int2
1202        call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
1203             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
1204        if (mp_ierr /= 0) goto 600
1205
1206! Reals:
1207        call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
1208             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1209        if (mp_ierr /= 0) goto 600
1210        call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
1211             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1212        if (mp_ierr /= 0) goto 600
1213        call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
1214             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1215        if (mp_ierr /= 0) goto 600
1216
1217        call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
1218             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1219        if (mp_ierr /= 0) goto 600
1220        call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
1221             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1222        if (mp_ierr /= 0) goto 600
1223        call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
1224             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1225        if (mp_ierr /= 0) goto 600
1226
1227
1228        call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
1229             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1230        if (mp_ierr /= 0) goto 600
1231        call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
1232             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
1233        if (mp_ierr /= 0) goto 600
1234        call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
1235             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1236        if (mp_ierr /= 0) goto 600
1237
1238        do i=1, nspec
1239          call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
1240               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
1241          if (mp_ierr /= 0) goto 600
1242        end do
1243      end if ! (lroot)
1244    end if !  (use_in_place)
1245
1246    if (mp_measure_time) call mpif_mtime('commtime',1)
1247
1248    goto 601
1249
1250600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
1251    stop
1252601 end subroutine mpif_tm_collect_vars
1253
1254
1255  subroutine mpif_gf_send_vars(memstat)
1256!*******************************************************************************
1257! DESCRIPTION
1258!   Distribute 'getfield' variables from reader process
1259!
1260!   Called from timemanager
1261!
1262! NOTE
1263!   This subroutine distributes windfields read from the reader process to
1264!   all other processes. Usually one set of fields is transferred, but at the
1265!   first timestep, when there are no fields in memory, both are transferred.
1266!   MPI_Bcast is used, so implicitly all processes are synchronized at this
1267!   step
1268!
1269!
1270!*******************************************************************************
1271    use com_mod
1272    use par_mod,only: numwfmem
1273
1274    implicit none
1275
1276    integer, intent(in) :: memstat
1277
1278! Common array sizes used for communications
1279    integer, parameter :: d3_size1 = nxmax*nymax*nzmax
1280    integer, parameter :: d3_size2 = nxmax*nymax*nuvzmax
1281    integer, parameter :: d2_size1 = nxmax*nymax
1282    integer, parameter :: d2_size2 = nxmax*nymax*maxspec
1283    integer, parameter :: d2_size3 = nxmax*nymax
1284    integer, parameter :: d1_size1 = maxwf
1285
1286    integer :: d3s1,d3s2,d2s1,d2s2
1287
1288! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1289    integer :: li,ui
1290
1291! The first time the routine is called, the unchangeable fields are transferred
1292    logical :: first_call=.true.
1293
1294!**********************************************************************
1295
1296! Sizes of arrays transferred
1297    d3s1=d3_size1
1298    d3s2=d3_size2
1299    d2s1=d2_size1
1300    d2s2=d2_size2
1301
1302! Decide which field(s) need to be transferred
1303    if (memstat.ge.32) then ! distribute 2 fields, to 1st/2nd indices
1304      li=1; ui=2
1305      d3s1=2*d3_size1
1306      d3s2=2*d3_size2
1307      d2s1=2*d2_size1
1308      d2s2=2*d2_size2
1309    else if (memstat.eq.17) then ! distribute 1 field, place on 1st index
1310      li=1; ui=1
1311    else if (memstat.eq.18) then ! distribute 1 field, place on 2nd index
1312      li=2; ui=2
1313    else if (memstat.eq.19) then ! distribute 1 field, place on 3rd index
1314      li=3; ui=3
1315    else
1316      write(*,*) '#### mpi_mod::mpif_gf_send_vars> ERROR: ', &
1317           & 'wrong value of memstat, exiting ####', memstat
1318      stop
1319    end if
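! When two fields are distributed (memstat.ge.32) the counts d3s1/d3s2/d2s1/d2s2
! are doubled above, so a single MPI_Bcast per variable covers both time
! indices in li:ui.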
1320
1321
1322! Time for MPI communications
1323    if (mp_measure_time) call mpif_mtime('commtime',0)
1324
1325! Send variables from getfield process (id_read) to other processes
1326!**********************************************************************
1327
1328! The non-reader processes need to know if cloud water was read.
1329    call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
1330    if (mp_ierr /= 0) goto 600
1331
1332! Static fields/variables sent only at startup
1333    if (first_call) then
1334      call MPI_Bcast(oro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1335      if (mp_ierr /= 0) goto 600
1336      call MPI_Bcast(excessoro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1337      if (mp_ierr /= 0) goto 600
1338      call MPI_Bcast(lsm(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1339      if (mp_ierr /= 0) goto 600
1340      call MPI_Bcast(xlanduse(:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1341      if (mp_ierr /= 0) goto 600
1342      call MPI_Bcast(wftime,d1_size1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1343      if (mp_ierr /= 0) goto 600
1344      call MPI_Bcast(numbwf,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1345      if (mp_ierr /= 0) goto 600
1346      call MPI_Bcast(nmixz,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1347      if (mp_ierr /= 0) goto 600
1348      call MPI_Bcast(height,nzmax,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
1349      if (mp_ierr /= 0) goto 600
1350
1351      first_call=.false.
1352    endif
1353
1354    call MPI_Bcast(uu(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1355    if (mp_ierr /= 0) goto 600
1356    call MPI_Bcast(vv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1357    if (mp_ierr /= 0) goto 600
1358    call MPI_Bcast(uupol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1359    if (mp_ierr /= 0) goto 600
1360    call MPI_Bcast(vvpol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1361    if (mp_ierr /= 0) goto 600
1362    call MPI_Bcast(ww(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1363    if (mp_ierr /= 0) goto 600
1364    call MPI_Bcast(tt(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1365    if (mp_ierr /= 0) goto 600
1366    call MPI_Bcast(rho(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1367    if (mp_ierr /= 0) goto 600
1368    call MPI_Bcast(drhodz(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1369    if (mp_ierr /= 0) goto 600
1370    call MPI_Bcast(tth(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1371    if (mp_ierr /= 0) goto 600
1372    call MPI_Bcast(qvh(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1373    if (mp_ierr /= 0) goto 600
1374    call MPI_Bcast(qv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1375    if (mp_ierr /= 0) goto 600
1376    call MPI_Bcast(pv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1377    if (mp_ierr /= 0) goto 600
1378    call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1379    if (mp_ierr /= 0) goto 600
1380
1381! cloud water/ice:
1382    if (readclouds) then
1383      call MPI_Bcast(ctwc(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1384      if (mp_ierr /= 0) goto 600
1385    end if
1386
1387! 2D fields
1388    call MPI_Bcast(cloudsh(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1389    if (mp_ierr /= 0) goto 600
1390    call MPI_Bcast(vdep(:,:,:,li:ui),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1391    if (mp_ierr /= 0) goto 600
1392    call MPI_Bcast(ps(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1393    if (mp_ierr /= 0) goto 600
1394    call MPI_Bcast(sd(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1395    if (mp_ierr /= 0) goto 600
1396    call MPI_Bcast(tcc(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1397    if (mp_ierr /= 0) goto 600
1398    call MPI_Bcast(tt2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1399    if (mp_ierr /= 0) goto 600
1400    call MPI_Bcast(td2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1401    if (mp_ierr /= 0) goto 600
1402    call MPI_Bcast(lsprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1403    if (mp_ierr /= 0) goto 600
1404    call MPI_Bcast(convprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1405    if (mp_ierr /= 0) goto 600
1406    call MPI_Bcast(ustar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1407    if (mp_ierr /= 0) goto 600
1408    call MPI_Bcast(wstar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1409    if (mp_ierr /= 0) goto 600
1410    call MPI_Bcast(hmix(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1411    if (mp_ierr /= 0) goto 600
1412    call MPI_Bcast(tropopause(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1413    if (mp_ierr /= 0) goto 600
1414    call MPI_Bcast(oli(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1415    if (mp_ierr /= 0) goto 600
1416
1417    if (mp_measure_time) call mpif_mtime('commtime',1)
1418
1419    goto 601
1420
1421600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
1422    stop
1423
1424601 end subroutine mpif_gf_send_vars
1425
1426
1427  subroutine mpif_gf_send_vars_nest(memstat)
1428!***********************************************************************
1429! DESCRIPTION
1430!   Distribute 'getfield' variables from reader process to all processes.
1431!   For nested fields
1432!
1433!   Called from timemanager
1434!
1435! NOTE
1436!   This subroutine distributes nested windfields from the reader process to
1437!   all other processes. Usually one set of fields is transferred, but at
1438!   the first timestep, when there are no fields in memory, both are transferred.
1439!   MPI_Bcast is used, so implicitly all processes are synchronized at this
1440!   step.
1441!
1442!
1443!***********************************************************************
1444    use com_mod
1445    use par_mod,only: numwfmem
1446
1447    implicit none
1448
1449    integer, intent(in) :: memstat
1450    integer :: i
1451! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1452    integer :: li,ui
1453
1454! Common array sizes used for communications
1455    integer :: d3_size1 = nxmaxn*nymaxn*nzmax
1456    integer :: d3_size2 = nxmaxn*nymaxn*nuvzmax
1457    integer :: d2_size1 = nxmaxn*nymaxn
1458    integer :: d2_size2 = nxmaxn*nymaxn*maxspec
1459    integer :: d2_size3 = nxmaxn*nymaxn
1460
1461    integer :: d3s1,d3s2,d2s1,d2s2
1462
1463! The first time the routine is called, the unchanging (static) fields are transferred
1464    logical :: first_call=.true.
1465
1466!**********************************************************************
1467
1468! Sizes of arrays transferred
1469    d3s1=d3_size1
1470    d3s2=d3_size2
1471    d2s1=d2_size1
1472    d2s2=d2_size2
1473
1474! Decide which field(s) need to be transferred
1475    if (memstat.ge.32) then ! distribute 2 fields
1476      li=1; ui=2
1477      d3s1=2*d3_size1
1478      d3s2=2*d3_size2
1479      d2s1=2*d2_size1
1480      d2s2=2*d2_size2
1481    else if (memstat.eq.17) then ! distribute 1 field, on 1st index
1482      li=1; ui=1
1483    else if (memstat.eq.18) then ! distribute 1 field, on 2nd index
1484      li=2; ui=2
1485    else if (memstat.eq.19) then ! distribute 1 field, on 3rd index
1486      li=3; ui=3
1487    else
1488      write(*,*) '#### mpi_mod::mpif_gf_send_vars_nest> ERROR: ', &
1489           & 'wrong value of memstat, exiting ####', memstat
1490      stop
1491    end if
1492
1493
1494! Time for MPI communications
1495    if (mp_measure_time) call mpif_mtime('commtime',0)
1496
1497! Distribute variables, send from getfield process (id_read) to other
1498! processes
1499!**********************************************************************
1500
1501! The non-reader processes need to know if cloud water was read.
1502    call MPI_Bcast(readclouds_nest,maxnests,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
1503    if (mp_ierr /= 0) goto 600
1504
1505! Static fields/variables sent only at startup
1506    if (first_call) then
1507      call MPI_Bcast(oron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1508      if (mp_ierr /= 0) goto 600
1509      call MPI_Bcast(excessoron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1510      if (mp_ierr /= 0) goto 600
1511      call MPI_Bcast(lsmn(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1512      if (mp_ierr /= 0) goto 600
1513      call MPI_Bcast(xlandusen(:,:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1514      if (mp_ierr /= 0) goto 600
1515      first_call=.false.
1516    end if
1517
1518! MPI prefers contiguous arrays for sending (else a buffer is created),
1519! hence the loop over nests
1520!**********************************************************************
1521    do i=1, numbnests
1522! 3D fields
1523      call MPI_Bcast(uun(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1524      if (mp_ierr /= 0) goto 600
1525      call MPI_Bcast(vvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1526      if (mp_ierr /= 0) goto 600
1527      call MPI_Bcast(wwn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1528      if (mp_ierr /= 0) goto 600
1529      call MPI_Bcast(ttn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1530      if (mp_ierr /= 0) goto 600
1531      call MPI_Bcast(rhon(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1532      if (mp_ierr /= 0) goto 600
1533      call MPI_Bcast(drhodzn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1534      if (mp_ierr /= 0) goto 600
1535      call MPI_Bcast(tthn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1536      if (mp_ierr /= 0) goto 600
1537      call MPI_Bcast(qvhn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1538      if (mp_ierr /= 0) goto 600
1539      call MPI_Bcast(qvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1540      if (mp_ierr /= 0) goto 600
1541      call MPI_Bcast(pvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1542      if (mp_ierr /= 0) goto 600
1543      call MPI_Bcast(cloudsn(:,:,:,li:ui,i),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1544      if (mp_ierr /= 0) goto 600
1545
1546! cloud water/ice:
1547      if (readclouds_nest(i)) then
1548! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1549! if (mp_ierr /= 0) goto 600
1550        call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1551        if (mp_ierr /= 0) goto 600
1552      end if
1553
1554! 2D fields
1555      call MPI_Bcast(cloudshn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1556      if (mp_ierr /= 0) goto 600
1557      call MPI_Bcast(vdepn(:,:,:,li:ui,i),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1558      if (mp_ierr /= 0) goto 600
1559      call MPI_Bcast(psn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1560      if (mp_ierr /= 0) goto 600
1561      call MPI_Bcast(sdn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1562      if (mp_ierr /= 0) goto 600
1563      call MPI_Bcast(tccn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1564      if (mp_ierr /= 0) goto 600
1565      call MPI_Bcast(tt2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1566      if (mp_ierr /= 0) goto 600
1567      call MPI_Bcast(td2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1568      if (mp_ierr /= 0) goto 600
1569      call MPI_Bcast(lsprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1570      if (mp_ierr /= 0) goto 600
1571      call MPI_Bcast(convprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1572      if (mp_ierr /= 0) goto 600
1573      call MPI_Bcast(ustarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1574      if (mp_ierr /= 0) goto 600
1575      call MPI_Bcast(wstarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1576      if (mp_ierr /= 0) goto 600
1577      call MPI_Bcast(olin(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1578      if (mp_ierr /= 0) goto 600
1579      call MPI_Bcast(hmixn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1580      if (mp_ierr /= 0) goto 600
1581      call MPI_Bcast(tropopausen(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1582      if (mp_ierr /= 0) goto 600
1583    end do
1584
1585    if (mp_measure_time) call mpif_mtime('commtime',1)
1586
1587    goto 601
1588
1589600 write(*,*) "mpi_mod> mp_ierr \= 0",mp_ierr
1590    stop
1591
1592601 end subroutine mpif_gf_send_vars_nest
1593
1594
1595  subroutine mpif_gf_send_vars_async(memstat)
1596!*******************************************************************************
1597! DESCRIPTION
1598!   Distribute 'getfield' variables from reader process to all processes.
1599!   Called from timemanager by reader process only
1600!
1601! NOTE
1602!   This version uses asynchronous sends. The newest fields are sent in the
1603!   background, so calculations can continue while
1604!   MPI communications are performed.
1605!
1606!   The way the MPI tags/requests are sequenced means that this routine must
1607!   carefully match routine 'mpif_gf_recv_vars_async'
1608!
1609! VARIABLES
1610!   memstat -- input, for resolving pointer to windfield index being read
1611!   mind    -- index where to place new fields
1612!
1613!
1614!*******************************************************************************
1615    use com_mod
1616
1617    implicit none
1618
1619    integer, intent(in) :: memstat
1620    integer :: mind
1621    integer :: dest,i
1622
1623! Common array sizes used for communications
1624    integer :: d3s1 = nxmax*nymax*nzmax
1625    integer :: d3s2 = nxmax*nymax*nuvzmax
1626    integer :: d2s1 = nxmax*nymax
1627    integer :: d2s2 = nxmax*nymax*maxspec
1628!    integer :: d1s1 = maxwf
1629
1630!*******************************************************************************
1631
1632! At the time the send is posted, the reader process is one step further
1633! in the permutation of memstat compared with the receiving processes
1634
1635    if (memstat.ge.32) then
1636! last read was synchronous, to indices 1 and 2, 3 is free
1637      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1638           & memstat>=32 should never happen here."
1639      stop
1640    else if (memstat.eq.17) then
1641! old fields on 1,2, send 3
1642      mind=3
1643    else if (memstat.eq.18) then
1644! old fields on 2,3, send 1
1645      mind=1
1646    else if (memstat.eq.19) then
1647! old fields on 3,1, send 2
1648      mind=2
1649    else
1650      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1651           & invalid memstat"
1652      mind=-1
1653      stop
1654    end if
1655
1656    if (mp_dev_mode) then
1657      if (mp_pid.ne.id_read) then
1658        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_async'
1659      end if
1660    end if
1661
1662    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat
1663
1664! Time for MPI communications
1665    if (mp_measure_time) call mpif_mtime('commtime',0)
1666
1667! Loop over receiving processes, initiate data sending
1668!*****************************************************
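! Request-slot layout (descriptive note): each destination process 'dest'
! owns the slots reqs(dest*nvar_async : dest*nvar_async+nvar_async-1), and
! the counter i below walks through that range in exactly the same order
! as the matching MPI_Irecv calls in mpif_gf_recv_vars_async post their
! requests, so the two routines must be kept in sync.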
1669
1670    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
1671! TODO: use mp_partgroup_np here
1672      if (dest.eq.id_read) cycle
1673      i=dest*nvar_async
1674      call MPI_Isend(uu(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1675      if (mp_ierr /= 0) goto 600
1676      i=i+1
1677      call MPI_Isend(vv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1678      if (mp_ierr /= 0) goto 600
1679      i=i+1
1680      call MPI_Isend(uupol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1681      if (mp_ierr /= 0) goto 600
1682      i=i+1
1683      call MPI_Isend(vvpol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1684      if (mp_ierr /= 0) goto 600
1685      i=i+1
1686      call MPI_Isend(ww(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1687      if (mp_ierr /= 0) goto 600
1688      i=i+1
1689      call MPI_Isend(tt(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1690      if (mp_ierr /= 0) goto 600
1691      i=i+1
1692      call MPI_Isend(rho(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1693      if (mp_ierr /= 0) goto 600
1694      i=i+1
1695      call MPI_Isend(drhodz(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1696      if (mp_ierr /= 0) goto 600
1697      i=i+1
1698      call MPI_Isend(tth(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1699      if (mp_ierr /= 0) goto 600
1700      i=i+1
1701      call MPI_Isend(qvh(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1702      if (mp_ierr /= 0) goto 600
1703      i=i+1
1704      call MPI_Isend(qv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1705      if (mp_ierr /= 0) goto 600
1706      i=i+1
1707      call MPI_Isend(pv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1708      if (mp_ierr /= 0) goto 600
1709      i=i+1
1710      call MPI_Isend(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1711      if (mp_ierr /= 0) goto 600
1712      i=i+1
1713      call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1714      if (mp_ierr /= 0) goto 600
1715      i=i+1
1716      call MPI_Isend(vdep(:,:,:,mind),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1717      if (mp_ierr /= 0) goto 600
1718      i=i+1
1719! 15
1720      call MPI_Isend(ps(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1721      if (mp_ierr /= 0) goto 600
1722      i=i+1
1723      call MPI_Isend(sd(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1724      if (mp_ierr /= 0) goto 600
1725      i=i+1
1726      call MPI_Isend(tcc(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1727      if (mp_ierr /= 0) goto 600
1728      i=i+1
1729      call MPI_Isend(tt2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1730      if (mp_ierr /= 0) goto 600
1731      i=i+1
1732      call MPI_Isend(td2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1733      if (mp_ierr /= 0) goto 600
1734      i=i+1
1735      call MPI_Isend(lsprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1736      if (mp_ierr /= 0) goto 600
1737      i=i+1
1738      call MPI_Isend(convprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1739      if (mp_ierr /= 0) goto 600
1740      i=i+1
1741      call MPI_Isend(ustar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1742      if (mp_ierr /= 0) goto 600
1743      i=i+1
1744      call MPI_Isend(wstar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1745      if (mp_ierr /= 0) goto 600
1746      i=i+1
1747      call MPI_Isend(hmix(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1748      if (mp_ierr /= 0) goto 600
1749      i=i+1
1750! 25
1751      call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1752      if (mp_ierr /= 0) goto 600
1753      i=i+1
1754      call MPI_Isend(oli(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1755      if (mp_ierr /= 0) goto 600
1756
1757! Send cloud water if it exists. The request counter is incremented inside this block, mirroring the receiving end
1758      if (readclouds) then
1759        i=i+1
1760        call MPI_Isend(ctwc(:,:,mind),d2s1,mp_sp,dest,tm1,&
1761             &MPI_COMM_WORLD,reqs(i),mp_ierr)
1762        if (mp_ierr /= 0) goto 600
1763      end if
1764    end do
1765
1766    if (mp_measure_time) call mpif_mtime('commtime',1)
1767
1768    goto 601
1769
1770600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> mp_ierr \= 0", mp_ierr
1771    stop
1772
1773601 end subroutine mpif_gf_send_vars_async
1774
1775
1776  subroutine mpif_gf_recv_vars_async(memstat)
1777!*******************************************************************************
1778! DESCRIPTION
1779!   Receive 'getfield' variables from reader process.
1780!   Called from timemanager by all processes except reader
1781!
1782! NOTE
1783!   This version uses asynchronous communications.
1784!
1785! VARIABLES
1786!   memstat -- input, used to resolve windfield index being received
1787!
1788!
1789!*******************************************************************************
1790    use com_mod
1791
1792    implicit none
1793
1794    integer, intent(in) :: memstat
1795    integer :: mind,j
1796
1797! Common array sizes used for communications
1798    integer :: d3s1 = nxmax*nymax*nzmax
1799    integer :: d3s2 = nxmax*nymax*nuvzmax
1800    integer :: d2s1 = nxmax*nymax
1801    integer :: d2s2 = nxmax*nymax*maxspec
1802!integer :: d1_size1 = maxwf
1803
1804!    integer :: d3s1,d3s2,d2s1,d2s2
1805!*******************************************************************************
1806
1807! At the time this immediate receive is posted, memstat is the state of
1808! windfield indices at the previous/current time. From this, the future
1809! state is deduced.
1810    if (memstat.eq.32) then
1811! last read was synchronous to indices 1 and 2, 3 is free
1812      mind=3
1813    else if (memstat.eq.17) then
1814! last read was asynchronous to index 3, 1 is free
1815      mind=1
1816    else if (memstat.eq.18) then
1817! last read was asynchronous to index 1, 2 is free
1818      mind=2
1819    else if (memstat.eq.19) then
1820! last read was asynchronous to index 2, 3 is free
1821      mind=3
1822    else
1823! illegal state
1824      write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
1825      stop
1826    end if
1827
1828    if (mp_dev_mode) then
1829      if (mp_pid.eq.id_read) then
1830        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async'
1831      end if
1832    end if
1833
1834! Time for MPI communications
1835    if (mp_measure_time) call mpif_mtime('commtime',0)
1836
1837    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_async, memstat:', memstat
1838
1839! Initiate receiving of data
1840!***************************
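! Each receiving process uses its own block of request handles, starting at
! reqs(mp_pid*nvar_async). The receives below must be posted in the same
! order as the sends in mpif_gf_send_vars_async, since messages from the
! reader are matched by posting order (MPI_ANY_TAG is used).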
1841
1842! Get MPI tags/requests for communications
1843    j=mp_pid*nvar_async
1844    call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1845         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1846    if (mp_ierr /= 0) goto 600
1847    j=j+1
1848    call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1849         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1850    if (mp_ierr /= 0) goto 600
1851    j=j+1
1852    call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1853         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1854    if (mp_ierr /= 0) goto 600
1855    j=j+1
1856    call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1857         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1858    if (mp_ierr /= 0) goto 600
1859    j=j+1
1860    call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1861         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1862    if (mp_ierr /= 0) goto 600
1863    j=j+1
1864    call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1865         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1866    if (mp_ierr /= 0) goto 600
1867    j=j+1
1868    call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1869         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1870    if (mp_ierr /= 0) goto 600
1871    j=j+1
1872    call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1873         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1874    if (mp_ierr /= 0) goto 600
1875    j=j+1
1876    call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
1877         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1878    if (mp_ierr /= 0) goto 600
1879    j=j+1
1880    call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
1881         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1882    if (mp_ierr /= 0) goto 600
1883    j=j+1
1884    call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1885         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1886    if (mp_ierr /= 0) goto 600
1887    j=j+1
1888    call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1889         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1890    if (mp_ierr /= 0) goto 600
1891    j=j+1
1892    call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
1893         &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1894    if (mp_ierr /= 0) goto 600
1895    j=j+1
1896    call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1897         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1898    if (mp_ierr /= 0) goto 600
1899    j=j+1
1900    call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
1901         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1902    if (mp_ierr /= 0) goto 600
1903    j=j+1
1904    call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1905         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1906    if (mp_ierr /= 0) goto 600
1907    j=j+1
1908    call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1909         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1910    if (mp_ierr /= 0) goto 600
1911    j=j+1
1912    call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1913         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1914    if (mp_ierr /= 0) goto 600
1915    j=j+1
1916    call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1917         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1918    if (mp_ierr /= 0) goto 600
1919    j=j+1
1920    call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1921         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1922    if (mp_ierr /= 0) goto 600
1923    j=j+1
1924    call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1925         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1926    if (mp_ierr /= 0) goto 600
1927    j=j+1
1928    call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1929         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1930    if (mp_ierr /= 0) goto 600
    j=j+1
1931    call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1932         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1933    if (mp_ierr /= 0) goto 600
1934    j=j+1
1935    call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1936         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1937    if (mp_ierr /= 0) goto 600
1938    j=j+1
1939    call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1940         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1941    if (mp_ierr /= 0) goto 600
1942    j=j+1
1943    call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1944         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1945    if (mp_ierr /= 0) goto 600
1946    j=j+1
1947    call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1948         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1949    if (mp_ierr /= 0) goto 600
1950
1951! Post the receive for cloud water (clwc). These data are possibly not sent, in which case the request must be cancelled.
1952! For now, assume that either all time steps have cloud water or none do.
1953    if (readclouds) then
1954      j=j+1
1955      call MPI_Irecv(ctwc(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1956           &MPI_COMM_WORLD,reqs(j),mp_ierr)
1957      if (mp_ierr /= 0) goto 600
1958    end if
1959
1960
1961    if (mp_measure_time) call mpif_mtime('commtime',1)
1962
1963    goto 601
1964
1965600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> MPI ERROR ", mp_ierr
1966    stop
1967
1968601 end subroutine mpif_gf_recv_vars_async
1969
1970
1971  subroutine mpif_gf_send_vars_nest_async(memstat)
1972!*******************************************************************************
1973! DESCRIPTION
1974!   Distribute 'getfield' variables from reader process to all processes.
1975!   Called from timemanager by reader process only.
1976!   Version for nested wind fields
1977!
1978! NOTE
1979!   This version uses asynchronous sends. The newest fields are sent in the
1980!   background, so calculations can continue while
1981!   MPI communications are performed.
1982!
1983!   The way the MPI tags/requests are sequenced means that this routine must
1984!   carefully match routine 'mpif_gf_recv_vars_nest_async'
1985!
1986! VARIABLES
1987!   memstat -- input, for resolving pointer to windfield index being read
1988!   mind    -- index where to place new fields
1989!
1990! TODO
1991!   Some unused arrays are currently sent (uupoln,..)
1992!*******************************************************************************
1993    use com_mod
1994
1995    implicit none
1996
1997    integer, intent(in) :: memstat
1998    integer :: mind
1999    integer :: dest,i,k
2000
2001! Common array sizes used for communications
2002    integer :: d3s1 = nxmaxn*nymaxn*nzmax
2003    integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
2004    integer :: d2s1 = nxmaxn*nymaxn
2005    integer :: d2s2 = nxmaxn*nymaxn*maxspec
2006
2007!*******************************************************************************
2008
2009! At the time the send is posted, the reader process is one step further
2010! in the permutation of memstat compared with the receiving processes
2011
2012    if (memstat.ge.32) then
2013! last read was synchronous, to indices 1 and 2, 3 is free
2014      write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
2015           & memstat>=32 should never happen here."
2016      stop
2017    else if (memstat.eq.17) then
2018! old fields on 1,2, send 3
2019      mind=3
2020    else if (memstat.eq.18) then
2021! old fields on 2,3, send 1
2022      mind=1
2023    else if (memstat.eq.19) then
2024! old fields on 3,1, send 2
2025      mind=2
2026    else
2027      write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
2028           & invalid memstat"
2029      mind=-1
2030      stop
2031    end if
2032
2033    if (mp_dev_mode) then
2034      if (mp_pid.ne.id_read) then
2035        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_nest_async'
2036      end if
2037    end if
2038
2039    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_nest_async, memstat:', memstat
2040
2041! Time for MPI communications
2042    if (mp_measure_time) call mpif_mtime('commtime',0)
2043
2044! Loop over receiving processes, initiate data sending
2045!*****************************************************
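! Note: the request counter i is reset to dest*nvar_async for every nest
! (see the k loop below), so with more than one nest the request handles
! are reused across nests. The TODO in mpif_gf_request about separate
! request arrays for nested variables refers to this limitation.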
2046
2047    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
2048! TODO: use mp_partgroup_np here
2049      if (dest.eq.id_read) cycle
2050      do k=1, numbnests
2051        i=dest*nvar_async
2052        call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2053        if (mp_ierr /= 0) goto 600
2054        i=i+1
2055        call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2056        if (mp_ierr /= 0) goto 600
2057        i=i+1
2058        call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2059        if (mp_ierr /= 0) goto 600
2060        i=i+1
2061        call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2062        if (mp_ierr /= 0) goto 600
2063        i=i+1
2064        call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2065        if (mp_ierr /= 0) goto 600
2066        i=i+1
2067        call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2068        if (mp_ierr /= 0) goto 600
2069        i=i+1
2070        call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2071        if (mp_ierr /= 0) goto 600
2072        i=i+1
2073        call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2074        if (mp_ierr /= 0) goto 600
2075        i=i+1
2076        call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2077        if (mp_ierr /= 0) goto 600
2078        i=i+1
2079        call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2080        if (mp_ierr /= 0) goto 600
2081        i=i+1
2082        call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2083        if (mp_ierr /= 0) goto 600
2084        i=i+1
2085        call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2086        if (mp_ierr /= 0) goto 600
2087        i=i+1
2088        call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2089        if (mp_ierr /= 0) goto 600
2090        i=i+1
2091        call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2092        if (mp_ierr /= 0) goto 600
2093        i=i+1
2094        call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2095        if (mp_ierr /= 0) goto 600
2096        i=i+1
2097! 15
2098        call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2099        if (mp_ierr /= 0) goto 600
2100        i=i+1
2101        call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2102        if (mp_ierr /= 0) goto 600
2103        i=i+1
2104        call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2105        if (mp_ierr /= 0) goto 600
2106        i=i+1
2107        call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2108        if (mp_ierr /= 0) goto 600
2109        i=i+1
2110        call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2111        if (mp_ierr /= 0) goto 600
2112        i=i+1
2113        call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2114        if (mp_ierr /= 0) goto 600
2115        i=i+1
2116        call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2117        if (mp_ierr /= 0) goto 600
2118        i=i+1
2119        call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2120        if (mp_ierr /= 0) goto 600
2121        i=i+1
2122        call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2123        if (mp_ierr /= 0) goto 600
2124        i=i+1
2125        call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
2126        if (mp_ierr /= 0) goto 600
2127! 25
2128
2129! Send cloud water if it exists. The request counter is incremented inside this block, mirroring the receiving end
2130        if (readclouds) then
2131          i=i+1
2132          call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,&
2133               &MPI_COMM_WORLD,reqs(i),mp_ierr)
2134          if (mp_ierr /= 0) goto 600
2135        end if
2136      end do
2137    end do
2138
2139    if (mp_measure_time) call mpif_mtime('commtime',1)
2140
2141    goto 601
2142
2143600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> mp_ierr \= 0", mp_ierr
2144    stop
2145
2146601 end subroutine mpif_gf_send_vars_nest_async
2147
2148
2149  subroutine mpif_gf_recv_vars_nest_async(memstat)
2150!*******************************************************************************
2151! DESCRIPTION
2152!   Receive 'getfield' variables from reader process.
2153!   Called from timemanager by all processes except reader
2154!   Version for nested wind fields
2155!
2156! NOTE
2157!   This version uses asynchronous communications.
2158!
2159! VARIABLES
2160!   memstat -- input, used to resolve windfield index being received
2161!
2162!
2163!*******************************************************************************
2164    use com_mod
2165
2166    implicit none
2167
2168    integer, intent(in) :: memstat
2169    integer :: mind,j,k
2170
2171! Common array sizes used for communications
2172    integer :: d3s1 = nxmaxn*nymaxn*nzmax
2173    integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
2174    integer :: d2s1 = nxmaxn*nymaxn
2175    integer :: d2s2 = nxmaxn*nymaxn*maxspec
2176
2177!*******************************************************************************
2178
2179! At the time this immediate receive is posted, memstat is the state of
2180! windfield indices at the previous/current time. From this, the future
2181! state is deduced.
2182    if (memstat.eq.32) then
2183! last read was synchronous to indices 1 and 2, 3 is free
2184      mind=3
2185    else if (memstat.eq.17) then
2186! last read was asynchronous to index 3, 1 is free
2187      mind=1
2188    else if (memstat.eq.18) then
2189! last read was asynchronous to index 1, 2 is free
2190      mind=2
2191    else if (memstat.eq.19) then
2192! last read was asynchronous to index 2, 3 is free
2193      mind=3
2194    else
2195! illegal state
2196      write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
2197      stop
2198    end if
2199
2200    if (mp_dev_mode) then
2201      if (mp_pid.eq.id_read) then
2202        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_nest_async'
2203      end if
2204    end if
2205
2206! Time for MPI communications
2207    if (mp_measure_time) call mpif_mtime('commtime',0)
2208
2209    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_nest_async, memstat:', memstat
2210
2211! Initiate receiving of data
2212!***************************
2213
2214    do k=1, numbnests
2215! Get MPI tags/requests for communications
2216      j=mp_pid*nvar_async
2217      call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2218           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2219      if (mp_ierr /= 0) goto 600
2220      j=j+1
2221      call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2222           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2223      if (mp_ierr /= 0) goto 600
2224      j=j+1
2225      call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2226           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2227      if (mp_ierr /= 0) goto 600
2228      j=j+1
2229      call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2230           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2231      if (mp_ierr /= 0) goto 600
2232      j=j+1
2233      call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2234           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2235      if (mp_ierr /= 0) goto 600
2236      j=j+1
2237      call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2238           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2239      if (mp_ierr /= 0) goto 600
2240      j=j+1
2241      call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
2242           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2243      if (mp_ierr /= 0) goto 600
2244      j=j+1
2245      call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
2246           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2247      if (mp_ierr /= 0) goto 600
2248      j=j+1
2249      call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2250           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2251      if (mp_ierr /= 0) goto 600
2252      j=j+1
2253      call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
2254           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2255      if (mp_ierr /= 0) goto 600
2256      j=j+1
2257      call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
2258           &MPI_COMM_WORLD,reqs(j),mp_ierr)   
2259      if (mp_ierr /= 0) goto 600
2260      j=j+1
2261      call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2262           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2263      if (mp_ierr /= 0) goto 600
2264      j=j+1
2265      call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
2266           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2267      if (mp_ierr /= 0) goto 600
2268      j=j+1
2269      call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2270           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2271      if (mp_ierr /= 0) goto 600
2272      j=j+1
2273      call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2274           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2275      if (mp_ierr /= 0) goto 600
2276      j=j+1
2277      call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2278           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2279      if (mp_ierr /= 0) goto 600
2280      j=j+1
2281      call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2282           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2283      if (mp_ierr /= 0) goto 600
2284      j=j+1
2285      call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2286           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2287      if (mp_ierr /= 0) goto 600
2288      j=j+1
2289      call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2290           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2291      if (mp_ierr /= 0) goto 600
2292      j=j+1
2293      call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2294           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2295      if (mp_ierr /= 0) goto 600
      j=j+1
2296      call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2297           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2298      if (mp_ierr /= 0) goto 600
2299      j=j+1
2300      call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2301           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2302      if (mp_ierr /= 0) goto 600
2303      j=j+1
2304      call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2305           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2306      if (mp_ierr /= 0) goto 600
2307      j=j+1
2308      call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2309           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2310      if (mp_ierr /= 0) goto 600
2311      j=j+1
2312      call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2313           &MPI_COMM_WORLD,reqs(j),mp_ierr)
2314      if (mp_ierr /= 0) goto 600
2315
2316! Post the receive for cloud water (clwc). These data are possibly not sent, in which case the request must be cancelled.
2317! For now, assume that either all time steps have cloud water or none do.
2318      if (readclouds) then
2319        j=j+1
2320        call MPI_Irecv(ctwcn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
2321             &MPI_COMM_WORLD,reqs(j),mp_ierr)
2322        if (mp_ierr /= 0) goto 600
2323      end if
2324    end do
2325
2326    if (mp_measure_time) call mpif_mtime('commtime',1)
2327
2328    goto 601
2329
2330600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_nest_async> MPI ERROR ", mp_ierr
2331    stop
2332
2333601 end subroutine mpif_gf_recv_vars_nest_async
2334
2335
2336  subroutine mpif_gf_request
2337!*******************************************************************************
2338! DESCRIPTION
2339!   Check for completion of MPI_Isend/MPI_Irecv data transfer.
2340!   
2341!   
2342! NOTE
2343!   implicit synchronisation between all processes takes place here
2344!
2345! TODO
2346!   NB: take into account nested wind fields by using a separate
2347!   subroutine mpif_gf_request_nest (and separate request arrays for the
2348!   nested variables)
2349!
2350!*******************************************************************************
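! Typical calling sequence from the time loop (a sketch for illustration,
! not the actual timemanager code):
!   if (mp_pid.eq.id_read) then
!     call mpif_gf_send_vars_async(memstat)   ! reader posts MPI_Isend
!   else
!     call mpif_gf_recv_vars_async(memstat)   ! other processes post MPI_Irecv
!   end if
!   ! ... particle calculations overlap with the transfers ...
!   call mpif_gf_request                      ! all processes block here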
2351!    use com_mod,only: readclouds
2352
2353    implicit none
2354
2355
2356    integer :: n_req !,i
2357
2358!***********************************************************************
2359
2360    n_req=nvar_async*mp_np
2361
2362    if (mp_measure_time) call mpif_mtime('commtime',0)
2363
2364!    call MPI_Wait(rm1,MPI_STATUS_IGNORE,mp_ierr)
2365
2366! TODO: cancel recv request if readclouds=.false.
2367!    if (readclouds) then
2368    call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
2369!    endif
2370! else
2371!   do i = 0, nvar_async*mp_np-1
2372!     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
2373!       call MPI_Cancel(reqs(i),mp_ierr)
2374!       cycle
2375!     end if
2376!     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
2377!   end do
2378! end if
2379
2380    if (mp_ierr /= 0) goto 600
2381
2382    if (mp_measure_time) call mpif_mtime('commtime',1)
2383
2384    goto 601
2385
2386600 write(*,*) "#### mpi_mod::mpif_gf_request> MPI ERROR ", mp_ierr
2387    stop
2388
2389601 end subroutine mpif_gf_request
2390
2391
2392  subroutine mpif_tm_reduce_grid
2393!***********************************************************************
2394! Collect grid variables on PID 0, summing contributions from all processes.
2395!
2396! NOTE
2397!   - Older MPI implementations may lack the MPI_IN_PLACE operation.
2398!     If so, use option 1) below and change unc_mod accordingly.
2399!
2400!***********************************************************************
2401    use com_mod
2402    use unc_mod
2403    use par_mod
2404
2405    implicit none
2406
2407    integer :: grid_size2d,grid_size3d
2408    integer, parameter :: rcpt_size=maxreceptor*maxspec
2409
2410!**********************************************************************
2411    grid_size2d=numxgrid*numygrid*maxspec* &
2412         & maxpointspec_act*nclassunc*maxageclass
2413    grid_size3d=numxgrid*numygrid*numzgrid*maxspec* &
2414         & maxpointspec_act*nclassunc*maxageclass
2415
2416
2417! Time for MPI communications
2418    if (mp_measure_time) call mpif_mtime('commtime',0)
2419
2420! 1) Using a separate grid (gridunc0) for received values
2421! call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2422!      & mp_comm_used, mp_ierr)
2423! if (mp_ierr /= 0) goto 600
2424
2425! 2) Using in-place reduction
2426
2427!!!--------------------------------------------------------------------
2428!!! DJM - 2017-05-09 change - MPI_IN_PLACE option for MPI_Reduce() causes
2429!!! severe numerical problems in some cases.  I'm guessing there are memory
2430!!! overrun issues in this complex code, but have so far failed to identify
2431!!! a specific problem.  And/or, when one searches the Internet for this
2432!!! issue, there is "some" hint that the implementation may be buggy. 
2433!!!
2434!!! At this point, with the following CPP USE_MPIINPLACE directive, the
2435!!! default behaviour will be to not use the MPI_IN_PLACE option.
2436!!! Users will have to compile with -DUSE_MPIINPLACE if they want that option.
2437!!! Introduction of the CPP directives also requires that the code be compiled
2438!!! with the "-x f95-cpp-input" option.
2439!!!
2440!!! Modification of this section requires the addition of array gridunc0, which
2441!!! requires an allocation in outgrid_init.f90 and initial declaration in
2442!!! unc_mod.f90.
2443!!!--------------------------------------------------------------------
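! Example of selecting the in-place branch at build time (a sketch; the
! exact compiler command depends on your makefile and MPI wrapper):
!   mpifort -x f95-cpp-input -DUSE_MPIINPLACE ... mpi_mod.f90
! Without -DUSE_MPIINPLACE the gridunc0/creceptor0 copy branch below is used.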
2444
2445#ifdef USE_MPIINPLACE
2446
2447    if (lroot) then
2448      call MPI_Reduce(MPI_IN_PLACE, gridunc, grid_size3d, mp_sp, MPI_SUM, id_root, &
2449           & mp_comm_used, mp_ierr)
2450      if (mp_ierr /= 0) goto 600
2451    else
2452      call MPI_Reduce(gridunc, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2453           & mp_comm_used, mp_ierr)
2454    end if
2455
2456  ! Receptor concentrations   
2457    if (lroot) then
2458      call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
2459           & mp_comm_used,mp_ierr)
2460      if (mp_ierr /= 0) goto 600
2461    else
2462      call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
2463           & mp_comm_used,mp_ierr)
2464    end if
2465
2466#else
2467
2468    call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2469         & mp_comm_used, mp_ierr)
2470    if (mp_ierr /= 0) goto 600
2471    if (lroot) gridunc = gridunc0
2472
2473    call MPI_Reduce(creceptor, creceptor0, rcpt_size, mp_sp, MPI_SUM, id_root, &
2474         & mp_comm_used, mp_ierr)
2475    if (mp_ierr /= 0) goto 600
2476    if (lroot) creceptor = creceptor0
2477
2478#endif
2479
2480    if ((WETDEP).and.(ldirect.gt.0)) then
2481      call MPI_Reduce(wetgridunc, wetgridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2482           & mp_comm_used, mp_ierr)
2483      if (mp_ierr /= 0) goto 600
2484    end if
2485
2486    if ((DRYDEP).and.(ldirect.gt.0)) then
2487      call MPI_Reduce(drygridunc, drygridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2488           & mp_comm_used, mp_ierr)
2489      if (mp_ierr /= 0) goto 600
2490    end if
2491
2492
2493    if (mp_measure_time) call mpif_mtime('commtime',1)
2494
2495    goto 601
2496
2497600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
2498    stop
2499
2500601 end subroutine mpif_tm_reduce_grid
2501
2502
2503  subroutine mpif_tm_reduce_grid_nest
2504!***********************************************************************
2505! Collect nested grid variables on PID 0, summing contributions from all processes.
2506!
2507! NOTE
2508!   - Compiling with 'fcheck=all' (gfortran) will cause a run-time error
2509!     here, as wetgriduncn0 is only allocated on the root process. The MPI
2510!     code is arguably valid, but gfortran's runtime checks reject it.
2511!
2512!***********************************************************************
2513    use com_mod
2514    use unc_mod
2515    use par_mod
2516
2517    implicit none
2518
2519    integer :: grid_size2d,grid_size3d
2520
2521!**********************************************************************
2522
2523    grid_size3d=numxgridn*numygridn*numzgrid*maxspec* &
2524         & maxpointspec_act*nclassunc*maxageclass
2525    grid_size2d=numxgridn*numygridn*maxspec* &
2526         & maxpointspec_act*nclassunc*maxageclass
2527
2528
2529! Time for MPI communications
2530    if (mp_measure_time) call mpif_mtime('commtime',0)
2531
2532! Using a separate grid (griduncn0) for received values, for debugging
2533! call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2534!      & mp_comm_used, mp_ierr)
2535! if (mp_ierr /= 0) goto 600
2536
2537#ifdef USE_MPIINPLACE
2538! Using in-place reduction
2539    if (lroot) then
2540      call MPI_Reduce(MPI_IN_PLACE, griduncn, grid_size3d, mp_sp, MPI_SUM, id_root, &
2541           & mp_comm_used, mp_ierr)
2542      if (mp_ierr /= 0) goto 600
2543    else
2544      call MPI_Reduce(griduncn, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2545           & mp_comm_used, mp_ierr)
2546      if (mp_ierr /= 0) goto 600
2547    end if
2548
2549#else
2550    call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
2551         & mp_comm_used, mp_ierr)
2552    if (mp_ierr /= 0) goto 600
2553    if (lroot) griduncn = griduncn0
2554
2555#endif
2556
2557    if ((WETDEP).and.(ldirect.gt.0)) then
2558      call MPI_Reduce(wetgriduncn, wetgriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2559           & mp_comm_used, mp_ierr)
2560      if (mp_ierr /= 0) goto 600
2561    end if
2562
2563    if ((DRYDEP).and.(ldirect.gt.0)) then
2564      call MPI_Reduce(drygriduncn, drygriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
2565           & mp_comm_used, mp_ierr)
2566      if (mp_ierr /= 0) goto 600
2567    end if
2568
2569    if (mp_measure_time) call mpif_mtime('commtime',1)
2570
2571    goto 601
2572
2573600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
2574    stop
2575
2576601 end subroutine mpif_tm_reduce_grid_nest
2577
2578
2579  subroutine mpif_mtime(ident,imode)
2580!***********************************************************************
2581! Measure CPU/Wall time in various parts of the code
2582!
2583! VARIABLES
2584!   ident        character, identifies routine to measure
2585!   imode        integer, 0:start clock(s)  1: stop clock(s)
2586!
2587!***********************************************************************
2588    implicit none
2589
2590    character(LEN=*), intent(in) :: ident
2591    integer, intent(in) :: imode
2592
2593!***********************************************************************
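! Usage example (the pattern used throughout this module):
!   if (mp_measure_time) call mpif_mtime('commtime',0)  ! start the clock
!   ! ... timed section ...
!   if (mp_measure_time) call mpif_mtime('commtime',1)  ! stop and accumulate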
2594
2595    select case(ident)
2596
2597    case ('timemanager')
2598      if (imode.eq.0) then
2599        call cpu_time(tm_tot_beg)
2600      else
2601        call cpu_time(tm_tot_end)
2602        tm_tot_total = tm_tot_total + (tm_tot_end-tm_tot_beg)
2603      end if
2604
2605    case ('wetdepo')
2606      if (imode.eq.0) then
2607        mp_wetdepo_wtime_beg = mpi_wtime()
2608        call cpu_time(mp_wetdepo_time_beg)
2609      else
2610        mp_wetdepo_wtime_end = mpi_wtime()
2611        call cpu_time(mp_wetdepo_time_end)
2612
2613        mp_wetdepo_wtime_total = mp_wetdepo_wtime_total + &
2614             &(mp_wetdepo_wtime_end - mp_wetdepo_wtime_beg)
2615        mp_wetdepo_time_total = mp_wetdepo_time_total + &
2616             &(mp_wetdepo_time_end - mp_wetdepo_time_beg)
2617      end if
2618
2619    case ('advance')
2620      if (imode.eq.0) then
2621        mp_advance_wtime_beg = mpi_wtime()
2622      else
2623        mp_advance_wtime_end = mpi_wtime()
2624
2625        mp_advance_wtime_total = mp_advance_wtime_total + &
2626             &(mp_advance_wtime_end - mp_advance_wtime_beg)
2627      end if
2628
2629    case ('getfields')
2630      if (imode.eq.0) then
2631        mp_getfields_wtime_beg = mpi_wtime()
2632        call cpu_time(mp_getfields_time_beg)
2633      else
2634        mp_getfields_wtime_end = mpi_wtime()
2635        call cpu_time(mp_getfields_time_end)
2636
2637        mp_getfields_wtime_total = mp_getfields_wtime_total + &
2638             &(mp_getfields_wtime_end - mp_getfields_wtime_beg)
2639        mp_getfields_time_total = mp_getfields_time_total + &
2640             &(mp_getfields_time_end - mp_getfields_time_beg)
2641      end if
2642
2643    case ('partloop1')
2644      if (imode.eq.0) then
2645        call cpu_time(tm_nploop_beg)
2646      else
2647        call cpu_time(tm_nploop_end)
2648        tm_nploop_total = tm_nploop_total + (tm_nploop_end - tm_nploop_beg)
2649      end if
2650
2651    case ('conccalc')
2652      if (imode.eq.0) then
2653        call cpu_time(mp_conccalc_time_beg)
2654      else
2655        call cpu_time(mp_conccalc_time_end)
2656        mp_conccalc_time_total = mp_conccalc_time_total + mp_conccalc_time_end - &
2657             &mp_conccalc_time_beg
2658      end if
2659
2660    case ('rootonly')
2661      if (imode.eq.0) then
2662        call cpu_time(mp_root_time_beg)
2663        mp_root_wtime_beg = mpi_wtime()
2664      else
2665        call cpu_time(mp_root_time_end)
2666        mp_root_wtime_end = mpi_wtime()
2667        mp_root_time_total = mp_root_time_total + &
2668             &(mp_root_time_end - mp_root_time_beg)
2669        mp_root_wtime_total = mp_root_wtime_total + &
2670             &(mp_root_wtime_end - mp_root_wtime_beg)
2671      end if
2672
2673    case ('iotime')
2674      if (imode.eq.0) then
2675        mp_io_wtime_beg = mpi_wtime()
2676        call cpu_time(mp_io_time_beg)
2677      else
2678        mp_io_wtime_end = mpi_wtime()
2679        call cpu_time(mp_io_time_end)
2680
2681        mp_io_wtime_total = mp_io_wtime_total + (mp_io_wtime_end - &
2682             & mp_io_wtime_beg)
2683        mp_io_time_total = mp_io_time_total + (mp_io_time_end - &
2684             & mp_io_time_beg)
2685      end if
2686
2687    case ('verttransform')
2688      if (imode.eq.0) then
2689        mp_vt_wtime_beg = mpi_wtime()
2690        call cpu_time(mp_vt_time_beg)
2691      else
2692        mp_vt_wtime_end = mpi_wtime()
2693        call cpu_time(mp_vt_time_end)
2694
2695        mp_vt_wtime_total = mp_vt_wtime_total + (mp_vt_wtime_end - &
2696             & mp_vt_wtime_beg)
2697        mp_vt_time_total = mp_vt_time_total + (mp_vt_time_end - &
2698             & mp_vt_time_beg)
2699      end if
2700
2701    case ('readwind')
2702      if (imode.eq.0) then
2703        call cpu_time(mp_readwind_time_beg)
2704        mp_readwind_wtime_beg = mpi_wtime()
2705      else
2706        call cpu_time(mp_readwind_time_end)
2707        mp_readwind_wtime_end = mpi_wtime()
2708
2709        mp_readwind_time_total = mp_readwind_time_total + &
2710             &(mp_readwind_time_end - mp_readwind_time_beg)
2711        mp_readwind_wtime_total = mp_readwind_wtime_total + &
2712             &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
2713      end if
2714
2715    case ('commtime')
2716      if (imode.eq.0) then
2717        call cpu_time(mp_comm_time_beg)
2718        mp_comm_wtime_beg = mpi_wtime()
2719      else
2720        call cpu_time(mp_comm_time_end)
2721        mp_comm_wtime_end = mpi_wtime()
2722        mp_comm_time_total = mp_comm_time_total + &
2723             &(mp_comm_time_end - mp_comm_time_beg)
2724        mp_comm_wtime_total = mp_comm_wtime_total + &
2725             &(mp_comm_wtime_end - mp_comm_wtime_beg)
2726      end if
2727
2728    case ('flexpart')
2729      if (imode.eq.0) then
2730        mp_total_wtime_beg=mpi_wtime()
2731      else
2732        mp_total_wtime_end=mpi_wtime()
2733        mp_total_wtime_total = mp_total_wtime_end-mp_total_wtime_beg
2734      end if
2735
2736    case default
2737      write(*,*) 'mpi_mod::mpif_mtime> WARNING: unknown case identifier'
2738
2739    end select
2740
2741
2742  end subroutine mpif_mtime


  subroutine mpif_finalize
!***********************************************************************
! Finalize MPI
! Optionally print detailed time diagnostics
!***********************************************************************
    implicit none

    integer :: ip !,j,r

!***********************************************************************

    IF (mp_measure_time) THEN
      do ip=0, mp_np-1
        call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)

        if (ip == mp_pid) then
          write(*,FMT='(72("#"))')
          write(*,FMT='(A60,I3)') 'STATISTICS FOR MPI PROCESS:', mp_pid
          if (ip == 0) then
            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR ROOT-ONLY (PID 0) &
                 &SECTIONS: ', mp_root_time_total
            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ROOT-ONLY (PID 0) &
                 &SECTIONS:', mp_root_wtime_total
          end if
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FLEXPART: ' &
               &, mp_total_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI COMMUNICATIONS:',&
               & mp_comm_time_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI COMMUNICATIONS:',&
               & mp_comm_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI BARRIERS:',&
               & mp_barrier_time_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI BARRIERS:',&
               & mp_barrier_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER:',&
               & tm_tot_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER/NUMPART LOOP:',&
               & tm_nploop_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ADVANCE:',&
               & mp_advance_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR GETFIELDS:',&
               & mp_getfields_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
               & mp_getfields_time_total
!          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
!               & mp_readwind_wtime_total
!          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
!               & mp_readwind_time_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
               & mp_io_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR FILE IO:',&
               & mp_io_time_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR WETDEPO:',&
               & mp_wetdepo_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR WETDEPO:',&
               & mp_wetdepo_time_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR CONCCALC:',&
               & mp_conccalc_time_total
! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
!      & mp_vt_wtime_total
! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
!      & mp_vt_time_total
! NB: the 'flush' function is possibly a gfortran-specific extension,
! comment out if it gives problems
!          call flush()
        end if
      end do
    end if

! This barrier call is only needed so that the timing output above appears
! in order
    call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)

    if (lroot.and.mp_measure_time) then
      write(*,FMT='(72("#"))')
      WRITE(*,*) "To turn off output of time measurements, set "
      WRITE(*,*) "    mp_measure_time=.false."
      WRITE(*,*) "in file mpi_mod.f90"
      write(*,FMT='(72("#"))')
    end if

! j=mp_pid*nvar_async
! In the implementation with 3 fields, the processes may have posted
! MPI_Irecv requests that should be cancelled here
! if (.not.lmp_sync) then
!   r=mp_pid*nvar_async
!   do j=r,r+nvar_async-1
!     call MPI_Cancel(j,mp_ierr)
!     if (mp_ierr /= 0) write(*,*) '#### mpif_finalize::MPI_Cancel> ERROR ####'
!   end do
! end if
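! NB: MPI_Cancel expects an MPI request handle as its first argument, so if
! the block above is re-enabled the stored receive request handles (rather
! than the loop index j) should be passed, and each cancelled request must
! still be completed (e.g. with MPI_Wait) or freed with MPI_Request_free.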

    call MPI_FINALIZE(mp_ierr)
    if (mp_ierr /= 0) then
      write(*,*) '#### mpif_finalize::MPI_FINALIZE> MPI ERROR ', mp_ierr, ' ####'
      stop
    end if


  end subroutine mpif_finalize


  subroutine get_lun(my_lun)
!***********************************************************************
! get_lun:
!   Starting from 100, get next free logical unit number
!***********************************************************************
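! Example usage (illustrative sketch):
!   integer :: iunit
!   call get_lun(iunit)
!   open(UNIT=iunit, FILE='debug.dat', FORM='UNFORMATTED', STATUS='REPLACE')
! On compilers supporting Fortran 2008, the NEWUNIT= specifier of OPEN offers
! an alternative way to obtain a free unit number.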

    implicit none

    integer, intent(out) :: my_lun
    integer, save :: free_lun=100
    logical :: exists, iopen

!***********************************************************************

    loop1: do
      inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
      if (exists .and. .not.iopen) exit loop1
      free_lun = free_lun+1
    end do loop1
    my_lun = free_lun

  end subroutine get_lun


  subroutine write_data_dbg(array_in, array_name, tstep, ident)
!***********************************************************************
! Write one-dimensional arrays to file (for debugging purposes)
!***********************************************************************
    implicit none

    real, intent(in), dimension(:) :: array_in
    integer, intent(in) :: tstep
    integer :: lios
    character(LEN=*), intent(in) :: ident, array_name

    character(LEN=8) :: c_ts
    character(LEN=40) :: fn_1, fn_2

!***********************************************************************

    write(c_ts, FMT='(I8.8,BZ)') tstep
    fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
    write(c_ts, FMT='(I2.2,BZ)') mp_np
    fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'

    call get_lun(dat_lun)
    open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
         FORM='UNFORMATTED', STATUS='REPLACE')
    write(UNIT=dat_lun, IOSTAT=lios) array_in
    close(UNIT=dat_lun)

  end subroutine write_data_dbg
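
! Example call (illustrative; the array name is hypothetical): writing an
! array 'dbg_arr' at time step 3600 on a 4-process run,
!   call write_data_dbg(dbg_arr, 'dbg_arr', 3600, 'before-dep')
! produces the unformatted file 'dbg_arr-00003600-before-dep-np04.dat'.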


  subroutine set_fields_synthetic()
!*******************************************************************************
! DESCRIPTION
!   Set all meteorological fields to synthetic (usually constant/homogeneous)
!   values.
!   Used for validation and error-checking.
!
! NOTE
!   This version supports asynchronous communications.
!
! VARIABLES
!
!
!
!*******************************************************************************
    use com_mod

    implicit none

    integer :: li=1, ui=2 ! wfmem indices (i.e., operate on entire field)

    if (.not.lmp_sync) ui=3
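! When the asynchronous communication scheme is used (lmp_sync=.false.) a third
! wind-field time slot is kept in memory, so it is initialised here as well.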


! Variables transferred at initialization only
!*********************************************
!    readclouds=readclouds_
    oro(:,:)=0.0
    excessoro(:,:)=0.0
    lsm(:,:)=0.0
    xlanduse(:,:,:)=0.0
!    wftime
!    numbwf
!    nmixz
!    height

! Time-varying fields:
    uu(:,:,:,li:ui) = 10.0
    vv(:,:,:,li:ui) = 0.0
    uupol(:,:,:,li:ui) = 10.0
    vvpol(:,:,:,li:ui) = 0.0
    ww(:,:,:,li:ui) = 0.0
    tt(:,:,:,li:ui) = 300.0
    rho(:,:,:,li:ui) = 1.3
    drhodz(:,:,:,li:ui) = 0.0
    tth(:,:,:,li:ui) = 0.0
    qvh(:,:,:,li:ui) = 1.0
    qv(:,:,:,li:ui) = 1.0

    pv(:,:,:,li:ui) = 1.0
    clouds(:,:,:,li:ui) = 0

    clwc(:,:,:,li:ui) = 0.0
    ciwc(:,:,:,li:ui) = 0.0

! 2D fields

    cloudsh(:,:,li:ui) = 0
    vdep(:,:,:,li:ui) = 0.0
    ps(:,:,:,li:ui) = 1.0e5
    sd(:,:,:,li:ui) = 0.0
    tcc(:,:,:,li:ui) = 0.0
    tt2(:,:,:,li:ui) = 300.0
    td2(:,:,:,li:ui) = 250.0
    lsprec(:,:,:,li:ui) = 0.0
    convprec(:,:,:,li:ui) = 0.0
    ustar(:,:,:,li:ui) = 1.0
    wstar(:,:,:,li:ui) = 1.0
    hmix(:,:,:,li:ui) = 10000.0
    tropopause(:,:,:,li:ui) = 10000.0
    oli(:,:,:,li:ui) = 0.01

  end subroutine set_fields_synthetic

end module mpi_mod