source: flexpart.git/src/mpi_mod.f90 @ 6a678e3

Last change on this file was 6a678e3, checked in by Espen Sollum ATMOS <eso@…>, 8 years ago

Added option to use double precision for calculating the deposition fields

[8a65cb0]1!**********************************************************************
2! Copyright 1998,1999,2000,2001,2002,2005,2007,2008,2009,2010         *
3! Andreas Stohl, Petra Seibert, A. Frank, Gerhard Wotawa,             *
4! Caroline Forster, Sabine Eckhardt, John Burkhart, Harald Sodemann   *
5!                                                                     *
6! This file is part of FLEXPART.                                      *
7!                                                                     *
8! FLEXPART is free software: you can redistribute it and/or modify    *
9! it under the terms of the GNU General Public License as published by*
10! the Free Software Foundation, either version 3 of the License, or   *
11! (at your option) any later version.                                 *
12!                                                                     *
13! FLEXPART is distributed in the hope that it will be useful,         *
14! but WITHOUT ANY WARRANTY; without even the implied warranty of      *
15! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the       *
16! GNU General Public License for more details.                        *
17!                                                                     *
18! You should have received a copy of the GNU General Public License   *
19! along with FLEXPART.  If not, see <http://www.gnu.org/licenses/>.   *
20!**********************************************************************
21
22module mpi_mod
23
24!*****************************************************************************
25!                                                                            *
26!  DESCRIPTION                                                               *
27!    This module contains subroutines and common variables used for the      *
28!    MPI parallelization of FLEXPART.                                        *
29!                                                                            *
30!  NOTE                                                                      *
31!    Depending on the MPI library installed on your system (e.g. mpich2,     *
32!    OpenMPI) you may need to choose below in this file between              *
33!      use mpi                                                               *
34!    (if the MPI library comes with the file 'mpi.mod'); or                  *
35!      include 'mpif.h'                                                      *
36!                                                                            *
37!                                                                            *
38!*****************************************************************************
39!                                                                            *
40! Variables:                                                                 *
41!                                                                            *
42! mp_ierr                 MPI error code                                     *
43! mp_np                   Number of MPI processes                            *
44! mp_pid                  Process ID of each MPI process                     *
45! mp_seed                 Parameter for random number seed                   *
[5f9d14a]46! read_grp_min            Minimum number of processes at which one will be   *
47!                         used as reader                                     *
[8a65cb0]48! numpart_mpi,            Number of particles per node                       *
49! maxpart_mpi                                                                *
50! mp_partid               MPI process ID for particle calculation            *
51! mp_partgroup_           Refers to the subset of processors performing      *
52!                         loops over particles. Will be all processes        *
53!                         unless a dedicated process runs getfields/readwind *
[5f9d14a]54! lmp_sync                If .false., use asynchronous MPI                   *
[6a678e3]55! mp_cp                   Real precision to use for deposition fields        *
[8a65cb0]56!                                                                            *
57!                                                                            *
58!                                                                            *
59!                                                                            *
60!*****************************************************************************
61
62  use mpi
63  use par_mod, only: dp, sp
64  use com_mod, only: lroot
65
66  implicit none
67
68!  include 'mpif.h'
69
70  public
71
72! Set aside a process for reading windfields if using at least these many processes
73!==================================================
[5f9d14a]74  integer, parameter, private :: read_grp_min=4
[8a65cb0]75!==================================================
76
77! Variables for each MPI process in the world group
78  integer :: mp_ierr, mp_np, mp_pid, mp_partid
79  integer, private :: world_group_id
80
81! Variables for MPI processes in the 'particle' group
82  integer, allocatable, dimension(:) :: mp_partgroup_rank
83  integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
84
85  integer :: mp_seed=0
86  integer, parameter :: mp_sp=MPI_REAL4, mp_dp=MPI_REAL8
[6a678e3]87  integer :: mp_cp
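! mp_cp is assigned at run time in mpif_init (MPI_REAL4 or MPI_REAL8),
! matching dep_prec from par_mod.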
[8a65cb0]88  integer, parameter :: id_root=0 ! master process
89
90! MPI tags/requests for send/receive operation
[5f9d14a]91  integer :: tm1
[d6a0977]92  integer, parameter :: nvar_async=26 !27 !29 :DBG:
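! nvar_async is the number of request slots reserved per destination process
! for the asynchronous wind-field transfer; it must be at least the number of
! MPI_Isend calls issued per destination in mpif_gf_send_vars_async.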
[78e62dc]93!integer, dimension(:), allocatable :: tags
[8a65cb0]94  integer, dimension(:), allocatable :: reqs
95
96
97  integer :: id_read   ! readwind/getfield process
98  integer :: numpart_mpi,maxpart_mpi ! number of particles per node
99  integer :: tot_numpart=0
100  integer :: mp_comm_used ! global or subgroup communicator
101
102  logical :: lmpreader=.false. ! is set to true for reading process(es) only.
103  logical :: lmp_use_reader=.false. ! true if separate readwind process is used
104
105! true if only using synchronous MPI send/recv:
106! If setting this to .false., numwfmem must be set to 3
107!===============================================================================
[a1f4dd6]108  logical :: lmp_sync=.true.
[8a65cb0]109!===============================================================================
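! (With fewer than read_grp_min processes, mpif_init falls back to
! lmp_sync=.true. regardless of this setting.)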
110
[5f9d14a]111! mp_dbg_mode       Used for debugging MPI.
[8a65cb0]112! mp_dev_mode       various checks related to debugging the parallel code
113! mp_dbg_out        write some arrays to data file for debugging
114! mp_measure_time   Measure cpu/wall time, write out at end of run
115! mp_time_barrier   Measure MPI barrier time
[f55fdce]116! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
117!                   to standard output (this does *not* otherwise affect the simulation)
[8a65cb0]118  logical, parameter :: mp_dbg_mode = .false.
119  logical, parameter :: mp_dev_mode = .false.
120  logical, parameter :: mp_dbg_out = .false.
121  logical, parameter :: mp_time_barrier=.true.
[fdc0f03]122  logical, parameter :: mp_measure_time=.false.
[f55fdce]123  logical, parameter :: mp_exact_numpart=.true.
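! Note that these switches are compile-time parameters; changing them
! requires recompiling the module.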
[8a65cb0]124
125! for measuring CPU/Wall time
126  real(sp) :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.
127  real(dp) :: mp_comm_wtime_beg, mp_comm_wtime_end, mp_comm_wtime_total=0.
128  real(sp) :: mp_root_time_beg, mp_root_time_end, mp_root_time_total=0.
129  real(dp) :: mp_root_wtime_beg, mp_root_wtime_end, mp_root_wtime_total=0.
130  real(sp) :: mp_barrier_time_beg, mp_barrier_time_end, mp_barrier_time_total=0.
131  real(dp) :: mp_barrier_wtime_beg, mp_barrier_wtime_end, mp_barrier_wtime_total=0.
132  real(sp) :: tm_nploop_beg, tm_nploop_end, tm_nploop_total=0.
133  real(sp) :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
134  real(dp) :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
135  real(sp) :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
136  real(dp) :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
137  real(sp) :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
138  real(dp) :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
139  real(sp) :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
140  real(dp) :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.
141  real(sp) :: mp_wetdepo_time_beg, mp_wetdepo_time_end, mp_wetdepo_time_total=0.
142  real(dp) :: mp_advance_wtime_beg, mp_advance_wtime_end, mp_advance_wtime_total=0.
143  real(dp) :: mp_conccalc_time_beg, mp_conccalc_time_end, mp_conccalc_time_total=0.
144  real(dp) :: mp_total_wtime_beg, mp_total_wtime_end, mp_total_wtime_total=0.
[78e62dc]145  real(dp) :: mp_vt_wtime_beg, mp_vt_wtime_end, mp_vt_wtime_total
146  real(sp) :: mp_vt_time_beg, mp_vt_time_end, mp_vt_time_total
[8a65cb0]147
148! dat_lun           logical unit number for i/o
149  integer, private :: dat_lun
150
151contains
152
153  subroutine mpif_init
154!***********************************************************************
155! DESCRIPTION
156!   Initialize MPI.
157!   
158!   Create the global communicator MPI_COMM_WORLD
159!   If using a separate MPI process for getfields/readwind, a subgroup
160!   is created for the other processes.
161!
162! VARIABLES
163!   mpi_mode    default 0, set to 2/3 if running MPI version
164!   mp_np       number of running processes, decided at run-time
165!***********************************************************************
[6a678e3]166    use par_mod, only: maxpart, numwfmem, dep_prec
[8a65cb0]167    use com_mod, only: mpi_mode
168
169    implicit none
170
171    integer :: i,j,s,addmaxpart=0
172
[6a678e3]173! Each process gets an ID (mp_pid) in the range 0,..,mp_np-1
[8a65cb0]174    call MPI_INIT(mp_ierr)
175    if (mp_ierr /= 0) goto 100
176    call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)
177    if (mp_ierr /= 0) goto 100
178    call MPI_COMM_SIZE(MPI_COMM_WORLD, mp_np, mp_ierr)
179    if (mp_ierr /= 0) goto 100
180
181
[6a678e3]182! Variable mpi_mode is used to handle subroutines common to parallel/serial version
[8a65cb0]183    if (lmp_sync) then
184      mpi_mode=2 ! hold 2 windfields in memory
185    else
186      mpi_mode=3 ! hold 3 windfields in memory
187    end if
188
189    if (mp_pid.ne.0) then
190      lroot = .false.
191    end if
192
[6a678e3]193! Set MPI precision to use for transferring deposition fields
194!************************************************************
195    if (dep_prec==dp) then
196      mp_cp = MPI_REAL8
197      if (lroot) write(*,*) 'Using double precision for deposition fields'
198    else if (dep_prec==sp) then
199      mp_cp = MPI_REAL4
200      if (lroot) write(*,*) 'Using single precision for deposition fields'
201    else
202      write(*,*) 'ERROR: something went wrong setting MPI real precision'
203      stop
204    end if
205
[8a65cb0]206! Check for sensible combination of parameters
207!*********************************************
208
209    if (.not.lmp_sync.and.numwfmem.ne.3.and.lroot) then
210      write(*,FMT='(80("#"))')
211      write(*,*) '#### mpi_mod::mpif_init> ERROR: ', &
212           & 'numwfmem must be set to 3 for asynchronous reading ####'
213      write(*,FMT='(80("#"))')
214      stop
215    else if (lmp_sync.and.numwfmem.ne.2.and.lroot) then
216      write(*,FMT='(80("#"))')
217      write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
[a1f4dd6]218           & 'numwfmem should be set to 2 for synchronous'
219      write(*,*) ' reading. Results will still be valid, but an unnecessary '
220      write(*,*) 'amount of memory is being allocated.'
[8a65cb0]221      write(*,FMT='(80("#"))')
[5f9d14a]222! Force "synchronized" version if all processes will call getfields
[d6a0977]223    else if ((.not.lmp_sync.and.mp_np.lt.read_grp_min).or.(mp_np.eq.1)) then
[5f9d14a]224      if (lroot) then
225        write(*,FMT='(80("#"))')
226        write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
227             & 'all procs call getfields. Setting lmp_sync=.true.'
228        write(*,FMT='(80("#"))')
229      end if
230      lmp_sync=.true. ! :DBG: eso fix this...
[8a65cb0]231    end if
[6b22af9]232
233! TODO: Add more warnings for unimplemented flexpart features
[8a65cb0]234
235! Set ID of process that calls getfield/readwind.
236! Using the last process in the group ensures statistically identical results
237! as running with one process fewer, but without a separate read process
238!**********************************************************************
239
240!    id_read = min(mp_np-1, 1)
241    id_read = mp_np-1
242
243    if (mp_pid.eq.id_read) lmpreader=.true.
244
245    call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)
246
[5f9d14a]247! Create an MPI group/communicator that will calculate trajectories.
[8a65cb0]248! Skip this step if the program is run with only a few processes
249!************************************************************************
250    allocate(mp_partgroup_rank(0:mp_np-2))
251
252! This allows checking for allocation of arrays when no subgroup is used
253    mp_partgroup_pid=0
254
255    if (read_grp_min.lt.2) then
256      write(*,*) '#### mpi_mod::mpif_init> ERROR ####', &
257           & 'read_grp_min should be at least 2. Exiting'
258      stop
259    end if
260
261    if (mp_np.ge.read_grp_min) then
262      lmp_use_reader = .true.
263
[5f9d14a]264! Map the subgroup IDs = 0,1,2,...,mp_np-2, skipping the reader process
[8a65cb0]265      j=-1
266      do i=0, mp_np-2 !loop over all (subgroup) IDs
267        j=j+1
268        if (i.eq.id_read) then
269          j=j+1
270        end if
271        mp_partgroup_rank(i) = j
272      end do
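! Illustrative example (not executed code): if mp_np=4 and the reader were
! process 1, the loop above would give mp_partgroup_rank = (/0, 2, 3/), i.e.
! the world ranks belonging to the particle group. With the default
! id_read=mp_np-1 the mapping is simply 0,1,...,mp_np-2.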
273
274      call MPI_Group_incl (world_group_id, mp_np-1, mp_partgroup_rank, &
275           &mp_partgroup_pid, mp_ierr)
276      if (mp_ierr /= 0) goto 100
277      call MPI_Comm_create (MPI_COMM_WORLD, mp_partgroup_pid, mp_partgroup_comm, mp_ierr)
278      if (mp_ierr /= 0) goto 100
279
280      if (mp_pid.ne.id_read) then
281        call MPI_Comm_rank (mp_partgroup_comm, mp_partgroup_pid, mp_ierr)
282        if (mp_ierr /= 0) goto 100
283
284! Get size of new group (=mp_np-1)
285        call MPI_COMM_SIZE(mp_partgroup_comm, mp_partgroup_np, mp_ierr)
286        if (mp_ierr /= 0) goto 100
287        if (mp_partgroup_np.ne.mp_np-1) then
288          write(*,*)  '#### mpi_mod:: mpif_init> ERROR ####&
289               & mp_partgroup_np.ne.mp_np-1'
290          stop
291        endif
292
293      else
294        mp_partgroup_pid = -1
295      end if
296    end if
297
298    if (lmp_use_reader) then
299      mp_comm_used = mp_partgroup_comm
300      mp_partid = mp_partgroup_pid
301      mp_partgroup_np=mp_np-1
302    else
303      mp_comm_used = MPI_COMM_WORLD
304      mp_partgroup_np = mp_np
305      mp_partid = mp_pid
306    end if
307
308! Set maxpart per process
309    if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1
310    maxpart_mpi=int(maxpart/mp_partgroup_np)+addmaxpart
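! Example: maxpart=10 and mp_partgroup_np=3 gives mod(10,3)=1, so particle
! process 0 gets maxpart_mpi=3+1=4 while processes 1 and 2 get 3 each
! (4+3+3=10).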
311
312! Do not allocate particle data arrays for readwind process
313    if (lmpreader.and.lmp_use_reader) then
314      maxpart_mpi=0
315    end if
316
317! Set random seed for each non-root process
318    if (mp_pid.gt.0) then
319!    if (mp_pid.ge.0) then
320!      call system_clock(s)
321      s = 244
322      mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
323    end if
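! Each non-root process thus gets its own negative seed derived from its PID,
! intended to decorrelate the random number streams between processes
! (the root process keeps mp_seed=0).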
324    if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed
[5f9d14a]325    if (mp_dbg_mode) then
326! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20
327      mp_seed=0
328      if (lroot) write(*,*) 'MPI: setting seed=0'
329    end if
[8a65cb0]330
331! Allocate request array for asynchronous MPI
332    if (.not.lmp_sync) then
333      allocate(reqs(0:nvar_async*mp_np-1))
[5f9d14a]334      reqs(:)=MPI_REQUEST_NULL
[8a65cb0]335    else
336      allocate(reqs(0:1))
[5f9d14a]337      reqs(:)=MPI_REQUEST_NULL
[8a65cb0]338    end if
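! reqs holds nvar_async request handles per destination process for the
! asynchronous wind-field transfers; initialising them to MPI_REQUEST_NULL
! makes waiting on not-yet-used entries a no-op.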
339
340    goto 101
341
342100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
343    stop
344
345101 end subroutine mpif_init
346
347
348  subroutine mpif_mpi_barrier
349!***********************************************************************
350! Set MPI barrier (all processes are synchronized here), with the
351! possible exception of a separate 'readwind' process.
352! Optionally measure cpu/wall time.
353!
354!***********************************************************************
355    implicit none
356
357    if (mp_time_barrier) then
358      call cpu_time(mp_barrier_time_beg)
359      mp_barrier_wtime_beg = mpi_wtime()
360    endif
361
362    call MPI_BARRIER(mp_comm_used, mp_ierr)
363    if (mp_ierr /= 0) goto 100
364
365    if (mp_time_barrier) then
366      call cpu_time(mp_barrier_time_end)
367      mp_barrier_wtime_end = mpi_wtime()
368
369      mp_barrier_time_total=mp_barrier_time_total+&
370           &(mp_barrier_time_end-mp_barrier_time_beg)
371      mp_barrier_wtime_total=mp_barrier_wtime_total+&
372           &(mp_barrier_wtime_end-mp_barrier_wtime_beg)
373    end if
374
375    goto 101
376
377100 write(*,*) '#### mpi_mod::mpif_mpi_barrier> ERROR ####', mp_ierr
378    stop
379
380101 end subroutine mpif_mpi_barrier
381
382
383  subroutine mpif_com_mod_allocate
384!*******************************************************************************   
385! Dynamic allocation of arrays (in serial code these have size maxpart)
386!
387!*******************************************************************************
388    use com_mod
389
390    implicit none
391
392    integer :: array_size
393
394    array_size = maxpart_mpi
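! maxpart_mpi is set in mpif_init, so this routine must be called after
! MPI initialization.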
395
396    allocate(itra1(array_size),npoint(array_size),&
397         & nclass(array_size),idt(array_size),&
398         & itramem(array_size),itrasplit(array_size),&
399         & xtra1(array_size),ytra1(array_size),&
400         & ztra1(array_size),xmass1(array_size, maxspec))
401
402    allocate(uap(array_size),ucp(array_size),&
403         & uzp(array_size),us(array_size),&
404         & vs(array_size),&
405         & ws(array_size),cbt(array_size))
406
407
408  end subroutine mpif_com_mod_allocate
409
410
411  subroutine mpif_tm_send_dims
412!***********************************************************************
413! Distribute array dimensions from pid 0 to all processes.
414! numpart_mpi/numpart is sent to allow dynamic allocation
415!
416! Currently not in use.
417!
418! Variables of similar type (integer, real) are placed in an array
419! and sent collectively, to avoid the overhead associated with individual
420! MPI send/receives
421!
422!
423!***********************************************************************
424    use com_mod
425
426    implicit none
427
428    integer :: add_part=0
429
[5f9d14a]430    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
[8a65cb0]431
432! MPI subgroup does the particle-loop
433    if (lmp_use_reader) then
434      if (mod(numpart,mp_partgroup_np).ne.0) add_part=1
435      numpart_mpi=int(numpart/mp_partgroup_np)+add_part
436    else
437      if (mod(numpart,mp_np).ne.0) add_part=1
438      numpart_mpi=int(numpart/mp_np)+add_part
439    end if
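! Example: numpart=10 over 4 particle processes gives add_part=1 and
! numpart_mpi=int(10/4)+1=3, i.e. the per-process count is rounded up so
! that all particles are covered.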
440
441
442! redefine numpart as 'numpart per process' throughout the code
443!**************************************************************
444    numpart = numpart_mpi
445
446  end subroutine mpif_tm_send_dims
447
448
449  subroutine mpif_tm_send_vars
450!***********************************************************************
451! Distribute particle variables from pid 0 to all processes.
452! Called from timemanager
[6b22af9]453! *NOT IN USE* at the moment, but can be useful for debugging
[8a65cb0]454!
455!***********************************************************************
456    use com_mod
457
458    implicit none
459
460    integer :: i
461
462! Time for MPI communications
463!****************************
464    if (mp_measure_time) call mpif_mtime('commtime',0)
465
466! Distribute variables, send from pid 0 to other processes
467!**********************************************************************
468
469! integers
470    if (lroot) then
471      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
472           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
473      if (mp_ierr /= 0) goto 600
474      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
475           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
476      if (mp_ierr /= 0) goto 600
477      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
478           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
479      if (mp_ierr /= 0) goto 600
480      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
481           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
482      if (mp_ierr /= 0) goto 600
483      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
484           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
485      if (mp_ierr /= 0) goto 600
486
487! int2
488      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
489           &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
490      if (mp_ierr /= 0) goto 600
491
492! real
[fdc0f03]493      call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,&
494           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]495      if (mp_ierr /= 0) goto 600
[fdc0f03]496      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
497           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]498      if (mp_ierr /= 0) goto 600
[fdc0f03]499      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
500           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]501      if (mp_ierr /= 0) goto 600
502
[fdc0f03]503      call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,&
504           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]505      if (mp_ierr /= 0) goto 600
[fdc0f03]506      call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,&
507           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]508      if (mp_ierr /= 0) goto 600
[fdc0f03]509      call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,&
510           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]511      if (mp_ierr /= 0) goto 600
512
513      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
514           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
515      if (mp_ierr /= 0) goto 600
516      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
517           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
518      if (mp_ierr /= 0) goto 600
[fdc0f03]519      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,&
520           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]521      if (mp_ierr /= 0) goto 600
522
523      do i=1,nspec
[fdc0f03]524        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,&
525             &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]526        if (mp_ierr /= 0) goto 600
527      end do
528
529    else ! (mp_pid >= 1)
530! integers
531      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
532           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
533      if (mp_ierr /= 0) goto 600
534      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
535           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
536      if (mp_ierr /= 0) goto 600
537      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
538           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
539      if (mp_ierr /= 0) goto 600
540      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
541           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
542      if (mp_ierr /= 0) goto 600
543      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
544           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
545      if (mp_ierr /= 0) goto 600
546
547! int2
548      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
549           &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
550      if (mp_ierr /= 0) goto 600
551
552! reals
[fdc0f03]553      call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,&
554           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]555      if (mp_ierr /= 0) goto 600
[fdc0f03]556      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,&
557           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]558      if (mp_ierr /= 0) goto 600
[fdc0f03]559      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,&
560           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]561      if (mp_ierr /= 0) goto 600
562
[fdc0f03]563      call MPI_SCATTER(us,numpart_mpi,mp_sp,us,&
564           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]565      if (mp_ierr /= 0) goto 600
[fdc0f03]566      call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,&
567           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]568      if (mp_ierr /= 0) goto 600
[fdc0f03]569      call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,&
570           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]571      if (mp_ierr /= 0) goto 600
572
573      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
574           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
575      if (mp_ierr /= 0) goto 600
576      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
577           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
578      if (mp_ierr /= 0) goto 600
[fdc0f03]579      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,&
580           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]581      if (mp_ierr /= 0) goto 600
582
583      do i=1,nspec
[fdc0f03]584        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),&
585             &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
[8a65cb0]586        if (mp_ierr /= 0) goto 600
587      end do
588
589    end if !(lroot)
590
591    if (mp_measure_time) call mpif_mtime('commtime',1)
592
593    goto 601
594
595600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
596    stop
597601 end subroutine mpif_tm_send_vars
598
599
600  subroutine mpif_tm_collect_vars
601!*******************************************************************************
602! Collect particle data to PID 0 from all processes.                           *
603!                                                                              *
604! This can be called from timemanager_mpi, before calculating                  *
605! concentrations/writing output grids.                                         *
606!                                                                              *
[6b22af9]607! Currently *not in use*, as each process calculates concentrations            *
608! separately, but can be useful for debugging                                  *
[8a65cb0]609!                                                                              *
610! To use this routine (together with mpif_tm_send_vars) to transfer data       *
611! to the MPI root process (useful for debugging), insert code like this        *
612! (in timemanager_mpi.f90)                                                     *
613!                                                                              *
[6b22af9]614!      if (lroot) tot_numpart=numpart ! root stores total numpart              *
[8a65cb0]615!      call mpif_tm_send_dims                                                  *
616!      if (numpart>1) then                                                     *
617!        call mpif_tm_send_vars                                                *
618!      end if                                                                  *
619!                                                                              *
620!    To collect data from all processes to root process after a parallel       *
621!    region:                                                                   *
622!                                                                              *
623!      if (numpart>0) then                                                     *
[6b22af9]624!          if (lroot) numpart = tot_numpart                                    *
[8a65cb0]625!       call mpif_tm_collect_vars                                              *
626!      end if                                                                  *
627!                                                                              *
[6b22af9]628! Then a section that begins with "if (lroot) ..." will behave like            *
629! serial flexpart                                                              *
[8a65cb0]630!                                                                              *
631!*******************************************************************************
632    use com_mod !, only: numpart, nspec, itra1, npoint, nclass
633
634    implicit none
635
636    integer :: i
637
638    logical :: use_mp_vars = .false.! use mp_... type allocated vars
639    logical :: use_in_place = .true.! use MPI_IN_PLACE to collect vars.
640
641
642! Time for MPI communications
643    if (mp_measure_time) call mpif_mtime('commtime',0)
644
645! Collect variables, sent from all processes to pid 0
646!**********************************************************************
647
648    if (.not. use_mp_vars.and..not.use_in_place) then
649
650! Integers:
651      call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
652           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
653      if (mp_ierr /= 0) goto 600
654      call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
655           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
656      if (mp_ierr /= 0) goto 600
657      call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
658           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
659      if (mp_ierr /= 0) goto 600
660      call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
661           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
662      if (mp_ierr /= 0) goto 600
663      call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
664           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
665      if (mp_ierr /= 0) goto 600
666
667! int2
668      call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
669           &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
670      if (mp_ierr /= 0) goto 600
671
672! Reals:
[fdc0f03]673      call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
674           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]675      if (mp_ierr /= 0) goto 600
[fdc0f03]676      call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
677           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]678      if (mp_ierr /= 0) goto 600
[fdc0f03]679      call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
680           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]681      if (mp_ierr /= 0) goto 600
682
[fdc0f03]683      call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
684           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]685      if (mp_ierr /= 0) goto 600
[fdc0f03]686      call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
687           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]688      if (mp_ierr /= 0) goto 600
[fdc0f03]689      call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
690           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]691      if (mp_ierr /= 0) goto 600
692
693
694      call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
695           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
696      if (mp_ierr /= 0) goto 600
697      call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
698           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
699      if (mp_ierr /= 0) goto 600
[fdc0f03]700      call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
701           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]702      if (mp_ierr /= 0) goto 600
703
704      do i=1, nspec
[fdc0f03]705        call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
706             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]707        if (mp_ierr /= 0) goto 600
708
709      end do
710
711! Use variables npoint etc directly for communications
712!***********************************************************************
713    else if (use_in_place.and..not.use_mp_vars) then
714      if (lroot) then
715! Integers:
716        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, npoint, &
717             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
718        if (mp_ierr /= 0) goto 600
719        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, idt, &
720             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
721        if (mp_ierr /= 0) goto 600
722        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itra1, &
723             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
724        if (mp_ierr /= 0) goto 600
725        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, nclass, &
726             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
727        if (mp_ierr /= 0) goto 600
728        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itramem, &
729             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
730        if (mp_ierr /= 0) goto 600
731
732! int2
733        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER2, cbt, &
734             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
735        if (mp_ierr /= 0) goto 600
736
737! Reals:
[fdc0f03]738        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uap, &
739             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]740        if (mp_ierr /= 0) goto 600
[fdc0f03]741        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ucp, &
742             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]743        if (mp_ierr /= 0) goto 600
[fdc0f03]744        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uzp, &
745             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]746        if (mp_ierr /= 0) goto 600
747
[fdc0f03]748        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, us, &
749             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]750        if (mp_ierr /= 0) goto 600
[fdc0f03]751        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, vs, &
752             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]753        if (mp_ierr /= 0) goto 600
[fdc0f03]754        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ws, &
755             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]756        if (mp_ierr /= 0) goto 600
757
758
759        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, xtra1, &
760             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
761        if (mp_ierr /= 0) goto 600
762        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, ytra1, &
763             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
764        if (mp_ierr /= 0) goto 600
[fdc0f03]765        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ztra1, &
766             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]767        if (mp_ierr /= 0) goto 600
768
769        do i=1, nspec
[fdc0f03]770          call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, xmass1(:,i), &
771               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]772          if (mp_ierr /= 0) goto 600
773        end do
774
775      else ! (for mp_pid >= 1)
776
777! Integers:
778        call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
779             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
780        if (mp_ierr /= 0) goto 600
781        call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
782             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
783        if (mp_ierr /= 0) goto 600
784        call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
785             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
786        if (mp_ierr /= 0) goto 600
787        call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
788             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
789        if (mp_ierr /= 0) goto 600
790        call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
791             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
792        if (mp_ierr /= 0) goto 600
793
794! int2
795        call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
796             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
797        if (mp_ierr /= 0) goto 600
798
799! Reals:
[fdc0f03]800        call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
801             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]802        if (mp_ierr /= 0) goto 600
[fdc0f03]803        call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
804             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]805        if (mp_ierr /= 0) goto 600
[fdc0f03]806        call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
807             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]808        if (mp_ierr /= 0) goto 600
809
[fdc0f03]810        call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
811             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]812        if (mp_ierr /= 0) goto 600
[fdc0f03]813        call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
814             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]815        if (mp_ierr /= 0) goto 600
[fdc0f03]816        call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
817             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]818        if (mp_ierr /= 0) goto 600
819
820
821        call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
822             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
823        if (mp_ierr /= 0) goto 600
824        call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
825             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
826        if (mp_ierr /= 0) goto 600
[fdc0f03]827        call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
828             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]829        if (mp_ierr /= 0) goto 600
830
831        do i=1, nspec
[fdc0f03]832          call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
833               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
[8a65cb0]834          if (mp_ierr /= 0) goto 600
835        end do
836      end if ! (lroot)
837    end if !  (use_in_place)
838
839    if (mp_measure_time) call mpif_mtime('commtime',1)
840
841    goto 601
842
843600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
844    stop
845601 end subroutine mpif_tm_collect_vars
846
847
848  subroutine mpif_gf_send_vars(memstat)
849!*******************************************************************************
850! DESCRIPTION
[5f9d14a]851!   Distribute 'getfield' variables from reader process
[8a65cb0]852!
853!   Called from timemanager
854!
855! NOTE
856!   This subroutine distributes windfields read by the reader process to
857!   all other processes. Usually one set of fields is transferred, but at
858!   the first timestep, when there are no fields in memory, both are transferred.
859!   MPI_Bcast is used, so implicitly all processes are synchronized at this
860!   step
861!
862! TODO
863!   GFS version
864!
865!*******************************************************************************
866    use com_mod
867    use par_mod,only: numwfmem
868
869    implicit none
870
871    integer, intent(in) :: memstat
872
873! Common array sizes used for communications
874    integer, parameter :: d3_size1 = nxmax*nymax*nzmax
875    integer, parameter :: d3_size2 = nxmax*nymax*nuvzmax
876    integer, parameter :: d2_size1 = nxmax*nymax
877    integer, parameter :: d2_size2 = nxmax*nymax*maxspec
878    integer, parameter :: d2_size3 = nxmax*nymax
879    integer, parameter :: d1_size1 = maxwf
880
881    integer :: d3s1,d3s2,d2s1,d2s2
882
883! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
884    integer :: li,ui
885
886! The first time the routine is called, the unchangeable fields are transferred
887    logical :: first_call=.true.
888
889!**********************************************************************
890
891! Sizes of arrays transferred
892    d3s1=d3_size1
893    d3s2=d3_size2
894    d2s1=d2_size1
895    d2s2=d2_size2
896
897! Decide which field(s) need to be transferred
898    if (memstat.ge.32) then ! distribute 2 fields, to 1st/2nd indices
899      li=1; ui=2
900      d3s1=2*d3_size1
901      d3s2=2*d3_size2
902      d2s1=2*d2_size1
903      d2s2=2*d2_size2
904    else if (memstat.eq.17) then ! distribute 1 field, place on 1st index
905      li=1; ui=1
906    else if (memstat.eq.18) then ! distribute 1 field, place on 2nd index
907      li=2; ui=2
908    else if (memstat.eq.19) then ! distribute 1 field, place on 3rd index
909      li=3; ui=3
910    else
911      write(*,*) '#### mpi_mod::mpif_gf_send_vars> ERROR: ', &
912           & 'wrong value of memstat, exiting ####', memstat
913      stop
914    end if
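! (memstat thus encodes both how many fields were read and which of the
! numwfmem time slots they occupy: values >=32 mean two fields on slots 1-2,
! while 17/18/19 mean one field on slot 1, 2 or 3, respectively.)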
915
916
917! Time for MPI communications
918    if (mp_measure_time) call mpif_mtime('commtime',0)
919
920! Send variables from getfield process (id_read) to other processes
921!**********************************************************************
922
[41d8574]923! The non-reader processes need to know if cloud water was read.
[5f9d14a]924! TODO: only at first step or always?
925    call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
926    if (mp_ierr /= 0) goto 600
927
[8a65cb0]928! Static fields/variables sent only at startup
929    if (first_call) then
[fdc0f03]930      call MPI_Bcast(oro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]931      if (mp_ierr /= 0) goto 600
[fdc0f03]932      call MPI_Bcast(excessoro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]933      if (mp_ierr /= 0) goto 600
[fdc0f03]934      call MPI_Bcast(lsm(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]935      if (mp_ierr /= 0) goto 600
[fdc0f03]936      call MPI_Bcast(xlanduse(:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]937      if (mp_ierr /= 0) goto 600
938      call MPI_Bcast(wftime,d1_size1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
939      if (mp_ierr /= 0) goto 600
940      call MPI_Bcast(numbwf,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
941      if (mp_ierr /= 0) goto 600
942      call MPI_Bcast(nmixz,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
943      if (mp_ierr /= 0) goto 600
944      call MPI_Bcast(height,nzmax,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
[5f9d14a]945      if (mp_ierr /= 0) goto 600
[8a65cb0]946
947      first_call=.false.
948    endif
949
[fdc0f03]950    call MPI_Bcast(uu(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]951    if (mp_ierr /= 0) goto 600
[fdc0f03]952    call MPI_Bcast(vv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]953    if (mp_ierr /= 0) goto 600
[fdc0f03]954    call MPI_Bcast(uupol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]955    if (mp_ierr /= 0) goto 600
[fdc0f03]956    call MPI_Bcast(vvpol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]957    if (mp_ierr /= 0) goto 600
[fdc0f03]958    call MPI_Bcast(ww(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]959    if (mp_ierr /= 0) goto 600
[fdc0f03]960    call MPI_Bcast(tt(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]961    if (mp_ierr /= 0) goto 600
[fdc0f03]962    call MPI_Bcast(rho(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]963    if (mp_ierr /= 0) goto 600
[fdc0f03]964    call MPI_Bcast(drhodz(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]965    if (mp_ierr /= 0) goto 600
[fdc0f03]966    call MPI_Bcast(tth(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]967    if (mp_ierr /= 0) goto 600
[fdc0f03]968    call MPI_Bcast(qvh(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]969    if (mp_ierr /= 0) goto 600
[fdc0f03]970    call MPI_Bcast(qv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]971    if (mp_ierr /= 0) goto 600
[fdc0f03]972    call MPI_Bcast(pv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]973    if (mp_ierr /= 0) goto 600
974    call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
[5f9d14a]975    if (mp_ierr /= 0) goto 600
976
977! cloud water/ice:
978    if (readclouds) then
[fdc0f03]979      ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[41d8574]980      ! if (mp_ierr /= 0) goto 600
[fdc0f03]981      call MPI_Bcast(clw4(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[5f9d14a]982      if (mp_ierr /= 0) goto 600
[fdc0f03]983      ! call MPI_Bcast(clwc(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[d6a0977]984      ! if (mp_ierr /= 0) goto 600
[fdc0f03]985      ! call MPI_Bcast(ciwc(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[d6a0977]986      ! if (mp_ierr /= 0) goto 600
[5f9d14a]987    end if
[8a65cb0]988
989! 2D fields
[fdc0f03]990    call MPI_Bcast(cloudsh(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]991    if (mp_ierr /= 0) goto 600
[fdc0f03]992    call MPI_Bcast(vdep(:,:,:,li:ui),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]993    if (mp_ierr /= 0) goto 600
[fdc0f03]994    call MPI_Bcast(ps(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]995    if (mp_ierr /= 0) goto 600
[fdc0f03]996    call MPI_Bcast(sd(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]997    if (mp_ierr /= 0) goto 600
[fdc0f03]998    call MPI_Bcast(tcc(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]999    if (mp_ierr /= 0) goto 600
[fdc0f03]1000    call MPI_Bcast(tt2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1001    if (mp_ierr /= 0) goto 600
[fdc0f03]1002    call MPI_Bcast(td2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1003    if (mp_ierr /= 0) goto 600
[fdc0f03]1004    call MPI_Bcast(lsprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1005    if (mp_ierr /= 0) goto 600
[fdc0f03]1006    call MPI_Bcast(convprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1007    if (mp_ierr /= 0) goto 600
[fdc0f03]1008    call MPI_Bcast(ustar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1009    if (mp_ierr /= 0) goto 600
[fdc0f03]1010    call MPI_Bcast(wstar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1011    if (mp_ierr /= 0) goto 600
[fdc0f03]1012    call MPI_Bcast(hmix(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1013    if (mp_ierr /= 0) goto 600
[fdc0f03]1014    call MPI_Bcast(tropopause(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1015    if (mp_ierr /= 0) goto 600
[fdc0f03]1016    call MPI_Bcast(oli(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1017    if (mp_ierr /= 0) goto 600
1018
[fdc0f03]1019    call MPI_Bcast(z0,numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1020    if (mp_ierr /= 0) goto 600
1021
1022    if (mp_measure_time) call mpif_mtime('commtime',1)
1023
1024    goto 601
1025
1026600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
1027    stop
1028
1029601 end subroutine mpif_gf_send_vars
1030
1031
1032  subroutine mpif_gf_send_vars_nest(memstat)
1033!***********************************************************************
1034! DESCRIPTION
1035!   Distribute 'getfield' variables from reader process to all processes.
1036!   For nested fields
1037!
1038!   Called from timemanager
1039!
1040! NOTE
1041!   This subroutine distributes nested windfields from the reader process to
1042!   all other processes. Usually one set of fields is transferred, but at
1043!   the first timestep, when there are no fields in memory, both are transferred.
1044!   MPI_Bcast is used, so implicitly all processes are synchronized at this
1045!   step
1046!
[5f9d14a]1047! TODO
1048!   Transfer cloud water/ice if and when available for nested fields
[8a65cb0]1049!
1050!***********************************************************************
1051    use com_mod
1052    use par_mod, only: numwfmem
1053
1054    implicit none
1055
1056    integer, intent(in) :: memstat
1057    integer :: i
1058! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1059    integer :: li,ui
1060
1061! Common array sizes used for communications
1062    integer :: d3_size1 = nxmaxn*nymaxn*nzmax*maxnests
1063    integer :: d3_size2 = nxmaxn*nymaxn*nuvzmax*maxnests
1064    integer :: d2_size1 = nxmaxn*nymaxn*maxnests
1065    integer :: d2_size2 = nxmaxn*nymaxn*maxspec*maxnests
1066    integer :: d2_size3 = nxmaxn*nymaxn*maxnests
1067
1068    integer :: d3s1,d3s2,d2s1,d2s2
1069
1070! The first time the routine is called, the unchangeable fields are transferred
1071    logical :: first_call=.true.
1072
1073!**********************************************************************
1074
1075! Sizes of arrays transferred
1076    d3s1=d3_size1
1077    d3s2=d3_size2
1078    d2s1=d2_size1
1079    d2s2=d2_size2
1080
1081! Decide which field(s) need to be transferred
1082    if (memstat.ge.32) then ! distribute 2 fields
1083      li=1; ui=2
1084      d3s1=2*d3_size1
1085      d3s2=2*d3_size2
1086      d2s1=2*d2_size1
1087      d2s2=2*d2_size2
1088    else if (memstat.eq.17) then ! distribute 1 field, on 1st index
1089      li=1; ui=1
1090    else if (memstat.eq.18) then ! distribute 1 field, on 2nd index
1091      li=2; ui=2
1092    else if (memstat.eq.19) then ! distribute 1 field, on 3rd index
1093      li=3; ui=3
1094    else
1095      write(*,*) '#### mpi_mod::mpif_gf_send_vars_nest> ERROR: ', &
1096           & 'wrong value of memstat, exiting ####', memstat
1097      stop
1098    end if
1099
1100
1101! Time for MPI communications
1102    if (mp_measure_time) call mpif_mtime('commtime',0)
1103
1104! Distribute variables, send from getfield process (id_read) to other
1105! processes
1106!**********************************************************************
1107
1108! Static fields/variables sent only at startup
1109    if (first_call) then
[fdc0f03]1110      call MPI_Bcast(oron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1111      if (mp_ierr /= 0) goto 600
[fdc0f03]1112      call MPI_Bcast(excessoron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1113      if (mp_ierr /= 0) goto 600
[fdc0f03]1114      call MPI_Bcast(lsmn(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1115      if (mp_ierr /= 0) goto 600
[fdc0f03]1116      call MPI_Bcast(xlandusen(:,:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1117      if (mp_ierr /= 0) goto 600
1118      first_call=.false.
1119    end if
1120
1121! MPI prefers contiguous arrays for sending (else a buffer is created),
1122! hence the loop
1123
1124    do i=1, numbnests
1125! 3D fields
[fdc0f03]1126      call MPI_Bcast(uun(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1127      if (mp_ierr /= 0) goto 600
[fdc0f03]1128      call MPI_Bcast(vvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1129      if (mp_ierr /= 0) goto 600
[fdc0f03]1130      call MPI_Bcast(wwn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1131      if (mp_ierr /= 0) goto 600
[fdc0f03]1132      call MPI_Bcast(ttn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1133      if (mp_ierr /= 0) goto 600
[fdc0f03]1134      call MPI_Bcast(rhon(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1135      if (mp_ierr /= 0) goto 600
[fdc0f03]1136      call MPI_Bcast(drhodzn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1137      if (mp_ierr /= 0) goto 600
[fdc0f03]1138      call MPI_Bcast(tthn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1139      if (mp_ierr /= 0) goto 600
[fdc0f03]1140      call MPI_Bcast(qvhn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1141      if (mp_ierr /= 0) goto 600
[fdc0f03]1142      call MPI_Bcast(qvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1143      if (mp_ierr /= 0) goto 600
[fdc0f03]1144      call MPI_Bcast(pvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1145      if (mp_ierr /= 0) goto 600
1146      call MPI_Bcast(cloudsn(:,:,:,li:ui,i),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1147      if (mp_ierr /= 0) goto 600
1148
1149! 2D fields
[fdc0f03]1150      call MPI_Bcast(cloudsnh(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1151      if (mp_ierr /= 0) goto 600
[fdc0f03]1152      call MPI_Bcast(vdepn(:,:,:,li:ui,i),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1153      if (mp_ierr /= 0) goto 600
[fdc0f03]1154      call MPI_Bcast(psn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1155      if (mp_ierr /= 0) goto 600
[fdc0f03]1156      call MPI_Bcast(sdn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1157      if (mp_ierr /= 0) goto 600
[fdc0f03]1158      call MPI_Bcast(tccn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1159      if (mp_ierr /= 0) goto 600
[fdc0f03]1160      call MPI_Bcast(tt2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1161      if (mp_ierr /= 0) goto 600
[fdc0f03]1162      call MPI_Bcast(td2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1163      if (mp_ierr /= 0) goto 600
[fdc0f03]1164      call MPI_Bcast(lsprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1165      if (mp_ierr /= 0) goto 600
[fdc0f03]1166      call MPI_Bcast(convprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1167      if (mp_ierr /= 0) goto 600
[fdc0f03]1168      call MPI_Bcast(ustarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1169      if (mp_ierr /= 0) goto 600
[fdc0f03]1170      call MPI_Bcast(wstarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1171      if (mp_ierr /= 0) goto 600
[fdc0f03]1172      call MPI_Bcast(olin(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1173      if (mp_ierr /= 0) goto 600
[fdc0f03]1174      call MPI_Bcast(hmixn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1175      if (mp_ierr /= 0) goto 600
[fdc0f03]1176      call MPI_Bcast(tropopausen(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
[8a65cb0]1177      if (mp_ierr /= 0) goto 600
1178    end do
1179
1180    if (mp_measure_time) call mpif_mtime('commtime',1)
1181
1182    goto 601
1183
1184600 write(*,*) "mpi_mod> mp_ierr /= 0",mp_ierr
1185    stop
1186
1187601 end subroutine mpif_gf_send_vars_nest
1188
1189
1190  subroutine mpif_gf_send_vars_async(memstat)
1191!*******************************************************************************
1192! DESCRIPTION
1193!   Distribute 'getfield' variables from reader process to all processes.
[5f9d14a]1194!   Called from timemanager by reader process only
[8a65cb0]1195!
1196! NOTE
1197!   This version uses asynchronous sends. The newest fields are sent in the
1198!   background, so calculations can continue while
1199!   MPI communications are performed.
1200!
1201!   The way the MPI tags/requests are sequenced means that this routine must
1202!   carefully match routine 'mpif_gf_recv_vars_async'
1203!
1204! VARIABLES
1205!   memstat -- input, for resolving pointer to windfield index being read
1206!   mind    -- index where to place new fields
1207!
[5f9d14a]1208! TODO
[8a65cb0]1209!
1210!*******************************************************************************
1211    use com_mod
1212
1213    implicit none
1214
1215    integer, intent(in) :: memstat
1216    integer :: mind
1217    integer :: dest,i
1218
1219! Common array sizes used for communications
1220    integer :: d3s1 = nxmax*nymax*nzmax
1221    integer :: d3s2 = nxmax*nymax*nuvzmax
1222    integer :: d2s1 = nxmax*nymax
1223    integer :: d2s2 = nxmax*nymax*maxspec
1224    integer :: d1s1 = maxwf
1225
1226!*******************************************************************************
1227
1228! At the time the send is posted, the reader process is one step further
1229! in the permutation of memstat compared with the receiving processes
1230
1231    if (memstat.ge.32) then
1232! last read was synchronous, to indices 1 and 2, 3 is free
1233      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1234           & memstat>=32 should never happen here."
1235      stop
1236    else if (memstat.eq.17) then
1237! old fields on 1,2, send 3
1238      mind=3
1239    else if (memstat.eq.18) then
1240! old fields on 2,3, send 1
1241      mind=1
1242    else if (memstat.eq.19) then
1243! old fields on 3,1, send 2
1244      mind=2
1245    else
1246      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1247           & invalid memstat"
      stop
1248    end if
1249
1250    if (mp_dev_mode) then
1251      if (mp_pid.ne.id_read) then
1252        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_async'
1253      end if
1254    end if
1255
1256    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat
1257
1258! Time for MPI communications
1259    if (mp_measure_time) call mpif_mtime('commtime',0)
1260
1261! Loop over receiving processes, initiate data sending
1262!*****************************************************
1263
1264    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
[78e62dc]1265! TODO: use mp_partgroup_np here
[8a65cb0]1266      if (dest.eq.id_read) cycle
1267      i=dest*nvar_async
[fdc0f03]1268      call MPI_Isend(uu(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1269      if (mp_ierr /= 0) goto 600
1270      i=i+1
[fdc0f03]1271      call MPI_Isend(vv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1272      if (mp_ierr /= 0) goto 600
1273      i=i+1
[fdc0f03]1274      call MPI_Isend(uupol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1275      if (mp_ierr /= 0) goto 600
1276      i=i+1
[fdc0f03]1277      call MPI_Isend(vvpol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1278      if (mp_ierr /= 0) goto 600
1279      i=i+1
[fdc0f03]1280      call MPI_Isend(ww(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1281      if (mp_ierr /= 0) goto 600
1282      i=i+1
[fdc0f03]1283      call MPI_Isend(tt(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1284      if (mp_ierr /= 0) goto 600
1285      i=i+1
[fdc0f03]1286      call MPI_Isend(rho(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1287      if (mp_ierr /= 0) goto 600
1288      i=i+1
[fdc0f03]1289      call MPI_Isend(drhodz(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1290      if (mp_ierr /= 0) goto 600
1291      i=i+1
[fdc0f03]1292      call MPI_Isend(tth(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1293      if (mp_ierr /= 0) goto 600
1294      i=i+1
[fdc0f03]1295      call MPI_Isend(qvh(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1296      if (mp_ierr /= 0) goto 600
1297      i=i+1
[fdc0f03]1298      call MPI_Isend(qv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1299      if (mp_ierr /= 0) goto 600
1300      i=i+1
[fdc0f03]1301      call MPI_Isend(pv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1302      if (mp_ierr /= 0) goto 600
1303      i=i+1
1304      call MPI_Isend(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1305      i=i+1
1306      if (mp_ierr /= 0) goto 600
1307
[fdc0f03]1308      call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1309      if (mp_ierr /= 0) goto 600
1310      i=i+1
[fdc0f03]1311      call MPI_Isend(vdep(:,:,:,mind),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1312      if (mp_ierr /= 0) goto 600
1313      i=i+1
[fdc0f03]1314      call MPI_Isend(ps(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1315      if (mp_ierr /= 0) goto 600
1316      i=i+1
[fdc0f03]1317      call MPI_Isend(sd(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1318      if (mp_ierr /= 0) goto 600
1319      i=i+1
[fdc0f03]1320      call MPI_Isend(tcc(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1321      if (mp_ierr /= 0) goto 600
1322      i=i+1
[fdc0f03]1323      call MPI_Isend(tt2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1324      if (mp_ierr /= 0) goto 600
1325      i=i+1
[fdc0f03]1326      call MPI_Isend(td2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1327      if (mp_ierr /= 0) goto 600
1328      i=i+1
[fdc0f03]1329      call MPI_Isend(lsprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1330      if (mp_ierr /= 0) goto 600
1331      i=i+1
[fdc0f03]1332      call MPI_Isend(convprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1333      if (mp_ierr /= 0) goto 600
1334      i=i+1
[fdc0f03]1335      call MPI_Isend(ustar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1336      if (mp_ierr /= 0) goto 600
1337      i=i+1
[fdc0f03]1338      call MPI_Isend(wstar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1339      if (mp_ierr /= 0) goto 600
1340      i=i+1
[fdc0f03]1341      call MPI_Isend(hmix(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1342      if (mp_ierr /= 0) goto 600
1343      i=i+1
[fdc0f03]1344      call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1345      if (mp_ierr /= 0) goto 600
1346      i=i+1
[fdc0f03]1347      call MPI_Isend(oli(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
[8a65cb0]1348      if (mp_ierr /= 0) goto 600
1349
[5f9d14a]1350! Send cloud water if it exists. Increment counter always (as on receiving end)
1351      if (readclouds) then
1352        i=i+1
[fdc0f03]1353        ! call MPI_Isend(icloud_stats(:,:,:,mind),d2s1*5,mp_sp,dest,tm1,&
[41d8574]1354        !      &MPI_COMM_WORLD,reqs(i),mp_ierr)
[fdc0f03]1355        call MPI_Isend(clw4(:,:,mind),d2s1,mp_sp,dest,tm1,&
[5f9d14a]1356             &MPI_COMM_WORLD,reqs(i),mp_ierr)
[41d8574]1357
[5f9d14a]1358        if (mp_ierr /= 0) goto 600
1359
[fdc0f03]1360        ! call MPI_Isend(clwc(:,:,:,mind),d3s1,mp_sp,dest,tm1,&
[d6a0977]1361        !      &MPI_COMM_WORLD,reqs(i),mp_ierr)
1362        ! if (mp_ierr /= 0) goto 600
1363        ! i=i+1
[5f9d14a]1364
[fdc0f03]1365        ! call MPI_Isend(ciwc(:,:,:,mind),d3s1,mp_sp,dest,tm1,&
[d6a0977]1366        !      &MPI_COMM_WORLD,reqs(i),mp_ierr)
1367        ! if (mp_ierr /= 0) goto 600
1368
1369      end if
[8a65cb0]1370    end do
1371
1372    if (mp_measure_time) call mpif_mtime('commtime',1)
1373
1374    goto 601
1375
[5f9d14a]1376600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> mp_ierr /= 0", mp_ierr
[8a65cb0]1377    stop
1378
1379601 end subroutine mpif_gf_send_vars_async
1380
1381
1382  subroutine mpif_gf_recv_vars_async(memstat)
1383!*******************************************************************************
1384! DESCRIPTION
[5f9d14a]1385!   Receive 'getfield' variables from reader process.
1386!   Called from timemanager by all processes except reader
[8a65cb0]1387!
1388! NOTE
1389!   This version uses asynchronous communications.
1390!
1391! VARIABLES
1392!   memstat -- input, used to resolve windfield index being received
1393!
[5f9d14a]1394! TODO
[8a65cb0]1395!
1396!*******************************************************************************
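! Request-handle bookkeeping (comments only, matching the code below): this
! process stores its MPI_Irecv handles in
!   reqs(mp_pid*nvar_async : mp_pid*nvar_async + nvar_async - 1)
! while the reader stores its MPI_Isend handles starting at dest*nvar_async for
! each destination, so the single MPI_Waitall over nvar_async*mp_np requests in
! mpif_gf_request completes both sides (slots not used by a given process are
! assumed to be initialised to MPI_REQUEST_NULL elsewhere in the module).
!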
1397    use com_mod
1398
1399    implicit none
1400
1401    integer, intent(in) :: memstat
1402    integer :: mind,j
1403
1404! Common array sizes used for communications
1405    integer :: d3s1 = nxmax*nymax*nzmax
1406    integer :: d3s2 = nxmax*nymax*nuvzmax
1407    integer :: d2s1 = nxmax*nymax
1408    integer :: d2s2 = nxmax*nymax*maxspec
1409    integer :: d1_size1 = maxwf
1410
1411!    integer :: d3s1,d3s2,d2s1,d2s2
1412!*******************************************************************************
1413
1414! At the time this immediate receive is posted, memstat is the state of
1415! windfield indices at the previous/current time. From this, the future
1416! state is deduced.
1417    if (memstat.eq.32) then
1418! last read was synchronous to indices 1 and 2, 3 is free
1419      mind=3
1420    else if (memstat.eq.17) then
1421! last read was asynchronous to index 3, 1 is free
1422      mind=1
1423    else if (memstat.eq.18) then
1424! last read was asynchronous to index 1, 2 is free
1425      mind=2
1426    else if (memstat.eq.19) then
1427! last read was asynchronous to index 2, 3 is free
1428      mind=3
1429    end if
1430
1431    if (mp_dev_mode) then
1432      if (mp_pid.eq.id_read) then
1433        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async'
1434      end if
1435    end if
1436
1437! Time for MPI communications
1438    if (mp_measure_time) call mpif_mtime('commtime',0)
1439
1440    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_async, memstat:', memstat
1441
1442! Initiate receiving of data
1443!***************************
1444
1445! Get MPI tags/requests for communications
1446    j=mp_pid*nvar_async
[fdc0f03]1447    call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1448         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1449    if (mp_ierr /= 0) goto 600
1450    j=j+1
[fdc0f03]1451    call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1452         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1453    if (mp_ierr /= 0) goto 600
1454    j=j+1
[fdc0f03]1455    call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1456         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1457    if (mp_ierr /= 0) goto 600
1458    j=j+1
[fdc0f03]1459    call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1460         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1461    if (mp_ierr /= 0) goto 600
1462    j=j+1
[fdc0f03]1463    call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1464         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1465    if (mp_ierr /= 0) goto 600
1466    j=j+1
[fdc0f03]1467    call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1468         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1469    if (mp_ierr /= 0) goto 600
1470    j=j+1
[fdc0f03]1471    call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1472         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1473    if (mp_ierr /= 0) goto 600
1474    j=j+1
[fdc0f03]1475    call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1476         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1477    if (mp_ierr /= 0) goto 600
1478    j=j+1
[fdc0f03]1479    call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1480         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1481    if (mp_ierr /= 0) goto 600
1482    j=j+1
[fdc0f03]1483    call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1484         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1485    if (mp_ierr /= 0) goto 600
1486    j=j+1
1487
[fdc0f03]1488    call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1489         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1490    if (mp_ierr /= 0) goto 600
[5f9d14a]1491    j=j+1
[fdc0f03]1492    call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1493         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1494    if (mp_ierr /= 0) goto 600
1495    j=j+1
[5f9d14a]1496    call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
1497         &MPI_COMM_WORLD,reqs(j),mp_ierr)   
[8a65cb0]1498    if (mp_ierr /= 0) goto 600
1499    j=j+1
1500
[fdc0f03]1501    call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1502         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1503    if (mp_ierr /= 0) goto 600
1504    j=j+1
[fdc0f03]1505    call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1506         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1507    if (mp_ierr /= 0) goto 600
1508    j=j+1
[fdc0f03]1509    call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1510         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1511    if (mp_ierr /= 0) goto 600
1512    j=j+1
[fdc0f03]1513    call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1514         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1515    if (mp_ierr /= 0) goto 600
1516    j=j+1
[fdc0f03]1517    call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1518         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1519    if (mp_ierr /= 0) goto 600
1520    j=j+1
[fdc0f03]1521    call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1522         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1523    if (mp_ierr /= 0) goto 600
1524    j=j+1
[fdc0f03]1525    call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1526         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1527    if (mp_ierr /= 0) goto 600
1528    j=j+1
[fdc0f03]1529    call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1530         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1531    if (mp_ierr /= 0) goto 600
1532    j=j+1
[fdc0f03]1533    call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1534         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1535    if (mp_ierr /= 0) goto 600
1536    j=j+1
[fdc0f03]1537    call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1538         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1539    if (mp_ierr /= 0) goto 600
1540    j=j+1
[fdc0f03]1541    call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1542         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1543    if (mp_ierr /= 0) goto 600
1544    j=j+1
[fdc0f03]1545    call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1546         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1547    if (mp_ierr /= 0) goto 600
1548    j=j+1
[fdc0f03]1549    call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1550         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1551    if (mp_ierr /= 0) goto 600
1552    j=j+1
[fdc0f03]1553    call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[5f9d14a]1554         &MPI_COMM_WORLD,reqs(j),mp_ierr)
[8a65cb0]1555    if (mp_ierr /= 0) goto 600
1556
1557
[5f9d14a]1558! Post receive request for clwc. These data may not be sent, in which case the request must be cancelled.
1559! For now, assume that either all time steps carry cloud water data or none do.
1560    if (readclouds) then
1561      j=j+1
[d6a0977]1562
[fdc0f03]1563      ! call MPI_Irecv(icloud_stats(:,:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
[41d8574]1564      !      &MPI_COMM_WORLD,reqs(j),mp_ierr)
[fdc0f03]1565      call MPI_Irecv(clw4(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
[d6a0977]1566           &MPI_COMM_WORLD,reqs(j),mp_ierr)
1567      if (mp_ierr /= 0) goto 600
1568
[fdc0f03]1569      ! call MPI_Irecv(clwc(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[d6a0977]1570      !      &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1571      ! if (mp_ierr /= 0) goto 600
1572      ! j=j+1
[fdc0f03]1573      ! call MPI_Irecv(ciwc(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
[d6a0977]1574      !      &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1575      ! if (mp_ierr /= 0) goto 600
1576
[5f9d14a]1577    end if
1578
1579
[8a65cb0]1580    if (mp_measure_time) call mpif_mtime('commtime',1)
1581
1582    goto 601
1583
[5f9d14a]1584600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> MPI ERROR ", mp_ierr
[8a65cb0]1585    stop
1586
1587601 end subroutine mpif_gf_recv_vars_async
1588
1589
1590  subroutine mpif_gf_request
1591!*******************************************************************************
1592! DESCRIPTION
1593!   Check for completion of MPI_Isend/MPI_Irecv data transfer.
1594!   
1595!   
1596! NOTE
1597!   implicit synchronisation between all processes takes place here
1598!
1599!
1600!*******************************************************************************
[5f9d14a]1601    use com_mod, only: readclouds
1602
[8a65cb0]1603    implicit none
1604
[5f9d14a]1605
1606    integer :: n_req
1607    integer :: i
1608
1609!***********************************************************************
1610
1611    n_req=nvar_async*mp_np
1612
1613    if (mp_measure_time) call mpif_mtime('commtime',0)
[8a65cb0]1614
1615!    call MPI_Wait(rm1,MPI_STATUS_IGNORE,mp_ierr)
[5f9d14a]1616
1617! TODO: cancel recv request if readclouds=.false.
1618!    if (readclouds) then
1619    call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
1620!    endif
[78e62dc]1621! else
1622!   do i = 0, nvar_async*mp_np-1
1623!     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
1624!       call MPI_Cancel(reqs(i),mp_ierr)
1625!       cycle
1626!     end if
1627!     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
1628!   end do
1629! end if
[5f9d14a]1630
[8a65cb0]1631    if (mp_ierr /= 0) goto 600
1632
[5f9d14a]1633    if (mp_measure_time) call mpif_mtime('commtime',1)
[8a65cb0]1634
1635    goto 601
1636
[5f9d14a]1637600 write(*,*) "#### mpi_mod::mpif_gf_request> MPI ERROR ", mp_ierr
[8a65cb0]1638    stop
1639
1640601 end subroutine mpif_gf_request
1641
1642
1643  subroutine mpif_tm_reduce_grid
1644!***********************************************************************
1645! Collect the gridded output variables on PID 0, summing contributions from all processes.
1646!
1647! NOTE
1648!   - Older MPI implementations may lack the MPI_IN_PLACE operation.
1649!     If so, use option 1) below and adapt unc_mod accordingly (see the sketch below).
1650!
1651!***********************************************************************
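! Sketch for option 1) below, only needed if MPI_IN_PLACE is unavailable: a
! separate receive buffer with the same shape as gridunc would have to be added
! to unc_mod and allocated on the root process, e.g. (name and rank are assumed
! here, mirroring gridunc):
!
!   real, allocatable, dimension(:,:,:,:,:,:,:) :: gridunc0
!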
1652    use com_mod
1653    use unc_mod
1654    use par_mod
1655
1656    implicit none
1657
1658    integer :: grid_size2d,grid_size3d
1659    integer, parameter :: rcpt_size=maxreceptor*maxspec
1660
1661!**********************************************************************
1662    grid_size2d=numxgrid*numygrid*maxspec* &
1663         & maxpointspec_act*nclassunc*maxageclass
1664    grid_size3d=numxgrid*numygrid*numzgrid*maxspec* &
1665         & maxpointspec_act*nclassunc*maxageclass
1666
1667
1668! Time for MPI communications
1669    if (mp_measure_time) call mpif_mtime('commtime',0)
1670
1671! 1) Using a separate grid (gridunc0) for received values
[fdc0f03]1672! call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
[8a65cb0]1673!      & mp_comm_used, mp_ierr)
1674! if (mp_ierr /= 0) goto 600
1675
1676! 2) Using in-place reduction
1677    if (lroot) then
[fdc0f03]1678      call MPI_Reduce(MPI_IN_PLACE, gridunc, grid_size3d, mp_sp, MPI_SUM, id_root, &
[8a65cb0]1679           & mp_comm_used, mp_ierr)
1680      if (mp_ierr /= 0) goto 600
1681    else
[fdc0f03]1682      call MPI_Reduce(gridunc, gridunc, grid_size3d, mp_sp, MPI_SUM, id_root, &
[8a65cb0]1683           & mp_comm_used, mp_ierr)
1684    end if
1685
1686    if ((WETDEP).and.(ldirect.gt.0)) then
[6a678e3]1687      call MPI_Reduce(wetgridunc, wetgridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
[8a65cb0]1688           & mp_comm_used, mp_ierr)
1689      if (mp_ierr /= 0) goto 600
1690    end if
1691
1692    if ((DRYDEP).and.(ldirect.gt.0)) then
[6a678e3]1693      call MPI_Reduce(drygridunc, drygridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
[8a65cb0]1694           & mp_comm_used, mp_ierr)
1695      if (mp_ierr /= 0) goto 600
1696    end if
1697
1698! Receptor concentrations   
1699    if (lroot) then
[fdc0f03]1700      call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
[8a65cb0]1701           & mp_comm_used,mp_ierr)
1702      if (mp_ierr /= 0) goto 600
1703    else
[fdc0f03]1704      call MPI_Reduce(creceptor,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
[8a65cb0]1705           & mp_comm_used,mp_ierr)
1706    end if
1707
1708    if (mp_measure_time) call mpif_mtime('commtime',1)
1709
1710    goto 601
1711
1712600 write(*,*) "#### mpi_mod::mpif_tm_reduce_grid> mp_ierr /= 0", mp_ierr
1713    stop
1714
1715601 end subroutine mpif_tm_reduce_grid
1716
1717
1718  subroutine mpif_tm_reduce_grid_nest
1719!***********************************************************************
1720! Collect the nested output grids on PID 0, summing contributions from all processes.
1721!
1722! NOTE
1723!   - Compiling with 'fcheck=all' (gfortran) will cause a run-time error
1724!     here, as wetgriduncn0 is only allocated on the root process. The MPI
1725!     code is possibly valid, but gfortran's run-time checks reject it.
1726!
1727!***********************************************************************
1728    use com_mod
1729    use unc_mod
1730    use par_mod
1731
1732    implicit none
1733
1734    integer :: grid_size2d,grid_size3d
1735
1736!**********************************************************************
[5f9d14a]1737
[8a65cb0]1738    grid_size3d=numxgridn*numygridn*numzgrid*maxspec* &
1739         & maxpointspec_act*nclassunc*maxageclass
1740    grid_size2d=numxgridn*numygridn*maxspec* &
1741         & maxpointspec_act*nclassunc*maxageclass
1742
1743
1744! Time for MPI communications
1745    if (mp_measure_time) call mpif_mtime('commtime',0)
1746
1747! Using a separate grid (gridunc0) for received values, for debugging
[fdc0f03]1748! call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
[8a65cb0]1749!      & mp_comm_used, mp_ierr)
1750! if (mp_ierr /= 0) goto 600
1751
1752! Using in-place reduction
1753    if (lroot) then
[fdc0f03]1754      call MPI_Reduce(MPI_IN_PLACE, griduncn, grid_size3d, mp_sp, MPI_SUM, id_root, &
[8a65cb0]1755           & mp_comm_used, mp_ierr)
1756      if (mp_ierr /= 0) goto 600
1757    else
[fdc0f03]1758      call MPI_Reduce(griduncn, griduncn, grid_size3d, mp_sp, MPI_SUM, id_root, &
[8a65cb0]1759           & mp_comm_used, mp_ierr)
1760    end if
1761
1762    if ((WETDEP).and.(ldirect.gt.0)) then
[6a678e3]1763      call MPI_Reduce(wetgriduncn, wetgriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
[8a65cb0]1764           & mp_comm_used, mp_ierr)
1765      if (mp_ierr /= 0) goto 600
1766    end if
1767
1768    if ((DRYDEP).and.(ldirect.gt.0)) then
[6a678e3]1769      call MPI_Reduce(drygriduncn, drygriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
[8a65cb0]1770           & mp_comm_used, mp_ierr)
1771      if (mp_ierr /= 0) goto 600
1772    end if
1773
1774    if (mp_measure_time) call mpif_mtime('commtime',1)
1775
1776    goto 601
1777
1778600 write(*,*) "#### mpi_mod::mpif_tm_reduce_grid_nest> mp_ierr /= 0", mp_ierr
1779    stop
1780
1781601 end subroutine mpif_tm_reduce_grid_nest
1782
1783
1784  subroutine mpif_mtime(ident,imode)
1785!***********************************************************************
1786! Measure CPU/Wall time in various parts of the code
1787!
1788! VARIABLES
1789!   ident        character, identifies routine to measure
1790!   imode        integer, 0:start clock(s)  1: stop clock(s)
1791!
1792!***********************************************************************
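! Example usage, as elsewhere in this module (e.g. around the MPI communication
! sections):
!
!   if (mp_measure_time) call mpif_mtime('commtime',0)   ! start CPU and wall clocks
!   ! ... timed section ...
!   if (mp_measure_time) call mpif_mtime('commtime',1)   ! stop clocks, accumulate totals
!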
1793    implicit none
1794
1795    character(LEN=*), intent(in) :: ident
1796    integer, intent(in) :: imode
1797
[5f9d14a]1798!***********************************************************************
1799
[8a65cb0]1800    select case(ident)
1801
1802    case ('timemanager')
1803      if (imode.eq.0) then
1804        call cpu_time(tm_tot_beg)
1805      else
1806        call cpu_time(tm_tot_end)
1807        tm_tot_total = tm_tot_total + (tm_tot_end-tm_tot_beg)
1808      end if
1809
1810    case ('wetdepo')
1811      if (imode.eq.0) then
1812        mp_wetdepo_wtime_beg = mpi_wtime()
1813        call cpu_time(mp_wetdepo_time_beg)
1814      else
1815        mp_wetdepo_wtime_end = mpi_wtime()
1816        call cpu_time(mp_wetdepo_time_end)
1817
1818        mp_wetdepo_wtime_total = mp_wetdepo_wtime_total + &
1819             &(mp_wetdepo_wtime_end - mp_wetdepo_wtime_beg)
1820        mp_wetdepo_time_total = mp_wetdepo_time_total + &
1821             &(mp_wetdepo_time_end - mp_wetdepo_time_beg)
1822      end if
1823
1824    case ('getfields')
1825      if (imode.eq.0) then
1826        mp_getfields_wtime_beg = mpi_wtime()
1827        call cpu_time(mp_getfields_time_beg)
1828      else
1829        mp_getfields_wtime_end = mpi_wtime()
1830        call cpu_time(mp_getfields_time_end)
1831
1832        mp_getfields_wtime_total = mp_getfields_wtime_total + &
1833             &(mp_getfields_wtime_end - mp_getfields_wtime_beg)
1834        mp_getfields_time_total = mp_getfields_time_total + &
1835             &(mp_getfields_time_end - mp_getfields_time_beg)
1836      end if
1837
1838    case ('partloop1')
1839      if (imode.eq.0) then
1840        call cpu_time(tm_nploop_beg)
1841      else
1842        call cpu_time(tm_nploop_end)
1843        tm_nploop_total = tm_nploop_total + (tm_nploop_end - tm_nploop_beg)
1844      end if
1845
1846    case ('conccalc')
1847      if (imode.eq.0) then
1848        call cpu_time(mp_conccalc_time_beg)
1849      else
1850        call cpu_time(mp_conccalc_time_end)
1851        mp_conccalc_time_total = mp_conccalc_time_total + mp_conccalc_time_end - &
1852             &mp_conccalc_time_beg
1853      end if
1854    case ('rootonly')
1855      if (imode.eq.0) then
1856        call cpu_time(mp_root_time_beg)
1857        mp_root_wtime_beg = mpi_wtime()
1858      else
1859        call cpu_time(mp_root_time_end)
1860        mp_root_wtime_end = mpi_wtime()
1861        mp_root_time_total = mp_root_time_total + &
1862             &(mp_root_time_end - mp_root_time_beg)
1863        mp_root_wtime_total = mp_root_wtime_total + &
1864             &(mp_root_wtime_end - mp_root_wtime_beg)
1865      end if
1866
1867    case ('iotime')
1868      if (imode.eq.0) then
1869        mp_io_wtime_beg = mpi_wtime()
1870        call cpu_time(mp_io_time_beg)
1871      else
1872        mp_io_wtime_end = mpi_wtime()
1873        call cpu_time(mp_io_time_end)
1874
1875        mp_io_wtime_total = mp_io_wtime_total + (mp_io_wtime_end - &
1876             & mp_io_wtime_beg)
1877        mp_io_time_total = mp_io_time_total + (mp_io_time_end - &
1878             & mp_io_time_beg)
1879      end if
1880
[78e62dc]1881    case ('verttransform')
1882      if (imode.eq.0) then
1883        mp_vt_wtime_beg = mpi_wtime()
1884        call cpu_time(mp_vt_time_beg)
1885      else
1886        mp_vt_wtime_end = mpi_wtime()
1887        call cpu_time(mp_vt_time_end)
1888
1889        mp_vt_wtime_total = mp_vt_wtime_total + (mp_vt_wtime_end - &
1890             & mp_vt_wtime_beg)
1891        mp_vt_time_total = mp_vt_time_total + (mp_vt_time_end - &
1892             & mp_vt_time_beg)
1893      end if
1894
[8a65cb0]1895    case ('readwind')
1896      if (imode.eq.0) then
1897        call cpu_time(mp_readwind_time_beg)
1898        mp_readwind_wtime_beg = mpi_wtime()
1899      else
1900        call cpu_time(mp_readwind_time_end)
1901        mp_readwind_wtime_end = mpi_wtime()
1902
1903        mp_readwind_time_total = mp_readwind_time_total + &
1904             &(mp_readwind_time_end - mp_readwind_time_beg)
1905        mp_readwind_wtime_total = mp_readwind_wtime_total + &
1906             &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
1907      end if
1908
1909    case ('commtime')
1910      if (imode.eq.0) then
1911        call cpu_time(mp_comm_time_beg)
1912        mp_comm_wtime_beg = mpi_wtime()
1913      else
1914        call cpu_time(mp_comm_time_end)
1915        mp_comm_wtime_end = mpi_wtime()
1916        mp_comm_time_total = mp_comm_time_total + &
1917             &(mp_comm_time_end - mp_comm_time_beg)
1918        mp_comm_wtime_total = mp_comm_wtime_total + &
1919             &(mp_comm_wtime_end - mp_comm_wtime_beg)
1920      end if
1921
1922    case ('flexpart')
1923      if (imode.eq.0) then
1924        mp_total_wtime_beg=mpi_wtime()
1925      else
1926        mp_total_wtime_end=mpi_wtime()
1927        mp_total_wtime_total = mp_total_wtime_end-mp_total_wtime_beg
1928      end if
1929
1930    case default
1931      write(*,*) 'mpi_mod::mpif_mtime> WARNING: unknown case identifier'
1932
1933    end select
1934
1935
1936  end subroutine mpif_mtime
1937
1938
1939  subroutine mpif_finalize
1940!***********************************************************************
1941! Finalize MPI
1942! Optionally print detailed time diagnostics
1943!***********************************************************************
1944    implicit none
1945
1946    integer :: ip,j,r
1947
[5f9d14a]1948!***********************************************************************
1949
[78e62dc]1950    IF (mp_measure_time) THEN
[8a65cb0]1951      do ip=0, mp_np-1
1952        call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
1953
1954        if (ip == mp_pid) then
1955          write(*,FMT='(72("#"))')
1956          write(*,FMT='(A60,I3)') 'STATISTICS FOR MPI PROCESS:', mp_pid
1957          if (ip == 0) then
1958            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR ROOT-ONLY (PID 0) &
1959                 &SECTIONS: ', mp_root_time_total
1960            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ROOT-ONLY (PID 0) &
1961                 &SECTIONS:', mp_root_wtime_total
1962          end if
1963          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FLEXPART: ' &
1964               &, mp_total_wtime_total
1965          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI COMMUNICATIONS:',&
1966               & mp_comm_time_total
1967          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI COMMUNICATIONS:',&
1968               & mp_comm_wtime_total
1969          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI BARRIERS:',&
1970               & mp_barrier_time_total
1971          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI BARRIERS:',&
1972               & mp_barrier_wtime_total
1973          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER:',&
1974               & tm_tot_total
1975          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER/NUMPART LOOP:',&
1976               & tm_nploop_total
1977          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ADVANCE:',&
1978               & mp_advance_wtime_total
1979          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR GETFIELDS:',&
1980               & mp_getfields_wtime_total
1981          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
1982               & mp_getfields_time_total
1983          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
1984               & mp_readwind_wtime_total
1985          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
1986               & mp_readwind_time_total
1987          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
1988               & mp_io_wtime_total
1989          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR FILE IO:',&
1990               & mp_io_time_total
1991          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR WETDEPO:',&
1992               & mp_wetdepo_wtime_total
1993          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR WETDEPO:',&
1994               & mp_wetdepo_time_total
1995          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',&
1996               & mp_conccalc_time_total
[78e62dc]1997          ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
1998          !      & mp_vt_wtime_total
1999          ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
2000          !      & mp_vt_time_total
[8a65cb0]2001! NB: 'call flush()' is a compiler extension (e.g. gfortran); the Fortran 2003 FLUSH statement is the standard alternative
2002          call flush()
2003        end if
2004      end do
2005    end if
2006
[78e62dc]2007! This call to barrier is for correctly formatting output
2008    call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
2009
[f55fdce]2010    if (lroot.and.mp_measure_time) then
[78e62dc]2011      write(*,FMT='(72("#"))')
2012      WRITE(*,*) "To turn off output of time measurements, set "
2013      WRITE(*,*) "    mp_measure_time=.false."
2014      WRITE(*,*) "in file mpi_mod.f90"
2015      write(*,FMT='(72("#"))')
2016    end if
2017
[8a65cb0]2018! j=mp_pid*nvar_async
2019! In the implementation with 3 fields, the processes may have posted
2020! MPI_Irecv requests that should be cancelled here
[5f9d14a]2021!! TODO:
[78e62dc]2022! if (.not.lmp_sync) then
2023!   r=mp_pid*nvar_async
2024!   do j=r,r+nvar_async-1
2025!     call MPI_Cancel(j,mp_ierr)
2026!     if (mp_ierr /= 0) write(*,*) '#### mpif_finalize::MPI_Cancel> ERROR ####'
2027!   end do
2028! end if
[8a65cb0]2029
2030    call MPI_FINALIZE(mp_ierr)
2031    if (mp_ierr /= 0) then
[5f9d14a]2032      write(*,*) '#### mpif_finalize::MPI_FINALIZE> MPI ERROR ', mp_ierr, ' ####'
[8a65cb0]2033      stop
2034    end if
2035
2036
[78e62dc]2037    end subroutine mpif_finalize
[8a65cb0]2038
2039
[78e62dc]2040    subroutine get_lun(my_lun)
[8a65cb0]2041!***********************************************************************
2042! get_lun:
2043!   Starting from 100, get next free logical unit number
2044!***********************************************************************
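! Example usage (as in write_data_dbg below); the file name is illustrative only:
!
!   call get_lun(dat_lun)
!   open(UNIT=dat_lun, FILE='debug_dump.dat', ACTION='WRITE', &
!        FORM='UNFORMATTED', STATUS='REPLACE')
!
! On compilers supporting Fortran 2008, OPEN with NEWUNIT= is an alternative to
! this routine.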
2045
[78e62dc]2046      implicit none
[8a65cb0]2047
[78e62dc]2048      integer, intent(inout) :: my_lun
2049      integer, save :: free_lun=100
2050      logical :: exists, iopen
[8a65cb0]2051
[5f9d14a]2052!***********************************************************************
2053
[78e62dc]2054      loop1: do
2055        inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
2056        if (exists .and. .not.iopen) exit loop1
2057        free_lun = free_lun+1
2058      end do loop1
2059      my_lun = free_lun
[8a65cb0]2060
[78e62dc]2061    end subroutine get_lun
[8a65cb0]2062
2063
[78e62dc]2064    subroutine write_data_dbg(array_in, array_name, tstep, ident)
[8a65cb0]2065!***********************************************************************
[78e62dc]2066! Write one-dimensional arrays to file (for debugging purposes)
[8a65cb0]2067!***********************************************************************
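! Hypothetical example call (array and identifier names are illustrative only):
!
!   call write_data_dbg(xm(1:np), 'xmass', itime, 'afteradvance')
!
! With e.g. 4 MPI processes and itime=3600 this writes the unformatted file
! 'xmass-00003600-afteradvance-np04.dat' in the current directory.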
[78e62dc]2068      implicit none
[8a65cb0]2069
[78e62dc]2070      real, intent(in), dimension(:) :: array_in
2071      integer, intent(in) :: tstep
2072      integer :: lios
2073      character(LEN=*), intent(in) :: ident, array_name
[8a65cb0]2074
[78e62dc]2075      character(LEN=8) :: c_ts
2076      character(LEN=40) :: fn_1, fn_2
[8a65cb0]2077
[5f9d14a]2078!***********************************************************************
2079
[78e62dc]2080      write(c_ts, FMT='(I8.8,BZ)') tstep
2081      fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
2082      write(c_ts, FMT='(I2.2,BZ)') mp_np
2083      fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'
[8a65cb0]2084
[78e62dc]2085      call get_lun(dat_lun)
2086      open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
2087           FORM='UNFORMATTED', STATUS='REPLACE')
2088      write(UNIT=dat_lun, IOSTAT=lios) array_in
2089      close(UNIT=dat_lun)
[8a65cb0]2090
[78e62dc]2091    end subroutine write_data_dbg
[8a65cb0]2092
2093
[6b22af9]2094  subroutine set_fields_synthetic()
2095!*******************************************************************************
2096! DESCRIPTION
2097!   Set all meteorological fields to synthetic (usually constant/homogeneous)
2098!   values.
2099!   Used for validation and error-checking
2100!
2101! NOTE
2102!   No MPI communication is performed; each process simply overwrites its own fields.
2103!
2104! VARIABLES
2105!   
2106!
2107! TODO
2108!
2109!*******************************************************************************
2110    use com_mod
2111
2112    implicit none
2113
[d6a0977]2114    integer :: li=1, ui=2 ! wfmem indices (i.e., operate on the entire field)
2115
2116    if (.not.lmp_sync) ui=3
[6b22af9]2117
2118
2119! Variables transferred at initialization only
2120!*********************************************
2121!    readclouds=readclouds_
2122    oro(:,:)=0.0
2123    excessoro(:,:)=0.0
2124    lsm(:,:)=0.0
2125    xlanduse(:,:,:)=0.0
2126!    wftime
2127!    numbwf
2128!    nmixz
2129!    height
2130
2131! Time-varying fields:
2132    uu(:,:,:,li:ui) = 10.0
2133    vv(:,:,:,li:ui) = 0.0
2134    uupol(:,:,:,li:ui) = 10.0 ! TODO check if ok
2135    vvpol(:,:,:,li:ui)=0.0
2136    ww(:,:,:,li:ui)=0.
2137    tt(:,:,:,li:ui)=300.
2138    rho(:,:,:,li:ui)=1.3
2139    drhodz(:,:,:,li:ui)=.0
2140    tth(:,:,:,li:ui)=0.0
2141    qvh(:,:,:,li:ui)=1.0
2142    qv(:,:,:,li:ui)=1.0
2143
2144    pv(:,:,:,li:ui)=1.0
2145    clouds(:,:,:,li:ui)=0.0
2146
2147    clwc(:,:,:,li:ui)=0.0
2148    ciwc(:,:,:,li:ui)=0.0
2149 
2150! 2D fields
2151
2152    cloudsh(:,:,li:ui)=0.0
2153    vdep(:,:,:,li:ui)=0.0
2154    ps(:,:,:,li:ui)=1.0e5
2155    sd(:,:,:,li:ui)=0.0
2156    tcc(:,:,:,li:ui)=0.0
2157    tt2(:,:,:,li:ui)=300.
2158    td2(:,:,:,li:ui)=250.
2159    lsprec(:,:,:,li:ui)=0.0
2160    convprec(:,:,:,li:ui)=0.0
2161    ustar(:,:,:,li:ui)=1.0
2162    wstar(:,:,:,li:ui)=1.0
2163    hmix(:,:,:,li:ui)=10000.
2164    tropopause(:,:,:,li:ui)=10000.
2165    oli(:,:,:,li:ui)=0.01
2166    z0=1.0
2167 
2168  end subroutine set_fields_synthetic
2169
[8a65cb0]2170end module mpi_mod