source: flexpart.git/src/mpi_mod.f90 @ 8ed5f11

Last change on this file: 8ed5f11, checked in by Espen Sollum ATMOS <eso@…>, 8 years ago

Minor cosmetic changes

1!**********************************************************************
2! Copyright 1998,1999,2000,2001,2002,2005,2007,2008,2009,2010         *
3! Andreas Stohl, Petra Seibert, A. Frank, Gerhard Wotawa,             *
4! Caroline Forster, Sabine Eckhardt, John Burkhart, Harald Sodemann   *
5!                                                                     *
6! This file is part of FLEXPART.                                      *
7!                                                                     *
8! FLEXPART is free software: you can redistribute it and/or modify    *
9! it under the terms of the GNU General Public License as published by*
10! the Free Software Foundation, either version 3 of the License, or   *
11! (at your option) any later version.                                 *
12!                                                                     *
13! FLEXPART is distributed in the hope that it will be useful,         *
14! but WITHOUT ANY WARRANTY; without even the implied warranty of      *
15! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the       *
16! GNU General Public License for more details.                        *
17!                                                                     *
18! You should have received a copy of the GNU General Public License   *
19! along with FLEXPART.  If not, see <http://www.gnu.org/licenses/>.   *
20!**********************************************************************
21
22module mpi_mod
23
24!*****************************************************************************
25!                                                                            *
26!  DESCRIPTION                                                               *
27!    This module contains subroutines and common variables used for the      *
28!    MPI parallelization of FLEXPART.                                        *
29!                                                                            *
30!  NOTE                                                                      *
31!    Depending on the MPI library installed on your system (e.g. mpich2,     *
32!    OpenMPI) you may need to choose below in this file between              *
33!      use mpi                                                               *
34!    (if the MPI library comes with the file 'mpi.mod'); or                  *
35!      include 'mpif.h'                                                      *
36!                                                                            *
37!                                                                            *
38!*****************************************************************************
39!                                                                            *
40! Variables:                                                                 *
41!                                                                            *
42! mp_ierr                 MPI error code                                     *
43! mp_np                   Number of MPI processes                            *
44! mp_pid                  Process ID of each MPI process                     *
45! mp_seed                 Parameter for random number seed                   *
46! read_grp_min            Minimum number of processes at which one will be   *
47!                         used as reader                                     *
48! numpart_mpi,            Number of particles per node                       *
49! maxpart_mpi                                                                *
50! mp_partid               MPI process ID for particle calculation            *
51! mp_partgroup_           Refers to the subset of processors performing      *
52!                         loops over particles. Will be all processes        *
53!                         unless a dedicated process runs getfields/readwind *
54! lmp_sync                If .false., use asynchronous MPI                   *
55! mp_cp                   Real precision to use for deposition fields        *
56!                                                                            *
57!                                                                            *
58!                                                                            *
59!                                                                            *
60!*****************************************************************************
61
62  use mpi
63  use par_mod, only: dp,sp
64  use com_mod, only: lroot
65
66  implicit none
67
68!  include 'mpif.h'
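! Note: if the MPI library on your system does not provide 'mpi.mod',
! comment out the 'use mpi' statement above and use the include line
! instead (see the NOTE in the module header).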
69
70  public
71
73! Set aside a process for reading windfields if using at least this many processes
73!==================================================
74  integer, parameter, private :: read_grp_min=4
75!==================================================
76
77! Variables for each MPI process in the world group
78  integer :: mp_ierr, mp_np, mp_pid, mp_partid
79  integer, private :: world_group_id
80
81! Variables for MPI processes in the 'particle' group
82  integer, allocatable, dimension(:) :: mp_partgroup_rank
83  integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
84
85  integer :: mp_seed=0
86  integer, parameter :: mp_sp=MPI_REAL4, mp_dp=MPI_REAL8
87  integer :: mp_cp
88  integer, parameter :: id_root=0 ! master process
89
90! MPI tags/requests for send/receive operation
91  integer :: tm1
92  integer, parameter :: nvar_async=26 !27 !29 :DBG:
93!integer, dimension(:), allocatable :: tags
94  integer, dimension(:), allocatable :: reqs
95
96
97  integer :: id_read   ! readwind/getfield process
98  integer :: numpart_mpi,maxpart_mpi ! number of particles per node
99  integer :: tot_numpart=0
100  integer :: mp_comm_used ! global or subgroup communicator
101
102  logical :: lmpreader=.false. ! is set to true for reading process(es) only.
103  logical :: lmp_use_reader=.false. ! true if separate readwind process is used
104
105! true if only using synchronous MPI send/recv:
106! If setting this to .false., numwfmem must be set to 3
107!===============================================================================
108  logical :: lmp_sync=.true.
109!===============================================================================
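! Example: to use the asynchronous transfer scheme with a dedicated reader
! process, set lmp_sync=.false. here and numwfmem=3 in par_mod; mpif_init
! below checks that the two settings are consistent.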
110
111! mp_dbg_mode       Used for debugging MPI.
112! mp_dev_mode       various checks related to debugging the parallel code
113! mp_dbg_out        write some arrays to data file for debugging
114! mp_measure_time   Measure cpu/wall time, write out at end of run
115! mp_time_barrier   Measure MPI barrier time
116! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
117!                   to standard output (this does *not* otherwise affect the simulation)
118  logical, parameter :: mp_dbg_mode = .false.
119  logical, parameter :: mp_dev_mode = .false.
120  logical, parameter :: mp_dbg_out = .false.
121  logical, parameter :: mp_time_barrier=.true.
122  logical, parameter :: mp_measure_time=.true.
123  logical, parameter :: mp_exact_numpart=.true.
124
125! for measuring CPU/Wall time
126  real(sp),private :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.
127  real(dp),private :: mp_comm_wtime_beg, mp_comm_wtime_end, mp_comm_wtime_total=0.
128  real(sp),private :: mp_root_time_beg, mp_root_time_end, mp_root_time_total=0.
129  real(dp),private :: mp_root_wtime_beg, mp_root_wtime_end, mp_root_wtime_total=0.
130  real(sp),private :: mp_barrier_time_beg, mp_barrier_time_end, mp_barrier_time_total=0.
131  real(dp),private :: mp_barrier_wtime_beg, mp_barrier_wtime_end, mp_barrier_wtime_total=0.
132  real(sp),private :: tm_nploop_beg, tm_nploop_end, tm_nploop_total=0.
133  real(sp),private :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
134  real(dp),private :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
135  real(sp),private :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
136  real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
137  real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
138  real(dp),private :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
139  real(sp),private :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
140  real(dp),private :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.
141  real(sp),private :: mp_wetdepo_time_beg, mp_wetdepo_time_end, mp_wetdepo_time_total=0.
142  real(dp),private :: mp_advance_wtime_beg, mp_advance_wtime_end, mp_advance_wtime_total=0.
143  real(dp),private :: mp_conccalc_time_beg, mp_conccalc_time_end, mp_conccalc_time_total=0.
144  real(dp),private :: mp_total_wtime_beg, mp_total_wtime_end, mp_total_wtime_total=0.
145  real(dp),private :: mp_vt_wtime_beg, mp_vt_wtime_end, mp_vt_wtime_total
146  real(sp),private :: mp_vt_time_beg, mp_vt_time_end, mp_vt_time_total
147
148! dat_lun           logical unit number for i/o
149  integer, private :: dat_lun
150
151contains
152
153  subroutine mpif_init
154!***********************************************************************
155! DESCRIPTION
156!   Initialize MPI.
157!   
158!   Create the global communicator MPI_COMM_WORLD
159!   If using a separate MPI process for getfields/readwind, a subgroup
160!   is created for the other processes.
161!
162! VARIABLES
163!   mpi_mode    default 0, set to 2/3 if running MPI version
164!   mp_np       number of running processes, decided at run-time
165!***********************************************************************
166    use par_mod, only: maxpart, numwfmem, dep_prec
167    use com_mod, only: mpi_mode
168
169    implicit none
170
171    integer :: i,j,s,addmaxpart=0
172
173! Each process gets an ID (mp_pid) in the range 0,..,mp_np-1
174    call MPI_INIT(mp_ierr)
175    if (mp_ierr /= 0) goto 100
176    call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)
177    if (mp_ierr /= 0) goto 100
178    call MPI_COMM_SIZE(MPI_COMM_WORLD, mp_np, mp_ierr)
179    if (mp_ierr /= 0) goto 100
180
181
182! Variable mpi_mode is used to handle subroutines common to parallel/serial version
183    if (lmp_sync) then
184      mpi_mode=2 ! hold 2 windfields in memory
185    else
186      mpi_mode=3 ! hold 3 windfields in memory
187    end if
188
189    if (mp_pid.ne.0) then
190      lroot = .false.
191    end if
192
193! Set MPI precision to use for transferring deposition fields
194!************************************************************
195    if (dep_prec==dp) then
196      mp_cp = MPI_REAL8
197      ! TODO: write info message for serial version as well
198      if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields'
199    else if (dep_prec==sp) then
200      mp_cp = MPI_REAL4
201      if (lroot.and.verbosity>0) write(*,*) 'Using single precision for deposition fields'
202    else
203      write(*,*) 'ERROR: something went wrong setting MPI real precision'
204      stop
205    end if
206
207! Check for sensible combination of parameters
208!*********************************************
209
210    if (.not.lmp_sync.and.numwfmem.ne.3.and.lroot) then
211      write(*,FMT='(80("#"))')
212      write(*,*) '#### mpi_mod::mpif_init> ERROR: ', &
213           & 'numwfmem must be set to 3 for asynchronous reading ####'
214      write(*,FMT='(80("#"))')
215      stop
216    else if (lmp_sync.and.numwfmem.ne.2.and.lroot) then
217      write(*,FMT='(80("#"))')
218      write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
219           & 'numwfmem should be set to 2 for synchronous'
220      write(*,*) ' reading. Results will still be valid, but an unnecessary '
221      write(*,*) 'amount of memory is being allocated.'
222      write(*,FMT='(80("#"))')
223! Force "synchronized" version if all processes will call getfields
224    else if ((.not.lmp_sync.and.mp_np.lt.read_grp_min).or.(mp_np.eq.1)) then
225      if (lroot) then
226        write(*,FMT='(80("#"))')
227        write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
228             & 'all procs call getfields. Setting lmp_sync=.true.'
229        write(*,FMT='(80("#"))')
230      end if
231      lmp_sync=.true. ! :DBG: eso fix this...
232    end if
233
234! TODO: Add more warnings for unimplemented flexpart features
235
236! Set ID of process that calls getfield/readwind.
237! Using the last process in the group ensures statistically identical results
238! to running with one process less but without a separate read process
239!**********************************************************************
240
241!    id_read = min(mp_np-1, 1)
242    id_read = mp_np-1
243
244    if (mp_pid.eq.id_read) lmpreader=.true.
245
246    call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)
247
248! Create a MPI group/communicator that will calculate trajectories.
249! Skip this step if program is run with only a few processes
250!************************************************************************
251    allocate(mp_partgroup_rank(0:mp_np-2))
252
253! This allows checking for allocation of arrays when no subgroup is used
254    mp_partgroup_pid=0
255
256    if (read_grp_min.lt.2) then
257      write(*,*) '#### mpi_mod::mpif_init> ERROR ####', &
258           & 'read_grp_min should be at least 2. Exiting'
259      stop
260    end if
261
262    if (mp_np.ge.read_grp_min) then
263      lmp_use_reader = .true.
264
265! Map the subgroup IDs 0,1,2,...,mp_np-2 to world ranks, skipping the reader process
266      j=-1
267      do i=0, mp_np-2 !loop over all (subgroup) IDs
268        j=j+1
269        if (i.eq.id_read) then
270          j=j+1
271        end if
272        mp_partgroup_rank(i) = j
273      end do
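! Illustration: with mp_np=4 and id_read=3 (the last rank, as set above)
! this loop gives mp_partgroup_rank = (/0,1,2/); if id_read were 1, it
! would give (/0,2,3/), i.e. the reader rank is skipped.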
274
275      call MPI_Group_incl (world_group_id, mp_np-1, mp_partgroup_rank, &
276           &mp_partgroup_pid, mp_ierr)
277      if (mp_ierr /= 0) goto 100
278      call MPI_Comm_create (MPI_COMM_WORLD, mp_partgroup_pid, mp_partgroup_comm, mp_ierr)
279      if (mp_ierr /= 0) goto 100
280
281      if (mp_pid.ne.id_read) then
282        call MPI_Comm_rank (mp_partgroup_comm, mp_partgroup_pid, mp_ierr)
283        if (mp_ierr /= 0) goto 100
284
285! Get size of new group (=mp_np-1)
286        call MPI_COMM_SIZE(mp_partgroup_comm, mp_partgroup_np, mp_ierr)
287        if (mp_ierr /= 0) goto 100
288        if (mp_partgroup_np.ne.mp_np-1) then
289          write(*,*)  '#### mpi_mod:: mpif_init> ERROR ####&
290               & mp_partgroup_np.ne.mp_np-1'
291          stop
292        endif
293
294      else
295        mp_partgroup_pid = -1
296      end if
297    end if
298
299    if (lmp_use_reader) then
300      mp_comm_used = mp_partgroup_comm
301      mp_partid = mp_partgroup_pid
302      mp_partgroup_np=mp_np-1
303    else
304      mp_comm_used = MPI_COMM_WORLD
305      mp_partgroup_np = mp_np
306      mp_partid = mp_pid
307    end if
308
309! Set maxpart per process
310    if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1
311    maxpart_mpi=int(maxpart/mp_partgroup_np)+addmaxpart
312
313! Do not allocate particle data arrays for readwind process
314    if (lmpreader.and.lmp_use_reader) then
315      maxpart_mpi=0
316    end if
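! Illustration (hypothetical numbers): with maxpart=1000003 and
! mp_partgroup_np=4, mod(maxpart,4)=3, so processes with mp_partid 0-2 get
! maxpart_mpi=250001, mp_partid 3 gets 250000, and a dedicated reader
! process gets maxpart_mpi=0.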
317
318! Set random seed for each non-root process
319    if (mp_pid.gt.0) then
320!    if (mp_pid.ge.0) then
321!      call system_clock(s)
322      s = 244
323      mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
324    end if
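! Illustration: with the fixed s=244 above, the process with mp_pid=1 gets
! mp_seed = -abs(mod(244*181*(1-83)*359, 104729)) = -98755, so the
! non-root processes start from different (negative) seed values.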
325    if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed
326    if (mp_dbg_mode) then
327! :DBG: For debugging, set all seeds to 0 and maxrand to e.g. 20
328      mp_seed=0
329      if (lroot) write(*,*) 'MPI: setting seed=0'
330    end if
331
332! Allocate request array for asynchronous MPI
333    if (.not.lmp_sync) then
334      allocate(reqs(0:nvar_async*mp_np-1))
335      reqs(:)=MPI_REQUEST_NULL
336    else
337      allocate(reqs(0:1))
338      reqs(:)=MPI_REQUEST_NULL
339    end if
340
341    goto 101
342
343100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
344    stop
345
346101 end subroutine mpif_init
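! Illustration: when started with at least read_grp_min=4 MPI processes,
! the last rank (id_read=mp_np-1) becomes the getfields/readwind process
! and the remaining ranks form the particle subgroup; with fewer processes
! all ranks call getfields themselves and lmp_sync is forced to .true.
! where necessary.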
347
348
349  subroutine mpif_mpi_barrier
350!***********************************************************************
351! Set MPI barrier (all processes are synchronized here), with the
352! possible exception of a separate 'readwind' process.
353! Optionally measure cpu/wall time.
354!
355!***********************************************************************
356    implicit none
357
358    if (mp_time_barrier) then
359      call cpu_time(mp_barrier_time_beg)
360      mp_barrier_wtime_beg = mpi_wtime()
361    endif
362
363    call MPI_BARRIER(mp_comm_used, mp_ierr)
364    if (mp_ierr /= 0) goto 100
365
366    if (mp_time_barrier) then
367      call cpu_time(mp_barrier_time_end)
368      mp_barrier_wtime_end = mpi_wtime()
369
370      mp_barrier_time_total=mp_barrier_time_total+&
371           &(mp_barrier_time_end-mp_barrier_time_beg)
372      mp_barrier_wtime_total=mp_barrier_wtime_total+&
373           &(mp_barrier_wtime_end-mp_barrier_wtime_beg)
374    end if
375
376    goto 101
377
378100 write(*,*) '#### mpi_mod::mpif_mpi_barrier> ERROR ####', mp_ierr
379    stop
380
381101 end subroutine mpif_mpi_barrier
382
383
384  subroutine mpif_com_mod_allocate
385!*******************************************************************************   
386! Dynamic allocation of arrays (in serial code these have size maxpart)
387!
388!*******************************************************************************
389    use com_mod
390
391    implicit none
392
393    integer :: array_size
394
395    array_size = maxpart_mpi
396
397    allocate(itra1(array_size),npoint(array_size),&
398         & nclass(array_size),idt(array_size),&
399         & itramem(array_size),itrasplit(array_size),&
400         & xtra1(array_size),ytra1(array_size),&
401         & ztra1(array_size),xmass1(array_size, maxspec))
402
403    allocate(uap(array_size),ucp(array_size),&
404         & uzp(array_size),us(array_size),&
405         & vs(array_size),&
406         & ws(array_size),cbt(array_size))
407
408
409  end subroutine mpif_com_mod_allocate
410
411
412  subroutine mpif_tm_send_dims
413!***********************************************************************
414! Distribute array dimensions from pid 0 to all processes.
415! numpart_mpi/numpart is sent to allow dynamic allocation
416!
417! Currently not in use.
418!
419! Variables of similar type (integer, real) are placed in an array
420! and sent collectively, to avoid the overhead associated with individual
421! MPI send/receives
422!
423!
424!***********************************************************************
425    use com_mod
426
427    implicit none
428
429    integer :: add_part=0
430
431    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
432
433! MPI subgroup does the particle-loop
434    if (lmp_use_reader) then
435      if (mod(numpart,mp_partgroup_np).ne.0) add_part=1
436      numpart_mpi=int(numpart/mp_partgroup_np)+add_part
437    else
438      if (mod(numpart,mp_np).ne.0) add_part=1
439      numpart_mpi=int(numpart/mp_np)+add_part
440    end if
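! Illustration (hypothetical numbers): numpart=1000 distributed over
! mp_partgroup_np=3 processes gives add_part=1 and numpart_mpi=334, i.e.
! the per-process count is rounded up so that 3*334 >= 1000.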
441
442
443! redefine numpart as 'numpart per process' throughout the code
444!**************************************************************
445    numpart = numpart_mpi
446
447  end subroutine mpif_tm_send_dims
448
449
450  subroutine mpif_tm_send_vars
451!***********************************************************************
452! Distribute particle variables from pid 0 to all processes.
453! Called from timemanager
454! *NOT IN USE* at the moment, but can be useful for debugging
455!
456!***********************************************************************
457    use com_mod
458
459    implicit none
460
461    integer :: i
462
463! Time for MPI communications
464!****************************
465    if (mp_measure_time) call mpif_mtime('commtime',0)
466
467! Distribute variables, send from pid 0 to other processes
468!**********************************************************************
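! On the root, MPI_IN_PLACE is passed as the receive buffer, so the root's
! own portion of each scattered array stays where it is instead of being
! copied.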
469
470! integers
471    if (lroot) then
472      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
473           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
474      if (mp_ierr /= 0) goto 600
475      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
476           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
477      if (mp_ierr /= 0) goto 600
478      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
479           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
480      if (mp_ierr /= 0) goto 600
481      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
482           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
483      if (mp_ierr /= 0) goto 600
484      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
485           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
486      if (mp_ierr /= 0) goto 600
487
488! int2
489      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
490           &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
491      if (mp_ierr /= 0) goto 600
492
493! real
494      call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,&
495           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
496      if (mp_ierr /= 0) goto 600
497      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
498           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
499      if (mp_ierr /= 0) goto 600
500      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
501           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
502      if (mp_ierr /= 0) goto 600
503
504      call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,&
505           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
506      if (mp_ierr /= 0) goto 600
507      call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,&
508           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
509      if (mp_ierr /= 0) goto 600
510      call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,&
511           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
512      if (mp_ierr /= 0) goto 600
513
514      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
515           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
516      if (mp_ierr /= 0) goto 600
517      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
518           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
519      if (mp_ierr /= 0) goto 600
520      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,&
521           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
522      if (mp_ierr /= 0) goto 600
523
524      do i=1,nspec
525        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,&
526             &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
527        if (mp_ierr /= 0) goto 600
528      end do
529
530    else ! (mp_pid >= 1)
531! integers
532      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
533           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
534      if (mp_ierr /= 0) goto 600
535      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
536           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
537      if (mp_ierr /= 0) goto 600
538      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
539           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
540      if (mp_ierr /= 0) goto 600
541      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
542           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
543      if (mp_ierr /= 0) goto 600
544      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
545           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
546      if (mp_ierr /= 0) goto 600
547
548! int2
549      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
550           &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
551      if (mp_ierr /= 0) goto 600
552
553! reals
554      call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,&
555           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
556      if (mp_ierr /= 0) goto 600
557      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,&
558           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
559      if (mp_ierr /= 0) goto 600
560      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,&
561           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
562      if (mp_ierr /= 0) goto 600
563
564      call MPI_SCATTER(us,numpart_mpi,mp_sp,us,&
565           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
566      if (mp_ierr /= 0) goto 600
567      call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,&
568           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
569      if (mp_ierr /= 0) goto 600
570      call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,&
571           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
572      if (mp_ierr /= 0) goto 600
573
574      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
575           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
576      if (mp_ierr /= 0) goto 600
577      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
578           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
579      if (mp_ierr /= 0) goto 600
580      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,&
581           &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
582      if (mp_ierr /= 0) goto 600
583
584      do i=1,nspec
585        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),&
586             &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
587        if (mp_ierr /= 0) goto 600
588      end do
589
590    end if !(lroot)
591
592    if (mp_measure_time) call mpif_mtime('commtime',1)
593
594    goto 601
595
596600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
597    stop
598601 end subroutine mpif_tm_send_vars
599
600
601  subroutine mpif_tm_collect_vars
602!*******************************************************************************
603! Collect particle data to PID 0 from all processes.                           *
604!                                                                              *
605! This can be called from timemanager_mpi, before calculating                  *
606! concentrations/writing output grids.                                         *
607!                                                                              *
608! Currently *not in use*, as each process calculates concentrations            *
609! separately, but can be useful for debugging                                  *
610!                                                                              *
611! To use this routine (together with mpif_tm_send_vars) to transfer data       *
612! to the MPI root process (useful for debugging), insert code like this        *
613! (in timemanager_mpi.f90)                                                     *
614!                                                                              *
615!      if (lroot) tot_numpart=numpart ! root stores total numpart              *
616!      call mpif_tm_send_dims                                                  *
617!      if (numpart>1) then                                                     *
618!        call mpif_tm_send_vars                                                *
619!      end if                                                                  *
620!                                                                              *
621!    To collect data from all processes to root process after a parallel       *
622!    region:                                                                   *
623!                                                                              *
624!      if (numpart>0) then                                                     *
625!          if (lroot) numpart = tot_numpart                                    *
626!       call mpif_tm_collect_vars                                              *
627!      end if                                                                  *
628!                                                                              *
629! Then a section that begins with "if (lroot) ..." will behave like            *
630! serial flexpart                                                              *
631!                                                                              *
632!*******************************************************************************
633    use com_mod !, only: numpart, nspec, itra1, npoint, nclass
634
635    implicit none
636
637    integer :: i
638
639    logical :: use_mp_vars = .false.! use mp_... type allocated vars
640    logical :: use_in_place = .true.! use MPI_IN_PLACE to collect vars.
641
642
643! Time for MPI communications
644    if (mp_measure_time) call mpif_mtime('commtime',0)
645
646! Distribute variables, send from pid 0 to other processes
647!**********************************************************************
648
649    if (.not. use_mp_vars.and..not.use_in_place) then
650
651! Integers:
652      call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
653           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
654      if (mp_ierr /= 0) goto 600
655      call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
656           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
657      if (mp_ierr /= 0) goto 600
658      call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
659           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
660      if (mp_ierr /= 0) goto 600
661      call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
662           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
663      if (mp_ierr /= 0) goto 600
664      call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
665           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
666      if (mp_ierr /= 0) goto 600
667
668! int2
669      call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
670           &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
671      if (mp_ierr /= 0) goto 600
672
673! Reals:
674      call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
675           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
676      if (mp_ierr /= 0) goto 600
677      call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
678           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
679      if (mp_ierr /= 0) goto 600
680      call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
681           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
682      if (mp_ierr /= 0) goto 600
683
684      call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
685           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
686      if (mp_ierr /= 0) goto 600
687      call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
688           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
689      if (mp_ierr /= 0) goto 600
690      call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
691           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
692      if (mp_ierr /= 0) goto 600
693
694
695      call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
696           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
697      if (mp_ierr /= 0) goto 600
698      call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
699           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
700      if (mp_ierr /= 0) goto 600
701      call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
702           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
703      if (mp_ierr /= 0) goto 600
704
705      do i=1, nspec
706        call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
707             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
708        if (mp_ierr /= 0) goto 600
709
710      end do
711
712! Use variables npoint etc directly for communications
713!***********************************************************************
714    else if (use_in_place.and..not.use_mp_vars) then
715      if (lroot) then
716! Integers:
717        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, npoint, &
718             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
719        if (mp_ierr /= 0) goto 600
720        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, idt, &
721             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
722        if (mp_ierr /= 0) goto 600
723        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itra1, &
724             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
725        if (mp_ierr /= 0) goto 600
726        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, nclass, &
727             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
728        if (mp_ierr /= 0) goto 600
729        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itramem, &
730             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
731        if (mp_ierr /= 0) goto 600
732
733! int2
734        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER2, cbt, &
735             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
736        if (mp_ierr /= 0) goto 600
737
738! Reals:
739        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uap, &
740             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
741        if (mp_ierr /= 0) goto 600
742        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ucp, &
743             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
744        if (mp_ierr /= 0) goto 600
745        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uzp, &
746             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
747        if (mp_ierr /= 0) goto 600
748
749        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, us, &
750             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
751        if (mp_ierr /= 0) goto 600
752        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, vs, &
753             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
754        if (mp_ierr /= 0) goto 600
755        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ws, &
756             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
757        if (mp_ierr /= 0) goto 600
758
759
760        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, xtra1, &
761             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
762        if (mp_ierr /= 0) goto 600
763        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, ytra1, &
764             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
765        if (mp_ierr /= 0) goto 600
766        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ztra1, &
767             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
768        if (mp_ierr /= 0) goto 600
769
770        do i=1, nspec
771          call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, xmass1(:,i), &
772               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
773          if (mp_ierr /= 0) goto 600
774        end do
775
776      else ! (for mp_pid >= 1)
777
778! Integers:
779        call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
780             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
781        if (mp_ierr /= 0) goto 600
782        call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
783             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
784        if (mp_ierr /= 0) goto 600
785        call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
786             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
787        if (mp_ierr /= 0) goto 600
788        call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
789             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
790        if (mp_ierr /= 0) goto 600
791        call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
792             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
793        if (mp_ierr /= 0) goto 600
794
795! int2
796        call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
797             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
798        if (mp_ierr /= 0) goto 600
799
800! Reals:
801        call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
802             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
803        if (mp_ierr /= 0) goto 600
804        call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
805             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
806        if (mp_ierr /= 0) goto 600
807        call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
808             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
809        if (mp_ierr /= 0) goto 600
810
811        call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
812             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
813        if (mp_ierr /= 0) goto 600
814        call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
815             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
816        if (mp_ierr /= 0) goto 600
817        call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
818             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
819        if (mp_ierr /= 0) goto 600
820
821
822        call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
823             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
824        if (mp_ierr /= 0) goto 600
825        call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
826             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
827        if (mp_ierr /= 0) goto 600
828        call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
829             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
830        if (mp_ierr /= 0) goto 600
831
832        do i=1, nspec
833          call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
834               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
835          if (mp_ierr /= 0) goto 600
836        end do
837      end if ! (lroot)
838    end if !  (use_in_place)
839
840    if (mp_measure_time) call mpif_mtime('commtime',1)
841
842    goto 601
843
844600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
845    stop
846601 end subroutine mpif_tm_collect_vars
847
848
849  subroutine mpif_gf_send_vars(memstat)
850!*******************************************************************************
851! DESCRIPTION
852!   Distribute 'getfield' variables from reader process
853!
854!   Called from timemanager
855!
856! NOTE
857!   This subroutine distributes windfields read by the reader process to
858!   all other processes. Usually one set of fields is transferred, but at
859!   the first timestep, when there are no fields in memory, both are transferred.
860!   MPI_Bcast is used, so implicitly all processes are synchronized at this
861!   step
862!
863!
864!*******************************************************************************
865    use com_mod
866    use par_mod,only: numwfmem
867
868    implicit none
869
870    integer, intent(in) :: memstat
871
872! Common array sizes used for communications
873    integer, parameter :: d3_size1 = nxmax*nymax*nzmax
874    integer, parameter :: d3_size2 = nxmax*nymax*nuvzmax
875    integer, parameter :: d2_size1 = nxmax*nymax
876    integer, parameter :: d2_size2 = nxmax*nymax*maxspec
877    integer, parameter :: d2_size3 = nxmax*nymax
878    integer, parameter :: d1_size1 = maxwf
879
880    integer :: d3s1,d3s2,d2s1,d2s2
881
882! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
883    integer :: li,ui
884
885! The first time the routine is called, the unchangeable fields will be transferred
886    logical :: first_call=.true.
887
888!**********************************************************************
889
890! Sizes of arrays transferred
891    d3s1=d3_size1
892    d3s2=d3_size2
893    d2s1=d2_size1
894    d2s2=d2_size2
895
896! Decide which field(s) need to be transferred
897    if (memstat.ge.32) then ! distribute 2 fields, to 1st/2nd indices
898      li=1; ui=2
899      d3s1=2*d3_size1
900      d3s2=2*d3_size2
901      d2s1=2*d2_size1
902      d2s2=2*d2_size2
903    else if (memstat.eq.17) then ! distribute 1 field, place on 1st index
904      li=1; ui=1
905    else if (memstat.eq.18) then ! distribute 1 field, place on 2nd index
906      li=2; ui=2
907    else if (memstat.eq.19) then ! distribute 1 field, place on 3rd index
908      li=3; ui=3
909    else
910      write(*,*) '#### mpi_mod::mpif_gf_send_vars> ERROR: ', &
911           & 'wrong value of memstat, exiting ####', memstat
912      stop
913    end if
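! Example: memstat=17 broadcasts one field set into windfield index 1
! (li=ui=1) with the default message sizes, whereas memstat>=32 broadcasts
! two field sets into indices 1 and 2 with all message sizes doubled.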
914
915
916! Time for MPI communications
917    if (mp_measure_time) call mpif_mtime('commtime',0)
918
919! Send variables from getfield process (id_read) to other processes
920!**********************************************************************
921
922! The non-reader processes need to know if cloud water was read.
923    call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
924    if (mp_ierr /= 0) goto 600
925
926! Static fields/variables sent only at startup
927    if (first_call) then
928      call MPI_Bcast(oro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
929      if (mp_ierr /= 0) goto 600
930      call MPI_Bcast(excessoro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
931      if (mp_ierr /= 0) goto 600
932      call MPI_Bcast(lsm(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
933      if (mp_ierr /= 0) goto 600
934      call MPI_Bcast(xlanduse(:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
935      if (mp_ierr /= 0) goto 600
936      call MPI_Bcast(wftime,d1_size1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
937      if (mp_ierr /= 0) goto 600
938      call MPI_Bcast(numbwf,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
939      if (mp_ierr /= 0) goto 600
940      call MPI_Bcast(nmixz,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
941      if (mp_ierr /= 0) goto 600
942      call MPI_Bcast(height,nzmax,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
943      if (mp_ierr /= 0) goto 600
944
945      first_call=.false.
946    endif
947
948    call MPI_Bcast(uu(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
949    if (mp_ierr /= 0) goto 600
950    call MPI_Bcast(vv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
951    if (mp_ierr /= 0) goto 600
952    call MPI_Bcast(uupol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
953    if (mp_ierr /= 0) goto 600
954    call MPI_Bcast(vvpol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
955    if (mp_ierr /= 0) goto 600
956    call MPI_Bcast(ww(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
957    if (mp_ierr /= 0) goto 600
958    call MPI_Bcast(tt(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
959    if (mp_ierr /= 0) goto 600
960    call MPI_Bcast(rho(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
961    if (mp_ierr /= 0) goto 600
962    call MPI_Bcast(drhodz(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
963    if (mp_ierr /= 0) goto 600
964    call MPI_Bcast(tth(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
965    if (mp_ierr /= 0) goto 600
966    call MPI_Bcast(qvh(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
967    if (mp_ierr /= 0) goto 600
968    call MPI_Bcast(qv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
969    if (mp_ierr /= 0) goto 600
970    call MPI_Bcast(pv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
971    if (mp_ierr /= 0) goto 600
972    call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
973    if (mp_ierr /= 0) goto 600
974
975! cloud water/ice:
976    if (readclouds) then
977      ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
978      ! if (mp_ierr /= 0) goto 600
979      call MPI_Bcast(clw4(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
980      if (mp_ierr /= 0) goto 600
981      ! call MPI_Bcast(clwc(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
982      ! if (mp_ierr /= 0) goto 600
983      ! call MPI_Bcast(ciwc(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
984      ! if (mp_ierr /= 0) goto 600
985    end if
986
987! 2D fields
988    call MPI_Bcast(cloudsh(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
989    if (mp_ierr /= 0) goto 600
990    call MPI_Bcast(vdep(:,:,:,li:ui),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
991    if (mp_ierr /= 0) goto 600
992    call MPI_Bcast(ps(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
993    if (mp_ierr /= 0) goto 600
994    call MPI_Bcast(sd(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
995    if (mp_ierr /= 0) goto 600
996    call MPI_Bcast(tcc(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
997    if (mp_ierr /= 0) goto 600
998    call MPI_Bcast(tt2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
999    if (mp_ierr /= 0) goto 600
1000    call MPI_Bcast(td2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1001    if (mp_ierr /= 0) goto 600
1002    call MPI_Bcast(lsprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1003    if (mp_ierr /= 0) goto 600
1004    call MPI_Bcast(convprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1005    if (mp_ierr /= 0) goto 600
1006    call MPI_Bcast(ustar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1007    if (mp_ierr /= 0) goto 600
1008    call MPI_Bcast(wstar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1009    if (mp_ierr /= 0) goto 600
1010    call MPI_Bcast(hmix(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1011    if (mp_ierr /= 0) goto 600
1012    call MPI_Bcast(tropopause(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1013    if (mp_ierr /= 0) goto 600
1014    call MPI_Bcast(oli(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1015    if (mp_ierr /= 0) goto 600
1016
1017    if (mp_measure_time) call mpif_mtime('commtime',1)
1018
1019    goto 601
1020
1021600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
1022    stop
1023
1024601 end subroutine mpif_gf_send_vars
1025
1026
1027  subroutine mpif_gf_send_vars_nest(memstat)
1028!***********************************************************************
1029! DESCRIPTION
1030!   Distribute 'getfield' variables from reader process to all processes.
1031!   For nested fields
1032!
1033!   Called from timemanager
1034!
1035! NOTE
1036!   This subroutine distributes nested windfields from the reader process to
1037!   all other processes. Usually one set of fields is transferred, but at
1038!   the first timestep, when there are no fields in memory, both are transferred.
1039!   MPI_Bcast is used, so implicitly all processes are synchronized at this
1040!   step
1041!
1042!
1043!***********************************************************************
1044    use com_mod
1045    use par_mod, only: numwfmem
1046
1047    implicit none
1048
1049    integer, intent(in) :: memstat
1050    integer :: i
1051! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1052    integer :: li,ui
1053
1054! Common array sizes used for communications
1055    integer :: d3_size1 = nxmaxn*nymaxn*nzmax
1056    integer :: d3_size2 = nxmaxn*nymaxn*nuvzmax
1057    integer :: d2_size1 = nxmaxn*nymaxn
1058    integer :: d2_size2 = nxmaxn*nymaxn*maxspec
1059    integer :: d2_size3 = nxmaxn*nymaxn
1060
1061    integer :: d3s1,d3s2,d2s1,d2s2
1062
1063! The first time the routine is called, the unchangeable fields will be transferred
1064    logical :: first_call=.true.
1065
1066!**********************************************************************
1067
1068! Sizes of arrays transferred
1069    d3s1=d3_size1
1070    d3s2=d3_size2
1071    d2s1=d2_size1
1072    d2s2=d2_size2
1073
1074! Decide which field(s) need to be transferred
1075    if (memstat.ge.32) then ! distribute 2 fields
1076      li=1; ui=2
1077      d3s1=2*d3_size1
1078      d3s2=2*d3_size2
1079      d2s1=2*d2_size1
1080      d2s2=2*d2_size2
1081    else if (memstat.eq.17) then ! distribute 1 field, on 1st index
1082      li=1; ui=1
1083    else if (memstat.eq.18) then ! distribute 1 field, on 2nd index
1084      li=2; ui=2
1085    else if (memstat.eq.19) then ! distribute 1 field, on 3rd index
1086      li=3; ui=3
1087    else
1088      write(*,*) '#### mpi_mod::mpif_gf_send_vars_nest> ERROR: ', &
1089           & 'wrong value of memstat, exiting ####', memstat
1090      stop
1091    end if
1092
1093
1094! Time for MPI communications
1095    if (mp_measure_time) call mpif_mtime('commtime',0)
1096
1097! Distribute variables, send from getfield process (id_read) to other
1098! processes
1099!**********************************************************************
1100
1101! The non-reader processes need to know if cloud water was read.
1102    call MPI_Bcast(readclouds_nest,maxnests,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
1103    if (mp_ierr /= 0) goto 600
1104
1105! Static fields/variables sent only at startup
1106    if (first_call) then
1107      call MPI_Bcast(oron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1108      if (mp_ierr /= 0) goto 600
1109      call MPI_Bcast(excessoron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1110      if (mp_ierr /= 0) goto 600
1111      call MPI_Bcast(lsmn(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1112      if (mp_ierr /= 0) goto 600
1113      call MPI_Bcast(xlandusen(:,:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1114      if (mp_ierr /= 0) goto 600
1115      first_call=.false.
1116    end if
1117
1118! MPI prefers contiguous arrays for sending (else a buffer is created),
1119! hence the loop over nests
1120!**********************************************************************
1121    do i=1, numbnests
1122! 3D fields
1123      call MPI_Bcast(uun(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1124      if (mp_ierr /= 0) goto 600
1125      call MPI_Bcast(vvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1126      if (mp_ierr /= 0) goto 600
1127      call MPI_Bcast(wwn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1128      if (mp_ierr /= 0) goto 600
1129      call MPI_Bcast(ttn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1130      if (mp_ierr /= 0) goto 600
1131      call MPI_Bcast(rhon(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1132      if (mp_ierr /= 0) goto 600
1133      call MPI_Bcast(drhodzn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1134      if (mp_ierr /= 0) goto 600
1135      call MPI_Bcast(tthn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1136      if (mp_ierr /= 0) goto 600
1137      call MPI_Bcast(qvhn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1138      if (mp_ierr /= 0) goto 600
1139      call MPI_Bcast(qvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1140      if (mp_ierr /= 0) goto 600
1141      call MPI_Bcast(pvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1142      if (mp_ierr /= 0) goto 600
1143      call MPI_Bcast(cloudsn(:,:,:,li:ui,i),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1144      if (mp_ierr /= 0) goto 600
1145
1146! cloud water/ice:
1147    if (readclouds_nest(i)) then
1148      ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1149      ! if (mp_ierr /= 0) goto 600
1150      call MPI_Bcast(clw4n(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1151      if (mp_ierr /= 0) goto 600
1152    end if
1153
1154! 2D fields
1155      call MPI_Bcast(cloudshn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1156      if (mp_ierr /= 0) goto 600
1157      call MPI_Bcast(vdepn(:,:,:,li:ui,i),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1158      if (mp_ierr /= 0) goto 600
1159      call MPI_Bcast(psn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1160      if (mp_ierr /= 0) goto 600
1161      call MPI_Bcast(sdn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1162      if (mp_ierr /= 0) goto 600
1163      call MPI_Bcast(tccn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1164      if (mp_ierr /= 0) goto 600
1165      call MPI_Bcast(tt2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1166      if (mp_ierr /= 0) goto 600
1167      call MPI_Bcast(td2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1168      if (mp_ierr /= 0) goto 600
1169      call MPI_Bcast(lsprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1170      if (mp_ierr /= 0) goto 600
1171      call MPI_Bcast(convprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1172      if (mp_ierr /= 0) goto 600
1173      call MPI_Bcast(ustarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1174      if (mp_ierr /= 0) goto 600
1175      call MPI_Bcast(wstarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1176      if (mp_ierr /= 0) goto 600
1177      call MPI_Bcast(olin(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1178      if (mp_ierr /= 0) goto 600
1179      call MPI_Bcast(hmixn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1180      if (mp_ierr /= 0) goto 600
1181      call MPI_Bcast(tropopausen(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
1182      if (mp_ierr /= 0) goto 600
1183    end do
1184
1185    if (mp_measure_time) call mpif_mtime('commtime',1)
1186
1187    goto 601
1188
1189600 write(*,*) "mpi_mod> mp_ierr /= 0",mp_ierr
1190    stop
1191
1192601 end subroutine mpif_gf_send_vars_nest
1193
1194
1195  subroutine mpif_gf_send_vars_async(memstat)
1196!*******************************************************************************
1197! DESCRIPTION
1198!   Distribute 'getfield' variables from reader process to all processes.
1199!   Called from timemanager by reader process only
1200!
1201! NOTE
1202!   This version uses asynchronous sends. The newest fields are sent in the
1203!   background, so calculations can continue while
1204!   MPI communications are performed.
1205!
1206!   The way the MPI tags/requests are sequenced means that this routine must
1207!   carefully match routine 'mpif_gf_recv_vars_async'
1208!
1209! VARIABLES
1210!   memstat -- input, for resolving pointer to windfield index being read
1211!   mind    -- index where to place new fields
1212!
1213!
1214!*******************************************************************************
1215    use com_mod
1216
1217    implicit none
1218
1219    integer, intent(in) :: memstat
1220    integer :: mind
1221    integer :: dest,i
1222
1223! Common array sizes used for communications
1224    integer :: d3s1 = nxmax*nymax*nzmax
1225    integer :: d3s2 = nxmax*nymax*nuvzmax
1226    integer :: d2s1 = nxmax*nymax
1227    integer :: d2s2 = nxmax*nymax*maxspec
1228    integer :: d1s1 = maxwf
1229
1230!*******************************************************************************
1231
1232! At the time the send is posted, the reader process is one step further
1233! in the permutation of memstat compared with the receiving processes
1234
1235    if (memstat.ge.32) then
1236! last read was synchronous, to indices 1 and 2, 3 is free
1237      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1238           & memstat>=32 should never happen here."
1239      stop
1240    else if (memstat.eq.17) then
1241! old fields on 1,2, send 3
1242      mind=3
1243    else if (memstat.eq.18) then
1244! old fields on 2,3, send 1
1245      mind=1
1246    else if (memstat.eq.19) then
1247! old fields on 3,1, send 2
1248      mind=2
1249    else
1250      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1251           & invalid memstat"
      stop
1252    end if
1253
1254    if (mp_dev_mode) then
1255      if (mp_pid.ne.id_read) then
1256        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_async'
1257      end if
1258    end if
1259
1260    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat
1261
1262! Time for MPI communications
1263    if (mp_measure_time) call mpif_mtime('commtime',0)
1264
1265! Loop over receiving processes, initiate data sending
1266!*****************************************************
1267
1268    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
1269! TODO: use mp_partgroup_np here
1270      if (dest.eq.id_read) cycle
1271      i=dest*nvar_async
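! Each destination rank is assigned a contiguous block of request handles,
! reqs(dest*nvar_async:(dest+1)*nvar_async-1); e.g. with nvar_async=26,
! dest=1 uses reqs(26:51). The ordering here must match the receives
! posted in mpif_gf_recv_vars_async (see NOTE above).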
1272      call MPI_Isend(uu(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1273      if (mp_ierr /= 0) goto 600
1274      i=i+1
1275      call MPI_Isend(vv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1276      if (mp_ierr /= 0) goto 600
1277      i=i+1
1278      call MPI_Isend(uupol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1279      if (mp_ierr /= 0) goto 600
1280      i=i+1
1281      call MPI_Isend(vvpol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1282      if (mp_ierr /= 0) goto 600
1283      i=i+1
1284      call MPI_Isend(ww(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1285      if (mp_ierr /= 0) goto 600
1286      i=i+1
1287      call MPI_Isend(tt(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1288      if (mp_ierr /= 0) goto 600
1289      i=i+1
1290      call MPI_Isend(rho(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1291      if (mp_ierr /= 0) goto 600
1292      i=i+1
1293      call MPI_Isend(drhodz(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1294      if (mp_ierr /= 0) goto 600
1295      i=i+1
1296      call MPI_Isend(tth(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1297      if (mp_ierr /= 0) goto 600
1298      i=i+1
1299      call MPI_Isend(qvh(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1300      if (mp_ierr /= 0) goto 600
1301      i=i+1
1302      call MPI_Isend(qv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1303      if (mp_ierr /= 0) goto 600
1304      i=i+1
1305      call MPI_Isend(pv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1306      if (mp_ierr /= 0) goto 600
1307      i=i+1
1308      call MPI_Isend(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1309      if (mp_ierr /= 0) goto 600
1310      i=i+1
1311
1312      call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1313      if (mp_ierr /= 0) goto 600
1314      i=i+1
1315      call MPI_Isend(vdep(:,:,:,mind),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1316      if (mp_ierr /= 0) goto 600
1317      i=i+1
1318      call MPI_Isend(ps(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1319      if (mp_ierr /= 0) goto 600
1320      i=i+1
1321      call MPI_Isend(sd(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1322      if (mp_ierr /= 0) goto 600
1323      i=i+1
1324      call MPI_Isend(tcc(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1325      if (mp_ierr /= 0) goto 600
1326      i=i+1
1327      call MPI_Isend(tt2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1328      if (mp_ierr /= 0) goto 600
1329      i=i+1
1330      call MPI_Isend(td2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1331      if (mp_ierr /= 0) goto 600
1332      i=i+1
1333      call MPI_Isend(lsprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1334      if (mp_ierr /= 0) goto 600
1335      i=i+1
1336      call MPI_Isend(convprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1337      if (mp_ierr /= 0) goto 600
1338      i=i+1
1339      call MPI_Isend(ustar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1340      if (mp_ierr /= 0) goto 600
1341      i=i+1
1342      call MPI_Isend(wstar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1343      if (mp_ierr /= 0) goto 600
1344      i=i+1
1345      call MPI_Isend(hmix(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1346      if (mp_ierr /= 0) goto 600
1347      i=i+1
1348      call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1349      if (mp_ierr /= 0) goto 600
1350      i=i+1
1351      call MPI_Isend(oli(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1352      if (mp_ierr /= 0) goto 600
1353
1354! Send cloud water if it exists. Increment counter always (as on receiving end)
1355      if (readclouds) then
1356        i=i+1
1357        ! call MPI_Isend(icloud_stats(:,:,:,mind),d2s1*5,mp_sp,dest,tm1,&
1358        !      &MPI_COMM_WORLD,reqs(i),mp_ierr)
1359        call MPI_Isend(clw4(:,:,mind),d2s1,mp_sp,dest,tm1,&
1360             &MPI_COMM_WORLD,reqs(i),mp_ierr)
1361
1362        if (mp_ierr /= 0) goto 600
1363
1364        ! call MPI_Isend(clwc(:,:,:,mind),d3s1,mp_sp,dest,tm1,&
1365        !      &MPI_COMM_WORLD,reqs(i),mp_ierr)
1366        ! if (mp_ierr /= 0) goto 600
1367        ! i=i+1
1368
1369        ! call MPI_Isend(ciwc(:,:,:,mind),d3s1,mp_sp,dest,tm1,&
1370        !      &MPI_COMM_WORLD,reqs(i),mp_ierr)
1371        ! if (mp_ierr /= 0) goto 600
1372
1373      end if
1374    end do
1375
1376    if (mp_measure_time) call mpif_mtime('commtime',1)
1377
1378    goto 601
1379
1380600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> mp_ierr /= 0", mp_ierr
1381    stop
1382
1383601 end subroutine mpif_gf_send_vars_async
1384
1385
1386  subroutine mpif_gf_recv_vars_async(memstat)
1387!*******************************************************************************
1388! DESCRIPTION
1389!   Receive 'getfield' variables from reader process.
1390!   Called from timemanager by all processes except reader
1391!
1392! NOTE
1393!   This version uses asynchronous communications.
1394!
1395! VARIABLES
1396!   memstat -- input, used to resolve windfield index being received
1397!
1398!
1399!*******************************************************************************
1400    use com_mod
1401
1402    implicit none
1403
1404    integer, intent(in) :: memstat
1405    integer :: mind,j
1406
1407! Common array sizes used for communications
1408    integer :: d3s1 = nxmax*nymax*nzmax
1409    integer :: d3s2 = nxmax*nymax*nuvzmax
1410    integer :: d2s1 = nxmax*nymax
1411    integer :: d2s2 = nxmax*nymax*maxspec
1412    !integer :: d1_size1 = maxwf
1413
1414!    integer :: d3s1,d3s2,d2s1,d2s2
1415!*******************************************************************************
1416
1417! At the time this immediate receive is posted, memstat is the state of
1418! windfield indices at the previous/current time. From this, the future
1419! state is deduced.
1420    if (memstat.eq.32) then
1421! last read was synchronous to indices 1 and 2, 3 is free
1422      mind=3
1423    else if (memstat.eq.17) then
1424! last read was asynchronous to index 3, 1 is free
1425      mind=1
1426    else if (memstat.eq.18) then
1427! last read was asynchronous to index 1, 2 is free
1428      mind=2
1429    else if (memstat.eq.19) then
1430! last read was asynchronous to index 2, 3 is free
1431      mind=3
1432    else
! mind would otherwise be left undefined
      write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> ERROR: invalid memstat"
      stop
    end if
1433
1434    if (mp_dev_mode) then
1435      if (mp_pid.eq.id_read) then
1436        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async'
1437      end if
1438    end if
1439
1440! Time for MPI communications
1441    if (mp_measure_time) call mpif_mtime('commtime',0)
1442
1443    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_async, memstat:', memstat
1444
1445! Initiate receiving of data
1446!***************************
1447
1448! Get MPI tags/requests for communications
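! Each process posts its receives into its own block of request handles,
! reqs(mp_pid*nvar_async : mp_pid*nvar_async+nvar_async-1), mirroring the
! per-destination blocks used by the reader in mpif_gf_send_vars_async.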
1449    j=mp_pid*nvar_async
1450    call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1451         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1452    if (mp_ierr /= 0) goto 600
1453    j=j+1
1454    call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1455         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1456    if (mp_ierr /= 0) goto 600
1457    j=j+1
1458    call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1459         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1460    if (mp_ierr /= 0) goto 600
1461    j=j+1
1462    call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1463         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1464    if (mp_ierr /= 0) goto 600
1465    j=j+1
1466    call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1467         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1468    if (mp_ierr /= 0) goto 600
1469    j=j+1
1470    call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1471         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1472    if (mp_ierr /= 0) goto 600
1473    j=j+1
1474    call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1475         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1476    if (mp_ierr /= 0) goto 600
1477    j=j+1
1478    call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1479         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1480    if (mp_ierr /= 0) goto 600
1481    j=j+1
1482    call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
1483         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1484    if (mp_ierr /= 0) goto 600
1485    j=j+1
1486    call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
1487         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1488    if (mp_ierr /= 0) goto 600
1489    j=j+1
1490
1491    call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1492         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1493    if (mp_ierr /= 0) goto 600
1494    j=j+1
1495    call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1496         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1497    if (mp_ierr /= 0) goto 600
1498    j=j+1
1499    call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
1500         &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1501    if (mp_ierr /= 0) goto 600
1502    j=j+1
1503
1504    call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1505         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1506    if (mp_ierr /= 0) goto 600
1507    j=j+1
1508    call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
1509         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1510    if (mp_ierr /= 0) goto 600
1511    j=j+1
1512    call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1513         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1514    if (mp_ierr /= 0) goto 600
1515    j=j+1
1516    call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1517         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1518    if (mp_ierr /= 0) goto 600
1519    j=j+1
1520    call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1521         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1522    if (mp_ierr /= 0) goto 600
1523    j=j+1
1524    call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1525         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1526    if (mp_ierr /= 0) goto 600
1527    j=j+1
1528    call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1529         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1530    if (mp_ierr /= 0) goto 600
1531    j=j+1
1532    call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1533         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1534    if (mp_ierr /= 0) goto 600
1535    j=j+1
1536    call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1537         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1538    if (mp_ierr /= 0) goto 600
1539    j=j+1
1540    call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1541         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1542    if (mp_ierr /= 0) goto 600
1543    j=j+1
1544    call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1545         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1546    if (mp_ierr /= 0) goto 600
1547    j=j+1
1548    call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1549         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1550    if (mp_ierr /= 0) goto 600
1551    j=j+1
1552    call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1553         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1554    if (mp_ierr /= 0) goto 600
1555    j=j+1
1556    call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1557         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1558    if (mp_ierr /= 0) goto 600
1559
1560
1561! Post the request for cloud water (clw4). These data are possibly not sent;
1562! if so, the request must be cancelled. For now, assume that the data at all
! time steps either have or do not have cloud water.
1563    if (readclouds) then
1564      j=j+1
1565
1566      ! call MPI_Irecv(icloud_stats(:,:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
1567      !      &MPI_COMM_WORLD,reqs(j),mp_ierr)
1568      call MPI_Irecv(clw4(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
1569           &MPI_COMM_WORLD,reqs(j),mp_ierr)
1570      if (mp_ierr /= 0) goto 600
1571
1572      ! call MPI_Irecv(clwc(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1573      !      &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1574      ! if (mp_ierr /= 0) goto 600
1575      ! j=j+1
1576      ! call MPI_Irecv(ciwc(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
1577      !      &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1578      ! if (mp_ierr /= 0) goto 600
1579
1580    end if
1581
1582
1583    if (mp_measure_time) call mpif_mtime('commtime',1)
1584
1585    goto 601
1586
1587600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> MPI ERROR ", mp_ierr
1588    stop
1589
1590601 end subroutine mpif_gf_recv_vars_async
1591
1592
1593  subroutine mpif_gf_request
1594!*******************************************************************************
1595! DESCRIPTION
1596!   Check for completion of MPI_Isend/MPI_Irecv data transfer.
1597!   
1598!   
1599! NOTE
1600!   implicit synchronisation between all processes takes place here
1601!
1602!
1603!*******************************************************************************
1604    use com_mod, only: readclouds
1605
1606    implicit none
1607
1608
1609    integer :: n_req
1610    integer :: i
1611
1612!***********************************************************************
1613
1614    n_req=nvar_async*mp_np
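! n_req spans the full request array (nvar_async slots per process). Each
! process has posted requests in only part of this range; the remaining
! entries are assumed to be MPI_REQUEST_NULL (set at initialization), which
! MPI_Waitall treats as immediately complete.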
1615
1616    if (mp_measure_time) call mpif_mtime('commtime',0)
1617
1618!    call MPI_Wait(rm1,MPI_STATUS_IGNORE,mp_ierr)
1619
1620! TODO: cancel recv request if readclouds=.false.
1621!    if (readclouds) then
1622    call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
1623!    endif
1624! else
1625!   do i = 0, nvar_async*mp_np-1
1626!     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
1627!       call MPI_Cancel(reqs(i),mp_ierr)
1628!       cycle
1629!     end if
1630!     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
1631!   end do
1632! end if
1633
1634    if (mp_ierr /= 0) goto 600
1635
1636    if (mp_measure_time) call mpif_mtime('commtime',1)
1637
1638    goto 601
1639
1640600 write(*,*) "#### mpi_mod::mpif_gf_request> MPI ERROR ", mp_ierr
1641    stop
1642
1643601 end subroutine mpif_gf_request
1644
1645
1646  subroutine mpif_tm_reduce_grid
1647!***********************************************************************
1648! Collect grid variable to PID 0, adding from all processes.
1649!
1650! NOTE
1651!   - Older MPI implementations may lack the MPI_IN_PLACE operation.
1652!     If so, use variant 1) below and adapt unc_mod accordingly.
1653!
1654!***********************************************************************
1655    use com_mod
1656    use unc_mod
1657    use par_mod
1658
1659    implicit none
1660
1661    integer :: grid_size2d,grid_size3d
1662    integer, parameter :: rcpt_size=maxreceptor*maxspec
1663
1664!**********************************************************************
1665    grid_size2d=numxgrid*numygrid*maxspec* &
1666         & maxpointspec_act*nclassunc*maxageclass
1667    grid_size3d=numxgrid*numygrid*numzgrid*maxspec* &
1668         & maxpointspec_act*nclassunc*maxageclass
1669
1670
1671! Time for MPI communications
1672    if (mp_measure_time) call mpif_mtime('commtime',0)
1673
1674! 1) Using a separate grid (gridunc0) for received values
1675! call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
1676!      & mp_comm_used, mp_ierr)
1677! if (mp_ierr /= 0) goto 600
1678
1679! 2) Using in-place reduction
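! With MPI_IN_PLACE the root rank uses gridunc as both input and output of the
! reduction, so no separate receive buffer (gridunc0) is needed. On non-root
! ranks the receive-buffer argument of MPI_Reduce is ignored, which is why
! gridunc can be passed twice there.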
1680    if (lroot) then
1681      call MPI_Reduce(MPI_IN_PLACE, gridunc, grid_size3d, mp_sp, MPI_SUM, id_root, &
1682           & mp_comm_used, mp_ierr)
1683      if (mp_ierr /= 0) goto 600
1684    else
1685      call MPI_Reduce(gridunc, gridunc, grid_size3d, mp_sp, MPI_SUM, id_root, &
1686           & mp_comm_used, mp_ierr)
1687    end if
1688
1689    if ((WETDEP).and.(ldirect.gt.0)) then
1690      call MPI_Reduce(wetgridunc, wetgridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
1691           & mp_comm_used, mp_ierr)
1692      if (mp_ierr /= 0) goto 600
1693    end if
1694
1695    if ((DRYDEP).and.(ldirect.gt.0)) then
1696      call MPI_Reduce(drygridunc, drygridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
1697           & mp_comm_used, mp_ierr)
1698      if (mp_ierr /= 0) goto 600
1699    end if
1700
1701! Receptor concentrations   
1702    if (lroot) then
1703      call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
1704           & mp_comm_used,mp_ierr)
1705      if (mp_ierr /= 0) goto 600
1706    else
1707      call MPI_Reduce(creceptor,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
1708           & mp_comm_used,mp_ierr)
1709    end if
1710
1711    if (mp_measure_time) call mpif_mtime('commtime',1)
1712
1713    goto 601
1714
1715600 write(*,*) "#### mpi_mod::mpif_tm_reduce_grid> mp_ierr /= 0", mp_ierr
1716    stop
1717
1718601 end subroutine mpif_tm_reduce_grid
1719
1720
1721  subroutine mpif_tm_reduce_grid_nest
1722!***********************************************************************
1723! Collect nested grids to PID 0, adding from all processes.
1724!
1725! NOTE
1726!   - Compiling with 'fcheck=all' (gfortran) will cause a run-time error,
1727!     as wetgriduncn0 is only allocated on the root process. This is possibly
1728!     valid MPI code (the buffer is only referenced at root), but gfortran's
!     run-time check rejects it.
1729!
1730!***********************************************************************
1731    use com_mod
1732    use unc_mod
1733    use par_mod
1734
1735    implicit none
1736
1737    integer :: grid_size2d,grid_size3d
1738
1739!**********************************************************************
1740
1741    grid_size3d=numxgridn*numygridn*numzgrid*maxspec* &
1742         & maxpointspec_act*nclassunc*maxageclass
1743    grid_size2d=numxgridn*numygridn*maxspec* &
1744         & maxpointspec_act*nclassunc*maxageclass
1745
1746
1747! Time for MPI communications
1748    if (mp_measure_time) call mpif_mtime('commtime',0)
1749
1750! Using a separate grid (gridunc0) for received values, for debugging
1751! call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
1752!      & mp_comm_used, mp_ierr)
1753! if (mp_ierr /= 0) goto 600
1754
1755! Using in-place reduction
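! (Same MPI_IN_PLACE pattern as in mpif_tm_reduce_grid: griduncn is both send
! and receive buffer at root; the receive buffer is ignored on other ranks.)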
1756    if (lroot) then
1757      call MPI_Reduce(MPI_IN_PLACE, griduncn, grid_size3d, mp_sp, MPI_SUM, id_root, &
1758           & mp_comm_used, mp_ierr)
1759      if (mp_ierr /= 0) goto 600
1760    else
1761      call MPI_Reduce(griduncn, griduncn, grid_size3d, mp_sp, MPI_SUM, id_root, &
1762           & mp_comm_used, mp_ierr)
1763    end if
1764
1765    if ((WETDEP).and.(ldirect.gt.0)) then
1766      call MPI_Reduce(wetgriduncn, wetgriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
1767           & mp_comm_used, mp_ierr)
1768      if (mp_ierr /= 0) goto 600
1769    end if
1770
1771    if ((DRYDEP).and.(ldirect.gt.0)) then
1772      call MPI_Reduce(drygriduncn, drygriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
1773           & mp_comm_used, mp_ierr)
1774      if (mp_ierr /= 0) goto 600
1775    end if
1776
1777    if (mp_measure_time) call mpif_mtime('commtime',1)
1778
1779    goto 601
1780
1781600 write(*,*) "#### mpi_mod::mpif_tm_reduce_grid_nest> mp_ierr /= 0", mp_ierr
1782    stop
1783
1784601 end subroutine mpif_tm_reduce_grid_nest
1785
1786
1787  subroutine mpif_mtime(ident,imode)
1788!***********************************************************************
1789! Measure CPU/Wall time in various parts of the code
1790!
1791! VARIABLES
1792!   ident        character, identifies routine to measure
1793!   imode        integer, 0:start clock(s)  1: stop clock(s)
1794!
1795!***********************************************************************
1796    implicit none
1797
1798    character(LEN=*), intent(in) :: ident
1799    integer, intent(in) :: imode
1800
1801!***********************************************************************
1802
1803    select case(ident)
1804
1805    case ('timemanager')
1806      if (imode.eq.0) then
1807        call cpu_time(tm_tot_beg)
1808      else
1809        call cpu_time(tm_tot_end)
1810        tm_tot_total = tm_tot_total + (tm_tot_end-tm_tot_beg)
1811      end if
1812
1813    case ('wetdepo')
1814      if (imode.eq.0) then
1815        mp_wetdepo_wtime_beg = mpi_wtime()
1816        call cpu_time(mp_wetdepo_time_beg)
1817      else
1818        mp_wetdepo_wtime_end = mpi_wtime()
1819        call cpu_time(mp_wetdepo_time_end)
1820
1821        mp_wetdepo_wtime_total = mp_wetdepo_wtime_total + &
1822             &(mp_wetdepo_wtime_end - mp_wetdepo_wtime_beg)
1823        mp_wetdepo_time_total = mp_wetdepo_time_total + &
1824             &(mp_wetdepo_time_end - mp_wetdepo_time_beg)
1825      end if
1826
1827    case ('advance')
1828      if (imode.eq.0) then
1829        mp_advance_wtime_beg = mpi_wtime()
1830      else
1831        mp_advance_wtime_end = mpi_wtime()
1832
1833        mp_advance_wtime_total = mp_advance_wtime_total + &
1834             &(mp_advance_wtime_end - mp_advance_wtime_beg)
1835      end if
1836
1837    case ('getfields')
1838      if (imode.eq.0) then
1839        mp_getfields_wtime_beg = mpi_wtime()
1840        call cpu_time(mp_getfields_time_beg)
1841      else
1842        mp_getfields_wtime_end = mpi_wtime()
1843        call cpu_time(mp_getfields_time_end)
1844
1845        mp_getfields_wtime_total = mp_getfields_wtime_total + &
1846             &(mp_getfields_wtime_end - mp_getfields_wtime_beg)
1847        mp_getfields_time_total = mp_getfields_time_total + &
1848             &(mp_getfields_time_end - mp_getfields_time_beg)
1849      end if
1850
1851    case ('partloop1')
1852      if (imode.eq.0) then
1853        call cpu_time(tm_nploop_beg)
1854      else
1855        call cpu_time(tm_nploop_end)
1856        tm_nploop_total = tm_nploop_total + (tm_nploop_end - tm_nploop_beg)
1857      end if
1858
1859    case ('conccalc')
1860      if (imode.eq.0) then
1861        call cpu_time(mp_conccalc_time_beg)
1862      else
1863        call cpu_time(mp_conccalc_time_end)
1864        mp_conccalc_time_total = mp_conccalc_time_total + mp_conccalc_time_end - &
1865             &mp_conccalc_time_beg
1866      end if
1867
1868    case ('rootonly')
1869      if (imode.eq.0) then
1870        call cpu_time(mp_root_time_beg)
1871        mp_root_wtime_beg = mpi_wtime()
1872      else
1873        call cpu_time(mp_root_time_end)
1874        mp_root_wtime_end = mpi_wtime()
1875        mp_root_time_total = mp_root_time_total + &
1876             &(mp_root_time_end - mp_root_time_beg)
1877        mp_root_wtime_total = mp_root_wtime_total + &
1878             &(mp_root_wtime_end - mp_root_wtime_beg)
1879      end if
1880
1881    case ('iotime')
1882      if (imode.eq.0) then
1883        mp_io_wtime_beg = mpi_wtime()
1884        call cpu_time(mp_io_time_beg)
1885      else
1886        mp_io_wtime_end = mpi_wtime()
1887        call cpu_time(mp_io_time_end)
1888
1889        mp_io_wtime_total = mp_io_wtime_total + (mp_io_wtime_end - &
1890             & mp_io_wtime_beg)
1891        mp_io_time_total = mp_io_time_total + (mp_io_time_end - &
1892             & mp_io_time_beg)
1893      end if
1894
1895    case ('verttransform')
1896      if (imode.eq.0) then
1897        mp_vt_wtime_beg = mpi_wtime()
1898        call cpu_time(mp_vt_time_beg)
1899      else
1900        mp_vt_wtime_end = mpi_wtime()
1901        call cpu_time(mp_vt_time_end)
1902
1903        mp_vt_wtime_total = mp_vt_wtime_total + (mp_vt_wtime_end - &
1904             & mp_vt_wtime_beg)
1905        mp_vt_time_total = mp_vt_time_total + (mp_vt_time_end - &
1906             & mp_vt_time_beg)
1907      end if
1908
1909    case ('readwind')
1910      if (imode.eq.0) then
1911        call cpu_time(mp_readwind_time_beg)
1912        mp_readwind_wtime_beg = mpi_wtime()
1913      else
1914        call cpu_time(mp_readwind_time_end)
1915        mp_readwind_wtime_end = mpi_wtime()
1916
1917        mp_readwind_time_total = mp_readwind_time_total + &
1918             &(mp_readwind_time_end - mp_readwind_time_beg)
1919        mp_readwind_wtime_total = mp_readwind_wtime_total + &
1920             &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
1921      end if
1922
1923    case ('commtime')
1924      if (imode.eq.0) then
1925        call cpu_time(mp_comm_time_beg)
1926        mp_comm_wtime_beg = mpi_wtime()
1927      else
1928        call cpu_time(mp_comm_time_end)
1929        mp_comm_wtime_end = mpi_wtime()
1930        mp_comm_time_total = mp_comm_time_total + &
1931             &(mp_comm_time_end - mp_comm_time_beg)
1932        mp_comm_wtime_total = mp_comm_wtime_total + &
1933             &(mp_comm_wtime_end - mp_comm_wtime_beg)
1934      end if
1935
1936    case ('flexpart')
1937      if (imode.eq.0) then
1938        mp_total_wtime_beg=mpi_wtime()
1939      else
1940        mp_total_wtime_end=mpi_wtime()
1941        mp_total_wtime_total = mp_total_wtime_end-mp_total_wtime_beg
1942      end if
1943
1944    case default
1945      write(*,*) 'mpi_mod::mpif_mtime> WARNING: unknown case identifier'
1946
1947    end select
1948
1949
1950  end subroutine mpif_mtime
1951
1952
1953  subroutine mpif_finalize
1954!***********************************************************************
1955! Finalize MPI
1956! Optionally print detailed time diagnostics
1957!***********************************************************************
1958    implicit none
1959
1960    integer :: ip,j,r
1961
1962!***********************************************************************
1963
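! Print per-process timing statistics; the barrier at the top of the loop is
! intended to keep the output of different processes from interleaving.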
1964    if (mp_measure_time) then
1965      do ip=0, mp_np-1
1966        call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
1967
1968        if (ip == mp_pid) then
1969          write(*,FMT='(72("#"))')
1970          write(*,FMT='(A60,I3)') 'STATISTICS FOR MPI PROCESS:', mp_pid
1971          if (ip == 0) then
1972            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR ROOT-ONLY (PID 0) &
1973                 &SECTIONS: ', mp_root_time_total
1974            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ROOT-ONLY (PID 0) &
1975                 &SECTIONS:', mp_root_wtime_total
1976          end if
1977          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FLEXPART: ' &
1978               &, mp_total_wtime_total
1979          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI COMMUNICATIONS:',&
1980               & mp_comm_time_total
1981          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI COMMUNICATIONS:',&
1982               & mp_comm_wtime_total
1983          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI BARRIERS:',&
1984               & mp_barrier_time_total
1985          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI BARRIERS:',&
1986               & mp_barrier_wtime_total
1987          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER:',&
1988               & tm_tot_total
1989          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER/NUMPART LOOP:',&
1990               & tm_nploop_total
1991          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ADVANCE:',&
1992               & mp_advance_wtime_total
1993          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR GETFIELDS:',&
1994               & mp_getfields_wtime_total
1995          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
1996               & mp_getfields_time_total
1997          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
1998               & mp_readwind_wtime_total
1999          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
2000               & mp_readwind_time_total
2001          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
2002               & mp_io_wtime_total
2003          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR FILE IO:',&
2004               & mp_io_time_total
2005          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR WETDEPO:',&
2006               & mp_wetdepo_wtime_total
2007          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR WETDEPO:',&
2008               & mp_wetdepo_time_total
2009          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',&
2010               & mp_conccalc_time_total
2011          ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
2012          !      & mp_vt_wtime_total
2013          ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
2014          !      & mp_vt_time_total
2015! NB: the 'flush' function is possibly a gfortran-specific extension
2016          call flush()
2017        end if
2018      end do
2019    end if
2020
2021! This call to barrier is for correctly formatting output
2022    call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
2023
2024    if (lroot.and.mp_measure_time) then
2025      write(*,FMT='(72("#"))')
2026      write(*,*) "To turn off output of time measurements, set "
2027      write(*,*) "    mp_measure_time=.false."
2028      write(*,*) "in file mpi_mod.f90"
2029      write(*,FMT='(72("#"))')
2030    end if
2031
2032! j=mp_pid*nvar_async
2033! In the implementation with 3 fields, the processes may have posted
2034! MPI_Irecv requests that should be cancelled here
2035! if (.not.lmp_sync) then
2036!   r=mp_pid*nvar_async
2037!   do j=r,r+nvar_async-1
2038!     call MPI_Cancel(j,mp_ierr)
2039!     if (mp_ierr /= 0) write(*,*) '#### mpif_finalize::MPI_Cancel> ERROR ####'
2040!   end do
2041! end if
2042
2043    call MPI_FINALIZE(mp_ierr)
2044    if (mp_ierr /= 0) then
2045      write(*,*) '#### mpif_finalize::MPI_FINALIZE> MPI ERROR ', mp_ierr, ' ####'
2046      stop
2047    end if
2048
2049
2050  end subroutine mpif_finalize
2051
2052
2053    subroutine get_lun(my_lun)
2054!***********************************************************************
2055! get_lun:
2056!   Starting from 100, get next free logical unit number
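!   NOTE: compilers supporting Fortran 2008 could use open(newunit=...)
!   instead; this routine is presumably kept for portability to older
!   compilers.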
2057!***********************************************************************
2058
2059      implicit none
2060
2061      integer, intent(inout) :: my_lun
2062      integer, save :: free_lun=100
2063      logical :: exists, iopen
2064
2065!***********************************************************************
2066
2067      loop1: do
2068        inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
2069        if (exists .and. .not.iopen) exit loop1
2070        free_lun = free_lun+1
2071      end do loop1
2072      my_lun = free_lun
2073
2074    end subroutine get_lun
2075
2076
2077    subroutine write_data_dbg(array_in, array_name, tstep, ident)
2078!***********************************************************************
2079! Write one-dimensional arrays to file (for debugging purposes)
2080!***********************************************************************
2081      implicit none
2082
2083      real, intent(in), dimension(:) :: array_in
2084      integer, intent(in) :: tstep
2085      integer :: lios
2086      character(LEN=*), intent(in) :: ident, array_name
2087
2088      character(LEN=8) :: c_ts
2089      character(LEN=40) :: fn_1, fn_2
2090
2091!***********************************************************************
2092
2093      write(c_ts, FMT='(I8.8,BZ)') tstep
2094      fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
2095      write(c_ts, FMT='(I2.2,BZ)') mp_np
2096      fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'
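! Example (illustrative values only): array_name='uu', tstep=10800,
! ident='advance', mp_np=4  ->  file name 'uu-00010800-advance-np04.dat'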
2097
2098      call get_lun(dat_lun)
2099      open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
2100           FORM='UNFORMATTED', STATUS='REPLACE')
2101      write(UNIT=dat_lun, IOSTAT=lios) array_in
2102      close(UNIT=dat_lun)
2103
2104    end subroutine write_data_dbg
2105
2106
2107  subroutine set_fields_synthetic()
2108!*******************************************************************************
2109! DESCRIPTION
2110!   Set all meteorological fields to synthetic (usually constant/homogeneous)
2111!   values.
2112!   Used for validation and error-checking
2113!
2114! NOTE
2115!   No MPI communication is involved; each process fills its local fields.
2116!
2117! VARIABLES
2118!   
2119!
2120!
2121!*******************************************************************************
2122    use com_mod
2123
2124    implicit none
2125
2126    integer :: li=1, ui=2 ! wfmem indices (i.e., operate on the entire field)
2127
2128    if (.not.lmp_sync) ui=3
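! With asynchronous field reading (lmp_sync=.false.) three wind field time
! slots are in use, so all three are initialized here.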
2129
2130
2131! Variables transferred at initialization only
2132!*********************************************
2133!    readclouds=readclouds_
2134    oro(:,:)=0.0
2135    excessoro(:,:)=0.0
2136    lsm(:,:)=0.0
2137    xlanduse(:,:,:)=0.0
2138!    wftime
2139!    numbwf
2140!    nmixz
2141!    height
2142
2143! Time-varying fields:
2144    uu(:,:,:,li:ui) = 10.0
2145    vv(:,:,:,li:ui) = 0.0
2146    uupol(:,:,:,li:ui) = 10.0
2147    vvpol(:,:,:,li:ui)=0.0
2148    ww(:,:,:,li:ui)=0.
2149    tt(:,:,:,li:ui)=300.
2150    rho(:,:,:,li:ui)=1.3
2151    drhodz(:,:,:,li:ui)=.0
2152    tth(:,:,:,li:ui)=0.0
2153    qvh(:,:,:,li:ui)=1.0
2154    qv(:,:,:,li:ui)=1.0
2155
2156    pv(:,:,:,li:ui)=1.0
2157    clouds(:,:,:,li:ui)=0
2158
2159    clwc(:,:,:,li:ui)=0.0
2160    ciwc(:,:,:,li:ui)=0.0
2161 
2162! 2D fields
2163
2164    cloudsh(:,:,li:ui)=0
2165    vdep(:,:,:,li:ui)=0.0
2166    ps(:,:,:,li:ui)=1.0e5
2167    sd(:,:,:,li:ui)=0.0
2168    tcc(:,:,:,li:ui)=0.0
2169    tt2(:,:,:,li:ui)=300.
2170    td2(:,:,:,li:ui)=250.
2171    lsprec(:,:,:,li:ui)=0.0
2172    convprec(:,:,:,li:ui)=0.0
2173    ustar(:,:,:,li:ui)=1.0
2174    wstar(:,:,:,li:ui)=1.0
2175    hmix(:,:,:,li:ui)=10000.
2176    tropopause(:,:,:,li:ui)=10000.
2177    oli(:,:,:,li:ui)=0.01
2178 
2179  end subroutine set_fields_synthetic
2180
2181end module mpi_mod