source: flexpart.git/src/mpi_mod.f90 @ 5f9d14a

Last change on this file since 5f9d14a was 5f9d14a, checked in by Espen Sollum ATMOS <eso@…>, 9 years ago

Updated wet depo scheme

  • Property mode set to 100644
File size: 75.5 KB
Line 
1!**********************************************************************
2! Copyright 1998,1999,2000,2001,2002,2005,2007,2008,2009,2010         *
3! Andreas Stohl, Petra Seibert, A. Frank, Gerhard Wotawa,             *
4! Caroline Forster, Sabine Eckhardt, John Burkhart, Harald Sodemann   *
5!                                                                     *
6! This file is part of FLEXPART.                                      *
7!                                                                     *
8! FLEXPART is free software: you can redistribute it and/or modify    *
9! it under the terms of the GNU General Public License as published by*
10! the Free Software Foundation, either version 3 of the License, or   *
11! (at your option) any later version.                                 *
12!                                                                     *
13! FLEXPART is distributed in the hope that it will be useful,         *
14! but WITHOUT ANY WARRANTY; without even the implied warranty of      *
15! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the       *
16! GNU General Public License for more details.                        *
17!                                                                     *
18! You should have received a copy of the GNU General Public License   *
19! along with FLEXPART.  If not, see <http://www.gnu.org/licenses/>.   *
20!**********************************************************************
21
22module mpi_mod
23
24!*****************************************************************************
25!                                                                            *
26!  DESCRIPTION                                                               *
27!    This module contains subroutines and common variables used for the      *
28!    MPI parallelization of FLEXPART.                                        *
29!                                                                            *
30!  NOTE                                                                      *
31!    Depending on the MPI library installed on your system (e.g. mpich2,     *
32!    OpenMPI) you may need to choose below in this file between              *
33!      use mpi                                                               *
34!    (if the MPI library comes with the file 'mpi.mod'); or                  *
35!      include 'mpif.h'                                                      *
36!                                                                            *
37!                                                                            *
38!*****************************************************************************
39!                                                                            *
40! Variables:                                                                 *
41!                                                                            *
42! mp_ierr                 MPI error code                                     *
43! mp_np                   Number of MPI processes                            *
44! mp_pid                  Process ID of each MPI process                     *
45! mp_seed                 Parameter for random number seed                   *
46! read_grp_min            Minimum number of processes at which one will be   *
47!                         used as reader                                     *
48! numpart_mpi,            Number of particles per node                       *
49! maxpart_mpi                                                                *
50! mp_partid               MPI process ID for particle calculation            *
51! mp_partgroup_           Refers to the subset of processors performing      *
52!                         loops over particles. Will be all processes        *
53!                         unless a dedicated process runs getfields/readwind *
54! lmp_sync                If .false., use asynchronous MPI                   *
55!                                                                            *
56!                                                                            *
57!                                                                            *
58!                                                                            *
59!*****************************************************************************
60
61  use mpi
62  use par_mod, only: dp, sp
63  use com_mod, only: lroot
64
65  implicit none
66
67!  include 'mpif.h'
68
69  public
70
71! Set aside a process for reading windfields if using at least this many processes
72!==================================================
73  integer, parameter, private :: read_grp_min=4
74!==================================================
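! Illustration (comment only, not executed): with read_grp_min=4 and
! id_read=mp_np-1 (set in mpif_init below), a run such as
!   mpirun -np 8 <flexpart_mpi_executable>
! sets aside the last rank (7) for getfields/readwind and leaves ranks 0-6
! for the particle loops, whereas
!   mpirun -np 3 <flexpart_mpi_executable>
! keeps lmp_use_reader=.false. so that every rank calls getfields itself.
! The executable name is a placeholder, not part of this module.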
75
76! Variables for each MPI process in the world group
77  integer :: mp_ierr, mp_np, mp_pid, mp_partid
78  integer, private :: world_group_id
79
80! Variables for MPI processes in the 'particle' group
81  integer, allocatable, dimension(:) :: mp_partgroup_rank
82  integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
83
84  integer :: mp_seed=0
85  integer, parameter :: mp_sp=MPI_REAL4, mp_dp=MPI_REAL8
86  integer, parameter :: mp_pp=mp_sp
87  integer, parameter :: id_root=0 ! master process
88
89! MPI tags/requests for send/receive operation
90  integer :: tm1
91  integer, parameter :: nvar_async=27 !29 :DBG:
92  !integer, dimension(:), allocatable :: tags
93  integer, dimension(:), allocatable :: reqs
94
95
96  integer :: id_read   ! readwind/getfield process
97  integer :: numpart_mpi,maxpart_mpi ! number of particles per node
98  integer :: tot_numpart=0
99  integer :: mp_comm_used ! global or subgroup communicator
100
101  logical :: lmpreader=.false. ! is set to true for reading process(es) only.
102  logical :: lmp_use_reader=.false. ! true if separate readwind process is used
103
104! true if only using synchronous MPI send/recv:
105! If setting this to .false., numwfmem must be set to 3
106!===============================================================================
107  logical :: lmp_sync=.true.
108!===============================================================================
109
110! mp_dbg_mode       Used for debugging MPI.
111! mp_dev_mode       various checks related to debugging the parallel code
112! mp_dbg_out        write some arrays to data file for debugging
113! mp_measure_time   Measure cpu/wall time, write out at end of run
114! mp_time_barrier   Measure MPI barrier time
115  logical, parameter :: mp_dbg_mode = .false.
116  logical, parameter :: mp_dev_mode = .false.
117  logical, parameter :: mp_dbg_out = .false.
118  logical, parameter :: mp_time_barrier=.true.
119  logical, parameter :: mp_measure_time=.true.
120
121! for measuring CPU/Wall time
122  real(sp) :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.
123  real(dp) :: mp_comm_wtime_beg, mp_comm_wtime_end, mp_comm_wtime_total=0.
124  real(sp) :: mp_root_time_beg, mp_root_time_end, mp_root_time_total=0.
125  real(dp) :: mp_root_wtime_beg, mp_root_wtime_end, mp_root_wtime_total=0.
126  real(sp) :: mp_barrier_time_beg, mp_barrier_time_end, mp_barrier_time_total=0.
127  real(dp) :: mp_barrier_wtime_beg, mp_barrier_wtime_end, mp_barrier_wtime_total=0.
128  real(sp) :: tm_nploop_beg, tm_nploop_end, tm_nploop_total=0.
129  real(sp) :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
130  real(dp) :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
131  real(sp) :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
132  real(dp) :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
133  real(sp) :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
134  real(dp) :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
135  real(sp) :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
136  real(dp) :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.
137  real(sp) :: mp_wetdepo_time_beg, mp_wetdepo_time_end, mp_wetdepo_time_total=0.
138  real(dp) :: mp_advance_wtime_beg, mp_advance_wtime_end, mp_advance_wtime_total=0.
139  real(dp) :: mp_conccalc_time_beg, mp_conccalc_time_end, mp_conccalc_time_total=0.
140  real(dp) :: mp_total_wtime_beg, mp_total_wtime_end, mp_total_wtime_total=0.
141
142! dat_lun           logical unit number for i/o
143  integer, private :: dat_lun
144
145contains
146
147  subroutine mpif_init
148!***********************************************************************
149! DESCRIPTION
150!   Initialize MPI.
151!   
152!   Create the global communicator MPI_COMM_WORLD
153!   If using a separate MPI process for getfields/readwind, a subgroup
154!   is created for the other processes.
155!
156! VARIABLES
157!   mpi_mode    default 0, set to 2/3 if running MPI version
158!   mp_np       number of running processes, decided at run-time
159!***********************************************************************
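! Illustration (comment only, not executed): a minimal sketch of where this
! routine fits in a driver program. The program structure below is an
! assumption for illustration; FLEXPART's actual main program may differ.
!
!   program driver_sketch
!     use mpi_mod
!     implicit none
!     call mpif_init                 ! sets mp_pid, mp_np, reader/particle groups
!     call mpif_com_mod_allocate     ! per-process particle arrays (size maxpart_mpi)
!     ! ... time loop: reader process runs getfields/readwind, the particle
!     !     group integrates trajectories ...
!     call MPI_FINALIZE(mp_ierr)
!   end program driver_sketch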
160    use par_mod, only: maxpart, numwfmem
161    use com_mod, only: mpi_mode
162
163    implicit none
164
165    integer :: i,j,s,addmaxpart=0
166
167! each process gets an ID (mp_pid) in the range 0,..,mp_np-1
168    call MPI_INIT(mp_ierr)
169    if (mp_ierr /= 0) goto 100
170    call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)
171    if (mp_ierr /= 0) goto 100
172    call MPI_COMM_SIZE(MPI_COMM_WORLD, mp_np, mp_ierr)
173    if (mp_ierr /= 0) goto 100
174
175
176! this variable is used to handle subroutines common to parallel/serial version
177    if (lmp_sync) then
178      mpi_mode=2 ! hold 2 windfields in memory
179    else
180      mpi_mode=3 ! hold 3 windfields in memory
181    end if
182
183    if (mp_pid.ne.0) then
184      lroot = .false.
185    end if
186
187! Check for sensible combination of parameters
188!*********************************************
189
190    if (.not.lmp_sync.and.numwfmem.ne.3.and.lroot) then
191      write(*,FMT='(80("#"))')
192      write(*,*) '#### mpi_mod::mpif_init> ERROR: ', &
193           & 'numwfmem must be set to 3 for asynchronous reading ####'
194      write(*,FMT='(80("#"))')
195      stop
196    else if (lmp_sync.and.numwfmem.ne.2.and.lroot) then
197      write(*,FMT='(80("#"))')
198      write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
199           & 'numwfmem should be set to 2 for synchronous reading'
200      write(*,FMT='(80("#"))')
201! Force "synchronized" version if all processes will call getfields
202    else if (.not.lmp_sync.and.mp_np.lt.read_grp_min) then
203      if (lroot) then
204        write(*,FMT='(80("#"))')
205        write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
206             & 'all procs call getfields. Setting lmp_sync=.true.'
207        write(*,FMT='(80("#"))')
208      end if
209      lmp_sync=.true. ! :DBG: eso fix this...
210    end if
211! TODO: Add warnings for unimplemented flexpart features
212
213! Set ID of process that calls getfield/readwind.
214! Using the last process in the group ensures statistically identical results
215! as when running with one process fewer and no separate read process
216!**********************************************************************
217
218!    id_read = min(mp_np-1, 1)
219    id_read = mp_np-1
220
221    if (mp_pid.eq.id_read) lmpreader=.true.
222
223    call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)
224
225! Create a MPI group/communicator that will calculate trajectories.
226! Skip this step if program is run with only a few processes
227!************************************************************************
228    allocate(mp_partgroup_rank(0:mp_np-2))
229
230! This allows checking for allocation of arrays when no subgroup is used
231    mp_partgroup_pid=0
232
233    if (read_grp_min.lt.2) then
234      write(*,*) '#### mpi_mod::mpif_init> ERROR ####', &
235           & 'read_grp_min should be at least 2. Exiting'
236      stop
237    end if
238
239    if (mp_np.ge.read_grp_min) then
240      lmp_use_reader = .true.
241
242! Map the subgroup IDs 0,1,2,...,mp_np-2 to world ranks, skipping the reader process
243      j=-1
244      do i=0, mp_np-2 !loop over all (subgroup) IDs
245        j=j+1
246        if (i.eq.id_read) then
247          j=j+1
248        end if
249        mp_partgroup_rank(i) = j
250      end do
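! Illustration (comment only): with mp_np=4 and id_read=3 the loop above yields
!   mp_partgroup_rank = (/0, 1, 2/)
! i.e. world ranks 0-2 form the particle group and the reader rank 3 is skipped.
! If the reader were instead rank 1, the mapping would be (/0, 2, 3/).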
251
252      call MPI_Group_incl (world_group_id, mp_np-1, mp_partgroup_rank, &
253           &mp_partgroup_pid, mp_ierr)
254      if (mp_ierr /= 0) goto 100
255      call MPI_Comm_create (MPI_COMM_WORLD, mp_partgroup_pid, mp_partgroup_comm, mp_ierr)
256      if (mp_ierr /= 0) goto 100
257
258      if (mp_pid.ne.id_read) then
259        call MPI_Comm_rank (mp_partgroup_comm, mp_partgroup_pid, mp_ierr)
260        if (mp_ierr /= 0) goto 100
261
262! Get size of new group (=mp_np-1)
263        call MPI_COMM_SIZE(mp_partgroup_comm, mp_partgroup_np, mp_ierr)
264        if (mp_ierr /= 0) goto 100
265        if (mp_partgroup_np.ne.mp_np-1) then
266          write(*,*)  '#### mpi_mod:: mpif_init> ERROR ####&
267               & mp_partgroup_np.ne.mp_np-1'
268          stop
269        endif
270
271      else
272        mp_partgroup_pid = -1
273      end if
274    end if
275
276    if (lmp_use_reader) then
277      mp_comm_used = mp_partgroup_comm
278      mp_partid = mp_partgroup_pid
279      mp_partgroup_np=mp_np-1
280    else
281      mp_comm_used = MPI_COMM_WORLD
282      mp_partgroup_np = mp_np
283      mp_partid = mp_pid
284    end if
285
286! Set maxpart per process
287    if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1
288    maxpart_mpi=int(maxpart/mp_partgroup_np)+addmaxpart
289
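! Illustration (comment only): the two assignments above split maxpart as evenly
! as possible. E.g. maxpart=10 and mp_partgroup_np=4 gives mod(10,4)=2, so
! processes with mp_partid 0 and 1 get maxpart_mpi=int(10/4)+1=3 while
! processes 2 and 3 get maxpart_mpi=2 (3+3+2+2=10).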
290! Do not allocate particle data arrays for readwind process
291    if (lmpreader.and.lmp_use_reader) then
292      maxpart_mpi=0
293    end if
294
295! Set random seed for each non-root process
296    if (mp_pid.gt.0) then
297!    if (mp_pid.ge.0) then
298!      call system_clock(s)
299      s = 244
300      mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
301    end if
302    if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed
303    if (mp_dbg_mode) then
304! :DBG: For debugging, set all seeds to 0 and maxrand to e.g. 20
305      mp_seed=0
306      if (lroot) write(*,*) 'MPI: setting seed=0'
307    end if
308
309! Allocate request array for asynchronous MPI
310    if (.not.lmp_sync) then
311      allocate(reqs(0:nvar_async*mp_np-1))
312      reqs(:)=MPI_REQUEST_NULL
313    else
314      allocate(reqs(0:1))
315      reqs(:)=MPI_REQUEST_NULL
316    end if
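! Illustration (comment only): every asynchronous transfer posted via reqs(:)
! must eventually be completed. A generic way to do that (an assumption here,
! not necessarily how the rest of the code does it) would be:
!
!   call MPI_Waitall(size(reqs), reqs, MPI_STATUSES_IGNORE, mp_ierr)
!   if (mp_ierr /= 0) stop 'mpi_mod> MPI_Waitall failed'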
317
318    goto 101
319
320100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
321    stop
322
323101 end subroutine mpif_init
324
325
326  subroutine mpif_mpi_barrier
327!***********************************************************************
328! Set MPI barrier (all processes are synchronized here), with the
329! possible exception of a separate 'readwind' process.
330! Optionally measure cpu/wall time.
331!
332!***********************************************************************
333    implicit none
334
335    if (mp_time_barrier) then
336      call cpu_time(mp_barrier_time_beg)
337      mp_barrier_wtime_beg = mpi_wtime()
338    endif
339
340    call MPI_BARRIER(mp_comm_used, mp_ierr)
341    if (mp_ierr /= 0) goto 100
342
343    if (mp_time_barrier) then
344      call cpu_time(mp_barrier_time_end)
345      mp_barrier_wtime_end = mpi_wtime()
346
347      mp_barrier_time_total=mp_barrier_time_total+&
348           &(mp_barrier_time_end-mp_barrier_time_beg)
349      mp_barrier_wtime_total=mp_barrier_wtime_total+&
350           &(mp_barrier_wtime_end-mp_barrier_wtime_beg)
351    end if
352
353    goto 101
354
355100 write(*,*) '#### mpi_mod::mpif_mpi_barrier> ERROR ####', mp_ierr
356    stop
357
358101 end subroutine mpif_mpi_barrier
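! Illustration (comment only): a hypothetical usage pattern, e.g. synchronizing
! the particle processes before collective output so the wait time shows up in
! mp_barrier_(w)time_total (the surrounding statements are placeholders):
!
!   ! ... particle loop for this time step ...
!   call mpif_mpi_barrier
!   ! ... write output / gather statistics ...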
359
360
361  subroutine mpif_com_mod_allocate
362!*******************************************************************************   
363! Dynamic allocation of arrays (in serial code these have size maxpart)
364!
365!*******************************************************************************
366    use com_mod
367
368    implicit none
369
370    integer :: array_size
371
372    array_size = maxpart_mpi
373
374    allocate(itra1(array_size),npoint(array_size),&
375         & nclass(array_size),idt(array_size),&
376         & itramem(array_size),itrasplit(array_size),&
377         & xtra1(array_size),ytra1(array_size),&
378         & ztra1(array_size),xmass1(array_size, maxspec))
379
380    allocate(uap(array_size),ucp(array_size),&
381         & uzp(array_size),us(array_size),&
382         & vs(array_size),&
383         & ws(array_size),cbt(array_size))
384
385
386  end subroutine mpif_com_mod_allocate
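! Illustration (comment only): maxpart_mpi is set in mpif_init, so the intended
! call order is
!
!   call mpif_init
!   call mpif_com_mod_allocate
!
! On a dedicated reader process maxpart_mpi=0, so zero-sized (empty) arrays
! would be allocated if the routine is called there.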
387
388
389  subroutine mpif_tm_send_dims
390!***********************************************************************
391! Distribute array dimensions from pid0 to all processes.
392! numpart_mpi/numpart is sent to allow dynamic allocation
393!
394! Currently not in use.
395!
396! Variables of similar type (integer, real) are placed in an array
397! and sent collectively, to avoid the overhead associated with individual
398! MPI send/receives
399!
400!
401!***********************************************************************
402    use com_mod
403
404    implicit none
405
406    integer :: add_part=0
407
408    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
409
410! MPI subgroup does the particle-loop
411    if (lmp_use_reader) then
412      if (mod(numpart,mp_partgroup_np).ne.0) add_part=1
413      numpart_mpi=int(numpart/mp_partgroup_np)+add_part
414    else
415      if (mod(numpart,mp_np).ne.0) add_part=1
416      numpart_mpi=int(numpart/mp_np)+add_part
417    end if
418
419
420
421! redefine numpart as 'numpart per process' throughout the code
422!**************************************************************
423    numpart = numpart_mpi
424
425  end subroutine mpif_tm_send_dims
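! Illustration (comment only): e.g. numpart=10 distributed over
! mp_partgroup_np=4 particle processes gives add_part=1 (since mod(10,4)/=0)
! and numpart_mpi=int(10/4)+1=3, i.e. 4*3=12 slots of which the last two hold
! no real particles.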
426
427
428  subroutine mpif_tm_send_vars
429!***********************************************************************
430! Distribute particle variables from pid0 to all processes.
431! Called from timemanager
432!
433!
434!***********************************************************************
435    use com_mod
436
437    implicit none
438
439    integer :: i
440
441
442!***********************************************************************
443
444
445! Time for MPI communications
446!****************************
447    if (mp_measure_time) call mpif_mtime('commtime',0)
448
449! Distribute variables, send from pid 0 to other processes
450!**********************************************************************
451
452! integers
453    if (lroot) then
454      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
455           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
456      if (mp_ierr /= 0) goto 600
457      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
458           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
459      if (mp_ierr /= 0) goto 600
460      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
461           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
462      if (mp_ierr /= 0) goto 600
463      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
464           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
465      if (mp_ierr /= 0) goto 600
466      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
467           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
468      if (mp_ierr /= 0) goto 600
469
470! int2
471      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
472           &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
473      if (mp_ierr /= 0) goto 600
474
475! real
476      call MPI_SCATTER(uap,numpart_mpi,mp_pp,MPI_IN_PLACE,&
477           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
478      if (mp_ierr /= 0) goto 600
479      call MPI_SCATTER(ucp,numpart_mpi,mp_pp,MPI_IN_PLACE,&
480           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
481      if (mp_ierr /= 0) goto 600
482      call MPI_SCATTER(uzp,numpart_mpi,mp_pp,MPI_IN_PLACE,&
483           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
484      if (mp_ierr /= 0) goto 600
485
486      call MPI_SCATTER(us,numpart_mpi,mp_pp,MPI_IN_PLACE,&
487           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
488      if (mp_ierr /= 0) goto 600
489      call MPI_SCATTER(vs,numpart_mpi,mp_pp,MPI_IN_PLACE,&
490           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
491      if (mp_ierr /= 0) goto 600
492      call MPI_SCATTER(ws,numpart_mpi,mp_pp,MPI_IN_PLACE,&
493           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
494      if (mp_ierr /= 0) goto 600
495
496      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
497           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
498      if (mp_ierr /= 0) goto 600
499      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
500           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
501      if (mp_ierr /= 0) goto 600
502      call MPI_SCATTER(ztra1,numpart_mpi,mp_pp,MPI_IN_PLACE,&
503           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
504      if (mp_ierr /= 0) goto 600
505
506      do i=1,nspec
507        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_pp,MPI_IN_PLACE,&
508             &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
509        if (mp_ierr /= 0) goto 600
510      end do
511
512    else ! (mp_pid >= 1)
513! integers
514      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
515           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
516      if (mp_ierr /= 0) goto 600
517      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
518           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
519      if (mp_ierr /= 0) goto 600
520      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
521           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
522      if (mp_ierr /= 0) goto 600
523      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
524           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
525      if (mp_ierr /= 0) goto 600
526      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
527           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
528      if (mp_ierr /= 0) goto 600
529
530! int2
531      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
532           &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
533      if (mp_ierr /= 0) goto 600
534
535! reals
536      call MPI_SCATTER(uap,numpart_mpi,mp_pp,uap,&
537           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
538      if (mp_ierr /= 0) goto 600
539      call MPI_SCATTER(ucp,numpart_mpi,mp_pp,ucp,&
540           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
541      if (mp_ierr /= 0) goto 600
542      call MPI_SCATTER(uzp,numpart_mpi,mp_pp,uzp,&
543           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
544      if (mp_ierr /= 0) goto 600
545
546      call MPI_SCATTER(us,numpart_mpi,mp_pp,us,&
547           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
548      if (mp_ierr /= 0) goto 600
549      call MPI_SCATTER(vs,numpart_mpi,mp_pp,vs,&
550           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
551      if (mp_ierr /= 0) goto 600
552      call MPI_SCATTER(ws,numpart_mpi,mp_pp,ws,&
553           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
554      if (mp_ierr /= 0) goto 600
555
556      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
557           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
558      if (mp_ierr /= 0) goto 600
559      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
560           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
561      if (mp_ierr /= 0) goto 600
562      call MPI_SCATTER(ztra1,numpart_mpi,mp_pp,ztra1,&
563           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
564      if (mp_ierr /= 0) goto 600
565
566      do i=1,nspec
567        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_pp,xmass1(:,i),&
568             &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
569        if (mp_ierr /= 0) goto 600
570      end do
571
572    end if !(lroot)
573
574    if (mp_measure_time) call mpif_mtime('commtime',1)
575
576    goto 601
577
578600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
579    stop
580601 end subroutine mpif_tm_send_vars
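! Illustration (comment only): the scatter pattern used above, in generic form.
! On the root, MPI_IN_PLACE is passed as the receive buffer so the root keeps
! its own slice without copying; 'sendarr', 'recvarr' and 'n' are placeholders:
!
!   if (lroot) then
!     call MPI_SCATTER(sendarr,n,MPI_INTEGER,MPI_IN_PLACE,n,MPI_INTEGER,&
!          &id_root,mp_comm_used,mp_ierr)
!   else
!     call MPI_SCATTER(sendarr,n,MPI_INTEGER,recvarr,n,MPI_INTEGER,&
!          &id_root,mp_comm_used,mp_ierr)
!   end if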
581
582
583  subroutine mpif_tm_collect_vars
584!*******************************************************************************
585! Collect particle data to PID 0 from all processes.                           *
586!                                                                              *
587! This can be called from timemanager_mpi, before calculating                  *
588! concentrations/writing output grids.                                         *
589!                                                                              *
590! Currently not in use, as each process calculates concentrations              *
591! separately.                                                                  *
592!                                                                              *
593! To use this routine (together with mpif_tm_send_vars) to transfer data       *
594! to the MPI root process (useful for debugging), insert code like this        *
595! (in timemanager_mpi.f90)                                                     *
596!                                                                              *
597!      if (lroot) tot_numpart=numpart ! root stores total numpart            *
598!      call mpif_tm_send_dims                                                  *
599!      if (numpart>1) then                                                     *
600!        call mpif_tm_send_vars                                                *
601!      end if                                                                  *
602!                                                                              *
603!    To collect data from all processes to root process after a parallel       *
604!    region:                                                                   *
605!                                                                              *
606!      if (numpart>0) then                                                     *
607!          if (lroot) numpart = tot_numpart                                  *
608!       call mpif_tm_collect_vars                                              *
609!      end if                                                                  *
610!                                                                              *
611!    Then a section that begins with "if (lroot) ..." will behave like       *
612!    serial flexpart                                                           *
613!                                                                              *
614!*******************************************************************************
615    use com_mod !, only: numpart, nspec, itra1, npoint, nclass
616
617    implicit none
618
619    integer :: i
620
621    logical :: use_mp_vars = .false.! use mp_... type allocated vars
622    logical :: use_in_place = .true.! use MPI_IN_PLACE to collect vars.
623
624
625! Time for MPI communications
626    if (mp_measure_time) call mpif_mtime('commtime',0)
627
628! Collect variables; gather from all processes to pid 0 (root)
629!**********************************************************************
630
631    if (.not. use_mp_vars.and..not.use_in_place) then
632
633! Integers:
634      call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
635           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
636      if (mp_ierr /= 0) goto 600
637      call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
638           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
639      if (mp_ierr /= 0) goto 600
640      call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
641           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
642      if (mp_ierr /= 0) goto 600
643      call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
644           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
645      if (mp_ierr /= 0) goto 600
646      call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
647           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
648      if (mp_ierr /= 0) goto 600
649
650! int2
651      call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
652           &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
653      if (mp_ierr /= 0) goto 600
654
655! Reals:
656      call MPI_GATHER(uap, numpart_mpi, mp_pp, uap, &
657           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
658      if (mp_ierr /= 0) goto 600
659      call MPI_GATHER(ucp, numpart_mpi, mp_pp, ucp, &
660           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
661      if (mp_ierr /= 0) goto 600
662      call MPI_GATHER(uzp, numpart_mpi, mp_pp, uzp, &
663           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
664      if (mp_ierr /= 0) goto 600
665
666      call MPI_GATHER(us, numpart_mpi, mp_pp, us, &
667           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
668      if (mp_ierr /= 0) goto 600
669      call MPI_GATHER(vs, numpart_mpi, mp_pp, vs, &
670           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
671      if (mp_ierr /= 0) goto 600
672      call MPI_GATHER(ws, numpart_mpi, mp_pp, ws, &
673           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
674      if (mp_ierr /= 0) goto 600
675
676
677      call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
678           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
679      if (mp_ierr /= 0) goto 600
680      call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
681           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
682      if (mp_ierr /= 0) goto 600
683      call MPI_GATHER(ztra1, numpart_mpi, mp_pp, ztra1, &
684           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
685      if (mp_ierr /= 0) goto 600
686
687      do i=1, nspec
688        call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_pp, xmass1(:,i), &
689             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
690        if (mp_ierr /= 0) goto 600
691
692      end do
693
694! Use variables npoint etc directly for communications
695!***********************************************************************
696    else if (use_in_place.and..not.use_mp_vars) then
697      if (lroot) then
698! Integers:
699        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, npoint, &
700             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
701        if (mp_ierr /= 0) goto 600
702        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, idt, &
703             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
704        if (mp_ierr /= 0) goto 600
705        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itra1, &
706             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
707        if (mp_ierr /= 0) goto 600
708        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, nclass, &
709             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
710        if (mp_ierr /= 0) goto 600
711        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itramem, &
712             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
713        if (mp_ierr /= 0) goto 600
714
715! int2
716        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER2, cbt, &
717             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
718        if (mp_ierr /= 0) goto 600
719
720! Reals:
721        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, uap, &
722             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
723        if (mp_ierr /= 0) goto 600
724        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, ucp, &
725             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
726        if (mp_ierr /= 0) goto 600
727        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, uzp, &
728             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
729        if (mp_ierr /= 0) goto 600
730
731        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, us, &
732             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
733        if (mp_ierr /= 0) goto 600
734        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, vs, &
735             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
736        if (mp_ierr /= 0) goto 600
737        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, ws, &
738             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
739        if (mp_ierr /= 0) goto 600
740
741
742        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, xtra1, &
743             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
744        if (mp_ierr /= 0) goto 600
745        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, ytra1, &
746             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
747        if (mp_ierr /= 0) goto 600
748        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, ztra1, &
749             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
750        if (mp_ierr /= 0) goto 600
751
752        do i=1, nspec
753          call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, xmass1(:,i), &
754               &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
755          if (mp_ierr /= 0) goto 600
756        end do
757
758      else ! (for mp_pid >= 1)
759
760! Integers:
761        call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
762             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
763        if (mp_ierr /= 0) goto 600
764        call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
765             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
766        if (mp_ierr /= 0) goto 600
767        call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
768             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
769        if (mp_ierr /= 0) goto 600
770        call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
771             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
772        if (mp_ierr /= 0) goto 600
773        call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
774             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
775        if (mp_ierr /= 0) goto 600
776
777! int2
778        call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
779             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
780        if (mp_ierr /= 0) goto 600
781
782! Reals:
783        call MPI_GATHER(uap, numpart_mpi, mp_pp, uap, &
784             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
785        if (mp_ierr /= 0) goto 600
786        call MPI_GATHER(ucp, numpart_mpi, mp_pp, ucp, &
787             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
788        if (mp_ierr /= 0) goto 600
789        call MPI_GATHER(uzp, numpart_mpi, mp_pp, uzp, &
790             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
791        if (mp_ierr /= 0) goto 600
792
793        call MPI_GATHER(us, numpart_mpi, mp_pp, us, &
794             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
795        if (mp_ierr /= 0) goto 600
796        call MPI_GATHER(vs, numpart_mpi, mp_pp, vs, &
797             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
798        if (mp_ierr /= 0) goto 600
799        call MPI_GATHER(ws, numpart_mpi, mp_pp, ws, &
800             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
801        if (mp_ierr /= 0) goto 600
802
803
804        call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
805             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
806        if (mp_ierr /= 0) goto 600
807        call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
808             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
809        if (mp_ierr /= 0) goto 600
810        call MPI_GATHER(ztra1, numpart_mpi, mp_pp, ztra1, &
811             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
812        if (mp_ierr /= 0) goto 600
813
814        do i=1, nspec
815          call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_pp, xmass1(:,i), &
816               &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
817          if (mp_ierr /= 0) goto 600
818        end do
819      end if ! (lroot)
820    end if !  (use_in_place)
821
822    if (mp_measure_time) call mpif_mtime('commtime',1)
823
824    goto 601
825
826600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
827    stop
828601 end subroutine mpif_tm_collect_vars
829
830
831  subroutine mpif_gf_send_vars(memstat)
832!*******************************************************************************
833! DESCRIPTION
834!   Distribute 'getfield' variables from reader process
835!
836!   Called from timemanager
837!
838! NOTE
839!   This subroutine distributes windfields read from the reader process to
840!   all other processes. Usually one set of fields is transferred, but at
841!   the first timestep, when there are no fields in memory, both are transferred.
842!   MPI_Bcast is used, so implicitly all processes are synchronized at this
843!   step
844!
845! TODO
846!   GFS version
847!
848!*******************************************************************************
849    use com_mod
850    use par_mod,only: numwfmem
851
852    implicit none
853
854    integer, intent(in) :: memstat
855
856! Common array sizes used for communications
857    integer, parameter :: d3_size1 = nxmax*nymax*nzmax
858    integer, parameter :: d3_size2 = nxmax*nymax*nuvzmax
859    integer, parameter :: d2_size1 = nxmax*nymax
860    integer, parameter :: d2_size2 = nxmax*nymax*maxspec
861    integer, parameter :: d2_size3 = nxmax*nymax
862    integer, parameter :: d1_size1 = maxwf
863
864    integer :: d3s1,d3s2,d2s1,d2s2
865
866! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
867    integer :: li,ui
868
869! The first time the routine is called, the unchangeable fields will be transferred
870    logical :: first_call=.true.
871
872!**********************************************************************
873
874! Sizes of arrays transferred
875    d3s1=d3_size1
876    d3s2=d3_size2
877    d2s1=d2_size1
878    d2s2=d2_size2
879
880! Decide which field(s) need to be transferred
881    if (memstat.ge.32) then ! distribute 2 fields, to 1st/2nd indices
882      li=1; ui=2
883      d3s1=2*d3_size1
884      d3s2=2*d3_size2
885      d2s1=2*d2_size1
886      d2s2=2*d2_size2
887    else if (memstat.eq.17) then ! distribute 1 field, place on 1st index
888      li=1; ui=1
889    else if (memstat.eq.18) then ! distribute 1 field, place on 2nd index
890      li=2; ui=2
891    else if (memstat.eq.19) then ! distribute 1 field, place on 3rd index
892      li=3; ui=3
893    else
894      write(*,*) '#### mpi_mod::mpif_gf_send_vars> ERROR: ', &
895           & 'wrong value of memstat, exiting ####', memstat
896      stop
897    end if
898
899
900! Time for MPI communications
901    if (mp_measure_time) call mpif_mtime('commtime',0)
902
903! Send variables from getfield process (id_read) to other processes
904!**********************************************************************
905
906! The non-reader processes need to know if clouds were read.
907! TODO: only at first step or always?
908    call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
909    if (mp_ierr /= 0) goto 600
910
911! Static fields/variables sent only at startup
912    if (first_call) then
913      call MPI_Bcast(oro(:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
914      if (mp_ierr /= 0) goto 600
915      call MPI_Bcast(excessoro(:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
916      if (mp_ierr /= 0) goto 600
917      call MPI_Bcast(lsm(:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
918      if (mp_ierr /= 0) goto 600
919      call MPI_Bcast(xlanduse(:,:,:),d2_size3*numclass,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
920      if (mp_ierr /= 0) goto 600
921      call MPI_Bcast(wftime,d1_size1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
922      if (mp_ierr /= 0) goto 600
923      call MPI_Bcast(numbwf,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
924      if (mp_ierr /= 0) goto 600
925      call MPI_Bcast(nmixz,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
926      if (mp_ierr /= 0) goto 600
927      call MPI_Bcast(height,nzmax,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr) ! height is a real array
928      if (mp_ierr /= 0) goto 600
929
930      first_call=.false.
931    endif
932
933    call MPI_Bcast(uu(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
934    if (mp_ierr /= 0) goto 600
935    call MPI_Bcast(vv(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
936    if (mp_ierr /= 0) goto 600
937    call MPI_Bcast(uupol(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
938    if (mp_ierr /= 0) goto 600
939    call MPI_Bcast(vvpol(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
940    if (mp_ierr /= 0) goto 600
941    call MPI_Bcast(ww(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
942    if (mp_ierr /= 0) goto 600
943    call MPI_Bcast(tt(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
944    if (mp_ierr /= 0) goto 600
945    call MPI_Bcast(rho(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
946    if (mp_ierr /= 0) goto 600
947    call MPI_Bcast(drhodz(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
948    if (mp_ierr /= 0) goto 600
949    call MPI_Bcast(tth(:,:,:,li:ui),d3s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
950    if (mp_ierr /= 0) goto 600
951    call MPI_Bcast(qvh(:,:,:,li:ui),d3s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
952    if (mp_ierr /= 0) goto 600
953    call MPI_Bcast(qv(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
954    if (mp_ierr /= 0) goto 600
955    call MPI_Bcast(pv(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
956    if (mp_ierr /= 0) goto 600
957    call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
958    if (mp_ierr /= 0) goto 600
959
960! cloud water/ice:
961    if (readclouds) then
962      call MPI_Bcast(clwc(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
963      if (mp_ierr /= 0) goto 600
964      call MPI_Bcast(ciwc(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
965      if (mp_ierr /= 0) goto 600
966    end if
967
968! 2D fields
969    call MPI_Bcast(cloudsh(:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
970    if (mp_ierr /= 0) goto 600
971    call MPI_Bcast(vdep(:,:,:,li:ui),d2s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
972    if (mp_ierr /= 0) goto 600
973    call MPI_Bcast(ps(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
974    if (mp_ierr /= 0) goto 600
975    call MPI_Bcast(sd(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
976    if (mp_ierr /= 0) goto 600
977    call MPI_Bcast(tcc(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
978    if (mp_ierr /= 0) goto 600
979    call MPI_Bcast(tt2(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
980    if (mp_ierr /= 0) goto 600
981    call MPI_Bcast(td2(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
982    if (mp_ierr /= 0) goto 600
983    call MPI_Bcast(lsprec(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
984    if (mp_ierr /= 0) goto 600
985    call MPI_Bcast(convprec(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
986    if (mp_ierr /= 0) goto 600
987    call MPI_Bcast(ustar(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
988    if (mp_ierr /= 0) goto 600
989    call MPI_Bcast(wstar(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
990    if (mp_ierr /= 0) goto 600
991    call MPI_Bcast(hmix(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
992    if (mp_ierr /= 0) goto 600
993    call MPI_Bcast(tropopause(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
994    if (mp_ierr /= 0) goto 600
995    call MPI_Bcast(oli(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
996    if (mp_ierr /= 0) goto 600
997
998    call MPI_Bcast(z0,numclass,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
999    if (mp_ierr /= 0) goto 600
1000
1001    if (mp_measure_time) call mpif_mtime('commtime',1)
1002
1003    goto 601
1004
1005600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
1006    stop
1007
1008601 end subroutine mpif_gf_send_vars
1009
1010
1011  subroutine mpif_gf_send_vars_nest(memstat)
1012!***********************************************************************
1013! DESCRIPTION
1014!   Distribute 'getfield' variables from reader process to all processes.
1015!   For nested fields
1016!
1017!   Called from timemanager
1018!
1019! NOTE
1020!   This subroutine distributes nested windfields from the reader process to
1021!   all other processes. Usually one set of fields is transferred, but at
1022!   the first timestep, when there are no fields in memory, both are transferred.
1023!   MPI_Bcast is used, so implicitly all processes are synchronized at this
1024!   step
1025!
1026! TODO
1027!   Transfer cloud water/ice if and when available for nested
1028!
1029!***********************************************************************
1030    use com_mod
1031    use par_mod, only: numwfmem
1032
1033    implicit none
1034
1035    integer, intent(in) :: memstat
1036    integer :: i
1037! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1038    integer :: li,ui
1039
1040! Common array sizes used for communications
1041    integer :: d3_size1 = nxmaxn*nymaxn*nzmax*maxnests
1042    integer :: d3_size2 = nxmaxn*nymaxn*nuvzmax*maxnests
1043    integer :: d2_size1 = nxmaxn*nymaxn*maxnests
1044    integer :: d2_size2 = nxmaxn*nymaxn*maxspec*maxnests
1045    integer :: d2_size3 = nxmaxn*nymaxn*maxnests
1046
1047    integer :: d3s1,d3s2,d2s1,d2s2
1048
1049! The first time the routine is called, the unchangeable fields will be transferred
1050    logical :: first_call=.true.
1051
1052!**********************************************************************
1053
1054! Sizes of arrays transferred
1055    d3s1=d3_size1
1056    d3s2=d3_size2
1057    d2s1=d2_size1
1058    d2s2=d2_size2
1059
1060! Decide which field(s) need to be transferred
1061    if (memstat.ge.32) then ! distribute 2 fields
1062      li=1; ui=2
1063      d3s1=2*d3_size1
1064      d3s2=2*d3_size2
1065      d2s1=2*d2_size1
1066      d2s2=2*d2_size2
1067    else if (memstat.eq.17) then ! distribute 1 field, on 1st index
1068      li=1; ui=1
1069    else if (memstat.eq.18) then ! distribute 1 field, on 2nd index
1070      li=2; ui=2
1071    else if (memstat.eq.19) then ! distribute 1 field, on 3rd index
1072      li=3; ui=3
1073    else
1074      write(*,*) '#### mpi_mod::mpif_gf_send_vars_nest> ERROR: ', &
1075           & 'wrong value of memstat, exiting ####', memstat
1076      stop
1077    end if
1078
1079
1080! Time for MPI communications
1081    if (mp_measure_time) call mpif_mtime('commtime',0)
1082
1083! Distribute variables, send from getfield process (id_read) to other
1084! processes
1085!**********************************************************************
1086
1087! Static fields/variables sent only at startup
1088    if (first_call) then
1089      call MPI_Bcast(oron(:,:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1090      if (mp_ierr /= 0) goto 600
1091      call MPI_Bcast(excessoron(:,:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1092      if (mp_ierr /= 0) goto 600
1093      call MPI_Bcast(lsmn(:,:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1094      if (mp_ierr /= 0) goto 600
1095      call MPI_Bcast(xlandusen(:,:,:,:),d2_size3*numclass,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1096      if (mp_ierr /= 0) goto 600
1097      first_call=.false.
1098    end if
1099
1100! MPI prefers contiguous arrays for sending (else a buffer is created),
1101! hence the loop
1102
1103    do i=1, numbnests
1104! 3D fields
1105      call MPI_Bcast(uun(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1106      if (mp_ierr /= 0) goto 600
1107      call MPI_Bcast(vvn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1108      if (mp_ierr /= 0) goto 600
1109      call MPI_Bcast(wwn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1110      if (mp_ierr /= 0) goto 600
1111      call MPI_Bcast(ttn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1112      if (mp_ierr /= 0) goto 600
1113      call MPI_Bcast(rhon(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1114      if (mp_ierr /= 0) goto 600
1115      call MPI_Bcast(drhodzn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1116      if (mp_ierr /= 0) goto 600
1117      call MPI_Bcast(tthn(:,:,:,li:ui,i),d3s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1118      if (mp_ierr /= 0) goto 600
1119      call MPI_Bcast(qvhn(:,:,:,li:ui,i),d3s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1120      if (mp_ierr /= 0) goto 600
1121      call MPI_Bcast(qvn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1122      if (mp_ierr /= 0) goto 600
1123      call MPI_Bcast(pvn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1124      if (mp_ierr /= 0) goto 600
1125      call MPI_Bcast(cloudsn(:,:,:,li:ui,i),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1126      if (mp_ierr /= 0) goto 600
1127
1128! 2D fields
1129      call MPI_Bcast(cloudsnh(:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1130      if (mp_ierr /= 0) goto 600
1131      call MPI_Bcast(vdepn(:,:,:,li:ui,i),d2s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1132      if (mp_ierr /= 0) goto 600
1133      call MPI_Bcast(psn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1134      if (mp_ierr /= 0) goto 600
1135      call MPI_Bcast(sdn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1136      if (mp_ierr /= 0) goto 600
1137      call MPI_Bcast(tccn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1138      if (mp_ierr /= 0) goto 600
1139      call MPI_Bcast(tt2n(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1140      if (mp_ierr /= 0) goto 600
1141      call MPI_Bcast(td2n(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1142      if (mp_ierr /= 0) goto 600
1143      call MPI_Bcast(lsprecn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1144      if (mp_ierr /= 0) goto 600
1145      call MPI_Bcast(convprecn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1146      if (mp_ierr /= 0) goto 600
1147      call MPI_Bcast(ustarn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1148      if (mp_ierr /= 0) goto 600
1149      call MPI_Bcast(wstarn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1150      if (mp_ierr /= 0) goto 600
1151      call MPI_Bcast(olin(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1152      if (mp_ierr /= 0) goto 600
1153      call MPI_Bcast(hmixn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1154      if (mp_ierr /= 0) goto 600
1155      call MPI_Bcast(tropopausen(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1156      if (mp_ierr /= 0) goto 600
1157    end do
1158
1159    if (mp_measure_time) call mpif_mtime('commtime',1)
1160
1161    goto 601
1162
1163600 write(*,*) "mpi_mod> mp_ierr /= 0",mp_ierr
1164    stop
1165
1166601 end subroutine mpif_gf_send_vars_nest
1167
1168
1169  subroutine mpif_gf_send_vars_async(memstat)
1170!*******************************************************************************
1171! DESCRIPTION
1172!   Distribute 'getfield' variables from reader process to all processes.
1173!   Called from timemanager by reader process only
1174!
1175! NOTE
1176!   This version uses asynchronous sends. The newest fields are sent in the
1177!   background, so calculations can continue while
1178!   MPI communications are performed.
1179!
1180!   The way the MPI tags/requests are sequenced means that this routine must
1181!   carefully match routine 'mpif_gf_recv_vars_async'
1182!
1183! VARIABLES
1184!   memstat -- input, for resolving pointer to windfield index being read
1185!   mind    -- index where to place new fields
1186!
1187! TODO
1188!   Transfer cloud water/ice
1189!
1190!*******************************************************************************
1191    use com_mod
1192
1193    implicit none
1194
1195    integer, intent(in) :: memstat
1196    integer :: mind
1197    integer :: dest,i
1198
1199! Common array sizes used for communications
1200    integer :: d3s1 = nxmax*nymax*nzmax
1201    integer :: d3s2 = nxmax*nymax*nuvzmax
1202    integer :: d2s1 = nxmax*nymax
1203    integer :: d2s2 = nxmax*nymax*maxspec
1204    integer :: d1s1 = maxwf
1205
1206!*******************************************************************************
1207
1208! At the time the send is posted, the reader process is one step further
1209! in the permutation of memstat compared with the receiving processes
1210
1211    if (memstat.ge.32) then
1212! last read was synchronous, to indices 1 and 2, 3 is free
1213      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1214           & memstat>=32 should never happen here."
1215      stop
1216    else if (memstat.eq.17) then
1217! old fields on 1,2, send 3
1218      mind=3
1219    else if (memstat.eq.18) then
1220! old fields on 2,3, send 1
1221      mind=1
1222    else if (memstat.eq.19) then
1223! old fields on 3,1, send 2
1224      mind=2
1225    else
1226      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1227           & invalid memstat"
1228    end if
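! Illustration (comment only): with numwfmem=3 the windfield slots act as a
! ring buffer. While the computing processes work on the two slots holding the
! current interval, the reader sends the next field into the free slot:
!   memstat=17 -> slots 1,2 in use, new field sent to slot 3
!   memstat=18 -> slots 2,3 in use, new field sent to slot 1
!   memstat=19 -> slots 3,1 in use, new field sent to slot 2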
1229
1230    if (mp_dev_mode) then
1231      if (mp_pid.ne.id_read) then
1232        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_async'
1233      end if
1234    end if
1235
1236    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat
1237
1238! Time for MPI communications
1239    if (mp_measure_time) call mpif_mtime('commtime',0)
1240
1241! Loop over receiving processes, initiate data sending
1242!*****************************************************
1243
1244    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
1245                      ! TODO: use mp_partgroup_np here
1246      if (dest.eq.id_read) cycle
1247      i=dest*nvar_async
1248      call MPI_Isend(uu(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1249      if (mp_ierr /= 0) goto 600
1250      i=i+1
1251      call MPI_Isend(vv(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1252      if (mp_ierr /= 0) goto 600
1253      i=i+1
1254      call MPI_Isend(uupol(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1255      if (mp_ierr /= 0) goto 600
1256      i=i+1
1257      call MPI_Isend(vvpol(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1258      if (mp_ierr /= 0) goto 600
1259      i=i+1
1260      call MPI_Isend(ww(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1261      if (mp_ierr /= 0) goto 600
1262      i=i+1
1263      call MPI_Isend(tt(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1264      if (mp_ierr /= 0) goto 600
1265      i=i+1
1266      call MPI_Isend(rho(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1267      if (mp_ierr /= 0) goto 600
1268      i=i+1
1269      call MPI_Isend(drhodz(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1270      if (mp_ierr /= 0) goto 600
1271      i=i+1
1272      call MPI_Isend(tth(:,:,:,mind),d3s2,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1273      if (mp_ierr /= 0) goto 600
1274      i=i+1
1275      call MPI_Isend(qvh(:,:,:,mind),d3s2,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1276      if (mp_ierr /= 0) goto 600
1277      i=i+1
1278      call MPI_Isend(qv(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1279      if (mp_ierr /= 0) goto 600
1280      i=i+1
1281      call MPI_Isend(pv(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1282      if (mp_ierr /= 0) goto 600
1283      i=i+1
1284      call MPI_Isend(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1285      i=i+1
1286      if (mp_ierr /= 0) goto 600
1287
1288      call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1289      if (mp_ierr /= 0) goto 600
1290      i=i+1
1291      call MPI_Isend(vdep(:,:,:,mind),d2s2,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1292      if (mp_ierr /= 0) goto 600
1293      i=i+1
1294      call MPI_Isend(ps(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1295      if (mp_ierr /= 0) goto 600
1296      i=i+1
1297      call MPI_Isend(sd(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1298      if (mp_ierr /= 0) goto 600
1299      i=i+1
1300      call MPI_Isend(tcc(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1301      if (mp_ierr /= 0) goto 600
1302      i=i+1
1303      call MPI_Isend(tt2(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1304      if (mp_ierr /= 0) goto 600
1305      i=i+1
1306      call MPI_Isend(td2(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1307      if (mp_ierr /= 0) goto 600
1308      i=i+1
1309      call MPI_Isend(lsprec(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1310      if (mp_ierr /= 0) goto 600
1311      i=i+1
1312      call MPI_Isend(convprec(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1313      if (mp_ierr /= 0) goto 600
1314      i=i+1
1315      call MPI_Isend(ustar(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1316      if (mp_ierr /= 0) goto 600
1317      i=i+1
1318      call MPI_Isend(wstar(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1319      if (mp_ierr /= 0) goto 600
1320      i=i+1
1321      call MPI_Isend(hmix(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1322      if (mp_ierr /= 0) goto 600
1323      i=i+1
1324      call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1325      if (mp_ierr /= 0) goto 600
1326      i=i+1
1327      call MPI_Isend(oli(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1328      if (mp_ierr /= 0) goto 600
1329
1330! Send cloud water if it exists. The request counter is advanced only inside
! this block; the receiving end posts the matching requests only when
! readclouds is true, so the counters stay in step.
1331      if (readclouds) then
1332        i=i+1
1333        call MPI_Isend(clwc(:,:,:,mind),d3s1,mp_pp,dest,tm1,&
1334             &MPI_COMM_WORLD,reqs(i),mp_ierr)
1335        if (mp_ierr /= 0) goto 600
1336        i=i+1
1337
1338        call MPI_Isend(ciwc(:,:,:,mind),d3s1,mp_pp,dest,tm1,&
1339             &MPI_COMM_WORLD,reqs(i),mp_ierr)
1340        if (mp_ierr /= 0) goto 600
1341      ! else
1342      !   i=i+2
1343      end if
1344
1345    end do
1346
1347    if (mp_measure_time) call mpif_mtime('commtime',1)
1348
1349    goto 601
1350
1351600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> mp_ierr /= 0", mp_ierr
1352    stop
1353
1354601 end subroutine mpif_gf_send_vars_async
1355
1356
1357  subroutine mpif_gf_recv_vars_async(memstat)
1358!*******************************************************************************
1359! DESCRIPTION
1360!   Receive 'getfield' variables from reader process.
1361!   Called from timemanager by all processes except reader
1362!
1363! NOTE
1364!   This version uses asynchronous communications.
1365!
1366! VARIABLES
1367!   memstat -- input, used to resolve windfield index being received
1368!
1369! TODO
1370!   Transfer cloud water/ice
1371!
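! USAGE (sketch; assumes the asynchronous calling pattern in timemanager,
! using the module flags lmp_sync and id_read as elsewhere in this file):
!   if (.not.lmp_sync) then
!     if (mp_pid.ne.id_read) call mpif_gf_recv_vars_async(memstat) ! post receives
!     ! ... particle computations proceed while transfers are in flight ...
!     call mpif_gf_request      ! all processes wait for their transfers
!   end if
!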
1372!*******************************************************************************
1373    use com_mod
1374
1375    implicit none
1376
1377    integer, intent(in) :: memstat
1378    integer :: mind,j
1379
1380! Common array sizes used for communications
1381    integer :: d3s1 = nxmax*nymax*nzmax
1382    integer :: d3s2 = nxmax*nymax*nuvzmax
1383    integer :: d2s1 = nxmax*nymax
1384    integer :: d2s2 = nxmax*nymax*maxspec
1385    integer :: d1_size1 = maxwf
1386
1387!    integer :: d3s1,d3s2,d2s1,d2s2
1388!*******************************************************************************
1389
1390! :TODO: don't need these
1391    ! d3s1=d3_size1
1392    ! d3s2=d3_size2
1393    ! d2s1=d2_size1
1394    ! d2s2=d2_size2
1395
1396! At the time this immediate receive is posted, memstat is the state of
1397! windfield indices at the previous/current time. From this, the future
1398! state is deduced.
1399    if (memstat.eq.32) then
1400! last read was synchronous to indices 1 and 2, 3 is free
1401      mind=3
1402    else if (memstat.eq.17) then
1403! last read was asynchronous to index 3, 1 is free
1404      mind=1
1405    else if (memstat.eq.18) then
1406! last read was asynchronous to index 1, 2 is free
1407      mind=2
1408    else if (memstat.eq.19) then
1409! last read was asynchronous to index 2, 3 is free
1410      mind=3
    else
      write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> ERROR: &
           & invalid memstat"
      stop
1411    end if
1412
1413    if (mp_dev_mode) then
1414      if (mp_pid.eq.id_read) then
1415        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async'
1416      end if
1417    end if
1418
1419! Time for MPI communications
1420    if (mp_measure_time) call mpif_mtime('commtime',0)
1421
1422    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_async, memstat:', memstat
1423
1424! Initiate receiving of data
1425!***************************
1426
1427! Get MPI tags/requests for communications
1428    j=mp_pid*nvar_async
1429    call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1430         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1431    if (mp_ierr /= 0) goto 600
1432    j=j+1
1433    call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1434         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1435    if (mp_ierr /= 0) goto 600
1436    j=j+1
1437    call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1438         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1439    if (mp_ierr /= 0) goto 600
1440    j=j+1
1441    call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1442         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1443    if (mp_ierr /= 0) goto 600
1444    j=j+1
1445    call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1446         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1447    if (mp_ierr /= 0) goto 600
1448    j=j+1
1449    call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1450         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1451    if (mp_ierr /= 0) goto 600
1452    j=j+1
1453    call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1454         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1455    if (mp_ierr /= 0) goto 600
1456    j=j+1
1457    call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1458         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1459    if (mp_ierr /= 0) goto 600
1460    j=j+1
1461    call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,&
1462         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1463    if (mp_ierr /= 0) goto 600
1464    j=j+1
1465    call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,&
1466         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1467    if (mp_ierr /= 0) goto 600
1468    j=j+1
1469
1470    call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1471         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1472    if (mp_ierr /= 0) goto 600
1473    j=j+1
1474    call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1475         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1476    if (mp_ierr /= 0) goto 600
1477    j=j+1
1478    call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
1479         &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1480    if (mp_ierr /= 0) goto 600
1481    j=j+1
1482
1483    call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1484         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1485    if (mp_ierr /= 0) goto 600
1486    j=j+1
1487    call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_pp,id_read,MPI_ANY_TAG,&
1488         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1489    if (mp_ierr /= 0) goto 600
1490    j=j+1
1491    call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1492         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1493    if (mp_ierr /= 0) goto 600
1494    j=j+1
1495    call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1496         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1497    if (mp_ierr /= 0) goto 600
1498    j=j+1
1499    call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1500         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1501    if (mp_ierr /= 0) goto 600
1502    j=j+1
1503    call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1504         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1505    if (mp_ierr /= 0) goto 600
1506    j=j+1
1507    call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1508         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1509    if (mp_ierr /= 0) goto 600
1510    j=j+1
1511    call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1512         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1513    if (mp_ierr /= 0) goto 600
1514    j=j+1
1515    call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1516         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1517    if (mp_ierr /= 0) goto 600
1518    j=j+1
1519    call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1520         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1521    if (mp_ierr /= 0) goto 600
1522    j=j+1
1523    call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1524         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1525    if (mp_ierr /= 0) goto 600
1526    j=j+1
1527    call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1528         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1529    if (mp_ierr /= 0) goto 600
1530    j=j+1
1531    call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1532         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1533    if (mp_ierr /= 0) goto 600
1534    j=j+1
1535    call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1536         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1537    if (mp_ierr /= 0) goto 600
1538
1539
1540! Post receive requests for cloud water. If these data are not sent, the requests would have to be cancelled.
1541! For now, assume that either all time steps carry cloud water or none do.
1542    if (readclouds) then
1543      j=j+1
1544      call MPI_Irecv(clwc(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1545           &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1546      if (mp_ierr /= 0) goto 600
1547      j=j+1
1548      call MPI_Irecv(ciwc(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1549           &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1550      if (mp_ierr /= 0) goto 600
1551    end if
1552
1553
1554    if (mp_measure_time) call mpif_mtime('commtime',1)
1555
1556    goto 601
1557
1558600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> MPI ERROR ", mp_ierr
1559    stop
1560
1561601 end subroutine mpif_gf_recv_vars_async
1562
1563
1564  subroutine mpif_gf_request
1565!*******************************************************************************
1566! DESCRIPTION
1567!   Check for completion of MPI_Isend/MPI_Irecv data transfer.
1568!   
1569!   
1570! NOTE
1571!   Implicit synchronisation between all processes takes place here,
1572!   since each process blocks until its outstanding transfers complete.
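!
!   For reference: MPI_Waitall blocks until every request in the first n_req
!   entries of reqs has completed; entries never posted by this process are
!   expected to be MPI_REQUEST_NULL, for which completion is immediate, and
!   MPI_STATUSES_IGNORE avoids allocating a status array.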
1573!
1574!*******************************************************************************
1575    use com_mod, only: readclouds
1576
1577    implicit none
1578
1579
1580    integer :: n_req
1581    integer :: i
1582
1583!***********************************************************************
1584
1585    n_req=nvar_async*mp_np
1586
1587    if (mp_measure_time) call mpif_mtime('commtime',0)
1588
1589!    call MPI_Wait(rm1,MPI_STATUS_IGNORE,mp_ierr)
1590
1591! TODO: cancel recv request if readclouds=.false.
1592!    if (readclouds) then
1593    call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
1594!    endif
1595    ! else
1596    !   do i = 0, nvar_async*mp_np-1
1597    !     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
1598    !       call MPI_Cancel(reqs(i),mp_ierr)
1599    !       cycle
1600    !     end if
1601    !     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
1602    !   end do
1603    ! end if
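    ! A possible shape for the TODO above (sketch, not active code): a posted
    ! receive that will never be matched can be cancelled and then completed,
    ! e.g.
    !   call MPI_Cancel(reqs(k),mp_ierr)
    !   call MPI_Wait(reqs(k),MPI_STATUS_IGNORE,mp_ierr)
    ! where k would index the clwc/ciwc requests of this process.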
1604
1605    if (mp_ierr /= 0) goto 600
1606
1607    if (mp_measure_time) call mpif_mtime('commtime',1)
1608
1609    goto 601
1610
1611600 write(*,*) "#### mpi_mod::mpif_gf_request> MPI ERROR ", mp_ierr
1612    stop
1613
1614601 end subroutine mpif_gf_request
1615
1616
1617  subroutine mpif_tm_reduce_grid
1618!***********************************************************************
1619! Collect the output grid on PID 0, summing contributions from all processes.
1620!
1621! NOTE
1622!   - Older MPI implementations may lack the MPI_IN_PLACE operation.
1623!     If so, use variant 1) below and adjust unc_mod accordingly.
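!   - For reference, the MPI_IN_PLACE semantics used in variant 2): on the
!     root rank the send-buffer argument is replaced by MPI_IN_PLACE and the
!     reduction result is accumulated directly into the receive buffer; on
!     non-root ranks the receive-buffer argument is not significant.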
1624!
1625!***********************************************************************
1626    use com_mod
1627    use unc_mod
1628    use par_mod
1629
1630    implicit none
1631
1632    integer :: grid_size2d,grid_size3d
1633    integer, parameter :: rcpt_size=maxreceptor*maxspec
1634
1635!**********************************************************************
1636    grid_size2d=numxgrid*numygrid*maxspec* &
1637         & maxpointspec_act*nclassunc*maxageclass
1638    grid_size3d=numxgrid*numygrid*numzgrid*maxspec* &
1639         & maxpointspec_act*nclassunc*maxageclass
1640
1641
1642! Time for MPI communications
1643    if (mp_measure_time) call mpif_mtime('commtime',0)
1644
1645! 1) Using a separate grid (gridunc0) for received values
1646! call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_pp, MPI_SUM, id_root, &
1647!      & mp_comm_used, mp_ierr)
1648! if (mp_ierr /= 0) goto 600
1649
1650! 2) Using in-place reduction
1651    if (lroot) then
1652      call MPI_Reduce(MPI_IN_PLACE, gridunc, grid_size3d, mp_pp, MPI_SUM, id_root, &
1653           & mp_comm_used, mp_ierr)
1654      if (mp_ierr /= 0) goto 600
1655    else
1656      call MPI_Reduce(gridunc, gridunc, grid_size3d, mp_pp, MPI_SUM, id_root, &
1657           & mp_comm_used, mp_ierr)
1658    end if
1659
1660    if ((WETDEP).and.(ldirect.gt.0)) then
1661      call MPI_Reduce(wetgridunc, wetgridunc0, grid_size2d, mp_pp, MPI_SUM, id_root, &
1662           & mp_comm_used, mp_ierr)
1663      if (mp_ierr /= 0) goto 600
1664    end if
1665
1666    if ((DRYDEP).and.(ldirect.gt.0)) then
1667      call MPI_Reduce(drygridunc, drygridunc0, grid_size2d, mp_pp, MPI_SUM, id_root, &
1668           & mp_comm_used, mp_ierr)
1669      if (mp_ierr /= 0) goto 600
1670    end if
1671
1672! Receptor concentrations   
1673    if (lroot) then
1674      call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_pp,MPI_SUM,id_root, &
1675           & mp_comm_used,mp_ierr)
1676      if (mp_ierr /= 0) goto 600
1677    else
1678      call MPI_Reduce(creceptor,creceptor,rcpt_size,mp_pp,MPI_SUM,id_root, &
1679           & mp_comm_used,mp_ierr)
1680    end if
1681
1682    if (mp_measure_time) call mpif_mtime('commtime',1)
1683
1684    goto 601
1685
1686600 write(*,*) "mpi_mod::mpif_tm_reduce_grid> mp_ierr /= 0", mp_ierr
1687    stop
1688
1689601 end subroutine mpif_tm_reduce_grid
1690
1691
1692  subroutine mpif_tm_reduce_grid_nest
1693!***********************************************************************
1694! Collect the nested output grids on PID 0, summing contributions from all processes.
1695!
1696! NOTE
1697!   - Compiling with 'fcheck=all' (gfortran) will cause a run-time error
1698!     here, because wetgriduncn0 is only allocated on the root process.
1699!     The MPI calls may be valid, but the bounds checking rejects them.
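!   - A possible workaround (sketch, not applied here): allocate the *n0
!     receive arrays with minimal size on non-root processes as well, or
!     pass a small dummy array as the receive buffer there; MPI_Reduce only
!     uses the receive buffer on the root rank.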
1700!
1701!***********************************************************************
1702    use com_mod
1703    use unc_mod
1704    use par_mod
1705
1706    implicit none
1707
1708    integer :: grid_size2d,grid_size3d
1709
1710!**********************************************************************
1711
1712    grid_size3d=numxgridn*numygridn*numzgrid*maxspec* &
1713         & maxpointspec_act*nclassunc*maxageclass
1714    grid_size2d=numxgridn*numygridn*maxspec* &
1715         & maxpointspec_act*nclassunc*maxageclass
1716
1717
1718! Time for MPI communications
1719    if (mp_measure_time) call mpif_mtime('commtime',0)
1720
1721! Using a separate grid (gridunc0) for received values, for debugging
1722! call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_pp, MPI_SUM, id_root, &
1723!      & mp_comm_used, mp_ierr)
1724! if (mp_ierr /= 0) goto 600
1725
1726! Using in-place reduction
1727    if (lroot) then
1728      call MPI_Reduce(MPI_IN_PLACE, griduncn, grid_size3d, mp_pp, MPI_SUM, id_root, &
1729           & mp_comm_used, mp_ierr)
1730      if (mp_ierr /= 0) goto 600
1731    else
1732      call MPI_Reduce(griduncn, griduncn, grid_size3d, mp_pp, MPI_SUM, id_root, &
1733           & mp_comm_used, mp_ierr)
1734    end if
1735
1736
1737    if ((WETDEP).and.(ldirect.gt.0)) then
1738      call MPI_Reduce(wetgriduncn, wetgriduncn0, grid_size2d, mp_pp, MPI_SUM, id_root, &
1739           & mp_comm_used, mp_ierr)
1740      if (mp_ierr /= 0) goto 600
1741    end if
1742
1743    if ((DRYDEP).and.(ldirect.gt.0)) then
1744      call MPI_Reduce(drygriduncn, drygriduncn0, grid_size2d, mp_pp, MPI_SUM, id_root, &
1745           & mp_comm_used, mp_ierr)
1746      if (mp_ierr /= 0) goto 600
1747    end if
1748
1749    if (mp_measure_time) call mpif_mtime('commtime',1)
1750
1751    goto 601
1752
1753600 write(*,*) "mpi_mod::mpif_tm_reduce_grid_nest> mp_ierr /= 0", mp_ierr
1754    stop
1755
1756601 end subroutine mpif_tm_reduce_grid_nest
1757
1758
1759  subroutine mpif_mtime(ident,imode)
1760!***********************************************************************
1761! Measure CPU/Wall time in various parts of the code
1762!
1763! VARIABLES
1764!   ident        character, identifies routine to measure
1765!   imode        integer, 0:start clock(s)  1: stop clock(s)
1766!
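! USAGE (sketch):
!   call mpif_mtime('commtime',0)   ! start the CPU and wall clocks
!   ! ... timed section, e.g. MPI communication ...
!   call mpif_mtime('commtime',1)   ! stop and add to the running totals
!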
1767!***********************************************************************
1768    implicit none
1769
1770    character(LEN=*), intent(in) :: ident
1771    integer, intent(in) :: imode
1772
1773!***********************************************************************
1774
1775    select case(ident)
1776
1777    case ('timemanager')
1778      if (imode.eq.0) then
1779        call cpu_time(tm_tot_beg)
1780      else
1781        call cpu_time(tm_tot_end)
1782        tm_tot_total = tm_tot_total + (tm_tot_end-tm_tot_beg)
1783      end if
1784
1785    case ('wetdepo')
1786      if (imode.eq.0) then
1787        mp_wetdepo_wtime_beg = mpi_wtime()
1788        call cpu_time(mp_wetdepo_time_beg)
1789      else
1790        mp_wetdepo_wtime_end = mpi_wtime()
1791        call cpu_time(mp_wetdepo_time_end)
1792
1793        mp_wetdepo_wtime_total = mp_wetdepo_wtime_total + &
1794             &(mp_wetdepo_wtime_end - mp_wetdepo_wtime_beg)
1795        mp_wetdepo_time_total = mp_wetdepo_time_total + &
1796             &(mp_wetdepo_time_end - mp_wetdepo_time_beg)
1797      end if
1798
1799    case ('getfields')
1800      if (imode.eq.0) then
1801        mp_getfields_wtime_beg = mpi_wtime()
1802        call cpu_time(mp_getfields_time_beg)
1803      else
1804        mp_getfields_wtime_end = mpi_wtime()
1805        call cpu_time(mp_getfields_time_end)
1806
1807        mp_getfields_wtime_total = mp_getfields_wtime_total + &
1808             &(mp_getfields_wtime_end - mp_getfields_wtime_beg)
1809        mp_getfields_time_total = mp_getfields_time_total + &
1810             &(mp_getfields_time_end - mp_getfields_time_beg)
1811      end if
1812
1813    case ('partloop1')
1814      if (imode.eq.0) then
1815        call cpu_time(tm_nploop_beg)
1816      else
1817        call cpu_time(tm_nploop_end)
1818        tm_nploop_total = tm_nploop_total + (tm_nploop_end - tm_nploop_beg)
1819      end if
1820
1821    case ('conccalc')
1822      if (imode.eq.0) then
1823        call cpu_time(mp_conccalc_time_beg)
1824      else
1825        call cpu_time(mp_conccalc_time_end)
1826        mp_conccalc_time_total = mp_conccalc_time_total + mp_conccalc_time_end - &
1827             &mp_conccalc_time_beg
1828      end if
1829    case ('rootonly')
1830      if (imode.eq.0) then
1831        call cpu_time(mp_root_time_beg)
1832        mp_root_wtime_beg = mpi_wtime()
1833      else
1834        call cpu_time(mp_root_time_end)
1835        mp_root_wtime_end = mpi_wtime()
1836        mp_root_time_total = mp_root_time_total + &
1837             &(mp_root_time_end - mp_root_time_beg)
1838        mp_root_wtime_total = mp_root_wtime_total + &
1839             &(mp_root_wtime_end - mp_root_wtime_beg)
1840      end if
1841
1842    case ('iotime')
1843      if (imode.eq.0) then
1844        mp_io_wtime_beg = mpi_wtime()
1845        call cpu_time(mp_io_time_beg)
1846      else
1847        mp_io_wtime_end = mpi_wtime()
1848        call cpu_time(mp_io_time_end)
1849
1850        mp_io_wtime_total = mp_io_wtime_total + (mp_io_wtime_end - &
1851             & mp_io_wtime_beg)
1852        mp_io_time_total = mp_io_time_total + (mp_io_time_end - &
1853             & mp_io_time_beg)
1854      end if
1855
1856    case ('readwind')
1857      if (imode.eq.0) then
1858        call cpu_time(mp_readwind_time_beg)
1859        mp_readwind_wtime_beg = mpi_wtime()
1860      else
1861        call cpu_time(mp_readwind_time_end)
1862        mp_readwind_wtime_end = mpi_wtime()
1863
1864        mp_readwind_time_total = mp_readwind_time_total + &
1865             &(mp_readwind_time_end - mp_readwind_time_beg)
1866        mp_readwind_wtime_total = mp_readwind_wtime_total + &
1867             &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
1868      end if
1869
1870    case ('commtime')
1871      if (imode.eq.0) then
1872        call cpu_time(mp_comm_time_beg)
1873        mp_comm_wtime_beg = mpi_wtime()
1874      else
1875        call cpu_time(mp_comm_time_end)
1876        mp_comm_wtime_end = mpi_wtime()
1877        mp_comm_time_total = mp_comm_time_total + &
1878             &(mp_comm_time_end - mp_comm_time_beg)
1879        mp_comm_wtime_total = mp_comm_wtime_total + &
1880             &(mp_comm_wtime_end - mp_comm_wtime_beg)
1881      end if
1882
1883    case ('flexpart')
1884      if (imode.eq.0) then
1885        mp_total_wtime_beg=mpi_wtime()
1886      else
1887        mp_total_wtime_end=mpi_wtime()
1888        mp_total_wtime_total = mp_total_wtime_end-mp_total_wtime_beg
1889      end if
1890
1891    case default
1892      write(*,*) 'mpi_mod::mpif_mtime> WARNING: unknown case identifier'
1893
1894    end select
1895
1896
1897  end subroutine mpif_mtime
1898
1899
1900  subroutine mpif_finalize
1901!***********************************************************************
1902! Finalize MPI
1903! Optionally print detailed time diagnostics
1904!***********************************************************************
1905    implicit none
1906
1907    integer :: ip,j,r
1908
1909!***********************************************************************
1910
1911    if (mp_measure_time) then
1912      do ip=0, mp_np-1
1913        call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
1914
1915        if (ip == mp_pid) then
1916          write(*,FMT='(72("#"))')
1917          write(*,FMT='(A60,I3)') 'STATISTICS FOR MPI PROCESS:', mp_pid
1918          if (ip == 0) then
1919            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR ROOT-ONLY (PID 0) &
1920                 &SECTIONS: ', mp_root_time_total
1921            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ROOT-ONLY (PID 0) &
1922                 &SECTIONS:', mp_root_wtime_total
1923          end if
1924          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FLEXPART: ' &
1925               &, mp_total_wtime_total
1926          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI COMMUNICATIONS:',&
1927               & mp_comm_time_total
1928          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI COMMUNICATIONS:',&
1929               & mp_comm_wtime_total
1930          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI BARRIERS:',&
1931               & mp_barrier_time_total
1932          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI BARRIERS:',&
1933               & mp_barrier_wtime_total
1934          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER:',&
1935               & tm_tot_total
1936          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER/NUMPART LOOP:',&
1937               & tm_nploop_total
1938          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ADVANCE:',&
1939               & mp_advance_wtime_total
1940          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR GETFIELDS:',&
1941               & mp_getfields_wtime_total
1942          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
1943               & mp_getfields_time_total
1944          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
1945               & mp_readwind_wtime_total
1946          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
1947               & mp_readwind_time_total
1948          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
1949               & mp_io_wtime_total
1950          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR FILE IO:',&
1951               & mp_io_time_total
1952          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR WETDEPO:',&
1953               & mp_wetdepo_wtime_total
1954          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR WETDEPO:',&
1955               & mp_wetdepo_time_total
1956          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',&
1957               & mp_conccalc_time_total
1958! NB: 'call flush()' is a compiler extension (not standard Fortran);
! the Fortran 2003 FLUSH statement is the portable alternative
1959          call flush()
1960        end if
1961      end do
1962    end if
1963
1964! j=mp_pid*nvar_async
1965! In the implementation with 3 fields, the processes may have posted
1966! MPI_Irecv requests that should be cancelled here
1967!! TODO:
1968    ! if (.not.lmp_sync) then
1969    !   r=mp_pid*nvar_async
1970    !   do j=r,r+nvar_async-1
1971    !     call MPI_Cancel(reqs(j),mp_ierr)
1972    !     if (mp_ierr /= 0) write(*,*) '#### mpif_finalize::MPI_Cancel> ERROR ####'
1973    !   end do
1974    ! end if
1975
1976    call MPI_FINALIZE(mp_ierr)
1977    if (mp_ierr /= 0) then
1978      write(*,*) '#### mpif_finalize::MPI_FINALIZE> MPI ERROR ', mp_ierr, ' ####'
1979      stop
1980    end if
1981
1982
1983  end subroutine mpif_finalize
1984
1985
1986  subroutine get_lun(my_lun)
1987!***********************************************************************
1988! get_lun:
1989!   Starting from 100, get next free logical unit number
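!
!   Example (sketch; the file name is illustrative):
!     call get_lun(dat_lun)
!     open(UNIT=dat_lun, FILE='debug_dump.dat', ACTION='WRITE', STATUS='REPLACE')
!   On Fortran 2008 compilers, OPEN with NEWUNIT= provides the same service
!   without manual bookkeeping.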
1990!***********************************************************************
1991
1992    implicit none
1993
1994    integer, intent(inout) :: my_lun
1995    integer, save :: free_lun=100
1996    logical :: exists, iopen
1997
1998!***********************************************************************
1999
2000    loop1: do
2001      inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
2002      if (exists .and. .not.iopen) exit loop1
2003      free_lun = free_lun+1
2004    end do loop1
2005    my_lun = free_lun
2006
2007  end subroutine get_lun
2008
2009
2010  subroutine write_data_dbg(array_in, array_name, tstep, ident)
2011!***********************************************************************
2012! Write one-dimensional arrays to disk (for debugging purposes)
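!
! Example (sketch; 'parr' stands for any 1-D real array and 'itime' for the
! current time step):
!   call write_data_dbg(parr, 'parr', itime, 'timemanager')
! which writes the data unformatted to a file named like
! 'parr-<itime>-timemanager-np<NN>.dat'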
2013!***********************************************************************
2014    implicit none
2015
2016    real, intent(in), dimension(:) :: array_in
2017    integer, intent(in) :: tstep
2018    integer :: lios
2019    character(LEN=*), intent(in) :: ident, array_name
2020
2021    character(LEN=8) :: c_ts
2022    character(LEN=40) :: fn_1, fn_2
2023
2024!***********************************************************************
2025
2026    write(c_ts, FMT='(I8.8,BZ)') tstep
2027    fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
2028    write(c_ts, FMT='(I2.2,BZ)') mp_np
2029    fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'
2030
2031    call get_lun(dat_lun)
2032    open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
2033         FORM='UNFORMATTED', STATUS='REPLACE')
2034    write(UNIT=dat_lun, IOSTAT=lios) array_in
2035    close(UNIT=dat_lun)
2036
2037  end subroutine write_data_dbg
2038
2039
2040end module mpi_mod