source: flexpart.git/src/mpi_mod.f90 @ a1f4dd6

Last change: a1f4dd6, checked in by Espen Sollum ATMOS <eso@…>, 8 years ago

Fixed a bug where receptor arrays were not properly zeroed

1!**********************************************************************
2! Copyright 1998,1999,2000,2001,2002,2005,2007,2008,2009,2010         *
3! Andreas Stohl, Petra Seibert, A. Frank, Gerhard Wotawa,             *
4! Caroline Forster, Sabine Eckhardt, John Burkhart, Harald Sodemann   *
5!                                                                     *
6! This file is part of FLEXPART.                                      *
7!                                                                     *
8! FLEXPART is free software: you can redistribute it and/or modify    *
9! it under the terms of the GNU General Public License as published by*
10! the Free Software Foundation, either version 3 of the License, or   *
11! (at your option) any later version.                                 *
12!                                                                     *
13! FLEXPART is distributed in the hope that it will be useful,         *
14! but WITHOUT ANY WARRANTY; without even the implied warranty of      *
15! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the       *
16! GNU General Public License for more details.                        *
17!                                                                     *
18! You should have received a copy of the GNU General Public License   *
19! along with FLEXPART.  If not, see <http://www.gnu.org/licenses/>.   *
20!**********************************************************************
21
22module mpi_mod
23
24!*****************************************************************************
25!                                                                            *
26!  DESCRIPTION                                                               *
27!    This module contains subroutines and common variables used for the      *
28!    MPI parallelization of FLEXPART.                                        *
29!                                                                            *
30!  NOTE                                                                      *
31!    Depending on the MPI library installed on your system (e.g. mpich2,     *
32!    OpenMPI) you may need to choose below in this file between              *
33!      use mpi                                                               *
34!    (if the MPI library comes with the file 'mpi.mod'); or                  *
35!      include 'mpif.h'                                                      *
36!                                                                            *
37!                                                                            *
38!*****************************************************************************
39!                                                                            *
40! Variables:                                                                 *
41!                                                                            *
42! mp_ierr                 MPI error code                                     *
43! mp_np                   Number of MPI processes                            *
44! mp_pid                  Process ID of each MPI process                     *
45! mp_seed                 Parameter for random number seed                   *
46! read_grp_min            Minimum number of processes at which one will be   *
47!                         used as reader                                     *
48! numpart_mpi,            Number of particles per node                       *
49! maxpart_mpi                                                                *
50! mp_partid               MPI process ID for particle calculation            *
51! mp_partgroup_           Refers to the subset of processors performing      *
52!                         loops over particles. Will be all processes        *
53!                         unless a dedicated process runs getfields/readwind *
54! lmp_sync                If .false., use asynchronous MPI                   *
55!                                                                            *
56!                                                                            *
57!                                                                            *
58!                                                                            *
59!*****************************************************************************
60
61  use mpi
62  use par_mod, only: dp, sp
63  use com_mod, only: lroot
64
65  implicit none
66
67!  include 'mpif.h'
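!  (If the installed MPI library does not provide 'mpi.mod', comment out the
!   'use mpi' statement and uncomment the "include 'mpif.h'" line above,
!   as described in the NOTE in the module header.)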
68
69  public
70
71! Set aside a process for reading windfields if using at least this many processes
72!==================================================
73  integer, parameter, private :: read_grp_min=4
74!==================================================
75
76! Variables for each MPI process in the world group
77  integer :: mp_ierr, mp_np, mp_pid, mp_partid
78  integer, private :: world_group_id
79
80! Variables for MPI processes in the 'particle' group
81  integer, allocatable, dimension(:) :: mp_partgroup_rank
82  integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
83
84  integer :: mp_seed=0
85  integer, parameter :: mp_sp=MPI_REAL4, mp_dp=MPI_REAL8
86  integer, parameter :: mp_pp=mp_sp
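! mp_pp is the MPI real type used for most array transfers below; it is set
! to single precision (mp_sp) here, while xtra1/ytra1 are transferred as mp_dp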
87  integer, parameter :: id_root=0 ! master process
88
89! MPI tags/requests for send/receive operation
90  integer :: tm1
91  integer, parameter :: nvar_async=27 !29 :DBG:
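! nvar_async counts the fields sent per destination in mpif_gf_send_vars_async
! (27 standard fields; the commented 29 would also include clwc/ciwc)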
92!integer, dimension(:), allocatable :: tags
93  integer, dimension(:), allocatable :: reqs
94
95
96  integer :: id_read   ! readwind/getfield process
97  integer :: numpart_mpi,maxpart_mpi ! number of particles per node
98  integer :: tot_numpart=0
99  integer :: mp_comm_used ! global or subgroup communicator
100
101  logical :: lmpreader=.false. ! is set to true for reading process(es) only.
102  logical :: lmp_use_reader=.false. ! true if separate readwind process is used
103
104! true if only using synchronous MPI send/recv:
105! If setting this to .false., numwfmem must be set to 3
106!===============================================================================
107  logical :: lmp_sync=.true.
108!===============================================================================
109
110! mp_dbg_mode       Used for debugging MPI.
111! mp_dev_mode       various checks related to debugging the parallel code
112! mp_dbg_out        write some arrays to data file for debugging
113! mp_measure_time   Measure cpu/wall time, write out at end of run
114! mp_time_barrier   Measure MPI barrier time
115! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
116!                   to standard output (this does *not* otherwise affect the simulation)
117  logical, parameter :: mp_dbg_mode = .false.
118  logical, parameter :: mp_dev_mode = .false.
119  logical, parameter :: mp_dbg_out = .false.
120  logical, parameter :: mp_time_barrier=.true.
121  logical, parameter :: mp_measure_time=.true.
122  logical, parameter :: mp_exact_numpart=.true.
123
124! for measuring CPU/Wall time
125  real(sp) :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.
126  real(dp) :: mp_comm_wtime_beg, mp_comm_wtime_end, mp_comm_wtime_total=0.
127  real(sp) :: mp_root_time_beg, mp_root_time_end, mp_root_time_total=0.
128  real(dp) :: mp_root_wtime_beg, mp_root_wtime_end, mp_root_wtime_total=0.
129  real(sp) :: mp_barrier_time_beg, mp_barrier_time_end, mp_barrier_time_total=0.
130  real(dp) :: mp_barrier_wtime_beg, mp_barrier_wtime_end, mp_barrier_wtime_total=0.
131  real(sp) :: tm_nploop_beg, tm_nploop_end, tm_nploop_total=0.
132  real(sp) :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
133  real(dp) :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
134  real(sp) :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
135  real(dp) :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
136  real(sp) :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
137  real(dp) :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
138  real(sp) :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
139  real(dp) :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.
140  real(sp) :: mp_wetdepo_time_beg, mp_wetdepo_time_end, mp_wetdepo_time_total=0.
141  real(dp) :: mp_advance_wtime_beg, mp_advance_wtime_end, mp_advance_wtime_total=0.
142  real(dp) :: mp_conccalc_time_beg, mp_conccalc_time_end, mp_conccalc_time_total=0.
143  real(dp) :: mp_total_wtime_beg, mp_total_wtime_end, mp_total_wtime_total=0.
144  real(dp) :: mp_vt_wtime_beg, mp_vt_wtime_end, mp_vt_wtime_total
145  real(sp) :: mp_vt_time_beg, mp_vt_time_end, mp_vt_time_total
146
147! dat_lun           logical unit number for i/o
148  integer, private :: dat_lun
149
150contains
151
152  subroutine mpif_init
153!***********************************************************************
154! DESCRIPTION
155!   Initialize MPI.
156!   
157!   Create the global communicator MPI_COMM_WORLD
158!   If using a separate MPI process for getfields/readwind, a subgroup
159!   is created for the other processes.
160!
161! VARIABLES
162!   mpi_mode    default 0, set to 2/3 if running MPI version
163!   mp_np       number of running processes, decided at run-time
164!***********************************************************************
165    use par_mod, only: maxpart, numwfmem
166    use com_mod, only: mpi_mode
167
168    implicit none
169
170    integer :: i,j,s,addmaxpart=0
171
172! each process gets an ID (mp_pid) in the range 0,..,mp_np-1
173    call MPI_INIT(mp_ierr)
174    if (mp_ierr /= 0) goto 100
175    call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)
176    if (mp_ierr /= 0) goto 100
177    call MPI_COMM_SIZE(MPI_COMM_WORLD, mp_np, mp_ierr)
178    if (mp_ierr /= 0) goto 100
179
180
181! this variable is used to handle subroutines common to parallel/serial version
182    if (lmp_sync) then
183      mpi_mode=2 ! hold 2 windfields in memory
184    else
185      mpi_mode=3 ! hold 3 windfields in memory
186    end if
187
188    if (mp_pid.ne.0) then
189      lroot = .false.
190    end if
191
192! Check for sensible combination of parameters
193!*********************************************
194
195    if (.not.lmp_sync.and.numwfmem.ne.3.and.lroot) then
196      write(*,FMT='(80("#"))')
197      write(*,*) '#### mpi_mod::mpif_init> ERROR: ', &
198           & 'numwfmem must be set to 3 for asynchronous reading ####'
199      write(*,FMT='(80("#"))')
200      stop
201    else if (lmp_sync.and.numwfmem.ne.2.and.lroot) then
202      write(*,FMT='(80("#"))')
203      write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
204           & 'numwfmem should be set to 2 for synchronous'
205      write(*,*) ' reading. Results will still be valid, but an unnecessary '
206      write(*,*) 'amount of memory is being allocated.'
207      write(*,FMT='(80("#"))')
208! Force "synchronized" version if all processes will call getfields
209    else if (.not.lmp_sync.and.mp_np.lt.read_grp_min) then
210      if (lroot) then
211        write(*,FMT='(80("#"))')
212        write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
213             & 'all procs call getfields. Setting lmp_sync=.true.'
214        write(*,FMT='(80("#"))')
215      end if
216      lmp_sync=.true. ! :DBG: eso fix this...
217    end if
218
219! TODO: Add more warnings for unimplemented flexpart features
220
221! Set ID of process that calls getfield/readwind.
222! Using the last process in the group ensures statistically identical results
223! to running with one process fewer but without using a separate read process
224!**********************************************************************
225
226!    id_read = min(mp_np-1, 1)
227    id_read = mp_np-1
228
229    if (mp_pid.eq.id_read) lmpreader=.true.
230
231    call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)
232
233! Create an MPI group/communicator that will calculate trajectories.
234! Skip this step if program is run with only a few processes
235!************************************************************************
236    allocate(mp_partgroup_rank(0:mp_np-2))
237
238! This allows checking for allocation of arrays when no subgroup is used
239    mp_partgroup_pid=0
240
241    if (read_grp_min.lt.2) then
242      write(*,*) '#### mpi_mod::mpif_init> ERROR ####', &
243           & 'read_grp_min should be at least 2. Exiting'
244      stop
245    end if
246
247    if (mp_np.ge.read_grp_min) then
248      lmp_use_reader = .true.
249
250! Map the subgroup IDs= 0,1,2,...,mp_np-2, skipping reader process
251      j=-1
252      do i=0, mp_np-2 !loop over all (subgroup) IDs
253        j=j+1
254        if (i.eq.id_read) then
255          j=j+1
256        end if
257        mp_partgroup_rank(i) = j
258      end do
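! Example: with mp_np=4 and id_read=3 (the default, last rank) the map is
! simply 0,1,2; if id_read were 1, the map would skip it and become 0,2,3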
259
260      call MPI_Group_incl (world_group_id, mp_np-1, mp_partgroup_rank, &
261           &mp_partgroup_pid, mp_ierr)
262      if (mp_ierr /= 0) goto 100
263      call MPI_Comm_create (MPI_COMM_WORLD, mp_partgroup_pid, mp_partgroup_comm, mp_ierr)
264      if (mp_ierr /= 0) goto 100
265
266      if (mp_pid.ne.id_read) then
267        call MPI_Comm_rank (mp_partgroup_comm, mp_partgroup_pid, mp_ierr)
268        if (mp_ierr /= 0) goto 100
269
270! Get size of new group (=mp_np-1)
271        call MPI_COMM_SIZE(mp_partgroup_comm, mp_partgroup_np, mp_ierr)
272        if (mp_ierr /= 0) goto 100
273        if (mp_partgroup_np.ne.mp_np-1) then
274          write(*,*)  '#### mpi_mod:: mpif_init> ERROR ####&
275               & mp_partgroup_np.ne.mp_np-1'
276          stop
277        endif
278
279      else
280        mp_partgroup_pid = -1
281      end if
282    end if
283
284    if (lmp_use_reader) then
285      mp_comm_used = mp_partgroup_comm
286      mp_partid = mp_partgroup_pid
287      mp_partgroup_np=mp_np-1
288    else
289      mp_comm_used = MPI_COMM_WORLD
290      mp_partgroup_np = mp_np
291      mp_partid = mp_pid
292    end if
293
294! Set maxpart per process
295    if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1
296    maxpart_mpi=int(maxpart/mp_partgroup_np)+addmaxpart
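! Example: maxpart=10 and mp_partgroup_np=3 gives maxpart_mpi=4 on rank 0
! (since 0 < mod(10,3)=1) and maxpart_mpi=3 on ranks 1 and 2, summing to 10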
297
298! Do not allocate particle data arrays for readwind process
299    if (lmpreader.and.lmp_use_reader) then
300      maxpart_mpi=0
301    end if
302
303! Set random seed for each non-root process
304    if (mp_pid.gt.0) then
305!    if (mp_pid.ge.0) then
306!      call system_clock(s)
307      s = 244
308      mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
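! With the hard-coded s the seeds are reproducible between runs and generally
! differ between ranks (104729 is prime); re-enable the system_clock call
! above to randomize them instead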
309    end if
310    if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed
311    if (mp_dbg_mode) then
312! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20
313      mp_seed=0
314      if (lroot) write(*,*) 'MPI: setting seed=0'
315    end if
316
317! Allocate request array for asynchronous MPI
318    if (.not.lmp_sync) then
319      allocate(reqs(0:nvar_async*mp_np-1))
320      reqs(:)=MPI_REQUEST_NULL
321    else
322      allocate(reqs(0:1))
323      reqs(:)=MPI_REQUEST_NULL
324    end if
325
326    goto 101
327
328100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
329    stop
330
331101 end subroutine mpif_init
332
333
334  subroutine mpif_mpi_barrier
335!***********************************************************************
336! Set MPI barrier (all processes are synchronized here), with the
337! possible exception of a separate 'readwind' process.
338! Optionally measure cpu/wall time.
339!
340!***********************************************************************
341    implicit none
342
343    if (mp_time_barrier) then
344      call cpu_time(mp_barrier_time_beg)
345      mp_barrier_wtime_beg = mpi_wtime()
346    endif
347
348    call MPI_BARRIER(mp_comm_used, mp_ierr)
349    if (mp_ierr /= 0) goto 100
350
351    if (mp_time_barrier) then
352      call cpu_time(mp_barrier_time_end)
353      mp_barrier_wtime_end = mpi_wtime()
354
355      mp_barrier_time_total=mp_barrier_time_total+&
356           &(mp_barrier_time_end-mp_barrier_time_beg)
357      mp_barrier_wtime_total=mp_barrier_wtime_total+&
358           &(mp_barrier_wtime_end-mp_barrier_wtime_beg)
359    end if
360
361    goto 101
362
363100 write(*,*) '#### mpi_mod::mpif_mpi_barrier> ERROR ####', mp_ierr
364    stop
365
366101 end subroutine mpif_mpi_barrier
367
368
369  subroutine mpif_com_mod_allocate
370!*******************************************************************************   
371! Dynamic allocation of arrays (in serial code these have size maxpart)
372!
373!*******************************************************************************
374    use com_mod
375
376    implicit none
377
378    integer :: array_size
379
380    array_size = maxpart_mpi
381
382    allocate(itra1(array_size),npoint(array_size),&
383         & nclass(array_size),idt(array_size),&
384         & itramem(array_size),itrasplit(array_size),&
385         & xtra1(array_size),ytra1(array_size),&
386         & ztra1(array_size),xmass1(array_size, maxspec))
387
388    allocate(uap(array_size),ucp(array_size),&
389         & uzp(array_size),us(array_size),&
390         & vs(array_size),&
391         & ws(array_size),cbt(array_size))
392
393
394  end subroutine mpif_com_mod_allocate
395
396
397  subroutine mpif_tm_send_dims
398!***********************************************************************
399! Distribute array dimensions from pid 0 to all processes.
400! numpart_mpi/numpart is sent to allow dynamic allocation
401!
402! Currently not in use.
403!
404! Variables of similar type (integer, real) are placed in an array
405! and sent collectively, to avoid the overhead associated with individual
406! MPI send/receives
407!
408!
409!***********************************************************************
410    use com_mod
411
412    implicit none
413
414    integer :: add_part=0
415
416    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
417
418! MPI subgroup does the particle-loop
419    if (lmp_use_reader) then
420      if (mod(numpart,mp_partgroup_np).ne.0) add_part=1
421      numpart_mpi=int(numpart/mp_partgroup_np)+add_part
422    else
423      if (mod(numpart,mp_np).ne.0) add_part=1
424      numpart_mpi=int(numpart/mp_np)+add_part
425    end if
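! Note that every process gets the same, rounded-up count here; e.g.
! numpart=10 over 3 processes gives numpart_mpi=4 on each of them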
426
427
428! redefine numpart as 'numpart per process' throughout the code
429!**************************************************************
430    numpart = numpart_mpi
431
432  end subroutine mpif_tm_send_dims
433
434
435  subroutine mpif_tm_send_vars
436!***********************************************************************
437! Distribute particle variables from pid 0 to all processes.
438! Called from timemanager
439! *NOT IN USE* at the moment, but can be useful for debugging
440!
441!***********************************************************************
442    use com_mod
443
444    implicit none
445
446    integer :: i
447
448! Time for MPI communications
449!****************************
450    if (mp_measure_time) call mpif_mtime('commtime',0)
451
452! Distribute variables, send from pid 0 to other processes
453!**********************************************************************
454
455! integers
456    if (lroot) then
457      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
458           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
459      if (mp_ierr /= 0) goto 600
460      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
461           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
462      if (mp_ierr /= 0) goto 600
463      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
464           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
465      if (mp_ierr /= 0) goto 600
466      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
467           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
468      if (mp_ierr /= 0) goto 600
469      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
470           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
471      if (mp_ierr /= 0) goto 600
472
473! int2
474      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
475           &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
476      if (mp_ierr /= 0) goto 600
477
478! real
479      call MPI_SCATTER(uap,numpart_mpi,mp_pp,MPI_IN_PLACE,&
480           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
481      if (mp_ierr /= 0) goto 600
482      call MPI_SCATTER(ucp,numpart_mpi,mp_pp,MPI_IN_PLACE,&
483           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
484      if (mp_ierr /= 0) goto 600
485      call MPI_SCATTER(uzp,numpart_mpi,mp_pp,MPI_IN_PLACE,&
486           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
487      if (mp_ierr /= 0) goto 600
488
489      call MPI_SCATTER(us,numpart_mpi,mp_pp,MPI_IN_PLACE,&
490           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
491      if (mp_ierr /= 0) goto 600
492      call MPI_SCATTER(vs,numpart_mpi,mp_pp,MPI_IN_PLACE,&
493           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
494      if (mp_ierr /= 0) goto 600
495      call MPI_SCATTER(ws,numpart_mpi,mp_pp,MPI_IN_PLACE,&
496           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
497      if (mp_ierr /= 0) goto 600
498
499      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
500           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
501      if (mp_ierr /= 0) goto 600
502      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
503           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
504      if (mp_ierr /= 0) goto 600
505      call MPI_SCATTER(ztra1,numpart_mpi,mp_pp,MPI_IN_PLACE,&
506           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
507      if (mp_ierr /= 0) goto 600
508
509      do i=1,nspec
510        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_pp,MPI_IN_PLACE,&
511             &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
512        if (mp_ierr /= 0) goto 600
513      end do
514
515    else ! (mp_pid >= 1)
516! integers
517      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
518           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
519      if (mp_ierr /= 0) goto 600
520      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
521           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
522      if (mp_ierr /= 0) goto 600
523      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
524           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
525      if (mp_ierr /= 0) goto 600
526      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
527           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
528      if (mp_ierr /= 0) goto 600
529      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
530           &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
531      if (mp_ierr /= 0) goto 600
532
533! int2
534      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
535           &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
536      if (mp_ierr /= 0) goto 600
537
538! reals
539      call MPI_SCATTER(uap,numpart_mpi,mp_pp,uap,&
540           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
541      if (mp_ierr /= 0) goto 600
542      call MPI_SCATTER(ucp,numpart_mpi,mp_pp,ucp,&
543           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
544      if (mp_ierr /= 0) goto 600
545      call MPI_SCATTER(uzp,numpart_mpi,mp_pp,uzp,&
546           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
547      if (mp_ierr /= 0) goto 600
548
549      call MPI_SCATTER(us,numpart_mpi,mp_pp,us,&
550           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
551      if (mp_ierr /= 0) goto 600
552      call MPI_SCATTER(vs,numpart_mpi,mp_pp,vs,&
553           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
554      if (mp_ierr /= 0) goto 600
555      call MPI_SCATTER(ws,numpart_mpi,mp_pp,ws,&
556           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
557      if (mp_ierr /= 0) goto 600
558
559      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
560           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
561      if (mp_ierr /= 0) goto 600
562      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
563           &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
564      if (mp_ierr /= 0) goto 600
565      call MPI_SCATTER(ztra1,numpart_mpi,mp_pp,ztra1,&
566           &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
567      if (mp_ierr /= 0) goto 600
568
569      do i=1,nspec
570        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_pp,xmass1(:,i),&
571             &numpart_mpi,mp_pp,id_root,mp_comm_used,mp_ierr)
572        if (mp_ierr /= 0) goto 600
573      end do
574
575    end if !(lroot)
576
577    if (mp_measure_time) call mpif_mtime('commtime',1)
578
579    goto 601
580
581600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
582    stop
583601 end subroutine mpif_tm_send_vars
584
585
586  subroutine mpif_tm_collect_vars
587!*******************************************************************************
588! Collect particle data to PID 0 from all processes.                           *
589!                                                                              *
590! This can be called from timemanager_mpi, before calculating                  *
591! concentrations/writing output grids.                                         *
592!                                                                              *
593! Currently *not in use*, as each process calculates concentrations            *
594! separately, but can be useful for debugging                                  *
595!                                                                              *
596! To use this routine (together with mpif_tm_send_vars) to transfer data       *
597! to the MPI root process (useful for debugging), insert code like this        *
598! (in timemanager_mpi.f90)                                                     *
599!                                                                              *
600!      if (lroot) tot_numpart=numpart ! root stores total numpart              *
601!      call mpif_tm_send_dims                                                  *
602!      if (numpart>1) then                                                     *
603!        call mpif_tm_send_vars                                                *
604!      end if                                                                  *
605!                                                                              *
606!    To collect data from all processes to root process after a parallel       *
607!    region:                                                                   *
608!                                                                              *
609!      if (numpart>0) then                                                     *
610!          if (lroot) numpart = tot_numpart                                    *
611!       call mpif_tm_collect_vars                                              *
612!      end if                                                                  *
613!                                                                              *
614! Then a section that begins with "if (lroot) ..." will behave like            *
615! serial flexpart                                                              *
616!                                                                              *
617!*******************************************************************************
618    use com_mod !, only: numpart, nspec, itra1, npoint, nclass
619
620    implicit none
621
622    integer :: i
623
624    logical :: use_mp_vars = .false.! use mp_... type allocated vars
625    logical :: use_in_place = .true.! use MPI_IN_PLACE to collect vars.
626
627
628! Time for MPI communications
629    if (mp_measure_time) call mpif_mtime('commtime',0)
630
631! Collect variables, sent from all processes to pid 0
632!**********************************************************************
633
634    if (.not. use_mp_vars.and..not.use_in_place) then
635
636! Integers:
637      call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
638           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
639      if (mp_ierr /= 0) goto 600
640      call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
641           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
642      if (mp_ierr /= 0) goto 600
643      call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
644           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
645      if (mp_ierr /= 0) goto 600
646      call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
647           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
648      if (mp_ierr /= 0) goto 600
649      call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
650           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
651      if (mp_ierr /= 0) goto 600
652
653! int2
654      call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
655           &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
656      if (mp_ierr /= 0) goto 600
657
658! Reals:
659      call MPI_GATHER(uap, numpart_mpi, mp_pp, uap, &
660           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
661      if (mp_ierr /= 0) goto 600
662      call MPI_GATHER(ucp, numpart_mpi, mp_pp, ucp, &
663           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
664      if (mp_ierr /= 0) goto 600
665      call MPI_GATHER(uzp, numpart_mpi, mp_pp, uzp, &
666           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
667      if (mp_ierr /= 0) goto 600
668
669      call MPI_GATHER(us, numpart_mpi, mp_pp, us, &
670           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
671      if (mp_ierr /= 0) goto 600
672      call MPI_GATHER(vs, numpart_mpi, mp_pp, vs, &
673           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
674      if (mp_ierr /= 0) goto 600
675      call MPI_GATHER(ws, numpart_mpi, mp_pp, ws, &
676           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
677      if (mp_ierr /= 0) goto 600
678
679
680      call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
681           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
682      if (mp_ierr /= 0) goto 600
683      call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
684           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
685      if (mp_ierr /= 0) goto 600
686      call MPI_GATHER(ztra1, numpart_mpi, mp_pp, ztra1, &
687           &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
688      if (mp_ierr /= 0) goto 600
689
690      do i=1, nspec
691        call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_pp, xmass1(:,i), &
692             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
693        if (mp_ierr /= 0) goto 600
694
695      end do
696
697! Use variables npoint etc directly for communications
698!***********************************************************************
699    else if (use_in_place.and..not.use_mp_vars) then
700      if (lroot) then
701! Integers:
702        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, npoint, &
703             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
704        if (mp_ierr /= 0) goto 600
705        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, idt, &
706             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
707        if (mp_ierr /= 0) goto 600
708        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itra1, &
709             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
710        if (mp_ierr /= 0) goto 600
711        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, nclass, &
712             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
713        if (mp_ierr /= 0) goto 600
714        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itramem, &
715             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
716        if (mp_ierr /= 0) goto 600
717
718! int2
719        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER2, cbt, &
720             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
721        if (mp_ierr /= 0) goto 600
722
723! Reals:
724        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, uap, &
725             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
726        if (mp_ierr /= 0) goto 600
727        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, ucp, &
728             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
729        if (mp_ierr /= 0) goto 600
730        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, uzp, &
731             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
732        if (mp_ierr /= 0) goto 600
733
734        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, us, &
735             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
736        if (mp_ierr /= 0) goto 600
737        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, vs, &
738             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
739        if (mp_ierr /= 0) goto 600
740        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, ws, &
741             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
742        if (mp_ierr /= 0) goto 600
743
744
745        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, xtra1, &
746             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
747        if (mp_ierr /= 0) goto 600
748        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, ytra1, &
749             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
750        if (mp_ierr /= 0) goto 600
751        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, ztra1, &
752             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
753        if (mp_ierr /= 0) goto 600
754
755        do i=1, nspec
756          call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_pp, xmass1(:,i), &
757               &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
758          if (mp_ierr /= 0) goto 600
759        end do
760
761      else ! (for mp_pid >= 1)
762
763! Integers:
764        call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
765             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
766        if (mp_ierr /= 0) goto 600
767        call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
768             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
769        if (mp_ierr /= 0) goto 600
770        call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
771             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
772        if (mp_ierr /= 0) goto 600
773        call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
774             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
775        if (mp_ierr /= 0) goto 600
776        call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
777             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
778        if (mp_ierr /= 0) goto 600
779
780! int2
781        call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
782             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
783        if (mp_ierr /= 0) goto 600
784
785! Reals:
786        call MPI_GATHER(uap, numpart_mpi, mp_pp, uap, &
787             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
788        if (mp_ierr /= 0) goto 600
789        call MPI_GATHER(ucp, numpart_mpi, mp_pp, ucp, &
790             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
791        if (mp_ierr /= 0) goto 600
792        call MPI_GATHER(uzp, numpart_mpi, mp_pp, uzp, &
793             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
794        if (mp_ierr /= 0) goto 600
795
796        call MPI_GATHER(us, numpart_mpi, mp_pp, us, &
797             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
798        if (mp_ierr /= 0) goto 600
799        call MPI_GATHER(vs, numpart_mpi, mp_pp, vs, &
800             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
801        if (mp_ierr /= 0) goto 600
802        call MPI_GATHER(ws, numpart_mpi, mp_pp, ws, &
803             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
804        if (mp_ierr /= 0) goto 600
805
806
807        call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
808             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
809        if (mp_ierr /= 0) goto 600
810        call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
811             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
812        if (mp_ierr /= 0) goto 600
813        call MPI_GATHER(ztra1, numpart_mpi, mp_pp, ztra1, &
814             &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
815        if (mp_ierr /= 0) goto 600
816
817        do i=1, nspec
818          call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_pp, xmass1(:,i), &
819               &numpart_mpi, mp_pp, id_root, mp_comm_used, mp_ierr)
820          if (mp_ierr /= 0) goto 600
821        end do
822      end if ! (lroot)
823    end if !  (use_in_place)
824
825    if (mp_measure_time) call mpif_mtime('commtime',1)
826
827    goto 601
828
829600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
830    stop
831601 end subroutine mpif_tm_collect_vars
832
833
834  subroutine mpif_gf_send_vars(memstat)
835!*******************************************************************************
836! DESCRIPTION
837!   Distribute 'getfield' variables from reader process
838!
839!   Called from timemanager
840!
841! NOTE
842!   This subroutine distributes windfields read from the reader process to
843!   all other processes. Usually one set of fields is transferred, but at
844!   the first timestep, when there are no fields in memory, both are transferred.
845!   MPI_Bcast is used, so implicitly all processes are synchronized at this
846!   step
847!
848! TODO
849!   GFS version
850!
851!*******************************************************************************
852    use com_mod
853    use par_mod,only: numwfmem
854
855    implicit none
856
857    integer, intent(in) :: memstat
858
859! Common array sizes used for communications
860    integer, parameter :: d3_size1 = nxmax*nymax*nzmax
861    integer, parameter :: d3_size2 = nxmax*nymax*nuvzmax
862    integer, parameter :: d2_size1 = nxmax*nymax
863    integer, parameter :: d2_size2 = nxmax*nymax*maxspec
864    integer, parameter :: d2_size3 = nxmax*nymax
865    integer, parameter :: d1_size1 = maxwf
866
867    integer :: d3s1,d3s2,d2s1,d2s2
868
869! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
870    integer :: li,ui
871
872! The first time the routine is called, the unchangeable (static) fields are also transferred
873    logical :: first_call=.true.
874
875!**********************************************************************
876
877! Sizes of arrays transferred
878    d3s1=d3_size1
879    d3s2=d3_size2
880    d2s1=d2_size1
881    d2s2=d2_size2
882
883! Decide which field(s) need to be transferred
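! memstat values as used here: 17/18/19 mean one new field was read into
! memory slot 1/2/3 respectively, while values >= 32 mean slots 1 and 2 were
! both filled (first timestep), so two fields are broadcast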
884    if (memstat.ge.32) then ! distribute 2 fields, to 1st/2nd indices
885      li=1; ui=2
886      d3s1=2*d3_size1
887      d3s2=2*d3_size2
888      d2s1=2*d2_size1
889      d2s2=2*d2_size2
890    else if (memstat.eq.17) then ! distribute 1 field, place on 1st index
891      li=1; ui=1
892    else if (memstat.eq.18) then ! distribute 1 field, place on 2nd index
893      li=2; ui=2
894    else if (memstat.eq.19) then ! distribute 1 field, place on 3rd index
895      li=3; ui=3
896    else
897      write(*,*) '#### mpi_mod::mpif_gf_send_vars> ERROR: ', &
898           & 'wrong value of memstat, exiting ####', memstat
899      stop
900    end if
901
902
903! Time for MPI communications
904    if (mp_measure_time) call mpif_mtime('commtime',0)
905
906! Send variables from getfield process (id_read) to other processes
907!**********************************************************************
908
909! The non-reader processes need to know if clouds were read.
910! TODO: only at first step or always?
911    call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
912    if (mp_ierr /= 0) goto 600
913
914! Static fields/variables sent only at startup
915    if (first_call) then
916      call MPI_Bcast(oro(:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
917      if (mp_ierr /= 0) goto 600
918      call MPI_Bcast(excessoro(:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
919      if (mp_ierr /= 0) goto 600
920      call MPI_Bcast(lsm(:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
921      if (mp_ierr /= 0) goto 600
922      call MPI_Bcast(xlanduse(:,:,:),d2_size3*numclass,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
923      if (mp_ierr /= 0) goto 600
924      call MPI_Bcast(wftime,d1_size1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
925      if (mp_ierr /= 0) goto 600
926      call MPI_Bcast(numbwf,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
927      if (mp_ierr /= 0) goto 600
928      call MPI_Bcast(nmixz,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
929      if (mp_ierr /= 0) goto 600
930      call MPI_Bcast(height,nzmax,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
931      if (mp_ierr /= 0) goto 600
932
933      first_call=.false.
934    endif
935
936    call MPI_Bcast(uu(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
937    if (mp_ierr /= 0) goto 600
938    call MPI_Bcast(vv(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
939    if (mp_ierr /= 0) goto 600
940    call MPI_Bcast(uupol(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
941    if (mp_ierr /= 0) goto 600
942    call MPI_Bcast(vvpol(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
943    if (mp_ierr /= 0) goto 600
944    call MPI_Bcast(ww(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
945    if (mp_ierr /= 0) goto 600
946    call MPI_Bcast(tt(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
947    if (mp_ierr /= 0) goto 600
948    call MPI_Bcast(rho(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
949    if (mp_ierr /= 0) goto 600
950    call MPI_Bcast(drhodz(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
951    if (mp_ierr /= 0) goto 600
952    call MPI_Bcast(tth(:,:,:,li:ui),d3s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
953    if (mp_ierr /= 0) goto 600
954    call MPI_Bcast(qvh(:,:,:,li:ui),d3s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
955    if (mp_ierr /= 0) goto 600
956    call MPI_Bcast(qv(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
957    if (mp_ierr /= 0) goto 600
958    call MPI_Bcast(pv(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
959    if (mp_ierr /= 0) goto 600
960    call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
961    if (mp_ierr /= 0) goto 600
962
963! cloud water/ice:
964    if (readclouds) then
965      call MPI_Bcast(clwc(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
966      if (mp_ierr /= 0) goto 600
967      call MPI_Bcast(ciwc(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
968      if (mp_ierr /= 0) goto 600
969    end if
970
971! 2D fields
972    call MPI_Bcast(cloudsh(:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
973    if (mp_ierr /= 0) goto 600
974    call MPI_Bcast(vdep(:,:,:,li:ui),d2s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
975    if (mp_ierr /= 0) goto 600
976    call MPI_Bcast(ps(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
977    if (mp_ierr /= 0) goto 600
978    call MPI_Bcast(sd(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
979    if (mp_ierr /= 0) goto 600
980    call MPI_Bcast(tcc(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
981    if (mp_ierr /= 0) goto 600
982    call MPI_Bcast(tt2(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
983    if (mp_ierr /= 0) goto 600
984    call MPI_Bcast(td2(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
985    if (mp_ierr /= 0) goto 600
986    call MPI_Bcast(lsprec(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
987    if (mp_ierr /= 0) goto 600
988    call MPI_Bcast(convprec(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
989    if (mp_ierr /= 0) goto 600
990    call MPI_Bcast(ustar(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
991    if (mp_ierr /= 0) goto 600
992    call MPI_Bcast(wstar(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
993    if (mp_ierr /= 0) goto 600
994    call MPI_Bcast(hmix(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
995    if (mp_ierr /= 0) goto 600
996    call MPI_Bcast(tropopause(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
997    if (mp_ierr /= 0) goto 600
998    call MPI_Bcast(oli(:,:,:,li:ui),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
999    if (mp_ierr /= 0) goto 600
1000
1001    call MPI_Bcast(z0,numclass,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1002    if (mp_ierr /= 0) goto 600
1003
1004    if (mp_measure_time) call mpif_mtime('commtime',1)
1005
1006    goto 601
1007
1008600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
1009    stop
1010
1011601 end subroutine mpif_gf_send_vars
1012
1013
1014  subroutine mpif_gf_send_vars_nest(memstat)
1015!***********************************************************************
1016! DESCRIPTION
1017!   Distribute 'getfield' variables from reader process to all processes.
1018!   For nested fields
1019!
1020!   Called from timemanager
1021!
1022! NOTE
1023!   This subroutine distributes nested windfields from the reader process to
1024!   all other processes. Usually one set of fields is transferred, but at
1025!   the first timestep, when there are no fields in memory, both are transferred.
1026!   MPI_Bcast is used, so implicitly all processes are synchronized at this
1027!   step
1028!
1029! TODO
1030!   Transfer cloud water/ice if and when available for nested fields
1031!
1032!***********************************************************************
1033    use com_mod
1034    use par_mod, only: numwfmem
1035
1036    implicit none
1037
1038    integer, intent(in) :: memstat
1039    integer :: i
1040! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
1041    integer :: li,ui
1042
1043! Common array sizes used for communications
1044    integer :: d3_size1 = nxmaxn*nymaxn*nzmax*maxnests
1045    integer :: d3_size2 = nxmaxn*nymaxn*nuvzmax*maxnests
1046    integer :: d2_size1 = nxmaxn*nymaxn*maxnests
1047    integer :: d2_size2 = nxmaxn*nymaxn*maxspec*maxnests
1048    integer :: d2_size3 = nxmaxn*nymaxn*maxnests
1049
1050    integer :: d3s1,d3s2,d2s1,d2s2
1051
1052! The first time the routine is called, the unchangeable (static) fields are also transferred
1053    logical :: first_call=.true.
1054
1055!**********************************************************************
1056
1057! Sizes of arrays transferred
1058    d3s1=d3_size1
1059    d3s2=d3_size2
1060    d2s1=d2_size1
1061    d2s2=d2_size2
1062
1063! Decide which field(s) need to be transferred
1064    if (memstat.ge.32) then ! distribute 2 fields
1065      li=1; ui=2
1066      d3s1=2*d3_size1
1067      d3s2=2*d3_size2
1068      d2s1=2*d2_size1
1069      d2s2=2*d2_size2
1070    else if (memstat.eq.17) then ! distribute 1 field, on 1st index
1071      li=1; ui=1
1072    else if (memstat.eq.18) then ! distribute 1 field, on 2nd index
1073      li=2; ui=2
1074    else if (memstat.eq.19) then ! distribute 1 field, on 3rd index
1075      li=3; ui=3
1076    else
1077      write(*,*) '#### mpi_mod::mpif_gf_send_vars_nest> ERROR: ', &
1078           & 'wrong value of memstat, exiting ####', memstat
1079      stop
1080    end if
1081
1082
1083! Time for MPI communications
1084    if (mp_measure_time) call mpif_mtime('commtime',0)
1085
1086! Distribute variables, send from getfield process (id_read) to other
1087! processes
1088!**********************************************************************
1089
1090! Static fields/variables sent only at startup
1091    if (first_call) then
1092      call MPI_Bcast(oron(:,:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1093      if (mp_ierr /= 0) goto 600
1094      call MPI_Bcast(excessoron(:,:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1095      if (mp_ierr /= 0) goto 600
1096      call MPI_Bcast(lsmn(:,:,:),d2_size3,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1097      if (mp_ierr /= 0) goto 600
1098      call MPI_Bcast(xlandusen(:,:,:,:),d2_size3*numclass,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1099      if (mp_ierr /= 0) goto 600
1100      first_call=.false.
1101    end if
1102
1103! MPI prefers contiguous arrays for sending (else a buffer is created),
1104! hence the loop
1105
1106    do i=1, numbnests
1107! 3D fields
1108      call MPI_Bcast(uun(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1109      if (mp_ierr /= 0) goto 600
1110      call MPI_Bcast(vvn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1111      if (mp_ierr /= 0) goto 600
1112      call MPI_Bcast(wwn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1113      if (mp_ierr /= 0) goto 600
1114      call MPI_Bcast(ttn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1115      if (mp_ierr /= 0) goto 600
1116      call MPI_Bcast(rhon(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1117      if (mp_ierr /= 0) goto 600
1118      call MPI_Bcast(drhodzn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1119      if (mp_ierr /= 0) goto 600
1120      call MPI_Bcast(tthn(:,:,:,li:ui,i),d3s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1121      if (mp_ierr /= 0) goto 600
1122      call MPI_Bcast(qvhn(:,:,:,li:ui,i),d3s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1123      if (mp_ierr /= 0) goto 600
1124      call MPI_Bcast(qvn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1125      if (mp_ierr /= 0) goto 600
1126      call MPI_Bcast(pvn(:,:,:,li:ui,i),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1127      if (mp_ierr /= 0) goto 600
1128      call MPI_Bcast(cloudsn(:,:,:,li:ui,i),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
1129      if (mp_ierr /= 0) goto 600
1130
1131! 2D fields
1132      call MPI_Bcast(cloudsnh(:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1133      if (mp_ierr /= 0) goto 600
1134      call MPI_Bcast(vdepn(:,:,:,li:ui,i),d2s2,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1135      if (mp_ierr /= 0) goto 600
1136      call MPI_Bcast(psn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1137      if (mp_ierr /= 0) goto 600
1138      call MPI_Bcast(sdn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1139      if (mp_ierr /= 0) goto 600
1140      call MPI_Bcast(tccn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1141      if (mp_ierr /= 0) goto 600
1142      call MPI_Bcast(tt2n(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1143      if (mp_ierr /= 0) goto 600
1144      call MPI_Bcast(td2n(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1145      if (mp_ierr /= 0) goto 600
1146      call MPI_Bcast(lsprecn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1147      if (mp_ierr /= 0) goto 600
1148      call MPI_Bcast(convprecn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1149      if (mp_ierr /= 0) goto 600
1150      call MPI_Bcast(ustarn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1151      if (mp_ierr /= 0) goto 600
1152      call MPI_Bcast(wstarn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1153      if (mp_ierr /= 0) goto 600
1154      call MPI_Bcast(olin(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1155      if (mp_ierr /= 0) goto 600
1156      call MPI_Bcast(hmixn(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1157      if (mp_ierr /= 0) goto 600
1158      call MPI_Bcast(tropopausen(:,:,:,li:ui,i),d2s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
1159      if (mp_ierr /= 0) goto 600
1160    end do
1161
1162    if (mp_measure_time) call mpif_mtime('commtime',1)
1163
1164    goto 601
1165
1166600 write(*,*) "mpi_mod> mp_ierr /= 0",mp_ierr
1167    stop
1168
1169601 end subroutine mpif_gf_send_vars_nest
1170
1171
1172  subroutine mpif_gf_send_vars_async(memstat)
1173!*******************************************************************************
1174! DESCRIPTION
1175!   Distribute 'getfield' variables from reader process to all processes.
1176!   Called from timemanager by reader process only
1177!
1178! NOTE
1179!   This version uses asynchronous sends. The newest fields are sent in the
1180!   background, so calculations can continue while
1181!   MPI communications are performed.
1182!
1183!   The way the MPI tags/requests are sequenced means that this routine must
1184!   carefully match routine 'mpif_gf_recv_vars_async'
1185!
1186! VARIABLES
1187!   memstat -- input, for resolving pointer to windfield index being read
1188!   mind    -- index where to place new fields
1189!
1190! TODO
1191!   Transfer cloud water/ice
1192!
1193!*******************************************************************************
1194    use com_mod
1195
1196    implicit none
1197
1198    integer, intent(in) :: memstat
1199    integer :: mind
1200    integer :: dest,i
1201
1202! Common array sizes used for communications
1203    integer :: d3s1 = nxmax*nymax*nzmax
1204    integer :: d3s2 = nxmax*nymax*nuvzmax
1205    integer :: d2s1 = nxmax*nymax
1206    integer :: d2s2 = nxmax*nymax*maxspec
1207    integer :: d1s1 = maxwf
1208
1209!*******************************************************************************
1210
1211! At the time the send is posted, the reader process is one step further
1212! in the permutation of memstat compared with the receiving processes
1213
1214    if (memstat.ge.32) then
1215! last read was synchronous, to indices 1 and 2, 3 is free
1216      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1217           & memstat>=32 should never happen here."
1218      stop
1219    else if (memstat.eq.17) then
1220! old fields on 1,2, send 3
1221      mind=3
1222    else if (memstat.eq.18) then
1223! old fields on 2,3, send 1
1224      mind=1
1225    else if (memstat.eq.19) then
1226! old fields on 3,1, send 2
1227      mind=2
1228    else
1229      write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: &
1230           & invalid memstat"
      stop  ! mind would otherwise be undefined below
1231    end if
1232
1233    if (mp_dev_mode) then
1234      if (mp_pid.ne.id_read) then
1235        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_async'
1236      end if
1237    end if
1238
1239    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat
1240
1241! Time for MPI communications
1242    if (mp_measure_time) call mpif_mtime('commtime',0)
1243
1244! Loop over receiving processes, initiate data sending
1245!*****************************************************
1246
1247    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
1248! TODO: use mp_partgroup_np here
1249      if (dest.eq.id_read) cycle
1250      i=dest*nvar_async
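! reqs() holds one request slot per variable and destination: rank 'dest'
! uses slots dest*nvar_async .. dest*nvar_async+nvar_async-1, and i steps
! through them as each MPI_Isend below is posted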
1251      call MPI_Isend(uu(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1252      if (mp_ierr /= 0) goto 600
1253      i=i+1
1254      call MPI_Isend(vv(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1255      if (mp_ierr /= 0) goto 600
1256      i=i+1
1257      call MPI_Isend(uupol(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1258      if (mp_ierr /= 0) goto 600
1259      i=i+1
1260      call MPI_Isend(vvpol(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1261      if (mp_ierr /= 0) goto 600
1262      i=i+1
1263      call MPI_Isend(ww(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1264      if (mp_ierr /= 0) goto 600
1265      i=i+1
1266      call MPI_Isend(tt(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1267      if (mp_ierr /= 0) goto 600
1268      i=i+1
1269      call MPI_Isend(rho(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1270      if (mp_ierr /= 0) goto 600
1271      i=i+1
1272      call MPI_Isend(drhodz(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1273      if (mp_ierr /= 0) goto 600
1274      i=i+1
1275      call MPI_Isend(tth(:,:,:,mind),d3s2,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1276      if (mp_ierr /= 0) goto 600
1277      i=i+1
1278      call MPI_Isend(qvh(:,:,:,mind),d3s2,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1279      if (mp_ierr /= 0) goto 600
1280      i=i+1
1281      call MPI_Isend(qv(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1282      if (mp_ierr /= 0) goto 600
1283      i=i+1
1284      call MPI_Isend(pv(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1285      if (mp_ierr /= 0) goto 600
1286      i=i+1
1287      call MPI_Isend(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1288      if (mp_ierr /= 0) goto 600
1289      i=i+1
1290
1291      call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1292      if (mp_ierr /= 0) goto 600
1293      i=i+1
1294      call MPI_Isend(vdep(:,:,:,mind),d2s2,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1295      if (mp_ierr /= 0) goto 600
1296      i=i+1
1297      call MPI_Isend(ps(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1298      if (mp_ierr /= 0) goto 600
1299      i=i+1
1300      call MPI_Isend(sd(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1301      if (mp_ierr /= 0) goto 600
1302      i=i+1
1303      call MPI_Isend(tcc(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1304      if (mp_ierr /= 0) goto 600
1305      i=i+1
1306      call MPI_Isend(tt2(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1307      if (mp_ierr /= 0) goto 600
1308      i=i+1
1309      call MPI_Isend(td2(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1310      if (mp_ierr /= 0) goto 600
1311      i=i+1
1312      call MPI_Isend(lsprec(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1313      if (mp_ierr /= 0) goto 600
1314      i=i+1
1315      call MPI_Isend(convprec(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1316      if (mp_ierr /= 0) goto 600
1317      i=i+1
1318      call MPI_Isend(ustar(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1319      if (mp_ierr /= 0) goto 600
1320      i=i+1
1321      call MPI_Isend(wstar(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1322      if (mp_ierr /= 0) goto 600
1323      i=i+1
1324      call MPI_Isend(hmix(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1325      if (mp_ierr /= 0) goto 600
1326      i=i+1
1327      call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1328      if (mp_ierr /= 0) goto 600
1329      i=i+1
1330      call MPI_Isend(oli(:,:,:,mind),d2s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
1331      if (mp_ierr /= 0) goto 600
1332
1333! Send cloud water if it exists. Increment counter always (as on receiving end)
1334      if (readclouds) then
1335        i=i+1
1336        call MPI_Isend(clwc(:,:,:,mind),d3s1,mp_pp,dest,tm1,&
1337             &MPI_COMM_WORLD,reqs(i),mp_ierr)
1338        if (mp_ierr /= 0) goto 600
1339        i=i+1
1340
1341        call MPI_Isend(ciwc(:,:,:,mind),d3s1,mp_pp,dest,tm1,&
1342             &MPI_COMM_WORLD,reqs(i),mp_ierr)
1343        if (mp_ierr /= 0) goto 600
1344! else
1345!   i=i+2
1346      end if
1347
1348    end do
1349
1350    if (mp_measure_time) call mpif_mtime('commtime',1)
1351
1352    goto 601
1353
1354600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> mp_ierr /= 0", mp_ierr
1355    stop
1356
1357601 end subroutine mpif_gf_send_vars_async
1358
1359
1360  subroutine mpif_gf_recv_vars_async(memstat)
1361!*******************************************************************************
1362! DESCRIPTION
1363!   Receive 'getfield' variables from reader process.
1364!   Called from timemanager by all processes except reader
1365!
1366! NOTE
1367!   This version uses asynchronous communications.
1368!
1369! VARIABLES
1370!   memstat -- input, used to resolve windfield index being received
1371!
1372! TODO
1373!   Transfer cloud water/ice
1374!
1375!*******************************************************************************
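! The receives below must be posted in the same variable order and number
! (nvar_async) as the sends in mpif_gf_send_vars_async. Each process keeps its
! request handles at a fixed offset in reqs, following this pattern:
!
!   j=mp_pid*nvar_async                      ! first request slot for this PID
!   call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
!        &MPI_COMM_WORLD,reqs(j),mp_ierr)
!   j=j+1                                    ! next variable uses the next slot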
1376    use com_mod
1377
1378    implicit none
1379
1380    integer, intent(in) :: memstat
1381    integer :: mind,j
1382
1383! Common array sizes used for communications
1384    integer :: d3s1 = nxmax*nymax*nzmax
1385    integer :: d3s2 = nxmax*nymax*nuvzmax
1386    integer :: d2s1 = nxmax*nymax
1387    integer :: d2s2 = nxmax*nymax*maxspec
1388    integer :: d1_size1 = maxwf
1389
1390!    integer :: d3s1,d3s2,d2s1,d2s2
1391!*******************************************************************************
1392
1393! :TODO: don't need these
1394! d3s1=d3_size1
1395! d3s2=d3_size2
1396! d2s1=d2_size1
1397! d2s2=d2_size2
1398
1399! At the time this immediate receive is posted, memstat is the state of
1400! windfield indices at the previous/current time. From this, the future
1401! state is deduced.
1402    if (memstat.eq.32) then
1403! last read was synchronous to indices 1 and 2, 3 is free
1404      mind=3
1405    else if (memstat.eq.17) then
1406! last read was asynchronous to index 3, 1 is free
1407      mind=1
1408    else if (memstat.eq.18) then
1409! last read was asynchronous to index 1, 2 is free
1410      mind=2
1411    else if (memstat.eq.19) then
1412! last read was asynchronous to index 2, 3 is free
1413      mind=3
    else
      write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> ERROR: &
           & invalid memstat"
      stop
1414    end if
1415
1416    if (mp_dev_mode) then
1417      if (mp_pid.eq.id_read) then
1418        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async'
1419      end if
1420    end if
1421
1422! Time for MPI communications
1423    if (mp_measure_time) call mpif_mtime('commtime',0)
1424
1425    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_async, memstat:', memstat
1426
1427! Initiate receiving of data
1428!***************************
1429
1430! Get MPI tags/requests for communications
1431    j=mp_pid*nvar_async
1432    call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1433         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1434    if (mp_ierr /= 0) goto 600
1435    j=j+1
1436    call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1437         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1438    if (mp_ierr /= 0) goto 600
1439    j=j+1
1440    call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1441         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1442    if (mp_ierr /= 0) goto 600
1443    j=j+1
1444    call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1445         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1446    if (mp_ierr /= 0) goto 600
1447    j=j+1
1448    call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1449         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1450    if (mp_ierr /= 0) goto 600
1451    j=j+1
1452    call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1453         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1454    if (mp_ierr /= 0) goto 600
1455    j=j+1
1456    call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1457         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1458    if (mp_ierr /= 0) goto 600
1459    j=j+1
1460    call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1461         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1462    if (mp_ierr /= 0) goto 600
1463    j=j+1
1464    call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,&
1465         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1466    if (mp_ierr /= 0) goto 600
1467    j=j+1
1468    call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,&
1469         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1470    if (mp_ierr /= 0) goto 600
1471    j=j+1
1472
1473    call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1474         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1475    if (mp_ierr /= 0) goto 600
1476    j=j+1
1477    call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1478         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1479    if (mp_ierr /= 0) goto 600
1480    j=j+1
1481    call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
1482         &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1483    if (mp_ierr /= 0) goto 600
1484    j=j+1
1485
1486    call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1487         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1488    if (mp_ierr /= 0) goto 600
1489    j=j+1
1490    call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_pp,id_read,MPI_ANY_TAG,&
1491         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1492    if (mp_ierr /= 0) goto 600
1493    j=j+1
1494    call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1495         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1496    if (mp_ierr /= 0) goto 600
1497    j=j+1
1498    call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1499         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1500    if (mp_ierr /= 0) goto 600
1501    j=j+1
1502    call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1503         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1504    if (mp_ierr /= 0) goto 600
1505    j=j+1
1506    call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1507         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1508    if (mp_ierr /= 0) goto 600
1509    j=j+1
1510    call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1511         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1512    if (mp_ierr /= 0) goto 600
1513    j=j+1
1514    call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1515         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1516    if (mp_ierr /= 0) goto 600
1517    j=j+1
1518    call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1519         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1520    if (mp_ierr /= 0) goto 600
1521    j=j+1
1522    call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1523         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1524    if (mp_ierr /= 0) goto 600
1525    j=j+1
1526    call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1527         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1528    if (mp_ierr /= 0) goto 600
1529    j=j+1
1530    call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1531         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1532    if (mp_ierr /= 0) goto 600
1533    j=j+1
1534    call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1535         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1536    if (mp_ierr /= 0) goto 600
1537    j=j+1
1538    call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
1539         &MPI_COMM_WORLD,reqs(j),mp_ierr)
1540    if (mp_ierr /= 0) goto 600
1541
1542
1543! Post request for clwc. These data are possibly not sent, request must then be cancelled
1544! For now assume that data at all steps either have or do not have water
1545    if (readclouds) then
1546      j=j+1
1547      call MPI_Irecv(clwc(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1548           &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1549      if (mp_ierr /= 0) goto 600
1550      j=j+1
1551      call MPI_Irecv(ciwc(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
1552           &MPI_COMM_WORLD,reqs(j),mp_ierr)   
1553      if (mp_ierr /= 0) goto 600
1554    end if
1555
1556
1557    if (mp_measure_time) call mpif_mtime('commtime',1)
1558
1559    goto 601
1560
1561600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> MPI ERROR ", mp_ierr
1562    stop
1563
1564601 end subroutine mpif_gf_recv_vars_async
1565
1566
1567  subroutine mpif_gf_request
1568!*******************************************************************************
1569! DESCRIPTION
1570!   Check for completion of MPI_Isend/MPI_Irecv data transfer.
1571!   
1572!   
1573! NOTE
1574!   implicit synchronisation between all processes takes place here
1575!
1576!
1577!*******************************************************************************
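! Illustrative calling pattern (a sketch based on the routine descriptions in
! this module; the actual control flow lives in timemanager):
!
!   if (mp_pid.eq.id_read) then
!     call mpif_gf_send_vars_async(memstat)   ! reader posts non-blocking sends
!   else
!     call mpif_gf_recv_vars_async(memstat)   ! workers post non-blocking receives
!   end if
!   ...                                       ! computation overlaps the transfer
!   call mpif_gf_request                      ! all processes wait for completion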
1578    use com_mod, only: readclouds
1579
1580    implicit none
1581
1582
1583    integer :: n_req
1584    integer :: i
1585
1586!***********************************************************************
1587
1588    n_req=nvar_async*mp_np
1589
1590    if (mp_measure_time) call mpif_mtime('commtime',0)
1591
1592!    call MPI_Wait(rm1,MPI_STATUS_IGNORE,mp_ierr)
1593
1594! TODO: cancel recv request if readclouds=.false.
1595!    if (readclouds) then
1596    call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
1597!    endif
1598! else
1599!   do i = 0, nvar_async*mp_np-1
1600!     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
1601!       call MPI_Cancel(reqs(i),mp_ierr)
1602!       cycle
1603!     end if
1604!     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
1605!   end do
1606! end if
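! One possible shape for the cancellation sketched above (an untested sketch of
! the commented-out approach; assumes the two cloud-water receives occupy the
! last two request slots of each process):
!
!   if (.not.readclouds) then
!     do i=mp_pid*nvar_async+nvar_async-2, mp_pid*nvar_async+nvar_async-1
!       call MPI_Cancel(reqs(i),mp_ierr)
!       call MPI_Request_free(reqs(i),mp_ierr)
!     end do
!   end if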
1607
1608    if (mp_ierr /= 0) goto 600
1609
1610    if (mp_measure_time) call mpif_mtime('commtime',1)
1611
1612    goto 601
1613
1614600 write(*,*) "#### mpi_mod::mpif_gf_request> MPI ERROR ", mp_ierr
1615    stop
1616
1617601 end subroutine mpif_gf_request
1618
1619
1620  subroutine mpif_tm_reduce_grid
1621!***********************************************************************
1622! Collect grid variable to PID 0, adding from all processes.
1623!
1624! NOTE
1625!   - Older MPI implementations may lack the MPI_IN_PLACE operation.
1626!     If so use 1) below and change unc_mod
1627!
1628!***********************************************************************
1629    use com_mod
1630    use unc_mod
1631    use par_mod
1632
1633    implicit none
1634
1635    integer :: grid_size2d,grid_size3d
1636    integer, parameter :: rcpt_size=maxreceptor*maxspec
1637
1638!**********************************************************************
1639    grid_size2d=numxgrid*numygrid*maxspec* &
1640         & maxpointspec_act*nclassunc*maxageclass
1641    grid_size3d=numxgrid*numygrid*numzgrid*maxspec* &
1642         & maxpointspec_act*nclassunc*maxageclass
1643
1644
1645! Time for MPI communications
1646    if (mp_measure_time) call mpif_mtime('commtime',0)
1647
1648! 1) Using a separate grid (gridunc0) for received values
1649! call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_pp, MPI_SUM, id_root, &
1650!      & mp_comm_used, mp_ierr)
1651! if (mp_ierr /= 0) goto 600
1652
1653! 2) Using in-place reduction
1654    if (lroot) then
1655      call MPI_Reduce(MPI_IN_PLACE, gridunc, grid_size3d, mp_pp, MPI_SUM, id_root, &
1656           & mp_comm_used, mp_ierr)
1657      if (mp_ierr /= 0) goto 600
1658    else
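! On non-root processes the receive buffer argument of MPI_Reduce is not
! significant; only the send buffer (gridunc) is used here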
1659      call MPI_Reduce(gridunc, gridunc, grid_size3d, mp_pp, MPI_SUM, id_root, &
1660           & mp_comm_used, mp_ierr)
1661    end if
1662
1663    if ((WETDEP).and.(ldirect.gt.0)) then
1664      call MPI_Reduce(wetgridunc, wetgridunc0, grid_size2d, mp_pp, MPI_SUM, id_root, &
1665           & mp_comm_used, mp_ierr)
1666      if (mp_ierr /= 0) goto 600
1667    end if
1668
1669    if ((DRYDEP).and.(ldirect.gt.0)) then
1670      call MPI_Reduce(drygridunc, drygridunc0, grid_size2d, mp_pp, MPI_SUM, id_root, &
1671           & mp_comm_used, mp_ierr)
1672      if (mp_ierr /= 0) goto 600
1673    end if
1674
1675! Receptor concentrations   
1676    if (lroot) then
1677      call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_pp,MPI_SUM,id_root, &
1678           & mp_comm_used,mp_ierr)
1679      if (mp_ierr /= 0) goto 600
1680    else
1681      call MPI_Reduce(creceptor,creceptor,rcpt_size,mp_pp,MPI_SUM,id_root, &
1682           & mp_comm_used,mp_ierr)
1683    end if
1684
1685    if (mp_measure_time) call mpif_mtime('commtime',1)
1686
1687    goto 601
1688
1689600 write(*,*) "#### mpi_mod::mpif_tm_reduce_grid> mp_ierr /= 0", mp_ierr
1690    stop
1691
1692601 end subroutine mpif_tm_reduce_grid
1693
1694
1695  subroutine mpif_tm_reduce_grid_nest
1696!***********************************************************************
1697! Collect nested grids to PID 0, adding from all processes.
1698!
1699! NOTE
1700!   - Compiling with 'fcheck=all' (gfortran) will cause a run-time error here,
1701!     as wetgriduncn0 is only allocated on the root process. The MPI call is
1702!     probably valid (the receive buffer is only significant at root), but
!     gfortran's run-time argument checking rejects the unallocated array.
1703!
1704!***********************************************************************
1705    use com_mod
1706    use unc_mod
1707    use par_mod
1708
1709    implicit none
1710
1711    integer :: grid_size2d,grid_size3d
1712
1713!**********************************************************************
1714
1715    grid_size3d=numxgridn*numygridn*numzgrid*maxspec* &
1716         & maxpointspec_act*nclassunc*maxageclass
1717    grid_size2d=numxgridn*numygridn*maxspec* &
1718         & maxpointspec_act*nclassunc*maxageclass
1719
1720
1721! Time for MPI communications
1722    if (mp_measure_time) call mpif_mtime('commtime',0)
1723
1724! Using a separate grid (gridunc0) for received values, for debugging
1725! call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_pp, MPI_SUM, id_root, &
1726!      & mp_comm_used, mp_ierr)
1727! if (mp_ierr /= 0) goto 600
1728
1729! Using in-place reduction
1730    if (lroot) then
1731      call MPI_Reduce(MPI_IN_PLACE, griduncn, grid_size3d, mp_pp, MPI_SUM, id_root, &
1732           & mp_comm_used, mp_ierr)
1733      if (mp_ierr /= 0) goto 600
1734    else
1735      call MPI_Reduce(griduncn, griduncn, grid_size3d, mp_pp, MPI_SUM, id_root, &
1736           & mp_comm_used, mp_ierr)
1737    end if
1738
1739    if ((WETDEP).and.(ldirect.gt.0)) then
1740      call MPI_Reduce(wetgriduncn, wetgriduncn0, grid_size2d, mp_pp, MPI_SUM, id_root, &
1741           & mp_comm_used, mp_ierr)
1742      if (mp_ierr /= 0) goto 600
1743    end if
1744
1745    if ((DRYDEP).and.(ldirect.gt.0)) then
1746      call MPI_Reduce(drygriduncn, drygriduncn0, grid_size2d, mp_pp, MPI_SUM, id_root, &
1747           & mp_comm_used, mp_ierr)
1748      if (mp_ierr /= 0) goto 600
1749    end if
1750
1751    if (mp_measure_time) call mpif_mtime('commtime',1)
1752
1753    goto 601
1754
1755600 write(*,*) "#### mpi_mod::mpif_tm_reduce_grid_nest> mp_ierr /= 0", mp_ierr
1756    stop
1757
1758601 end subroutine mpif_tm_reduce_grid_nest
1759
1760
1761  subroutine mpif_mtime(ident,imode)
1762!***********************************************************************
1763! Measure CPU/Wall time in various parts of the code
1764!
1765! VARIABLES
1766!   ident        character, identifies routine to measure
1767!   imode        integer, 0:start clock(s)  1: stop clock(s)
1768!
1769!***********************************************************************
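! Typical usage, as done throughout this module:
!
!   if (mp_measure_time) call mpif_mtime('commtime',0)   ! start clock
!   ! ...timed code section...
!   if (mp_measure_time) call mpif_mtime('commtime',1)   ! stop clock, accumulate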
1770    implicit none
1771
1772    character(LEN=*), intent(in) :: ident
1773    integer, intent(in) :: imode
1774
1775!***********************************************************************
1776
1777    select case(ident)
1778
1779    case ('timemanager')
1780      if (imode.eq.0) then
1781        call cpu_time(tm_tot_beg)
1782      else
1783        call cpu_time(tm_tot_end)
1784        tm_tot_total = tm_tot_total + (tm_tot_end-tm_tot_beg)
1785      end if
1786
1787    case ('wetdepo')
1788      if (imode.eq.0) then
1789        mp_wetdepo_wtime_beg = mpi_wtime()
1790        call cpu_time(mp_wetdepo_time_beg)
1791      else
1792        mp_wetdepo_wtime_end = mpi_wtime()
1793        call cpu_time(mp_wetdepo_time_end)
1794
1795        mp_wetdepo_wtime_total = mp_wetdepo_wtime_total + &
1796             &(mp_wetdepo_wtime_end - mp_wetdepo_wtime_beg)
1797        mp_wetdepo_time_total = mp_wetdepo_time_total + &
1798             &(mp_wetdepo_time_end - mp_wetdepo_time_beg)
1799      end if
1800
1801    case ('getfields')
1802      if (imode.eq.0) then
1803        mp_getfields_wtime_beg = mpi_wtime()
1804        call cpu_time(mp_getfields_time_beg)
1805      else
1806        mp_getfields_wtime_end = mpi_wtime()
1807        call cpu_time(mp_getfields_time_end)
1808
1809        mp_getfields_wtime_total = mp_getfields_wtime_total + &
1810             &(mp_getfields_wtime_end - mp_getfields_wtime_beg)
1811        mp_getfields_time_total = mp_getfields_time_total + &
1812             &(mp_getfields_time_end - mp_getfields_time_beg)
1813      end if
1814
1815    case ('partloop1')
1816      if (imode.eq.0) then
1817        call cpu_time(tm_nploop_beg)
1818      else
1819        call cpu_time(tm_nploop_end)
1820        tm_nploop_total = tm_nploop_total + (tm_nploop_end - tm_nploop_beg)
1821      end if
1822
1823    case ('conccalc')
1824      if (imode.eq.0) then
1825        call cpu_time(mp_conccalc_time_beg)
1826      else
1827        call cpu_time(mp_conccalc_time_end)
1828        mp_conccalc_time_total = mp_conccalc_time_total + mp_conccalc_time_end - &
1829             &mp_conccalc_time_beg
1830      end if
1831    case ('rootonly')
1832      if (imode.eq.0) then
1833        call cpu_time(mp_root_time_beg)
1834        mp_root_wtime_beg = mpi_wtime()
1835      else
1836        call cpu_time(mp_root_time_end)
1837        mp_root_wtime_end = mpi_wtime()
1838        mp_root_time_total = mp_root_time_total + &
1839             &(mp_root_time_end - mp_root_time_beg)
1840        mp_root_wtime_total = mp_root_wtime_total + &
1841             &(mp_root_wtime_end - mp_root_wtime_beg)
1842      end if
1843
1844    case ('iotime')
1845      if (imode.eq.0) then
1846        mp_io_wtime_beg = mpi_wtime()
1847        call cpu_time(mp_io_time_beg)
1848      else
1849        mp_io_wtime_end = mpi_wtime()
1850        call cpu_time(mp_io_time_end)
1851
1852        mp_io_wtime_total = mp_io_wtime_total + (mp_io_wtime_end - &
1853             & mp_io_wtime_beg)
1854        mp_io_time_total = mp_io_time_total + (mp_io_time_end - &
1855             & mp_io_time_beg)
1856      end if
1857
1858    case ('verttransform')
1859      if (imode.eq.0) then
1860        mp_vt_wtime_beg = mpi_wtime()
1861        call cpu_time(mp_vt_time_beg)
1862      else
1863        mp_vt_wtime_end = mpi_wtime()
1864        call cpu_time(mp_vt_time_end)
1865
1866        mp_vt_wtime_total = mp_vt_wtime_total + (mp_vt_wtime_end - &
1867             & mp_vt_wtime_beg)
1868        mp_vt_time_total = mp_vt_time_total + (mp_vt_time_end - &
1869             & mp_vt_time_beg)
1870      end if
1871
1872    case ('readwind')
1873      if (imode.eq.0) then
1874        call cpu_time(mp_readwind_time_beg)
1875        mp_readwind_wtime_beg = mpi_wtime()
1876      else
1877        call cpu_time(mp_readwind_time_end)
1878        mp_readwind_wtime_end = mpi_wtime()
1879
1880        mp_readwind_time_total = mp_readwind_time_total + &
1881             &(mp_readwind_time_end - mp_readwind_time_beg)
1882        mp_readwind_wtime_total = mp_readwind_wtime_total + &
1883             &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
1884      end if
1885
1886    case ('commtime')
1887      if (imode.eq.0) then
1888        call cpu_time(mp_comm_time_beg)
1889        mp_comm_wtime_beg = mpi_wtime()
1890      else
1891        call cpu_time(mp_comm_time_end)
1892        mp_comm_wtime_end = mpi_wtime()
1893        mp_comm_time_total = mp_comm_time_total + &
1894             &(mp_comm_time_end - mp_comm_time_beg)
1895        mp_comm_wtime_total = mp_comm_wtime_total + &
1896             &(mp_comm_wtime_end - mp_comm_wtime_beg)
1897      end if
1898
1899    case ('flexpart')
1900      if (imode.eq.0) then
1901        mp_total_wtime_beg=mpi_wtime()
1902      else
1903        mp_total_wtime_end=mpi_wtime()
1904        mp_total_wtime_total = mp_total_wtime_end-mp_total_wtime_beg
1905      end if
1906
1907    case default
1908      write(*,*) 'mpi_mod::mpif_mtime> WARNING: unknown case identifier'
1909
1910    end select
1911
1912
1913  end subroutine mpif_mtime
1914
1915
1916  subroutine mpif_finalize
1917!***********************************************************************
1918! Finalize MPI
1919! Optionally print detailed time diagnostics
1920!***********************************************************************
1921    implicit none
1922
1923    integer :: ip,j,r
1924
1925!***********************************************************************
1926
1927    IF (mp_measure_time) THEN
1928      do ip=0, mp_np-1
1929        call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
1930
1931        if (ip == mp_pid) then
1932          write(*,FMT='(72("#"))')
1933          write(*,FMT='(A60,I3)') 'STATISTICS FOR MPI PROCESS:', mp_pid
1934          if (ip == 0) then
1935            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR ROOT-ONLY (PID 0) &
1936                 &SECTIONS: ', mp_root_time_total
1937            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ROOT-ONLY (PID 0) &
1938                 &SECTIONS:', mp_root_wtime_total
1939          end if
1940          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FLEXPART: ' &
1941               &, mp_total_wtime_total
1942          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI COMMUNICATIONS:',&
1943               & mp_comm_time_total
1944          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI COMMUNICATIONS:',&
1945               & mp_comm_wtime_total
1946          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI BARRIERS:',&
1947               & mp_barrier_time_total
1948          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI BARRIERS:',&
1949               & mp_barrier_wtime_total
1950          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER:',&
1951               & tm_tot_total
1952          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER/NUMPART LOOP:',&
1953               & tm_nploop_total
1954          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ADVANCE:',&
1955               & mp_advance_wtime_total
1956          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR GETFIELDS:',&
1957               & mp_getfields_wtime_total
1958          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
1959               & mp_getfields_time_total
1960          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
1961               & mp_readwind_wtime_total
1962          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
1963               & mp_readwind_time_total
1964          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
1965               & mp_io_wtime_total
1966          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR FILE IO:',&
1967               & mp_io_time_total
1968          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR WETDEPO:',&
1969               & mp_wetdepo_wtime_total
1970          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR WETDEPO:',&
1971               & mp_wetdepo_time_total
1972          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',&
1973               & mp_conccalc_time_total
1974          ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
1975          !      & mp_vt_wtime_total
1976          ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
1977          !      & mp_vt_time_total
1978! NB: the 'flush' function is possibly a gfortran-specific extension
1979          call flush()
1980        end if
1981      end do
1982    end if
1983
1984! This call to barrier is for correctly formatting output
1985    call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)
1986
1987    if (lroot.and.mp_measure_time) then
1988      write(*,FMT='(72("#"))')
1989      WRITE(*,*) "To turn off output of time measurements, set "
1990      WRITE(*,*) "    mp_measure_time=.false."
1991      WRITE(*,*) "in file mpi_mod.f90"
1992      write(*,FMT='(72("#"))')
1993    end if
1994
1995! j=mp_pid*nvar_async
1996! In the implementation with 3 fields, the processes may have posted
1997! MPI_Irecv requests that should be cancelled here
1998!! TODO:
1999! if (.not.lmp_sync) then
2000!   r=mp_pid*nvar_async
2001!   do j=r,r+nvar_async-1
2002!     call MPI_Cancel(j,mp_ierr)
2003!     if (mp_ierr /= 0) write(*,*) '#### mpif_finalize::MPI_Cancel> ERROR ####'
2004!   end do
2005! end if
2006
2007    call MPI_FINALIZE(mp_ierr)
2008    if (mp_ierr /= 0) then
2009      write(*,*) '#### mpif_finalize::MPI_FINALIZE> MPI ERROR ', mp_ierr, ' ####'
2010      stop
2011    end if
2012
2013
2014    end subroutine mpif_finalize
2015
2016
2017    subroutine get_lun(my_lun)
2018!***********************************************************************
2019! get_lun:
2020!   Starting from 100, get next free logical unit number
2021!***********************************************************************
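!
!   Example use (cf. write_data_dbg below); on Fortran 2008 compilers the
!   open(newunit=...) specifier is an alternative to this routine. The file
!   name here is purely illustrative:
!
!     call get_lun(dat_lun)
!     open(UNIT=dat_lun, FILE='debug.dat', FORM='UNFORMATTED', STATUS='REPLACE')
!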
2022
2023      implicit none
2024
2025      integer, intent(inout) :: my_lun
2026      integer, save :: free_lun=100
2027      logical :: exists, iopen
2028
2029!***********************************************************************
2030
2031      loop1: do
2032        inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
2033        if (exists .and. .not.iopen) exit loop1
2034        free_lun = free_lun+1
2035      end do loop1
2036      my_lun = free_lun
2037
2038    end subroutine get_lun
2039
2040
2041    subroutine write_data_dbg(array_in, array_name, tstep, ident)
2042!***********************************************************************
2043! Write one-dimensional arrays to file (for debugging purposes)
2044!***********************************************************************
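!   Example call (illustrative only; the array, label, and identifier are
!   placeholders for whatever is being inspected):
!
!     call write_data_dbg(xmass1(1:numpart,1), 'xmass1', itime, 'after_advance')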
2045      implicit none
2046
2047      real, intent(in), dimension(:) :: array_in
2048      integer, intent(in) :: tstep
2049      integer :: lios
2050      character(LEN=*), intent(in) :: ident, array_name
2051
2052      character(LEN=8) :: c_ts
2053      character(LEN=40) :: fn_1, fn_2
2054
2055!***********************************************************************
2056
2057      write(c_ts, FMT='(I8.8,BZ)') tstep
2058      fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
2059      write(c_ts, FMT='(I2.2,BZ)') mp_np
2060      fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'
2061
2062      call get_lun(dat_lun)
2063      open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
2064           FORM='UNFORMATTED', STATUS='REPLACE')
2065      write(UNIT=dat_lun, IOSTAT=lios) array_in
2066      close(UNIT=dat_lun)
2067
2068    end subroutine write_data_dbg
2069
2070
2071  subroutine set_fields_synthetic()
2072!*******************************************************************************
2073! DESCRIPTION
2074!   Set all meteorological fields to synthetic (usually constant/homogeneous)
2075!   values.
2076!   Used for validation and error-checking
2077!
2078! NOTE
2079!   Fields are overwritten for all windfield memory indices (li:ui below).
2080!
2081! VARIABLES
2082!   
2083!
2084! TODO
2085!
2086!*******************************************************************************
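! Illustrative invocation (an assumption; the actual call site and any guard
! depend on how the test run is set up):
!
!   if (mp_dev_mode) call set_fields_synthetic()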
2087    use com_mod
2088
2089    implicit none
2090
2091    integer, parameter :: li=1, ui=3 ! wfmem indices (i.e., operate on entire field)
2092
2093
2094! Variables transferred at initialization only
2095!*********************************************
2096!    readclouds=readclouds_
2097    oro(:,:)=0.0
2098    excessoro(:,:)=0.0
2099    lsm(:,:)=0.0
2100    xlanduse(:,:,:)=0.0
2101!    wftime
2102!    numbwf
2103!    nmixz
2104!    height
2105
2106! Time-varying fields:
2107    uu(:,:,:,li:ui) = 10.0
2108    vv(:,:,:,li:ui) = 0.0
2109    uupol(:,:,:,li:ui) = 10.0 ! TODO check if ok
2110    vvpol(:,:,:,li:ui)=0.0
2111    ww(:,:,:,li:ui)=0.
2112    tt(:,:,:,li:ui)=300.
2113    rho(:,:,:,li:ui)=1.3
2114    drhodz(:,:,:,li:ui)=.0
2115    tth(:,:,:,li:ui)=0.0
2116    qvh(:,:,:,li:ui)=1.0
2117    qv(:,:,:,li:ui)=1.0
2118
2119    pv(:,:,:,li:ui)=1.0
2120    clouds(:,:,:,li:ui)=0.0
2121
2122    clwc(:,:,:,li:ui)=0.0
2123    ciwc(:,:,:,li:ui)=0.0
2124 
2125! 2D fields
2126
2127    cloudsh(:,:,li:ui)=0.0
2128    vdep(:,:,:,li:ui)=0.0
2129    ps(:,:,:,li:ui)=1.0e5
2130    sd(:,:,:,li:ui)=0.0
2131    tcc(:,:,:,li:ui)=0.0
2132    tt2(:,:,:,li:ui)=300.
2133    td2(:,:,:,li:ui)=250.
2134    lsprec(:,:,:,li:ui)=0.0
2135    convprec(:,:,:,li:ui)=0.0
2136    ustar(:,:,:,li:ui)=1.0
2137    wstar(:,:,:,li:ui)=1.0
2138    hmix(:,:,:,li:ui)=10000.
2139    tropopause(:,:,:,li:ui)=10000.
2140    oli(:,:,:,li:ui)=0.01
2141    z0=1.0
2142 
2143  end subroutine set_fields_synthetic
2144
2145end module mpi_mod