!**********************************************************************
! Copyright 1998,1999,2000,2001,2002,2005,2007,2008,2009,2010 *
! Andreas Stohl, Petra Seibert, A. Frank, Gerhard Wotawa, *
! Caroline Forster, Sabine Eckhardt, John Burkhart, Harald Sodemann *
! *
! This file is part of FLEXPART. *
! *
! FLEXPART is free software: you can redistribute it and/or modify *
! it under the terms of the GNU General Public License as published by*
! the Free Software Foundation, either version 3 of the License, or *
! (at your option) any later version. *
! *
! FLEXPART is distributed in the hope that it will be useful, *
! but WITHOUT ANY WARRANTY; without even the implied warranty of *
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
! GNU General Public License for more details. *
! *
! You should have received a copy of the GNU General Public License *
! along with FLEXPART. If not, see <http://www.gnu.org/licenses/>. *
!**********************************************************************

module mpi_mod

!*****************************************************************************
! *
! DESCRIPTION *
! This module contains subroutines and common variables used for the *
! MPI parallelization of FLEXPART. *
! *
! NOTE *
! Depending on the MPI library installed on your system (e.g. mpich2, *
! OpenMPI) you may need to choose below in this file between *
! use mpi *
! (if the MPI library comes with the file 'mpi.mod'); or *
! include 'mpif.h' *
! *
! *
!*****************************************************************************
! *
! Variables: *
! *
! mp_ierr MPI error code *
! mp_np Number of MPI processes *
! mp_pid Process ID of each MPI process *
! mp_seed Parameter for random number seed *
! read_grp_min Minimum number of processes at which one will be *
! used as reader *
! numpart_mpi, Number of particles per node *
! maxpart_mpi *
! mp_partid MPI process ID for particle calculation *
! mp_partgroup_ Refers to the subset of processors performing *
! loops over particles. Will be all processes *
! unless a dedicated process runs getfields/readwind *
! lmp_sync If .false., use asynchronous MPI *
! mp_cp Real precision to use for deposition fields *
! *
! *
! *
! *
!*****************************************************************************
! *
! Modification by DJM, 2017-05-09 - added #ifdef USE_MPIINPLACE cpp *
! directive to mpif_tm_reduce_grid() to ensure that MPI_IN_PLACE is *
! used in the MPI_Reduce() only if specifically compiled with that *
! directive. *
! *
!*****************************************************************************

use mpi
use par_mod, only: dp,sp
use com_mod, only: lroot

implicit none

! include 'mpif.h'
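! Sketch (illustrative only): if the MPI library on the target system does
! not provide 'mpi.mod', swap the two statements, i.e. comment out "use mpi"
! above and activate the include line instead:
!   ! use mpi
!   include 'mpif.h'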

public

! Set aside a process for reading windfields if using at least this many processes
!==================================================
integer, parameter, private :: read_grp_min=4
!==================================================

! Variables for each MPI process in the world group
integer :: mp_ierr, mp_np, mp_pid, mp_partid
integer, private :: world_group_id

! Variables for MPI processes in the 'particle' group
integer, allocatable, dimension(:) :: mp_partgroup_rank
integer, allocatable, dimension(:) :: npart_per_process
integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np

integer :: mp_seed=0
integer, parameter :: mp_sp=MPI_REAL4, mp_dp=MPI_REAL8
integer :: mp_cp
integer, parameter :: id_root=0 ! master process

! MPI tags/requests for send/receive operation
integer :: tm1
integer, parameter :: nvar_async=26
!integer, dimension(:), allocatable :: tags
integer, dimension(:), allocatable :: reqs

! Status array used for certain MPI operations (MPI_RECV)
integer, dimension(MPI_STATUS_SIZE) :: mp_status


integer :: id_read ! readwind/getfield process
integer :: numpart_mpi,maxpart_mpi ! number of particles per node
integer :: tot_numpart=0
integer :: mp_comm_used ! global or subgroup communicator

logical :: lmpreader=.false. ! is set to true for reading process(es) only.
logical :: lmp_use_reader=.false. ! true if separate readwind process is used

! .true. if only using synchronous MPI send/recv (default)
! If setting this to .false., numwfmem must be set to 3
!===============================================================================
logical :: lmp_sync=.true.
!===============================================================================
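! Illustrative sketch (assumption: numwfmem is the par_mod parameter imported
! in mpif_init below) -- to enable the asynchronous reader one would set
!   logical :: lmp_sync=.false.          ! here
!   integer,parameter :: numwfmem=3      ! in par_mod
! mpif_init aborts with an error message if the two settings are inconsistent.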

! mp_dbg_mode Used for debugging MPI.
! mp_dev_mode various checks related to debugging the parallel code
! mp_dbg_out write some arrays to data file for debugging
! mp_measure_time Measure cpu/wall time, write out at end of run
! mp_time_barrier Measure MPI barrier time
! mp_exact_numpart Use an extra MPI communication to give the exact number of particles
! to standard output (this does not otherwise affect the simulation)
logical, parameter :: mp_dbg_mode = .false.
logical, parameter :: mp_dev_mode = .false.
logical, parameter :: mp_dbg_out = .false.
logical, parameter :: mp_time_barrier=.true.
logical, parameter :: mp_measure_time=.false.
logical, parameter :: mp_exact_numpart=.true.

! for measuring CPU/Wall time
real(sp),private :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.
real(dp),private :: mp_comm_wtime_beg, mp_comm_wtime_end, mp_comm_wtime_total=0.
real(sp),private :: mp_root_time_beg, mp_root_time_end, mp_root_time_total=0.
real(dp),private :: mp_root_wtime_beg, mp_root_wtime_end, mp_root_wtime_total=0.
real(sp),private :: mp_barrier_time_beg, mp_barrier_time_end, mp_barrier_time_total=0.
real(dp),private :: mp_barrier_wtime_beg, mp_barrier_wtime_end, mp_barrier_wtime_total=0.
real(sp),private :: tm_nploop_beg, tm_nploop_end, tm_nploop_total=0.
real(sp),private :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
real(dp),private :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
real(sp),private :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
real(dp),private :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
real(sp),private :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
real(dp),private :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.
real(sp),private :: mp_wetdepo_time_beg, mp_wetdepo_time_end, mp_wetdepo_time_total=0.
real(dp),private :: mp_advance_wtime_beg, mp_advance_wtime_end, mp_advance_wtime_total=0.
real(dp),private :: mp_conccalc_time_beg, mp_conccalc_time_end, mp_conccalc_time_total=0.
real(dp),private :: mp_total_wtime_beg, mp_total_wtime_end, mp_total_wtime_total=0.
real(dp),private :: mp_vt_wtime_beg, mp_vt_wtime_end, mp_vt_wtime_total
real(sp),private :: mp_vt_time_beg, mp_vt_time_end, mp_vt_time_total

! dat_lun logical unit number for i/o
integer, private :: dat_lun

! Temporary arrays for particles (allocated and deallocated as needed)
integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, &
& itramem_tmp, itrasplit_tmp
real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp
real, allocatable, dimension(:) :: ztra1_tmp
real, allocatable, dimension(:,:) :: xmass1_tmp

! mp_redist_fract Exchange particles between processes if relative difference
! is larger. A good value is between 0.0 and 0.5
! mp_maxpart_factor Allocate more memory per process than strictly needed
! (safety factor). Recommended value between 1.5 and 2.5
! mp_min_redist Do not redistribute particles if below this limit
real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5
integer,parameter :: mp_min_redist=100000
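! Worked example (illustrative): with mp_redist_fract=0.2 and mp_min_redist=100000,
! a transfer between two processes is only triggered when the more loaded process
! holds more than 100000 particles and the difference exceeds 20 % of its count
! (see mpif_calculate_part_redist below).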


contains

subroutine mpif_init
!***********************************************************************
! DESCRIPTION
! Initialize MPI.
!
! Create the global communicator MPI_COMM_WORLD
! If using a separate MPI process for getfields/readwind, a subgroup
! is created for the other processes.
!
! VARIABLES
! mpi_mode default 0, set to 2/3 if running MPI version
! mp_np number of running processes, decided at run-time
!***********************************************************************
use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
use com_mod, only: mpi_mode, verbosity, creceptor0

implicit none

integer :: i,j,s,addmaxpart=0

! Each process gets an ID (mp_pid) in the range 0,..,mp_np-1
call MPI_INIT(mp_ierr)
if (mp_ierr /= 0) goto 100
call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)
if (mp_ierr /= 0) goto 100
call MPI_COMM_SIZE(MPI_COMM_WORLD, mp_np, mp_ierr)
if (mp_ierr /= 0) goto 100


! Variable mpi_mode is used to handle subroutines common to parallel/serial version
if (lmp_sync) then
mpi_mode=2 ! hold 2 windfields in memory
else
mpi_mode=3 ! hold 3 windfields in memory
end if

if (mp_pid.ne.0) then
lroot = .false.
end if

! Set MPI precision to use for transferring deposition fields
!************************************************************
if (dep_prec==dp) then
mp_cp = MPI_REAL8
! TODO: write info message for serial version as well
if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields'
else if (dep_prec==sp) then
mp_cp = MPI_REAL4
if (lroot.and.verbosity>0) write(*,*) 'Using single precision for deposition fields'
else
write(*,*) 'ERROR: something went wrong setting MPI real precision'
stop
end if

! Check for sensible combination of parameters
!*********************************************

if (.not.lmp_sync.and.numwfmem.ne.3) then
if (lroot) then
write(*,FMT='(80("#"))')
write(*,*) '#### mpi_mod::mpif_init> ERROR: ', &
& 'numwfmem must be set to 3 for asynchronous reading ####'
write(*,FMT='(80("#"))')
end if
call MPI_FINALIZE(mp_ierr)
stop
else if (lmp_sync.and.numwfmem.ne.2.and.lroot) then
write(*,FMT='(80("#"))')
write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
& 'numwfmem should be set to 2 for synchronous'
write(*,*) ' reading. Results will still be valid, but unnecessary memory &
&is allocated.'
write(*,FMT='(80("#"))')
! Force "synchronized" version if all processes will call getfields
else if ((.not.lmp_sync.and.mp_np.lt.read_grp_min).or.(mp_np.eq.1)) then
if (lroot) then
write(*,FMT='(80("#"))')
write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
& 'all procs call getfields. Setting lmp_sync=.true.'
write(*,FMT='(80("#"))')
end if
lmp_sync=.true.
end if

! TODO: Add more warnings for unimplemented flexpart features

! Set ID of process that calls getfield/readwind.
! Using the last process in the group ensures statistically identical results
! as running with one process less and no separate read process
!**********************************************************************

! id_read = min(mp_np-1, 1)
id_read = mp_np-1

if (mp_pid.eq.id_read) lmpreader=.true.

call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)

! Create a MPI group/communicator that will calculate trajectories.
! Skip this step if program is run with only a few processes
!************************************************************************
allocate(mp_partgroup_rank(0:mp_np-2))

! This allows checking for allocation of arrays when no subgroup is used
mp_partgroup_pid=0

if (read_grp_min.lt.2) then
write(*,*) '#### mpi_mod::mpif_init> ERROR ####', &
& 'read_grp_min should be at least 2. Exiting'
stop
end if

if (mp_np.ge.read_grp_min) then
lmp_use_reader = .true.

! Map the subgroup IDs= 0,1,2,...,mp_np-2, skipping reader process
j=-1
do i=0, mp_np-2 !loop over all (subgroup) IDs
j=j+1
if (i.eq.id_read) then
j=j+1
end if
mp_partgroup_rank(i) = j
end do
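! Example (illustrative): with mp_np=4 the reader is world rank id_read=3, so
! the loop above fills mp_partgroup_rank with the world ranks 0,1,2, which then
! form the particle group created below.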

call MPI_Group_incl (world_group_id, mp_np-1, mp_partgroup_rank, &
&mp_partgroup_pid, mp_ierr)
if (mp_ierr /= 0) goto 100
call MPI_Comm_create (MPI_COMM_WORLD, mp_partgroup_pid, mp_partgroup_comm, mp_ierr)
if (mp_ierr /= 0) goto 100

if (mp_pid.ne.id_read) then
call MPI_Comm_rank (mp_partgroup_comm, mp_partgroup_pid, mp_ierr)
if (mp_ierr /= 0) goto 100

! Get size of new group (=mp_np-1)
call MPI_COMM_SIZE(mp_partgroup_comm, mp_partgroup_np, mp_ierr)
if (mp_ierr /= 0) goto 100
if (mp_partgroup_np.ne.mp_np-1) then
write(*,*) '#### mpi_mod:: mpif_init> ERROR ####&
& mp_partgroup_np.ne.mp_np-1'
stop
endif

else
mp_partgroup_pid = -1
end if
end if

if (lmp_use_reader) then
mp_comm_used = mp_partgroup_comm
mp_partid = mp_partgroup_pid
mp_partgroup_np=mp_np-1
else
mp_comm_used = MPI_COMM_WORLD
mp_partgroup_np = mp_np
mp_partid = mp_pid
end if

! Set maxpart per process
! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
if (mp_np == 1) maxpart_mpi = maxpart

! Do not allocate particle data arrays for readwind process
if (lmpreader.and.lmp_use_reader) then
maxpart_mpi=0
end if

! Set random seed for each non-root process
if (mp_pid.gt.0) then
s = 244
mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
end if
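! Note (added): the expression above gives every non-root rank its own
! reproducible, negative seed; the modulus 104729 is a prime, which keeps the
! seed values bounded while spreading them across ranks.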
if (mp_dbg_mode) then
mp_seed=0
if (lroot) write(*,*) 'MPI: setting seed=0'
end if

! Allocate request array for asynchronous MPI
if (.not.lmp_sync) then
allocate(reqs(0:nvar_async*mp_np-1))
reqs(:)=MPI_REQUEST_NULL
else
allocate(reqs(0:1))
reqs(:)=MPI_REQUEST_NULL
end if

! Allocate array for number of particles per process
allocate(npart_per_process(0:mp_partgroup_np-1))

! Write whether MPI_IN_PLACE is used or not
#ifdef USE_MPIINPLACE
if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
#else
if (lroot) allocate(creceptor0(maxreceptor,maxspec))
if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
#endif
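! Note (added): USE_MPIINPLACE is a preprocessor symbol, typically enabled by
! passing -DUSE_MPIINPLACE in the compiler flags (e.g. via the makefile) when
! this file is built with preprocessing.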
goto 101

100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
stop

101 end subroutine mpif_init


subroutine mpif_mpi_barrier
!***********************************************************************
! Set MPI barrier (all processes are synchronized here), with the
! possible exception of a separate 'readwind' process.
! Optionally measure cpu/wall time.
!
!***********************************************************************
implicit none

if (mp_time_barrier) then
call cpu_time(mp_barrier_time_beg)
mp_barrier_wtime_beg = mpi_wtime()
endif

call MPI_BARRIER(mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 100

if (mp_time_barrier) then
call cpu_time(mp_barrier_time_end)
mp_barrier_wtime_end = mpi_wtime()

mp_barrier_time_total=mp_barrier_time_total+&
&(mp_barrier_time_end-mp_barrier_time_beg)
mp_barrier_wtime_total=mp_barrier_wtime_total+&
&(mp_barrier_wtime_end-mp_barrier_wtime_beg)
end if

goto 101

100 write(*,*) '#### mpi_mod::mpif_mpi_barrier> ERROR ####', mp_ierr
stop

101 end subroutine mpif_mpi_barrier


subroutine mpif_com_mod_allocate
!*******************************************************************************
! Dynamic allocation of arrays (in serial code these have size maxpart)
!
! Not in use anymore, moved to com_mod for interoperability with serial version
!
! TODO: remove
!*******************************************************************************
use com_mod

implicit none

integer :: array_size

array_size = maxpart_mpi

allocate(itra1(array_size),npoint(array_size),&
& nclass(array_size),idt(array_size),&
& itramem(array_size),itrasplit(array_size),&
& xtra1(array_size),ytra1(array_size),&
& ztra1(array_size),xmass1(array_size, maxspec))

allocate(uap(array_size),ucp(array_size),&
& uzp(array_size),us(array_size),&
& vs(array_size),&
& ws(array_size),cbt(array_size))


end subroutine mpif_com_mod_allocate


subroutine mpif_tm_send_dims
!***********************************************************************
! Distribute array dimensions from pid0 to all processes.
! numpart_mpi/numpart is sent to allow dynamic allocation
!
! Currently not in use.
!
! Variables of similar type (integer, real) are placed in an array
! and sent collectively, to avoid the overhead associated with individual
! MPI send/receives
!
!
!***********************************************************************
use com_mod

implicit none

integer :: add_part=0

call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)

! MPI subgroup does the particle-loop
if (lmp_use_reader) then
if (mod(numpart,mp_partgroup_np).ne.0) add_part=1
numpart_mpi=int(numpart/mp_partgroup_np)+add_part
else
if (mod(numpart,mp_np).ne.0) add_part=1
numpart_mpi=int(numpart/mp_np)+add_part
end if


! redefine numpart as 'numpart per process' throughout the code
!**************************************************************
numpart = numpart_mpi

end subroutine mpif_tm_send_dims


subroutine mpif_send_part_properties(num_part)
!***********************************************************************
! Distribute particle properties from root to all processes.
!
! Used by init_domainfill_mpi
!
! Variables:
!
! num_part input, number of particles per process (rounded up)
!
!***********************************************************************
use com_mod

implicit none

integer,intent(in) :: num_part
integer :: i,jj, addone

! Exit if too many particles
if (num_part.gt.maxpart_mpi) then
write(*,*) '#####################################################'
write(*,*) '#### ERROR - TOTAL NUMBER OF PARTICLES REQUIRED ####'
write(*,*) '#### EXCEEDS THE MAXIMUM ALLOWED NUMBER. REDUCE ####'
write(*,*) '#### EITHER NUMBER OF PARTICLES PER RELEASE POINT####'
write(*,*) '#### OR INCREASE MAXPART. ####'
write(*,*) '#####################################################'
! call MPI_FINALIZE(mp_ierr)
stop
end if


! Time for MPI communications
!****************************
if (mp_measure_time) call mpif_mtime('commtime',0)

! Distribute variables, send from pid 0 to other processes (including itself)
!****************************************************************************
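! Note (added): each MPI_SCATTER below hands every process (root included) a
! contiguous block of num_part elements from the *_tmp arrays filled on root.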

call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,&
&num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,&
&num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,&
&num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,&
&num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
do i=1,nspec
call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),&
&num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
end do

if (mp_measure_time) call mpif_mtime('commtime',1)

goto 601

600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
stop

! After the transfer of particles it is possible that the value of
! "numpart" is larger than the number actually used, so we reduce it if there are
! invalid particles at the end of the arrays

601 do i=numpart, 1, -1
if (itra1(i).eq.-999999999) then
numpart=numpart-1
else
exit
end if
end do


end subroutine mpif_send_part_properties


subroutine mpif_calculate_part_redist(itime)
!***********************************************************************
! Calculate number of particles to redistribute between processes. This routine
! can be called at regular time intervals to keep an approximately equal number of
! particles on each process.
!
! First, all processes report their local "numpart" to each other, which is
! stored in array "npart_per_process(np)". This array is sorted from low to
! high values, along with a corresponding process ID array "idx_arr(np)".
! If the relative difference between the highest and lowest "npart_per_process" value
! is above a threshold, particles are transferred from process idx_arr(np-1)
! to process idx_arr(0). Repeat for processes idx_arr(np-i) and idx_arr(i)
! for all valid i.
! Note: If np is an odd number, the process with the median
! number of particles is left unchanged
!
! VARIABLES
! itime input, current time
!***********************************************************************
use com_mod

implicit none

integer, intent(in) :: itime
real :: pmin,z
integer :: i,jj,nn, num_part=1,m,imin, num_trans
logical :: first_iter
integer,allocatable,dimension(:) :: idx_arr
real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this

! Exit if running with only 1 process
!************************************************************************
if (mp_np.eq.1) return

! All processes exchange information on number of particles
!****************************************************************************
allocate( idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))

call MPI_Allgather(numpart, 1, MPI_INTEGER, npart_per_process, &
& 1, MPI_INTEGER, mp_comm_used, mp_ierr)


! Sort from lowest to highest
! Initial guess: correct order
sorted(:) = npart_per_process(:)
do i=0, mp_partgroup_np-1
idx_arr(i) = i
end do

! Do not rebalance particles for ipout=3
if (ipout.eq.3) return

! For each successive element in index array, see if a lower value exists
do i=0, mp_partgroup_np-2
pmin=sorted(i)
imin=idx_arr(i)
m=i+1
do jj=m, mp_partgroup_np-1
if (pmin.le.sorted(jj)) cycle
z=pmin
pmin=sorted(jj)
sorted(jj)=z

nn=imin
imin=idx_arr(jj)
idx_arr(jj)=nn
end do
sorted(i)=pmin
idx_arr(i)=imin
end do
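! Note (added): this is a simple O(np**2) exchange/selection sort;
! npart_per_process itself is left untouched, only the local copies
! "sorted" and "idx_arr" are reordered.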

! For each pair of processes, transfer particles if the difference is above a
! limit, and numpart at sending process large enough

m=mp_partgroup_np-1 ! index for last sorted process (most particles)
do i=0,mp_partgroup_np/2-1
num_trans = npart_per_process(idx_arr(m)) - npart_per_process(idx_arr(i))
if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
if ( npart_per_process(idx_arr(m)).gt.mp_min_redist.and.&
& real(num_trans)/real(npart_per_process(idx_arr(m))).gt.mp_redist_fract) then
! DBG
! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process', &
! &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process
! DBG
call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
end if
end if
m=m-1
end do
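! Worked example (illustrative): two processes holding 300000 and 120000 particles
! give num_trans=180000; since 300000 > mp_min_redist and 180000/300000=0.6 exceeds
! mp_redist_fract, mpif_redist_part moves num_trans/2=90000 particles, leaving both
! processes with 210000.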

deallocate(idx_arr, sorted)

end subroutine mpif_calculate_part_redist


subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans)
!***********************************************************************
! Transfer particle properties between two arbitrary processes.
!
! VARIABLES
!
! itime input, current time
! src_proc input, ID of source process
! dest_proc input, ID of destination process
! num_trans input, number of particles to transfer
!
!************************************************************************
use com_mod !TODO: ,only: nclass etc

implicit none

integer, intent(in) :: itime, src_proc, dest_proc, num_trans
integer :: ll, ul ! lower and upper indices in arrays
integer :: arr_sz ! size of temporary arrays
integer :: mtag ! MPI message tag
integer :: i, j, minpart, ipart, maxnumpart

! Check for invalid input arguments
!**********************************
if (src_proc.eq.dest_proc) then
write(*,*) '<mpi_mod::mpif_redist_part>: Error: &
&src_proc.eq.dest_proc'
stop
end if

! Measure time for MPI communications
!************************************
if (mp_measure_time) call mpif_mtime('commtime',0)

! Send particles
!***************
if (mp_partid.eq.src_proc) then
mtag = 2000

! Array indices for the transferred particles
ll=numpart-num_trans+1
ul=numpart

if (mp_dev_mode) then
write(*,FMT='(72("#"))')
write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc
write(*,*) "numpart before: ", numpart
end if

call MPI_SEND(nclass(ll:ul),num_trans,&
& MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr)

call MPI_SEND(npoint(ll:ul),num_trans,&
& MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr)

call MPI_SEND(itra1(ll:ul),num_trans, &
& MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr)

call MPI_SEND(idt(ll:ul),num_trans, &
& MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr)

call MPI_SEND(itramem(ll:ul),num_trans, &
& MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr)

call MPI_SEND(itrasplit(ll:ul),num_trans,&
& MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr)

call MPI_SEND(xtra1(ll:ul),num_trans, &
& mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr)

call MPI_SEND(ytra1(ll:ul),num_trans,&
& mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr)

call MPI_SEND(ztra1(ll:ul),num_trans,&
& mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr)

do j=1,nspec
call MPI_SEND(xmass1(ll:ul,j),num_trans,&
& mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr)
end do

! Terminate transferred particles, update numpart
itra1(ll:ul) = -999999999

numpart = numpart-num_trans

if (mp_dev_mode) then
write(*,*) "numpart after: ", numpart
write(*,FMT='(72("#"))')
end if

else if (mp_partid.eq.dest_proc) then

! Receive particles
!******************
mtag = 2000
! Array indices for the transferred particles
ll=numpart+1
ul=numpart+num_trans

if (mp_dev_mode) then
write(*,FMT='(72("#"))')
write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc
write(*,*) "numpart before: ", numpart
end if

! We could receive the data directly at nclass(ll:ul) etc., but this leaves
! vacant spaces at lower indices. Using temporary arrays instead.
arr_sz = ul-ll+1
allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),&
& idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),&
& xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),&
& xmass1_tmp(arr_sz, maxspec))

call MPI_RECV(nclass_tmp,num_trans,&
& MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr)

call MPI_RECV(npoint_tmp,num_trans,&
& MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr)

call MPI_RECV(itra1_tmp,num_trans, &
& MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr)

call MPI_RECV(idt_tmp,num_trans, &
& MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr)

call MPI_RECV(itramem_tmp,num_trans, &
& MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr)

call MPI_RECV(itrasplit_tmp,num_trans,&
& MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr)

call MPI_RECV(xtra1_tmp,num_trans, &
& mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr)

call MPI_RECV(ytra1_tmp,num_trans,&
& mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr)

call MPI_RECV(ztra1_tmp,num_trans,&
& mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr)

do j=1,nspec
call MPI_RECV(xmass1_tmp(:,j),num_trans,&
& mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr)
end do

! This is the maximum value numpart can possibly have after the transfer
maxnumpart=numpart+num_trans

! Search for vacant space and copy from temporary storage
!********************************************************
minpart=1
do i=1, num_trans
! Take into account that we may have transferred invalid particles
if (itra1_tmp(i).ne.itime) cycle
do ipart=minpart,maxnumpart
if (itra1(ipart).ne.itime) then
itra1(ipart) = itra1_tmp(i)
npoint(ipart) = npoint_tmp(i)
nclass(ipart) = nclass_tmp(i)
idt(ipart) = idt_tmp(i)
itramem(ipart) = itramem_tmp(i)
itrasplit(ipart) = itrasplit_tmp(i)
xtra1(ipart) = xtra1_tmp(i)
ytra1(ipart) = ytra1_tmp(i)
ztra1(ipart) = ztra1_tmp(i)
xmass1(ipart,:) = xmass1_tmp(i,:)
goto 200 ! Storage space has been found, stop searching
end if
! :TODO: add a check for whether too many particles are required
end do
200 minpart=ipart+1
end do
! Increase numpart, if necessary
numpart=max(numpart,ipart)

deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
&itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)

if (mp_dev_mode) then
write(*,*) "numpart after: ", numpart
write(*,FMT='(72("#"))')
end if

else
! This routine should only be called by the two participating processes
write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part"
stop
return
end if

! Measure time for MPI communications
!************************************
if (mp_measure_time) call mpif_mtime('commtime',1)

end subroutine mpif_redist_part


subroutine mpif_tm_send_vars
!***********************************************************************
! Distribute particle variables from pid0 to all processes.
! Called from timemanager
! *NOT IN USE* at the moment, but can be useful for debugging
!
!***********************************************************************
use com_mod

implicit none

integer :: i

! Time for MPI communications
!****************************
if (mp_measure_time) call mpif_mtime('commtime',0)

! Distribute variables, send from pid 0 to other processes
!**********************************************************************

! integers
if (lroot) then
call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
& numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
& numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
& numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
& numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
& numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600

! int2
call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
& numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600

! real
call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600

call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600

call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
& numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
& numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600

do i=1,nspec
call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
end do

else ! (mp_pid >= 1)
! integers
call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
& numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
& numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
& numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
& numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
& numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600

! int2
call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
& numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600

! reals
call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600

call MPI_SCATTER(us,numpart_mpi,mp_sp,us,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600

call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
& numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
& numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600

do i=1,nspec
call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),&
& numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
end do

end if !(lroot)

if (mp_measure_time) call mpif_mtime('commtime',1)

goto 601

600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
stop
601 end subroutine mpif_tm_send_vars


subroutine mpif_tm_collect_vars
!*******************************************************************************
! Collect particle data to PID 0 from all processes. *
! *
! This can be called from timemanager_mpi, before calculating *
! concentrations/writing output grids. *
! *
! Currently *not in use*, as each process calculates concentrations *
! separately, but can be useful for debugging *
! *
! To use this routine (together with mpif_tm_send_vars) to transfer data *
! to the MPI root process (useful for debugging), insert code like this *
! (in timemanager_mpi.f90) *
! *
! if (lroot) tot_numpart=numpart ! root stores total numpart *
! call mpif_tm_send_dims *
! if (numpart>1) then *
! call mpif_tm_send_vars *
! end if *
! *
! To collect data from all processes to root process after a parallel *
! region: *
! *
! if (numpart>0) then *
! if (lroot) numpart = tot_numpart *
! call mpif_tm_collect_vars *
! end if *
! *
! Then a section that begins with "if (lroot) ..." will behave like *
! serial flexpart *
! *
!*******************************************************************************
use com_mod !, only: numpart, nspec, itra1, npoint, nclass

implicit none

integer :: i

logical :: use_mp_vars = .false.! use mp_... type allocated vars
logical :: use_in_place = .true.! use MPI_IN_PLACE to collect vars.


! Time for MPI communications
if (mp_measure_time) call mpif_mtime('commtime',0)

! Distribute variables, send from pid 0 to other processes
!**********************************************************************

if (.not. use_mp_vars.and..not.use_in_place) then

! Integers:
call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
&numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
&numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
&numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
&numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
&numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600

! int2
call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
&numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600

! Reals:
call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600

call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600


call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
&numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
&numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600

do i=1, nspec
call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600

end do

! Use variables npoint etc directly for communications
!***********************************************************************
else if (use_in_place.and..not.use_mp_vars) then
if (lroot) then
! Integers:
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, npoint, &
&numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, idt, &
&numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itra1, &
&numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, nclass, &
&numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itramem, &
&numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600

! int2
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER2, cbt, &
&numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600

! Reals:
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uap, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ucp, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uzp, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600

call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, us, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, vs, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ws, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600


call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, xtra1, &
&numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, ytra1, &
&numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ztra1, &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600

do i=1, nspec
call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, xmass1(:,i), &
&numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
end do

else ! (for mp_pid >= 1)

! Integers:
call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
---|
| 1207 | &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr) |
---|
| 1208 | if (mp_ierr /= 0) goto 600 |
---|
| 1209 | call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, & |
---|
| 1210 | &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr) |
---|
| 1211 | if (mp_ierr /= 0) goto 600 |
---|
| 1212 | call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, & |
---|
| 1213 | &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr) |
---|
| 1214 | if (mp_ierr /= 0) goto 600 |
---|
| 1215 | call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, & |
---|
| 1216 | &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr) |
---|
| 1217 | if (mp_ierr /= 0) goto 600 |
---|
| 1218 | call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, & |
---|
| 1219 | &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr) |
---|
| 1220 | if (mp_ierr /= 0) goto 600 |
---|
| 1221 | |
---|
| 1222 | ! int2 |
---|
| 1223 | call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, & |
---|
| 1224 | &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr) |
---|
| 1225 | if (mp_ierr /= 0) goto 600 |
---|
| 1226 | |
---|
| 1227 | ! Reals: |
---|
[fdc0f03] | 1228 | call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, & |
---|
| 1229 | &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr) |
---|
[8a65cb0] | 1230 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1231 | call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, & |
---|
| 1232 | &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr) |
---|
[8a65cb0] | 1233 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1234 | call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, & |
---|
| 1235 | &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr) |
---|
[8a65cb0] | 1236 | if (mp_ierr /= 0) goto 600 |
---|
| 1237 | |
---|
[fdc0f03] | 1238 | call MPI_GATHER(us, numpart_mpi, mp_sp, us, & |
---|
| 1239 | &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr) |
---|
[8a65cb0] | 1240 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1241 | call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, & |
---|
| 1242 | &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr) |
---|
[8a65cb0] | 1243 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1244 | call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, & |
---|
| 1245 | &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr) |
---|
[8a65cb0] | 1246 | if (mp_ierr /= 0) goto 600 |
---|
| 1247 | |
---|
| 1248 | |
---|
| 1249 | call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, & |
---|
| 1250 | &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr) |
---|
| 1251 | if (mp_ierr /= 0) goto 600 |
---|
| 1252 | call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, & |
---|
| 1253 | &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr) |
---|
| 1254 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1255 | call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, & |
---|
| 1256 | &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr) |
---|
[8a65cb0] | 1257 | if (mp_ierr /= 0) goto 600 |
---|
| 1258 | |
---|
| 1259 | do i=1, nspec |
---|
[fdc0f03] | 1260 | call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), & |
---|
| 1261 | &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr) |
---|
[8a65cb0] | 1262 | if (mp_ierr /= 0) goto 600 |
---|
| 1263 | end do |
---|
| 1264 | end if ! (lroot) |
---|
| 1265 | end if ! (use_in_place) |
---|
| 1266 | |
---|
| 1267 | if (mp_measure_time) call mpif_mtime('commtime',1) |
---|
| 1268 | |
---|
| 1269 | goto 601 |
---|
| 1270 | |
---|
| 1271 | 600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr |
---|
| 1272 | stop |
---|
| 1273 | 601 end subroutine mpif_tm_collect_vars |
---|
| 1274 | |
---|
| 1275 | |
---|
| 1276 | subroutine mpif_gf_send_vars(memstat) |
---|
| 1277 | !******************************************************************************* |
---|
| 1278 | ! DESCRIPTION |
---|
[5f9d14a] | 1279 | ! Distribute 'getfield' variables from reader process |
---|
[8a65cb0] | 1280 | ! |
---|
| 1281 | ! Called from timemanager |
---|
| 1282 | ! |
---|
| 1283 | ! NOTE |
---|
| 1284 | ! This subroutine distributes windfields read from the reader process to |
---|
[3b80e98] | 1285 | ! all other processes. Usually one set of fields is transferred, but at |
---|
[8a65cb0] | 1286 | ! the first timestep, when there are no fields in memory, both are transferred. |
---|
| 1287 | ! MPI_Bcast is used, so implicitly all processes are synchronized at this |
---|
| 1288 | ! step |
---|
| 1289 | ! |
---|
| 1290 | ! |
---|
| 1291 | !******************************************************************************* |
---|
| 1292 | use com_mod |
---|
| 1293 | use par_mod,only: numwfmem |
---|
| 1294 | |
---|
| 1295 | implicit none |
---|
| 1296 | |
---|
| 1297 | integer, intent(in) :: memstat |
---|
| 1298 | |
---|
| 1299 | ! Common array sizes used for communications |
---|
| 1300 | integer, parameter :: d3_size1 = nxmax*nymax*nzmax |
---|
| 1301 | integer, parameter :: d3_size2 = nxmax*nymax*nuvzmax |
---|
| 1302 | integer, parameter :: d2_size1 = nxmax*nymax |
---|
| 1303 | integer, parameter :: d2_size2 = nxmax*nymax*maxspec |
---|
| 1304 | integer, parameter :: d2_size3 = nxmax*nymax |
---|
| 1305 | integer, parameter :: d1_size1 = maxwf |
---|
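| | ! d3_size1: full 3D fields on model levels, d3_size2: 3D fields on the nuvzmax |
---|
| | ! levels (tth, qvh), d2_size1/d2_size3: plain 2D surface fields, d2_size2: 2D |
---|
| | ! fields with one slice per species (vdep), d1_size1: wind-field time list |
---|
| | ! (wftime). |
---|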
| 1306 | |
---|
| 1307 | integer :: d3s1,d3s2,d2s1,d2s2 |
---|
| 1308 | |
---|
| 1309 | ! li,ui lower,upper indices of fields to be transferred (1-3, ui>=li) |
---|
| 1310 | integer :: li,ui |
---|
| 1311 | |
---|
| 1312 | ! The first time this routine is called, the unchangeable fields are transferred |
---|
| 1313 | logical :: first_call=.true. |
---|
| 1314 | |
---|
| 1315 | !********************************************************************** |
---|
| 1316 | |
---|
| 1317 | ! Sizes of arrays transferred |
---|
| 1318 | d3s1=d3_size1 |
---|
| 1319 | d3s2=d3_size2 |
---|
| 1320 | d2s1=d2_size1 |
---|
| 1321 | d2s2=d2_size2 |
---|
| 1322 | |
---|
| 1323 | ! Decide which field(s) need to be transferred |
---|
| 1324 | if (memstat.ge.32) then ! distribute 2 fields, to 1st/2nd indices |
---|
| 1325 | li=1; ui=2 |
---|
| 1326 | d3s1=2*d3_size1 |
---|
| 1327 | d3s2=2*d3_size2 |
---|
| 1328 | d2s1=2*d2_size1 |
---|
| 1329 | d2s2=2*d2_size2 |
---|
| 1330 | else if (memstat.eq.17) then ! distribute 1 field, place on 1st index |
---|
| 1331 | li=1; ui=1 |
---|
| 1332 | else if (memstat.eq.18) then ! distribute 1 field, place on 2nd index |
---|
| 1333 | li=2; ui=2 |
---|
| 1334 | else if (memstat.eq.19) then ! distribute 1 field, place on 3rd index |
---|
| 1335 | li=3; ui=3 |
---|
| 1336 | else |
---|
| 1337 | write(*,*) '#### mpi_mod::mpif_gf_send_vars> ERROR: ', & |
---|
| 1338 | & 'wrong value of memstat, exiting ####', memstat |
---|
| 1339 | stop |
---|
| 1340 | end if |
---|
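| | ! The time level is the last (slowest-varying) dimension of these arrays, so a |
---|
| | ! slice such as uu(:,:,:,li:ui) is contiguous in memory. When two fields are |
---|
| | ! distributed (memstat.ge.32) a single MPI_Bcast with the doubled counts above |
---|
| | ! therefore covers both time levels in one call. |
---|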
| 1341 | |
---|
| 1342 | |
---|
| 1343 | ! Time for MPI communications |
---|
| 1344 | if (mp_measure_time) call mpif_mtime('commtime',0) |
---|
| 1345 | |
---|
| 1346 | ! Send variables from getfield process (id_read) to other processes |
---|
| 1347 | !********************************************************************** |
---|
| 1348 | |
---|
[3b80e98] | 1349 | ! The non-reader processes need to know if cloud water was read. |
---|
[5f9d14a] | 1350 | call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
| 1351 | if (mp_ierr /= 0) goto 600 |
---|
| 1352 | |
---|
[8a65cb0] | 1353 | ! Static fields/variables sent only at startup |
---|
| 1354 | if (first_call) then |
---|
[fdc0f03] | 1355 | call MPI_Bcast(oro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1356 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1357 | call MPI_Bcast(excessoro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1358 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1359 | call MPI_Bcast(lsm(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1360 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1361 | call MPI_Bcast(xlanduse(:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1362 | if (mp_ierr /= 0) goto 600 |
---|
| 1363 | call MPI_Bcast(wftime,d1_size1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
| 1364 | if (mp_ierr /= 0) goto 600 |
---|
| 1365 | call MPI_Bcast(numbwf,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
| 1366 | if (mp_ierr /= 0) goto 600 |
---|
| 1367 | call MPI_Bcast(nmixz,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
| 1368 | if (mp_ierr /= 0) goto 600 |
---|
| 1369 | call MPI_Bcast(height,nzmax,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[5f9d14a] | 1370 | if (mp_ierr /= 0) goto 600 |
---|
[8a65cb0] | 1371 | |
---|
| 1372 | first_call=.false. |
---|
| 1373 | endif |
---|
| 1374 | |
---|
[fdc0f03] | 1375 | call MPI_Bcast(uu(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1376 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1377 | call MPI_Bcast(vv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1378 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1379 | call MPI_Bcast(uupol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1380 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1381 | call MPI_Bcast(vvpol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1382 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1383 | call MPI_Bcast(ww(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1384 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1385 | call MPI_Bcast(tt(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1386 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1387 | call MPI_Bcast(rho(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1388 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1389 | call MPI_Bcast(drhodz(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1390 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1391 | call MPI_Bcast(tth(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1392 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1393 | call MPI_Bcast(qvh(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1394 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1395 | call MPI_Bcast(qv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1396 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1397 | call MPI_Bcast(pv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1398 | if (mp_ierr /= 0) goto 600 |
---|
| 1399 | call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[5f9d14a] | 1400 | if (mp_ierr /= 0) goto 600 |
---|
| 1401 | |
---|
| 1402 | ! cloud water/ice: |
---|
| 1403 | if (readclouds) then |
---|
[341f4b7] | 1404 | call MPI_Bcast(ctwc(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[5f9d14a] | 1405 | if (mp_ierr /= 0) goto 600 |
---|
| 1406 | end if |
---|
[8a65cb0] | 1407 | |
---|
| 1408 | ! 2D fields |
---|
[fdc0f03] | 1409 | call MPI_Bcast(cloudsh(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1410 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1411 | call MPI_Bcast(vdep(:,:,:,li:ui),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1412 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1413 | call MPI_Bcast(ps(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1414 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1415 | call MPI_Bcast(sd(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1416 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1417 | call MPI_Bcast(tcc(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1418 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1419 | call MPI_Bcast(tt2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1420 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1421 | call MPI_Bcast(td2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1422 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1423 | call MPI_Bcast(lsprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1424 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1425 | call MPI_Bcast(convprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1426 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1427 | call MPI_Bcast(ustar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1428 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1429 | call MPI_Bcast(wstar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1430 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1431 | call MPI_Bcast(hmix(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1432 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1433 | call MPI_Bcast(tropopause(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1434 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1435 | call MPI_Bcast(oli(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1436 | if (mp_ierr /= 0) goto 600 |
---|
| 1437 | |
---|
| 1438 | if (mp_measure_time) call mpif_mtime('commtime',1) |
---|
| 1439 | |
---|
| 1440 | goto 601 |
---|
| 1441 | |
---|
| 1442 | 600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr |
---|
| 1443 | stop |
---|
| 1444 | |
---|
| 1445 | 601 end subroutine mpif_gf_send_vars |
---|
| 1446 | |
---|
| 1447 | |
---|
| 1448 | subroutine mpif_gf_send_vars_nest(memstat) |
---|
| 1449 | !*********************************************************************** |
---|
| 1450 | ! DESCRIPTION |
---|
| 1451 | ! Distribute 'getfield' variables from reader process to all processes. |
---|
| 1452 | ! For nested fields |
---|
| 1453 | ! |
---|
| 1454 | ! Called from timemanager |
---|
| 1455 | ! |
---|
| 1456 | ! NOTE |
---|
| 1457 | ! This subroutine distributes nested windfields from the reader process to |
---|
| 1458 | ! all other processes. Usually one set of fields is transferred, but at |
---|
| 1459 | ! the first timestep, when there are no fields in memory, both are transferred. |
---|
| 1460 | ! MPI_Bcast is used, so implicitly all processes are synchronized at this |
---|
| 1461 | ! step |
---|
| 1462 | ! |
---|
| 1463 | ! |
---|
| 1464 | !*********************************************************************** |
---|
| 1465 | use com_mod |
---|
[32b49c3] | 1466 | use par_mod,only: numwfmem |
---|
[8a65cb0] | 1467 | |
---|
| 1468 | implicit none |
---|
| 1469 | |
---|
| 1470 | integer, intent(in) :: memstat |
---|
| 1471 | integer :: i |
---|
| 1472 | ! li,ui lower,upper indices of fields to be transferred (1-3, ui>=li) |
---|
| 1473 | integer :: li,ui |
---|
| 1474 | |
---|
| 1475 | ! Common array sizes used for communications |
---|
[db712a8] | 1476 | integer :: d3_size1 = nxmaxn*nymaxn*nzmax |
---|
| 1477 | integer :: d3_size2 = nxmaxn*nymaxn*nuvzmax |
---|
| 1478 | integer :: d2_size1 = nxmaxn*nymaxn |
---|
| 1479 | integer :: d2_size2 = nxmaxn*nymaxn*maxspec |
---|
| 1480 | integer :: d2_size3 = nxmaxn*nymaxn |
---|
[8a65cb0] | 1481 | |
---|
| 1482 | integer :: d3s1,d3s2,d2s1,d2s2 |
---|
| 1483 | |
---|
| 1484 | ! The first time this routine is called, the unchangeable fields are transferred |
---|
| 1485 | logical :: first_call=.true. |
---|
| 1486 | |
---|
| 1487 | !********************************************************************** |
---|
| 1488 | |
---|
| 1489 | ! Sizes of arrays transferred |
---|
| 1490 | d3s1=d3_size1 |
---|
| 1491 | d3s2=d3_size2 |
---|
| 1492 | d2s1=d2_size1 |
---|
| 1493 | d2s2=d2_size2 |
---|
| 1494 | |
---|
| 1495 | ! Decide which field(s) need to be transferred |
---|
| 1496 | if (memstat.ge.32) then ! distribute 2 fields |
---|
| 1497 | li=1; ui=2 |
---|
| 1498 | d3s1=2*d3_size1 |
---|
| 1499 | d3s2=2*d3_size2 |
---|
| 1500 | d2s1=2*d2_size1 |
---|
| 1501 | d2s2=2*d2_size2 |
---|
| 1502 | else if (memstat.eq.17) then ! distribute 1 field, on 1st index |
---|
| 1503 | li=1; ui=1 |
---|
| 1504 | else if (memstat.eq.18) then ! distribute 1 field, on 2nd index |
---|
| 1505 | li=2; ui=2 |
---|
| 1506 | else if (memstat.eq.19) then ! distribute 1 field, on 3rd index |
---|
| 1507 | li=3; ui=3 |
---|
| 1508 | else |
---|
| 1509 | write(*,*) '#### mpi_mod::mpif_gf_send_vars_nest> ERROR: ', & |
---|
| 1510 | & 'wrong value of memstat, exiting ####', memstat |
---|
| 1511 | stop |
---|
| 1512 | end if |
---|
| 1513 | |
---|
| 1514 | |
---|
| 1515 | ! Time for MPI communications |
---|
| 1516 | if (mp_measure_time) call mpif_mtime('commtime',0) |
---|
| 1517 | |
---|
| 1518 | ! Distribute variables, send from getfield process (id_read) to other |
---|
| 1519 | ! processes |
---|
| 1520 | !********************************************************************** |
---|
| 1521 | |
---|
[3b80e98] | 1522 | ! The non-reader processes need to know if cloud water was read. |
---|
[db712a8] | 1523 | call MPI_Bcast(readclouds_nest,maxnests,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
| 1524 | if (mp_ierr /= 0) goto 600 |
---|
| 1525 | |
---|
[8a65cb0] | 1526 | ! Static fields/variables sent only at startup |
---|
| 1527 | if (first_call) then |
---|
[fdc0f03] | 1528 | call MPI_Bcast(oron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1529 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1530 | call MPI_Bcast(excessoron(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1531 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1532 | call MPI_Bcast(lsmn(:,:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1533 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1534 | call MPI_Bcast(xlandusen(:,:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1535 | if (mp_ierr /= 0) goto 600 |
---|
| 1536 | first_call=.false. |
---|
| 1537 | end if |
---|
| 1538 | |
---|
| 1539 | ! MPI prefers contiguous arrays for sending (else a buffer is created), |
---|
[db712a8] | 1540 | ! hence the loop over nests |
---|
| 1541 | !********************************************************************** |
---|
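| | ! For a fixed nest index i the slice uun(:,:,:,li:ui,i) is one contiguous block, |
---|
| | ! whereas a slice over all nests at once (e.g. uun(:,:,:,li:ui,:)) generally is |
---|
| | ! not, so broadcasting nest by nest avoids the temporary copy mentioned above. |
---|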
[8a65cb0] | 1542 | do i=1, numbnests |
---|
| 1543 | ! 3D fields |
---|
[fdc0f03] | 1544 | call MPI_Bcast(uun(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1545 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1546 | call MPI_Bcast(vvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1547 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1548 | call MPI_Bcast(wwn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1549 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1550 | call MPI_Bcast(ttn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1551 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1552 | call MPI_Bcast(rhon(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1553 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1554 | call MPI_Bcast(drhodzn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1555 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1556 | call MPI_Bcast(tthn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1557 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1558 | call MPI_Bcast(qvhn(:,:,:,li:ui,i),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1559 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1560 | call MPI_Bcast(qvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1561 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1562 | call MPI_Bcast(pvn(:,:,:,li:ui,i),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1563 | if (mp_ierr /= 0) goto 600 |
---|
| 1564 | call MPI_Bcast(cloudsn(:,:,:,li:ui,i),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[db712a8] | 1565 | if (mp_ierr /= 0) goto 600 |
---|
| 1566 | |
---|
| 1567 | ! cloud water/ice: |
---|
[861805a] | 1568 | if (readclouds_nest(i)) then |
---|
| 1569 | ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
| 1570 | ! if (mp_ierr /= 0) goto 600 |
---|
| 1571 | call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
| 1572 | if (mp_ierr /= 0) goto 600 |
---|
| 1573 | end if |
---|
[8a65cb0] | 1574 | |
---|
| 1575 | ! 2D fields |
---|
[db712a8] | 1576 | call MPI_Bcast(cloudshn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1577 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1578 | call MPI_Bcast(vdepn(:,:,:,li:ui,i),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1579 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1580 | call MPI_Bcast(psn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1581 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1582 | call MPI_Bcast(sdn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1583 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1584 | call MPI_Bcast(tccn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1585 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1586 | call MPI_Bcast(tt2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1587 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1588 | call MPI_Bcast(td2n(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1589 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1590 | call MPI_Bcast(lsprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1591 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1592 | call MPI_Bcast(convprecn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1593 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1594 | call MPI_Bcast(ustarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1595 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1596 | call MPI_Bcast(wstarn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1597 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1598 | call MPI_Bcast(olin(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1599 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1600 | call MPI_Bcast(hmixn(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1601 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1602 | call MPI_Bcast(tropopausen(:,:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) |
---|
[8a65cb0] | 1603 | if (mp_ierr /= 0) goto 600 |
---|
| 1604 | end do |
---|
| 1605 | |
---|
| 1606 | if (mp_measure_time) call mpif_mtime('commtime',1) |
---|
| 1607 | |
---|
| 1608 | goto 601 |
---|
| 1609 | |
---|
| 1610 | 600 write(*,*) "mpi_mod> mp_ierr \= 0",mp_ierr |
---|
| 1611 | stop |
---|
| 1612 | |
---|
| 1613 | 601 end subroutine mpif_gf_send_vars_nest |
---|
| 1614 | |
---|
| 1615 | |
---|
| 1616 | subroutine mpif_gf_send_vars_async(memstat) |
---|
| 1617 | !******************************************************************************* |
---|
| 1618 | ! DESCRIPTION |
---|
| 1619 | ! Distribute 'getfield' variables from reader process to all processes. |
---|
[5f9d14a] | 1620 | ! Called from timemanager by reader process only |
---|
[8a65cb0] | 1621 | ! |
---|
| 1622 | ! NOTE |
---|
| 1623 | ! This version uses asynchronous sends. The newest fields are sent in the |
---|
| 1624 | ! background, so calculations can continue while |
---|
| 1625 | ! MPI communications are performed. |
---|
| 1626 | ! |
---|
| 1627 | ! The way the MPI tags/requests are sequenced means that this routine must |
---|
| 1628 | ! carefully match routine 'mpif_gf_recv_vars_async' |
---|
| 1629 | ! |
---|
| 1630 | ! VARIABLES |
---|
| 1631 | ! memstat -- input, for resolving pointer to windfield index being read |
---|
| 1632 | ! mind -- index where to place new fields |
---|
| 1633 | ! |
---|
| 1634 | ! |
---|
| 1635 | !******************************************************************************* |
---|
| 1636 | use com_mod |
---|
| 1637 | |
---|
| 1638 | implicit none |
---|
| 1639 | |
---|
| 1640 | integer, intent(in) :: memstat |
---|
| 1641 | integer :: mind |
---|
| 1642 | integer :: dest,i |
---|
| 1643 | |
---|
| 1644 | ! Common array sizes used for communications |
---|
| 1645 | integer :: d3s1 = nxmax*nymax*nzmax |
---|
| 1646 | integer :: d3s2 = nxmax*nymax*nuvzmax |
---|
| 1647 | integer :: d2s1 = nxmax*nymax |
---|
| 1648 | integer :: d2s2 = nxmax*nymax*maxspec |
---|
[32b49c3] | 1649 | ! integer :: d1s1 = maxwf |
---|
[8a65cb0] | 1650 | |
---|
| 1651 | !******************************************************************************* |
---|
| 1652 | |
---|
| 1653 | ! At the time the send is posted, the reader process is one step further |
---|
| 1654 | ! in the permutation of memstat compared with the receiving processes |
---|
| 1655 | |
---|
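| | ! Example: when the reader calls this routine with memstat=17 it sends the field |
---|
| | ! just read into slot 3; the receivers posted the matching MPI_Irecv one step |
---|
| | ! earlier, when their memstat was 19 ('slot 2 filled last, 3 free'), so both |
---|
| | ! sides refer to slot 3 (see mpif_gf_recv_vars_async). |
---|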
| 1656 | if (memstat.ge.32) then |
---|
| 1657 | ! last read was synchronous, to indices 1 and 2, 3 is free |
---|
| 1658 | write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: & |
---|
| 1659 | & memstat>=32 should never happen here." |
---|
| 1660 | stop |
---|
| 1661 | else if (memstat.eq.17) then |
---|
| 1662 | ! old fields on 1,2, send 3 |
---|
| 1663 | mind=3 |
---|
| 1664 | else if (memstat.eq.18) then |
---|
| 1665 | ! old fields on 2,3, send 1 |
---|
| 1666 | mind=1 |
---|
| 1667 | else if (memstat.eq.19) then |
---|
| 1668 | ! old fields on 3,1, send 2 |
---|
| 1669 | mind=2 |
---|
| 1670 | else |
---|
| 1671 | write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> ERROR: & |
---|
| 1672 | & invalid memstat" |
---|
[aa8c34a] | 1673 | mind=-1 |
---|
| 1674 | stop |
---|
[8a65cb0] | 1675 | end if |
---|
| 1676 | |
---|
| 1677 | if (mp_dev_mode) then |
---|
| 1678 | if (mp_pid.ne.id_read) then |
---|
| 1679 | write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_async' |
---|
| 1680 | end if |
---|
| 1681 | end if |
---|
| 1682 | |
---|
| 1683 | if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat |
---|
| 1684 | |
---|
| 1685 | ! Time for MPI communications |
---|
| 1686 | if (mp_measure_time) call mpif_mtime('commtime',0) |
---|
| 1687 | |
---|
| 1688 | ! Loop over receiving processes, initiate data sending |
---|
| 1689 | !***************************************************** |
---|
| 1690 | |
---|
| 1691 | do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading |
---|
[78e62dc] | 1692 | ! TODO: use mp_partgroup_np here |
---|
[8a65cb0] | 1693 | if (dest.eq.id_read) cycle |
---|
| 1694 | i=dest*nvar_async |
---|
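| | ! Each destination rank gets its own block of request handles, slots |
---|
| | ! dest*nvar_async ... dest*nvar_async+nvar_async-1, one per variable sent below; |
---|
| | ! the receiver indexes its handles from mp_pid*nvar_async, so both routines must |
---|
| | ! post the same variables in the same order. |
---|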
[fdc0f03] | 1695 | call MPI_Isend(uu(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1696 | if (mp_ierr /= 0) goto 600 |
---|
| 1697 | i=i+1 |
---|
[fdc0f03] | 1698 | call MPI_Isend(vv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1699 | if (mp_ierr /= 0) goto 600 |
---|
| 1700 | i=i+1 |
---|
[fdc0f03] | 1701 | call MPI_Isend(uupol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1702 | if (mp_ierr /= 0) goto 600 |
---|
| 1703 | i=i+1 |
---|
[fdc0f03] | 1704 | call MPI_Isend(vvpol(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1705 | if (mp_ierr /= 0) goto 600 |
---|
| 1706 | i=i+1 |
---|
[fdc0f03] | 1707 | call MPI_Isend(ww(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1708 | if (mp_ierr /= 0) goto 600 |
---|
| 1709 | i=i+1 |
---|
[fdc0f03] | 1710 | call MPI_Isend(tt(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1711 | if (mp_ierr /= 0) goto 600 |
---|
| 1712 | i=i+1 |
---|
[fdc0f03] | 1713 | call MPI_Isend(rho(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1714 | if (mp_ierr /= 0) goto 600 |
---|
| 1715 | i=i+1 |
---|
[fdc0f03] | 1716 | call MPI_Isend(drhodz(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1717 | if (mp_ierr /= 0) goto 600 |
---|
| 1718 | i=i+1 |
---|
[fdc0f03] | 1719 | call MPI_Isend(tth(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1720 | if (mp_ierr /= 0) goto 600 |
---|
| 1721 | i=i+1 |
---|
[fdc0f03] | 1722 | call MPI_Isend(qvh(:,:,:,mind),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1723 | if (mp_ierr /= 0) goto 600 |
---|
| 1724 | i=i+1 |
---|
[fdc0f03] | 1725 | call MPI_Isend(qv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1726 | if (mp_ierr /= 0) goto 600 |
---|
| 1727 | i=i+1 |
---|
[fdc0f03] | 1728 | call MPI_Isend(pv(:,:,:,mind),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1729 | if (mp_ierr /= 0) goto 600 |
---|
| 1730 | i=i+1 |
---|
| 1731 | call MPI_Isend(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 1732 | i=i+1 |
---|
| 1733 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1734 | call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1735 | if (mp_ierr /= 0) goto 600 |
---|
| 1736 | i=i+1 |
---|
[fdc0f03] | 1737 | call MPI_Isend(vdep(:,:,:,mind),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1738 | if (mp_ierr /= 0) goto 600 |
---|
| 1739 | i=i+1 |
---|
[3b80e98] | 1740 | ! 15 |
---|
[fdc0f03] | 1741 | call MPI_Isend(ps(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1742 | if (mp_ierr /= 0) goto 600 |
---|
| 1743 | i=i+1 |
---|
[fdc0f03] | 1744 | call MPI_Isend(sd(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1745 | if (mp_ierr /= 0) goto 600 |
---|
| 1746 | i=i+1 |
---|
[fdc0f03] | 1747 | call MPI_Isend(tcc(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1748 | if (mp_ierr /= 0) goto 600 |
---|
| 1749 | i=i+1 |
---|
[fdc0f03] | 1750 | call MPI_Isend(tt2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1751 | if (mp_ierr /= 0) goto 600 |
---|
| 1752 | i=i+1 |
---|
[fdc0f03] | 1753 | call MPI_Isend(td2(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1754 | if (mp_ierr /= 0) goto 600 |
---|
| 1755 | i=i+1 |
---|
[fdc0f03] | 1756 | call MPI_Isend(lsprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1757 | if (mp_ierr /= 0) goto 600 |
---|
| 1758 | i=i+1 |
---|
[fdc0f03] | 1759 | call MPI_Isend(convprec(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1760 | if (mp_ierr /= 0) goto 600 |
---|
| 1761 | i=i+1 |
---|
[fdc0f03] | 1762 | call MPI_Isend(ustar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1763 | if (mp_ierr /= 0) goto 600 |
---|
| 1764 | i=i+1 |
---|
[fdc0f03] | 1765 | call MPI_Isend(wstar(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1766 | if (mp_ierr /= 0) goto 600 |
---|
| 1767 | i=i+1 |
---|
[fdc0f03] | 1768 | call MPI_Isend(hmix(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1769 | if (mp_ierr /= 0) goto 600 |
---|
| 1770 | i=i+1 |
---|
[3b80e98] | 1771 | ! 25 |
---|
[fdc0f03] | 1772 | call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1773 | if (mp_ierr /= 0) goto 600 |
---|
| 1774 | i=i+1 |
---|
[fdc0f03] | 1775 | call MPI_Isend(oli(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
[8a65cb0] | 1776 | if (mp_ierr /= 0) goto 600 |
---|
| 1777 | |
---|
[5f9d14a] | 1778 | ! Send cloud water if it exists. Increment counter always (as on receiving end) |
---|
| 1779 | if (readclouds) then |
---|
| 1780 | i=i+1 |
---|
[341f4b7] | 1781 | call MPI_Isend(ctwc(:,:,mind),d2s1,mp_sp,dest,tm1,& |
---|
[5f9d14a] | 1782 | &MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 1783 | if (mp_ierr /= 0) goto 600 |
---|
[d6a0977] | 1784 | end if |
---|
[8a65cb0] | 1785 | end do |
---|
| 1786 | |
---|
| 1787 | if (mp_measure_time) call mpif_mtime('commtime',1) |
---|
| 1788 | |
---|
| 1789 | goto 601 |
---|
| 1790 | |
---|
[5f9d14a] | 1791 | 600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> mp_ierr \= 0", mp_ierr |
---|
[8a65cb0] | 1792 | stop |
---|
| 1793 | |
---|
| 1794 | 601 end subroutine mpif_gf_send_vars_async |
---|
| 1795 | |
---|
| 1796 | |
---|
| 1797 | subroutine mpif_gf_recv_vars_async(memstat) |
---|
| 1798 | !******************************************************************************* |
---|
| 1799 | ! DESCRIPTION |
---|
[5f9d14a] | 1800 | ! Receive 'getfield' variables from reader process. |
---|
| 1801 | ! Called from timemanager by all processes except reader |
---|
[8a65cb0] | 1802 | ! |
---|
| 1803 | ! NOTE |
---|
| 1804 | ! This version uses asynchronous communications. |
---|
| 1805 | ! |
---|
| 1806 | ! VARIABLES |
---|
| 1807 | ! memstat -- input, used to resolve windfield index being received |
---|
| 1808 | ! |
---|
| 1809 | ! |
---|
| 1810 | !******************************************************************************* |
---|
| 1811 | use com_mod |
---|
| 1812 | |
---|
| 1813 | implicit none |
---|
| 1814 | |
---|
| 1815 | integer, intent(in) :: memstat |
---|
| 1816 | integer :: mind,j |
---|
| 1817 | |
---|
| 1818 | ! Common array sizes used for communications |
---|
| 1819 | integer :: d3s1 = nxmax*nymax*nzmax |
---|
| 1820 | integer :: d3s2 = nxmax*nymax*nuvzmax |
---|
| 1821 | integer :: d2s1 = nxmax*nymax |
---|
| 1822 | integer :: d2s2 = nxmax*nymax*maxspec |
---|
[861805a] | 1823 | !integer :: d1_size1 = maxwf |
---|
[8a65cb0] | 1824 | |
---|
| 1825 | ! integer :: d3s1,d3s2,d2s1,d2s2 |
---|
| 1826 | !******************************************************************************* |
---|
| 1827 | |
---|
| 1828 | ! At the time this immediate receive is posted, memstat is the state of |
---|
| 1829 | ! windfield indices at the previous/current time. From this, the future |
---|
| 1830 | ! state is deduced. |
---|
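| | ! The three wind-field slots act as a circular buffer: memstat encodes which slot |
---|
| | ! was filled last (17: slot 3, 18: slot 1, 19: slot 2), so the slot to receive |
---|
| | ! into cycles 1 -> 2 -> 3 -> 1; memstat=32 is the initial synchronous read into |
---|
| | ! slots 1 and 2, leaving slot 3 free. |
---|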
| 1831 | if (memstat.eq.32) then |
---|
| 1832 | ! last read was synchronous to indices 1 and 2, 3 is free |
---|
| 1833 | mind=3 |
---|
| 1834 | else if (memstat.eq.17) then |
---|
| 1835 | ! last read was asynchronous to index 3, 1 is free |
---|
| 1836 | mind=1 |
---|
| 1837 | else if (memstat.eq.18) then |
---|
| 1838 | ! last read was asynchronous to index 1, 2 is free |
---|
| 1839 | mind=2 |
---|
| 1840 | else if (memstat.eq.19) then |
---|
| 1841 | ! last read was asynchronous to index 2, 3 is free |
---|
| 1842 | mind=3 |
---|
[32b49c3] | 1843 | else |
---|
| 1844 | ! illegal state |
---|
| 1845 | write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.' |
---|
| 1846 | stop |
---|
[8a65cb0] | 1847 | end if |
---|
| 1848 | |
---|
| 1849 | if (mp_dev_mode) then |
---|
| 1850 | if (mp_pid.eq.id_read) then |
---|
| 1851 | write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async' |
---|
| 1852 | end if |
---|
| 1853 | end if |
---|
| 1854 | |
---|
| 1855 | ! Time for MPI communications |
---|
| 1856 | if (mp_measure_time) call mpif_mtime('commtime',0) |
---|
| 1857 | |
---|
| 1858 | if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_async, memstat:', memstat |
---|
| 1859 | |
---|
| 1860 | ! Initiate receiving of data |
---|
| 1861 | !*************************** |
---|
| 1862 | |
---|
| 1863 | ! Get MPI tags/requests for communications |
---|
| 1864 | j=mp_pid*nvar_async |
---|
[fdc0f03] | 1865 | call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1866 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1867 | if (mp_ierr /= 0) goto 600 |
---|
| 1868 | j=j+1 |
---|
[fdc0f03] | 1869 | call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1870 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1871 | if (mp_ierr /= 0) goto 600 |
---|
| 1872 | j=j+1 |
---|
[fdc0f03] | 1873 | call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1874 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1875 | if (mp_ierr /= 0) goto 600 |
---|
| 1876 | j=j+1 |
---|
[fdc0f03] | 1877 | call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1878 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1879 | if (mp_ierr /= 0) goto 600 |
---|
| 1880 | j=j+1 |
---|
[fdc0f03] | 1881 | call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1882 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1883 | if (mp_ierr /= 0) goto 600 |
---|
| 1884 | j=j+1 |
---|
[fdc0f03] | 1885 | call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1886 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1887 | if (mp_ierr /= 0) goto 600 |
---|
| 1888 | j=j+1 |
---|
[fdc0f03] | 1889 | call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1890 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1891 | if (mp_ierr /= 0) goto 600 |
---|
| 1892 | j=j+1 |
---|
[fdc0f03] | 1893 | call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1894 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1895 | if (mp_ierr /= 0) goto 600 |
---|
| 1896 | j=j+1 |
---|
[fdc0f03] | 1897 | call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1898 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1899 | if (mp_ierr /= 0) goto 600 |
---|
| 1900 | j=j+1 |
---|
[fdc0f03] | 1901 | call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1902 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1903 | if (mp_ierr /= 0) goto 600 |
---|
| 1904 | j=j+1 |
---|
[fdc0f03] | 1905 | call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1906 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1907 | if (mp_ierr /= 0) goto 600 |
---|
[5f9d14a] | 1908 | j=j+1 |
---|
[fdc0f03] | 1909 | call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1910 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1911 | if (mp_ierr /= 0) goto 600 |
---|
| 1912 | j=j+1 |
---|
[5f9d14a] | 1913 | call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,& |
---|
| 1914 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1915 | if (mp_ierr /= 0) goto 600 |
---|
| 1916 | j=j+1 |
---|
[fdc0f03] | 1917 | call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1918 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1919 | if (mp_ierr /= 0) goto 600 |
---|
| 1920 | j=j+1 |
---|
[fdc0f03] | 1921 | call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1922 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1923 | if (mp_ierr /= 0) goto 600 |
---|
| 1924 | j=j+1 |
---|
[fdc0f03] | 1925 | call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1926 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1927 | if (mp_ierr /= 0) goto 600 |
---|
| 1928 | j=j+1 |
---|
[fdc0f03] | 1929 | call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1930 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1931 | if (mp_ierr /= 0) goto 600 |
---|
| 1932 | j=j+1 |
---|
[fdc0f03] | 1933 | call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1934 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1935 | if (mp_ierr /= 0) goto 600 |
---|
| 1936 | j=j+1 |
---|
[fdc0f03] | 1937 | call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1938 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1939 | if (mp_ierr /= 0) goto 600 |
---|
| 1940 | j=j+1 |
---|
[fdc0f03] | 1941 | call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1942 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1943 | if (mp_ierr /= 0) goto 600 |
---|
| 1944 | j=j+1 |
---|
[fdc0f03] | 1945 | call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1946 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1947 | if (mp_ierr /= 0) goto 600 |
---|
| 1948 | j=j+1 |
---|
[fdc0f03] | 1949 | call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1950 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1951 | if (mp_ierr /= 0) goto 600 |
---|
[fdc0f03] | 1952 | call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1953 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1954 | if (mp_ierr /= 0) goto 600 |
---|
| 1955 | j=j+1 |
---|
[fdc0f03] | 1956 | call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1957 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1958 | if (mp_ierr /= 0) goto 600 |
---|
| 1959 | j=j+1 |
---|
[fdc0f03] | 1960 | call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1961 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1962 | if (mp_ierr /= 0) goto 600 |
---|
| 1963 | j=j+1 |
---|
[fdc0f03] | 1964 | call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1965 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1966 | if (mp_ierr /= 0) goto 600 |
---|
| 1967 | j=j+1 |
---|
[fdc0f03] | 1968 | call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[5f9d14a] | 1969 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
[8a65cb0] | 1970 | if (mp_ierr /= 0) goto 600 |
---|
| 1971 | |
---|
[5f9d14a] | 1972 | ! Post a request for cloud water (clwc). These data may not be sent, in which case the request must be cancelled |
---|
| 1973 | ! For now, assume that the data either have cloud water at every step or at none |
---|
| 1974 | if (readclouds) then |
---|
| 1975 | j=j+1 |
---|
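| | ! Note: the receive count d2s1*5 below exceeds the matching send in |
---|
| | ! mpif_gf_send_vars_async, which transfers d2s1 elements of ctwc; only the actual |
---|
| | ! message is delivered, so the larger value acts as an upper bound (possibly a |
---|
| | ! leftover from an earlier icloud_stats transfer). |
---|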
[328fdf9] | 1976 | call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,& |
---|
[d6a0977] | 1977 | &MPI_COMM_WORLD,reqs(j),mp_ierr) |
---|
| 1978 | if (mp_ierr /= 0) goto 600 |
---|
[5f9d14a] | 1979 | end if |
---|
| 1980 | |
---|
| 1981 | |
---|
[8a65cb0] | 1982 | if (mp_measure_time) call mpif_mtime('commtime',1) |
---|
| 1983 | |
---|
| 1984 | goto 601 |
---|
| 1985 | |
---|
[5f9d14a] | 1986 | 600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> MPI ERROR ", mp_ierr |
---|
[8a65cb0] | 1987 | stop |
---|
| 1988 | |
---|
| 1989 | 601 end subroutine mpif_gf_recv_vars_async |
---|
| 1990 | |
---|
| 1991 | |
---|
[3b80e98] | 1992 | subroutine mpif_gf_send_vars_nest_async(memstat) |
---|
| 1993 | !******************************************************************************* |
---|
| 1994 | ! DESCRIPTION |
---|
| 1995 | ! Distribute 'getfield' variables from reader process to all processes. |
---|
| 1996 | ! Called from timemanager by reader process only. |
---|
| 1997 | ! Version for nested wind fields |
---|
| 1998 | ! |
---|
| 1999 | ! NOTE |
---|
| 2000 | ! This version uses asynchronous sends. The newest fields are sent in the |
---|
| 2001 | ! background, so calculations can continue while |
---|
| 2002 | ! MPI communications are performed. |
---|
| 2003 | ! |
---|
| 2004 | ! The way the MPI tags/requests are sequenced means that this routine must |
---|
| 2005 | ! carefully match routine 'mpif_gf_recv_vars_async' |
---|
| 2006 | ! |
---|
| 2007 | ! VARIABLES |
---|
| 2008 | ! memstat -- input, for resolving pointer to windfield index being read |
---|
| 2009 | ! mind -- index where to place new fields |
---|
| 2010 | ! |
---|
| 2011 | ! TODO |
---|
| 2012 | ! Some unused arrays are currently sent (uupoln,..) |
---|
| 2013 | !******************************************************************************* |
---|
| 2014 | use com_mod |
---|
| 2015 | |
---|
| 2016 | implicit none |
---|
| 2017 | |
---|
| 2018 | integer, intent(in) :: memstat |
---|
| 2019 | integer :: mind |
---|
| 2020 | integer :: dest,i,k |
---|
| 2021 | |
---|
| 2022 | ! Common array sizes used for communications |
---|
| 2023 | integer :: d3s1 = nxmaxn*nymaxn*nzmax |
---|
| 2024 | integer :: d3s2 = nxmaxn*nymaxn*nuvzmax |
---|
| 2025 | integer :: d2s1 = nxmaxn*nymaxn |
---|
| 2026 | integer :: d2s2 = nxmaxn*nymaxn*maxspec |
---|
| 2027 | |
---|
| 2028 | !******************************************************************************* |
---|
| 2029 | |
---|
| 2030 | ! At the time the send is posted, the reader process is one step further |
---|
| 2031 | ! in the permutation of memstat compared with the receiving processes |
---|
| 2032 | |
---|
| 2033 | if (memstat.ge.32) then |
---|
| 2034 | ! last read was synchronous, to indices 1 and 2, 3 is free |
---|
| 2035 | write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: & |
---|
| 2036 | & memstat>=32 should never happen here." |
---|
| 2037 | stop |
---|
| 2038 | else if (memstat.eq.17) then |
---|
| 2039 | ! old fields on 1,2, send 3 |
---|
| 2040 | mind=3 |
---|
| 2041 | else if (memstat.eq.18) then |
---|
| 2042 | ! old fields on 2,3, send 1 |
---|
| 2043 | mind=1 |
---|
| 2044 | else if (memstat.eq.19) then |
---|
| 2045 | ! old fields on 3,1, send 2 |
---|
| 2046 | mind=2 |
---|
| 2047 | else |
---|
| 2048 | write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: & |
---|
| 2049 | & invalid memstat" |
---|
[aa8c34a] | 2050 | mind=-1 |
---|
| 2051 | stop |
---|
[3b80e98] | 2052 | end if |
---|
| 2053 | |
---|
| 2054 | if (mp_dev_mode) then |
---|
| 2055 | if (mp_pid.ne.id_read) then |
---|
| 2056 | write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_nest_async' |
---|
| 2057 | end if |
---|
| 2058 | end if |
---|
| 2059 | |
---|
| 2060 | if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_nest_async, memstat:', memstat |
---|
| 2061 | |
---|
| 2062 | ! Time for MPI communications |
---|
| 2063 | if (mp_measure_time) call mpif_mtime('commtime',0) |
---|
| 2064 | |
---|
| 2065 | ! Loop over receiving processes, initiate data sending |
---|
| 2066 | !***************************************************** |
---|
| 2067 | |
---|
| 2068 | do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading |
---|
| 2069 | ! TODO: use mp_partgroup_np here |
---|
| 2070 | if (dest.eq.id_read) cycle |
---|
| 2071 | do k=1, numbnests |
---|
[861805a] | 2072 | i=dest*nvar_async |
---|
| 2073 | call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2074 | if (mp_ierr /= 0) goto 600 |
---|
| 2075 | i=i+1 |
---|
| 2076 | call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2077 | if (mp_ierr /= 0) goto 600 |
---|
| 2078 | i=i+1 |
---|
| 2079 | call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2080 | if (mp_ierr /= 0) goto 600 |
---|
| 2081 | i=i+1 |
---|
| 2082 | call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2083 | if (mp_ierr /= 0) goto 600 |
---|
| 2084 | i=i+1 |
---|
| 2085 | call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2086 | if (mp_ierr /= 0) goto 600 |
---|
| 2087 | i=i+1 |
---|
| 2088 | call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2089 | if (mp_ierr /= 0) goto 600 |
---|
| 2090 | i=i+1 |
---|
| 2091 | call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2092 | if (mp_ierr /= 0) goto 600 |
---|
| 2093 | i=i+1 |
---|
| 2094 | call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2095 | if (mp_ierr /= 0) goto 600 |
---|
| 2096 | i=i+1 |
---|
| 2097 | call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2098 | if (mp_ierr /= 0) goto 600 |
---|
| 2099 | i=i+1 |
---|
| 2100 | call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2101 | if (mp_ierr /= 0) goto 600 |
---|
| 2102 | i=i+1 |
---|
| 2103 | call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2104 | i=i+1 |
---|
| 2105 | if (mp_ierr /= 0) goto 600 |
---|
| 2106 | call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2107 | if (mp_ierr /= 0) goto 600 |
---|
| 2108 | i=i+1 |
---|
| 2109 | call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2110 | if (mp_ierr /= 0) goto 600 |
---|
| 2111 | i=i+1 |
---|
| 2112 | call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2113 | if (mp_ierr /= 0) goto 600 |
---|
| 2114 | i=i+1 |
---|
| 2115 | call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2116 | if (mp_ierr /= 0) goto 600 |
---|
| 2117 | i=i+1 |
---|
[3b80e98] | 2118 | ! 15 |
---|
[861805a] | 2119 | call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2120 | if (mp_ierr /= 0) goto 600 |
---|
| 2121 | i=i+1 |
---|
| 2122 | call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2123 | if (mp_ierr /= 0) goto 600 |
---|
| 2124 | i=i+1 |
---|
| 2125 | call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2126 | if (mp_ierr /= 0) goto 600 |
---|
| 2127 | i=i+1 |
---|
| 2128 | call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2129 | if (mp_ierr /= 0) goto 600 |
---|
| 2130 | i=i+1 |
---|
| 2131 | call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2132 | if (mp_ierr /= 0) goto 600 |
---|
| 2133 | i=i+1 |
---|
| 2134 | call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2135 | if (mp_ierr /= 0) goto 600 |
---|
| 2136 | i=i+1 |
---|
| 2137 | call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2138 | if (mp_ierr /= 0) goto 600 |
---|
| 2139 | i=i+1 |
---|
| 2140 | call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2141 | if (mp_ierr /= 0) goto 600 |
---|
| 2142 | i=i+1 |
---|
| 2143 | call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2144 | if (mp_ierr /= 0) goto 600 |
---|
| 2145 | i=i+1 |
---|
| 2146 | call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2147 | if (mp_ierr /= 0) goto 600 |
---|
[3b80e98] | 2148 | ! 25 |
---|
| 2149 | |
---|
| 2150 | ! Send cloud water if it exists. Increment counter always (as on receiving end) |
---|
[861805a] | 2151 | if (readclouds) then |
---|
| 2152 | i=i+1 |
---|
| 2153 | call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,& |
---|
| 2154 | &MPI_COMM_WORLD,reqs(i),mp_ierr) |
---|
| 2155 | if (mp_ierr /= 0) goto 600 |
---|
| 2156 | end if |
---|
| 2157 | end do |
---|
[3b80e98] | 2158 | end do |
---|
| 2159 | |
---|
| 2160 | if (mp_measure_time) call mpif_mtime('commtime',1) |
---|
| 2161 | |
---|
| 2162 | goto 601 |
---|
| 2163 | |
---|
| 2164 | 600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> mp_ierr \= 0", mp_ierr |
---|
| 2165 | stop |
---|
| 2166 | |
---|
| 2167 | 601 end subroutine mpif_gf_send_vars_nest_async |
---|
| 2168 | |
---|
| 2169 | |
---|
| 2170 | subroutine mpif_gf_recv_vars_nest_async(memstat) |
---|
| 2171 | !******************************************************************************* |
---|
| 2172 | ! DESCRIPTION |
---|
| 2173 | ! Receive 'getfield' variables from reader process. |
---|
| 2174 | ! Called from timemanager by all processes except reader |
---|
| 2175 | ! Version for nested wind fields |
---|
| 2176 | ! |
---|
| 2177 | ! NOTE |
---|
| 2178 | ! This version uses asynchronous communications. |
---|
!
! VARIABLES
!   memstat -- input, used to resolve windfield index being received
!
!
!*******************************************************************************
    use com_mod

    implicit none

    integer, intent(in) :: memstat
    integer :: mind,j,k

! Common array sizes used for communications
    integer :: d3s1 = nxmaxn*nymaxn*nzmax
    integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
    integer :: d2s1 = nxmaxn*nymaxn
    integer :: d2s2 = nxmaxn*nymaxn*maxspec

!*******************************************************************************

! At the time this immediate receive is posted, memstat is the state of
! windfield indices at the previous/current time. From this, the future
! state is deduced.
    if (memstat.eq.32) then
! last read was synchronous to indices 1 and 2, 3 is free
      mind=3
    else if (memstat.eq.17) then
! last read was asynchronous to index 3, 1 is free
      mind=1
    else if (memstat.eq.18) then
! last read was asynchronous to index 1, 2 is free
      mind=2
    else if (memstat.eq.19) then
! last read was asynchronous to index 2, 3 is free
      mind=3
    else
! illegal state
      write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
      stop
    end if

    if (mp_dev_mode) then
      if (mp_pid.eq.id_read) then
        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_nest_async'
      end if
    end if

! Time for MPI communications
    if (mp_measure_time) call mpif_mtime('commtime',0)

    if (mp_dev_mode) write(*,*) '## in mpif_gf_recv_vars_nest_async, memstat:', memstat

! Initiate receiving of data
!***************************

    do k=1, numbnests
! Get MPI tags/requests for communications
      j=mp_pid*nvar_async
      call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
      j=j+1
      call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600

! Post request for clwc. These data may not be sent; the request must then be cancelled.
! For now, assume that either all time steps have cloud water data or none do.
      if (readclouds) then
        j=j+1
        call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
             &MPI_COMM_WORLD,reqs(j),mp_ierr)
        if (mp_ierr /= 0) goto 600
      end if
    end do

    if (mp_measure_time) call mpif_mtime('commtime',1)

    goto 601

600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_nest_async> MPI ERROR ", mp_ierr
    stop

601 end subroutine mpif_gf_recv_vars_nest_async
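
! Illustrative sketch (not part of the original code): the asynchronous nested
! exchange is typically driven from timemanager roughly as follows, assuming a
! dedicated reader process with rank id_read and lmp_sync=.false.; everything
! other than this module's own subroutines is a placeholder.
!
!   if (mp_pid.eq.id_read) then
!     call getfields(...)                         ! reader gets new nested fields
!     call mpif_gf_send_vars_nest_async(memstat)  ! post MPI_Isend to all workers
!   else
!     call mpif_gf_recv_vars_nest_async(memstat)  ! post the matching MPI_Irecv calls
!   end if
!   ...                                           ! particle loop overlaps with transfer
!   call mpif_gf_request                          ! wait for all transfers to complete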

  subroutine mpif_gf_request
!*******************************************************************************
! DESCRIPTION
!   Check for completion of MPI_Isend/MPI_Irecv data transfer.
!
!
! NOTE
!   Implicit synchronisation between all processes takes place here.
!
! TODO
!   NB: take into account nested wind fields by using a separate
!   subroutine mpif_gf_request_nest (and separate request arrays for the
!   nested variables)
!
!*******************************************************************************
! use com_mod,only: readclouds

    implicit none


    integer :: n_req !,i

!***********************************************************************

    n_req=nvar_async*mp_np

    if (mp_measure_time) call mpif_mtime('commtime',0)

! call MPI_Wait(rm1,MPI_STATUS_IGNORE,mp_ierr)

! TODO: cancel recv request if readclouds=.false.
! if (readclouds) then
    call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
! endif
! else
!   do i = 0, nvar_async*mp_np-1
!     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
!       call MPI_Cancel(reqs(i),mp_ierr)
!       cycle
!     end if
!     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
!   end do
! end if

    if (mp_ierr /= 0) goto 600

    if (mp_measure_time) call mpif_mtime('commtime',1)

    goto 601

600 write(*,*) "#### mpi_mod::mpif_gf_request> MPI ERROR ", mp_ierr
    stop

601 end subroutine mpif_gf_request
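
! Illustrative note (an interpretation, not from the original source): each
! process owns a contiguous block of nvar_async slots in the shared request
! array reqs, so the bookkeeping used above amounts to
!
!   j = mp_pid*nvar_async            ! first request slot owned by this process
!   ...                              ! one MPI_Isend/MPI_Irecv per variable, j=j+1 each time
!   n_req = nvar_async*mp_np         ! total number of request handles
!   call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)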

  subroutine mpif_tm_reduce_grid
!***********************************************************************
! Collect grid variable to PID 0, adding from all processes.
!
! NOTE
! - Older MPI implementations may lack support for MPI_IN_PLACE.
!   If so, use option 1) below and change unc_mod accordingly.
!
!***********************************************************************
    use com_mod
    use unc_mod
    use par_mod

    implicit none

    integer :: grid_size2d,grid_size3d
    integer, parameter :: rcpt_size=maxreceptor*maxspec

!**********************************************************************
    grid_size2d=numxgrid*numygrid*maxspec* &
         & maxpointspec_act*nclassunc*maxageclass
    grid_size3d=numxgrid*numygrid*numzgrid*maxspec* &
         & maxpointspec_act*nclassunc*maxageclass


! Time for MPI communications
    if (mp_measure_time) call mpif_mtime('commtime',0)

! 1) Using a separate grid (gridunc0) for received values
! call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
!      & mp_comm_used, mp_ierr)
! if (mp_ierr /= 0) goto 600

! 2) Using in-place reduction

!!!--------------------------------------------------------------------
!!! DJM - 2017-05-09 change - MPI_IN_PLACE option for MPI_Reduce() causes
!!! severe numerical problems in some cases. I'm guessing there are memory
!!! overrun issues in this complex code, but have so far failed to identify
!!! a specific problem. And/or, when one searches the Internet for this
!!! issue, there is "some" hint that the implementation may be buggy.
!!!
!!! At this point, with the following CPP USE_MPIINPLACE directive, the
!!! default behaviour will be to not use the MPI_IN_PLACE option.
!!! Users will have to compile with -DUSE_MPIINPLACE if they want that option.
!!! Introduction of the CPP directives also requires that the code be compiled
!!! with the "-x f95-cpp-input" option.
!!!
!!! Modification of this section requires the addition of array gridunc0, which
!!! requires an allocation in outgrid_init.f90 and initial declaration in
!!! unc_mod.f90.
!!!--------------------------------------------------------------------

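! Illustrative example (an assumption; the exact flags depend on your build
! setup and compiler wrapper): with gfortran the preprocessor and the directive
! mentioned above can be enabled along the lines of
!
!   gfortran -x f95-cpp-input -DUSE_MPIINPLACE -c mpi_mod.f90
!
! or, equivalently, by adding -DUSE_MPIINPLACE to the FFLAGS in the makefile.
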
#ifdef USE_MPIINPLACE

    if (lroot) then
      call MPI_Reduce(MPI_IN_PLACE, gridunc, grid_size3d, mp_sp, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
    else
      call MPI_Reduce(gridunc, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
    end if

! Receptor concentrations
    if (lroot) then
      call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
           & mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
    else
      call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
           & mp_comm_used,mp_ierr)
    end if

#else

    call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
         & mp_comm_used, mp_ierr)
    if (mp_ierr /= 0) goto 600
    if (lroot) gridunc = gridunc0

    call MPI_Reduce(creceptor, creceptor0,rcpt_size,mp_sp,MPI_SUM,id_root, &
         & mp_comm_used,mp_ierr)
    if (mp_ierr /= 0) goto 600
    if (lroot) creceptor = creceptor0

#endif

    if ((WETDEP).and.(ldirect.gt.0)) then
      call MPI_Reduce(wetgridunc, wetgridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
    end if

    if ((DRYDEP).and.(ldirect.gt.0)) then
      call MPI_Reduce(drygridunc, drygridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
    end if


    if (mp_measure_time) call mpif_mtime('commtime',1)

    goto 601

600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
    stop

601 end subroutine mpif_tm_reduce_grid

  subroutine mpif_tm_reduce_grid_nest
!***********************************************************************
! Collect nested grids to PID 0, adding from all processes.
!
! NOTE
! - Compiling with 'fcheck=all' (gfortran) will cause a run-time error here,
!   because wetgriduncn0 is only allocated on the root process. This may be
!   valid MPI code, but gfortran's runtime checks reject it.
!
!***********************************************************************
    use com_mod
    use unc_mod
    use par_mod

    implicit none

    integer :: grid_size2d,grid_size3d

!**********************************************************************

    grid_size3d=numxgridn*numygridn*numzgrid*maxspec* &
         & maxpointspec_act*nclassunc*maxageclass
    grid_size2d=numxgridn*numygridn*maxspec* &
         & maxpointspec_act*nclassunc*maxageclass


! Time for MPI communications
    if (mp_measure_time) call mpif_mtime('commtime',0)

! Using a separate grid (gridunc0) for received values, for debugging
! call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
!      & mp_comm_used, mp_ierr)
! if (mp_ierr /= 0) goto 600

#ifdef USE_MPIINPLACE
! Using in-place reduction
    if (lroot) then
      call MPI_Reduce(MPI_IN_PLACE, griduncn, grid_size3d, mp_sp, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
    else
      call MPI_Reduce(griduncn, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
    end if

#else
    call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
         & mp_comm_used, mp_ierr)
    if (mp_ierr /= 0) goto 600
    if (lroot) griduncn = griduncn0

#endif

    if ((WETDEP).and.(ldirect.gt.0)) then
      call MPI_Reduce(wetgriduncn, wetgriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
    end if

    if ((DRYDEP).and.(ldirect.gt.0)) then
      call MPI_Reduce(drygriduncn, drygriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
    end if

    if (mp_measure_time) call mpif_mtime('commtime',1)

    goto 601

600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
    stop

601 end subroutine mpif_tm_reduce_grid_nest
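
! Illustrative note (an assumption, not from the original source): the run-time
! error mentioned in the NOTE above comes from argument/bounds checking, e.g. a
! build with
!
!   FFLAGS = ... -fcheck=all
!
! in which case arrays such as wetgriduncn0 would have to be allocated (possibly
! with size zero) on the non-root processes as well to pass the check.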

  subroutine mpif_mtime(ident,imode)
!***********************************************************************
! Measure CPU/Wall time in various parts of the code
!
! VARIABLES
!   ident   character, identifies routine to measure
!   imode   integer, 0: start clock(s), 1: stop clock(s)
!
!***********************************************************************
    implicit none

    character(LEN=*), intent(in) :: ident
    integer, intent(in) :: imode

!***********************************************************************

    select case(ident)

    case ('timemanager')
      if (imode.eq.0) then
        call cpu_time(tm_tot_beg)
      else
        call cpu_time(tm_tot_end)
        tm_tot_total = tm_tot_total + (tm_tot_end-tm_tot_beg)
      end if

    case ('wetdepo')
      if (imode.eq.0) then
        mp_wetdepo_wtime_beg = mpi_wtime()
        call cpu_time(mp_wetdepo_time_beg)
      else
        mp_wetdepo_wtime_end = mpi_wtime()
        call cpu_time(mp_wetdepo_time_end)

        mp_wetdepo_wtime_total = mp_wetdepo_wtime_total + &
             &(mp_wetdepo_wtime_end - mp_wetdepo_wtime_beg)
        mp_wetdepo_time_total = mp_wetdepo_time_total + &
             &(mp_wetdepo_time_end - mp_wetdepo_time_beg)
      end if

    case ('advance')
      if (imode.eq.0) then
        mp_advance_wtime_beg = mpi_wtime()
      else
        mp_advance_wtime_end = mpi_wtime()

        mp_advance_wtime_total = mp_advance_wtime_total + &
             &(mp_advance_wtime_end - mp_advance_wtime_beg)
      end if

    case ('getfields')
      if (imode.eq.0) then
        mp_getfields_wtime_beg = mpi_wtime()
        call cpu_time(mp_getfields_time_beg)
      else
        mp_getfields_wtime_end = mpi_wtime()
        call cpu_time(mp_getfields_time_end)

        mp_getfields_wtime_total = mp_getfields_wtime_total + &
             &(mp_getfields_wtime_end - mp_getfields_wtime_beg)
        mp_getfields_time_total = mp_getfields_time_total + &
             &(mp_getfields_time_end - mp_getfields_time_beg)
      end if

    case ('partloop1')
      if (imode.eq.0) then
        call cpu_time(tm_nploop_beg)
      else
        call cpu_time(tm_nploop_end)
        tm_nploop_total = tm_nploop_total + (tm_nploop_end - tm_nploop_beg)
      end if

    case ('conccalc')
      if (imode.eq.0) then
        call cpu_time(mp_conccalc_time_beg)
      else
        call cpu_time(mp_conccalc_time_end)
        mp_conccalc_time_total = mp_conccalc_time_total + mp_conccalc_time_end - &
             &mp_conccalc_time_beg
      end if

    case ('rootonly')
      if (imode.eq.0) then
        call cpu_time(mp_root_time_beg)
        mp_root_wtime_beg = mpi_wtime()
      else
        call cpu_time(mp_root_time_end)
        mp_root_wtime_end = mpi_wtime()
        mp_root_time_total = mp_root_time_total + &
             &(mp_root_time_end - mp_root_time_beg)
        mp_root_wtime_total = mp_root_wtime_total + &
             &(mp_root_wtime_end - mp_root_wtime_beg)
      end if

    case ('iotime')
      if (imode.eq.0) then
        mp_io_wtime_beg = mpi_wtime()
        call cpu_time(mp_io_time_beg)
      else
        mp_io_wtime_end = mpi_wtime()
        call cpu_time(mp_io_time_end)

        mp_io_wtime_total = mp_io_wtime_total + (mp_io_wtime_end - &
             & mp_io_wtime_beg)
        mp_io_time_total = mp_io_time_total + (mp_io_time_end - &
             & mp_io_time_beg)
      end if

    case ('verttransform')
      if (imode.eq.0) then
        mp_vt_wtime_beg = mpi_wtime()
        call cpu_time(mp_vt_time_beg)
      else
        mp_vt_wtime_end = mpi_wtime()
        call cpu_time(mp_vt_time_end)

        mp_vt_wtime_total = mp_vt_wtime_total + (mp_vt_wtime_end - &
             & mp_vt_wtime_beg)
        mp_vt_time_total = mp_vt_time_total + (mp_vt_time_end - &
             & mp_vt_time_beg)
      end if

    case ('readwind')
      if (imode.eq.0) then
        call cpu_time(mp_readwind_time_beg)
        mp_readwind_wtime_beg = mpi_wtime()
      else
        call cpu_time(mp_readwind_time_end)
        mp_readwind_wtime_end = mpi_wtime()

        mp_readwind_time_total = mp_readwind_time_total + &
             &(mp_readwind_time_end - mp_readwind_time_beg)
        mp_readwind_wtime_total = mp_readwind_wtime_total + &
             &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
      end if

    case ('commtime')
      if (imode.eq.0) then
        call cpu_time(mp_comm_time_beg)
        mp_comm_wtime_beg = mpi_wtime()
      else
        call cpu_time(mp_comm_time_end)
        mp_comm_wtime_end = mpi_wtime()
        mp_comm_time_total = mp_comm_time_total + &
             &(mp_comm_time_end - mp_comm_time_beg)
        mp_comm_wtime_total = mp_comm_wtime_total + &
             &(mp_comm_wtime_end - mp_comm_wtime_beg)
      end if

    case ('flexpart')
      if (imode.eq.0) then
        mp_total_wtime_beg=mpi_wtime()
      else
        mp_total_wtime_end=mpi_wtime()
        mp_total_wtime_total = mp_total_wtime_end-mp_total_wtime_beg
      end if

    case default
      write(*,*) 'mpi_mod::mpif_mtime> WARNING: unknown case identifier'

    end select


  end subroutine mpif_mtime
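
! Illustrative usage (consistent with how this module instruments itself):
! wrap the section to be measured in a start/stop pair with the same identifier,
! for example
!
!   if (mp_measure_time) call mpif_mtime('commtime',0)
!   ! ... MPI communication to be timed ...
!   if (mp_measure_time) call mpif_mtime('commtime',1)
!
! A new identifier would also need corresponding *_beg/*_end/*_total timing
! variables and a matching case branch above.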

  subroutine mpif_finalize
!***********************************************************************
! Finalize MPI
! Optionally print detailed time diagnostics
!***********************************************************************
    implicit none

    integer :: ip !,j,r

!***********************************************************************

    if (mp_measure_time) then
      do ip=0, mp_np-1
        call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)

        if (ip == mp_pid) then
          write(*,FMT='(72("#"))')
          write(*,FMT='(A60,I3)') 'STATISTICS FOR MPI PROCESS:', mp_pid
          if (ip == 0) then
            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR ROOT-ONLY (PID 0) &
                 &SECTIONS: ', mp_root_time_total
            write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ROOT-ONLY (PID 0) &
                 &SECTIONS:', mp_root_wtime_total
          end if
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FLEXPART: ' &
               &, mp_total_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI COMMUNICATIONS:',&
               & mp_comm_time_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI COMMUNICATIONS:',&
               & mp_comm_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR MPI BARRIERS:',&
               & mp_barrier_time_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR MPI BARRIERS:',&
               & mp_barrier_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER:',&
               & tm_tot_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR TIMEMANAGER/NUMPART LOOP:',&
               & tm_nploop_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR ADVANCE:',&
               & mp_advance_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR GETFIELDS:',&
               & mp_getfields_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
               & mp_getfields_time_total
! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
!      & mp_readwind_wtime_total
! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
!      & mp_readwind_time_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
               & mp_io_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR FILE IO:',&
               & mp_io_time_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR WETDEPO:',&
               & mp_wetdepo_wtime_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR WETDEPO:',&
               & mp_wetdepo_time_total
          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',&
               & mp_conccalc_time_total
! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
!      & mp_vt_wtime_total
! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
!      & mp_vt_time_total
! NB: the 'flush' intrinsic below may be a compiler-specific extension;
! uncomment it only if it works with your compiler
! call flush()
        end if
      end do
    end if

! This call to barrier is for correctly formatting output
    call MPI_BARRIER(MPI_COMM_WORLD, mp_ierr)

    if (lroot.and.mp_measure_time) then
      write(*,FMT='(72("#"))')
      write(*,*) "To turn off output of time measurements, set "
      write(*,*) " mp_measure_time=.false."
      write(*,*) "in file mpi_mod.f90"
      write(*,FMT='(72("#"))')
    end if

! j=mp_pid*nvar_async
! In the implementation with 3 fields, the processes may have posted
! MPI_Irecv requests that should be cancelled here
! if (.not.lmp_sync) then
!   r=mp_pid*nvar_async
!   do j=r,r+nvar_async-1
!     call MPI_Cancel(j,mp_ierr)
!     if (mp_ierr /= 0) write(*,*) '#### mpif_finalize::MPI_Cancel> ERROR ####'
!   end do
! end if

    call MPI_FINALIZE(mp_ierr)
    if (mp_ierr /= 0) then
      write(*,*) '#### mpif_finalize::MPI_FINALIZE> MPI ERROR ', mp_ierr, ' ####'
      stop
    end if


  end subroutine mpif_finalize
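
! Illustrative shutdown sequence (an assumption; the actual call site is in the
! main program, not in this module):
!
!   if (mp_measure_time) call mpif_mtime('flexpart',1)  ! stop the total-run clock
!   call mpif_finalize                                   ! print statistics, then MPI_Finalize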

  subroutine get_lun(my_lun)
!***********************************************************************
! get_lun:
!   Starting from 100, get next free logical unit number
!***********************************************************************

    implicit none

    integer, intent(inout) :: my_lun
    integer, save :: free_lun=100
    logical :: exists, iopen

!***********************************************************************

    loop1: do
      inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
      if (exists .and. .not.iopen) exit loop1
      free_lun = free_lun+1
    end do loop1
    my_lun = free_lun

  end subroutine get_lun
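
! Illustrative usage (consistent with write_data_dbg below): obtain a free unit
! number before opening a scratch/debug file, e.g.
!
!   call get_lun(dat_lun)
!   open(UNIT=dat_lun, FILE='debug.dat', ACTION='WRITE', STATUS='REPLACE')
!
! where 'debug.dat' is only a placeholder file name.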

  subroutine write_data_dbg(array_in, array_name, tstep, ident)
!***********************************************************************
! Write one-dimensional arrays to file (for debugging purposes)
!***********************************************************************
    implicit none

    real, intent(in), dimension(:) :: array_in
    integer, intent(in) :: tstep
    integer :: lios
    character(LEN=*), intent(in) :: ident, array_name

    character(LEN=8) :: c_ts
    character(LEN=40) :: fn_1, fn_2

!***********************************************************************

    write(c_ts, FMT='(I8.8,BZ)') tstep
    fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
    write(c_ts, FMT='(I2.2,BZ)') mp_np
    fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'

    call get_lun(dat_lun)
    open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
         FORM='UNFORMATTED', STATUS='REPLACE')
    write(UNIT=dat_lun, IOSTAT=lios) array_in
    close(UNIT=dat_lun)

  end subroutine write_data_dbg
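
! Illustrative note (derived from the code above): the unformatted output file
! is named <array_name>-<tstep, 8 digits>-<ident>-np<number of processes>.dat,
! so a hypothetical call such as
!
!   call write_data_dbg(my_array, 'myarr', itime, 'afterstep')
!
! would write my_array to e.g. myarr-00003600-afterstep-np04.dat; the argument
! values here are placeholders, not taken from the original code.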

  subroutine set_fields_synthetic()
!*******************************************************************************
! DESCRIPTION
!   Set all meteorological fields to synthetic (usually constant/homogeneous)
!   values.
!   Used for validation and error-checking
!
! NOTE
!   When running with asynchronous communications (lmp_sync=.false.), all
!   three windfield memory slots are initialised (see ui below).
!
! VARIABLES
!
!
!
!*******************************************************************************
    use com_mod

    implicit none

    integer :: li=1, ui=2 ! wfmem indices (i.e., operate on entire field)

    if (.not.lmp_sync) ui=3


! Variables transferred at initialization only
!*********************************************
! readclouds=readclouds_
    oro(:,:)=0.0
    excessoro(:,:)=0.0
    lsm(:,:)=0.0
    xlanduse(:,:,:)=0.0
! wftime
! numbwf
! nmixz
! height

! Time-varying fields:
    uu(:,:,:,li:ui) = 10.0
    vv(:,:,:,li:ui) = 0.0
    uupol(:,:,:,li:ui) = 10.0
    vvpol(:,:,:,li:ui) = 0.0
    ww(:,:,:,li:ui) = 0.
    tt(:,:,:,li:ui) = 300.
    rho(:,:,:,li:ui) = 1.3
    drhodz(:,:,:,li:ui) = .0
    tth(:,:,:,li:ui) = 0.0
    qvh(:,:,:,li:ui) = 1.0
    qv(:,:,:,li:ui) = 1.0

    pv(:,:,:,li:ui) = 1.0
    clouds(:,:,:,li:ui) = 0

    clwc(:,:,:,li:ui) = 0.0
    ciwc(:,:,:,li:ui) = 0.0

! 2D fields

    cloudsh(:,:,li:ui) = 0
    vdep(:,:,:,li:ui) = 0.0
    ps(:,:,:,li:ui) = 1.0e5
    sd(:,:,:,li:ui) = 0.0
    tcc(:,:,:,li:ui) = 0.0
    tt2(:,:,:,li:ui) = 300.
    td2(:,:,:,li:ui) = 250.
    lsprec(:,:,:,li:ui) = 0.0
    convprec(:,:,:,li:ui) = 0.0
    ustar(:,:,:,li:ui) = 1.0
    wstar(:,:,:,li:ui) = 1.0
    hmix(:,:,:,li:ui) = 10000.
    tropopause(:,:,:,li:ui) = 10000.
    oli(:,:,:,li:ui) = 0.01

  end subroutine set_fields_synthetic
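
! Illustrative note (an assumption, not from the original source): this routine
! is intended for debugging/validation runs; a development driver might call
!
!   if (mp_dev_mode) call set_fields_synthetic()
!
! in place of reading real windfields, so that transport can be checked against
! the known homogeneous fields set above.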

end module mpi_mod