Changeset 5f9d14a in flexpart.git for src/mpi_mod.f90


Timestamp:
Apr 8, 2015, 2:23:27 PM (9 years ago)
Author:
Espen Sollum ATMOS <eso@…>
Branches:
master, 10.4.1_pesei, GFS_025, bugfixes+enhancements, dev, release-10, release-10.4.1, scaling-bug, univie
Children:
1585284
Parents:
cd85138
Message:

Updated wet depo scheme

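In both the synchronous and the asynchronous transfer routines, the change makes the reader process tell the other processes whether cloud fields were read (readclouds) and, only when they were, transfers the cloud water (clwc) and cloud ice (ciwc) fields needed by the updated wet deposition scheme. The following is a minimal, self-contained sketch of that conditional-broadcast pattern; the program name, array sizes and values are illustrative placeholders, not FLEXPART's own.

! Sketch of the conditional cloud-field broadcast (placeholder sizes/values)
program bcast_clouds_sketch
  use mpi
  implicit none
  integer, parameter :: nx=10, ny=10, nz=5
  real :: clwc(nx,ny,nz), ciwc(nx,ny,nz)
  logical :: readclouds
  integer :: mp_ierr, mp_pid, id_read

  call MPI_Init(mp_ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, mp_pid, mp_ierr)
  id_read = 0                 ! rank that reads the windfields
  readclouds = .false.
  if (mp_pid == id_read) then
    readclouds = .true.       ! e.g. set by the reading routine when clwc/ciwc are present
    clwc = 1.0e-4; ciwc = 2.0e-5
  end if

! Every rank must learn whether cloud fields were read before the conditional transfer
  call MPI_Bcast(readclouds, 1, MPI_LOGICAL, id_read, MPI_COMM_WORLD, mp_ierr)
  if (readclouds) then
    call MPI_Bcast(clwc, nx*ny*nz, MPI_REAL, id_read, MPI_COMM_WORLD, mp_ierr)
    call MPI_Bcast(ciwc, nx*ny*nz, MPI_REAL, id_read, MPI_COMM_WORLD, mp_ierr)
  end if

  call MPI_Finalize(mp_ierr)
end program bcast_clouds_sketch

Broadcasting the flag first keeps all ranks on the same branch of the wet deposition code even when a windfield contains no cloud water.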
File:
1 edited

Legend:

Unchanged lines are shown with a leading space, removed lines with '-', added lines with '+'.
  • src/mpi_mod.f90

    • Property mode changed from 100755 to 100644
--- src/mpi_mod.f90 (r8a65cb0)
+++ src/mpi_mod.f90 (r5f9d14a)
 ! mp_pid                  Process ID of each MPI process                     *
 ! mp_seed                 Parameter for random number seed                   *
+! read_grp_min            Minimum number of processes at which one will be   *
+!                         used as reader                                     *
 ! numpart_mpi,            Number of particles per node                       *
 ! maxpart_mpi                                                                *
     
 !                         loops over particles. Will be all processes        *
 !                         unless a dedicated process runs getfields/readwind *
-!                                                                            *
+! lmp_sync                If .false., use asynchronous MPI                   *
 !                                                                            *
 !                                                                            *
     
 ! Set aside a process for reading windfields if using at least these many processes
 !==================================================
-  integer, parameter, private :: read_grp_min=99
+  integer, parameter, private :: read_grp_min=4
 !==================================================

     

 ! MPI tags/requests for send/receive operation
-  integer :: tm1!=100
-  integer, parameter :: nvar_async=27
+  integer :: tm1
+  integer, parameter :: nvar_async=27 !29 :DBG:
  !integer, dimension(:), allocatable :: tags
  integer, dimension(:), allocatable :: reqs
     
 !===============================================================================

-! mp_dbg_mode       MPI related output for debugging etc.
+! mp_dbg_mode       Used for debugging MPI.
 ! mp_dev_mode       various checks related to debugging the parallel code
 ! mp_dbg_out        write some arrays to data file for debugging
     
            & 'numwfmem should be set to 2 for syncronous reading'
       write(*,FMT='(80("#"))')
-    end if
+! Force "syncronized" version if all processes will call getfields
+    else if (.not.lmp_sync.and.mp_np.lt.read_grp_min) then
+      if (lroot) then
+        write(*,FMT='(80("#"))')
+        write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
+             & 'all procs call getfields. Setting lmp_sync=.true.'
+        write(*,FMT='(80("#"))')
+      end if
+      lmp_sync=.true. ! :DBG: eso fix this...
+    end if
+! TODO: Add warnings for unimplemented flexpart features

 ! Set ID of process that calls getfield/readwind.
     
     call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)

-! Create a MPI group/communiactor that will calculate trajectories.
+! Create a MPI group/communicator that will calculate trajectories.
 ! Skip this step if program is run with only a few processes
 !************************************************************************
     
       lmp_use_reader = .true.

-! Map the subgroup IDs= 0,1,2,...,mp_np-2, skipping 'readwind' process
+! Map the subgroup IDs= 0,1,2,...,mp_np-2, skipping reader process
       j=-1
       do i=0, mp_np-2 !loop over all (subgroup) IDs
     
     end if
     if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed
+    if (mp_dbg_mode) then
+! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20
+      mp_seed=0
+      if (lroot) write(*,*) 'MPI: setting seed=0'
+    end if

 ! Allocate request array for asynchronous MPI
     if (.not.lmp_sync) then
       allocate(reqs(0:nvar_async*mp_np-1))
-      reqs=MPI_REQUEST_NULL
+      reqs(:)=MPI_REQUEST_NULL
     else
       allocate(reqs(0:1))
-      reqs=MPI_REQUEST_NULL
+      reqs(:)=MPI_REQUEST_NULL
     end if

     
     integer :: add_part=0

-    call MPI_BCAST(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
+    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)

 ! MPI subgroup does the particle-loop
     
 !*******************************************************************************
 ! DESCRIPTION
-!   Distribute 'getfield' variables from reader process to all processes.
+!   Distribute 'getfield' variables from reader process
 !
 !   Called from timemanager
     
 !**********************************************************************

+! The non-reader processes need to know if clouds were read.
+! TODO: only at first step or always?
+    call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
+    if (mp_ierr /= 0) goto 600
+
 ! Static fields/variables sent only at startup
     if (first_call) then
     
       if (mp_ierr /= 0) goto 600
       call MPI_Bcast(height,nzmax,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
-    if (mp_ierr /= 0) goto 600
+      if (mp_ierr /= 0) goto 600

       first_call=.false.
     
     if (mp_ierr /= 0) goto 600
     call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
-    if (mp_ierr /= 0) goto 600
+    if (mp_ierr /= 0) goto 600
+
+! cloud water/ice:
+    if (readclouds) then
+      call MPI_Bcast(clwc(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      call MPI_Bcast(ciwc(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
+      if (mp_ierr /= 0) goto 600
+    end if

 ! 2D fields
     
     if (mp_ierr /= 0) goto 600

-
     if (mp_measure_time) call mpif_mtime('commtime',1)

     
 !   step
 !
+! TODO
+!   Transfer cloud water/ice if and when available for nested
 !
 !***********************************************************************
     
 ! DESCRIPTION
 !   Distribute 'getfield' variables from reader process to all processes.
-!
-!   Called from timemanager by root process only
+!   Called from timemanager by reader process only
 !
 ! NOTE
     
 !   memstat -- input, for resolving pointer to windfield index being read
 !   mind    -- index where to place new fields
-!   !!! Under development, don't use yet !!!
-!
+!
+! TODO
+!   Transfer cloud water/ice
 !
 !*******************************************************************************
     
     integer :: d1s1 = maxwf

-    !integer :: d3s1,d3s2,d2s1,d2s2
-
 !*******************************************************************************
-
-! :TODO: don't need these
-    ! d3s1=d3_size1
-    ! d3s2=d3_size2
-    ! d2s1=d2_size1
-    ! d2s2=d2_size2

 ! At the time the send is posted, the reader process is one step further
     

     do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
-                      ! :TODO: use mp_partgroup_np here
+                      ! TODO: use mp_partgroup_np here
       if (dest.eq.id_read) cycle
       i=dest*nvar_async
     
       if (mp_ierr /= 0) goto 600

+! Send cloud water if it exists. Increment counter always (as on receiving end)
+      if (readclouds) then
+        i=i+1
+        call MPI_Isend(clwc(:,:,:,mind),d3s1,mp_pp,dest,tm1,&
+             &MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+
+        call MPI_Isend(ciwc(:,:,:,mind),d3s1,mp_pp,dest,tm1,&
+             &MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+      ! else
+      !   i=i+2
+      end if
+
     end do

     
     goto 601

-600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
+600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> mp_ierr \= 0", mp_ierr
     stop

     
 !*******************************************************************************
 ! DESCRIPTION
-!   Receive 'getfield' variables from reader process to all processes.
-!
-!   Called from timemanager by all processes except root
+!   Receive 'getfield' variables from reader process.
+!   Called from timemanager by all processes except reader
 !
 ! NOTE
     
 !   memstat -- input, used to resolve windfield index being received
 !
+! TODO
+!   Transfer cloud water/ice
 !
 !*******************************************************************************
     
 ! Get MPI tags/requests for communications
     j=mp_pid*nvar_async
-    call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+    call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
     if (mp_ierr /= 0) goto 600
     j=j+1
-    call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-
-    call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+    call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+
+    call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
     if (mp_ierr /= 0) goto 600
-j=j+1
-    call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-
-    call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-
-    call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+    j=j+1
+    call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+
+    call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
     if (mp_ierr /= 0) goto 600
     j=j+1
-    call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+    call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
     if (mp_ierr /= 0) goto 600
     j=j+1
-    call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+    call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
     if (mp_ierr /= 0) goto 600
     j=j+1
-    call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-
-    call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
-    j=j+1
-    call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+    call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+
+    call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
     if (mp_ierr /= 0) goto 600
     j=j+1
-    call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-    if (mp_ierr /= 0) goto 600
+    call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+
+
+! Post request for clwc. These data are possibly not sent, request must then be cancelled
+! For now assume that data at all steps either have or do not have water
+    if (readclouds) then
+      j=j+1
+      call MPI_Irecv(clwc(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(ciwc(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+    end if

     
     goto 601

-600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
+600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> MPI ERROR ", mp_ierr
     stop

     
 !
 !*******************************************************************************
+    use com_mod, only: readclouds
+
     implicit none

-!    if (mp_measure_time) call mpif_mtime('commtime',0)
+
+    integer :: n_req
+    integer :: i
+
+!***********************************************************************
+
+    n_req=nvar_async*mp_np
+
+    if (mp_measure_time) call mpif_mtime('commtime',0)

 !    call MPI_Wait(rm1,MPI_STATUS_IGNORE,mp_ierr)
-    call MPI_Waitall(nvar_async*mp_np,reqs,MPI_STATUSES_IGNORE,mp_ierr)
-    if (mp_ierr /= 0) goto 600
-
-!    if (mp_measure_time) call mpif_mtime('commtime',1)
+
+! TODO: cancel recv request if readclouds=.false.
+!    if (readclouds) then
+    call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
+!    endif
+    ! else
+    !   do i = 0, nvar_async*mp_np-1
+    !     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
+    !       call MPI_Cancel(reqs(i),mp_ierr)
+    !       cycle
+    !     end if
+    !     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
+    !   end do
+    ! end if
+
+    if (mp_ierr /= 0) goto 600
+
+    if (mp_measure_time) call mpif_mtime('commtime',1)

     goto 601

-600 write(*,*) "#### mpi_mod::mpif_gf_request> ERROR, mp_ierr \= 0 ", mp_ierr
+600 write(*,*) "#### mpi_mod::mpif_gf_request> MPI ERROR ", mp_ierr
     stop

     

 !**********************************************************************
+
     grid_size3d=numxgridn*numygridn*numzgrid*maxspec* &
          & maxpointspec_act*nclassunc*maxageclass
     
     character(LEN=*), intent(in) :: ident
     integer, intent(in) :: imode
+
+!***********************************************************************

     select case(ident)
     

     integer :: ip,j,r
+
+!***********************************************************************

     if (mp_measure_time) then
     
 ! In the implementation with 3 fields, the processes may have posted
 ! MPI_Irecv requests that should be cancelled here
-!! :TODO:
+!! TODO:
     ! if (.not.lmp_sync) then
     !   r=mp_pid*nvar_async
     
     call MPI_FINALIZE(mp_ierr)
     if (mp_ierr /= 0) then
-      write(*,*) '#### mpif_finalize::MPI_FINALIZE> ERROR ####'
+      write(*,*) '#### mpif_finalize::MPI_FINALIZE> MPI ERROR ', mp_ierr, ' ####'
       stop
     end if
     
     integer, save :: free_lun=100
     logical :: exists, iopen
+
+!***********************************************************************

     loop1: do
     
     character(LEN=40) :: fn_1, fn_2

+!***********************************************************************
+
     write(c_ts, FMT='(I8.8,BZ)') tstep
     fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
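In the asynchronous path above, the new cloud-field messages are posted into the same pre-allocated request array that mpif_gf_request later completes with MPI_Waitall. The sketch below (standalone, with made-up field names and sizes) illustrates that bookkeeping under the assumptions visible in the diff: each destination owns a block of nvar_async request slots, the counter advances only for messages that are actually posted, and untouched slots keep MPI_REQUEST_NULL, which MPI_Waitall treats as already complete.

! Sketch of the request-array bookkeeping for optional nonblocking transfers
! (field names, sizes and the reader-rank choice are placeholders)
program async_reqs_sketch
  use mpi
  implicit none
  integer, parameter :: nvar_async=2, n=100
  real :: field_a(n), field_b(n), buf_a(n), buf_b(n)
  integer, allocatable :: reqs(:)
  integer :: mp_ierr, mp_pid, mp_np, id_read, dest, i, j
  logical :: send_b

  call MPI_Init(mp_ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, mp_pid, mp_ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, mp_np, mp_ierr)
  id_read = mp_np-1            ! last rank acts as reader
  send_b  = .true.             ! plays the role of readclouds

  allocate(reqs(0:nvar_async*mp_np-1))
  reqs(:) = MPI_REQUEST_NULL   ! unused slots are ignored by MPI_Waitall

  if (mp_pid == id_read) then
    field_a = 1.0; field_b = 2.0
    do dest = 0, mp_np-1
      if (dest == id_read) cycle
      i = dest*nvar_async
      call MPI_Isend(field_a, n, MPI_REAL, dest, 1, MPI_COMM_WORLD, reqs(i), mp_ierr)
      if (send_b) then
        i = i+1
        call MPI_Isend(field_b, n, MPI_REAL, dest, 1, MPI_COMM_WORLD, reqs(i), mp_ierr)
      end if
    end do
  else
    j = mp_pid*nvar_async
    call MPI_Irecv(buf_a, n, MPI_REAL, id_read, MPI_ANY_TAG, MPI_COMM_WORLD, reqs(j), mp_ierr)
    if (send_b) then
      j = j+1
      call MPI_Irecv(buf_b, n, MPI_REAL, id_read, MPI_ANY_TAG, MPI_COMM_WORLD, reqs(j), mp_ierr)
    end if
  end if

! Complete everything that was posted; MPI_REQUEST_NULL entries complete immediately
  call MPI_Waitall(nvar_async*mp_np, reqs, MPI_STATUSES_IGNORE, mp_ierr)
  call MPI_Finalize(mp_ierr)
end program async_reqs_sketch

Run with, for example, mpirun -np 4; with a single rank nothing is posted and the MPI_Waitall over null requests returns immediately.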