Changes in / [06f094f:94bb383] in flexpart.git


Location: src
Files: 3 edited

Legend: '+' marks added lines, '-' marks removed lines; unprefixed lines are unchanged context.
  • src/mpi_mod.f90

    r3b80e98 r62e65c7  
      ! NOTE
      !   This subroutine distributes windfields read from the reader process to
    - !   all other processes. Usually one set of fields are transfered, but at
    + !   all other processes. Usually one set of fields is transfered, but at
      !   first timestep when there are no fields in memory, both are transfered.
      !   MPI_Bcast is used, so implicitly all processes are synchronized at this
     
      !**********************************************************************

    - ! The non-reader processes need to know if cloud water was read.
    + ! The non-reader processes need to know if cloud water were read.
          call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
          if (mp_ierr /= 0) goto 600
     
      ! cloud water/ice:
          if (readclouds) then
    +       ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    +       ! if (mp_ierr /= 0) goto 600
            call MPI_Bcast(ctwc(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
            if (mp_ierr /= 0) goto 600
    +       ! call MPI_Bcast(clwc(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    +       ! if (mp_ierr /= 0) goto 600
    +       ! call MPI_Bcast(ciwc(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    +       ! if (mp_ierr /= 0) goto 600
          end if

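
The hunks above all touch the broadcast path described in the NOTE: the reader process broadcasts the readclouds flag so the non-reader processes know whether a cloud-water field follows, and the field itself (ctwc) is broadcast only when the flag is set. Below is a minimal, self-contained sketch of that flag-then-payload pattern; the names cloud_field, nx, ny and root are illustrative placeholders, not the FLEXPART variables (ctwc, d2s1, id_read).

    ! Sketch only: flag-then-payload broadcast. cloud_field, nx, ny, root are
    ! placeholders standing in for the FLEXPART names.
    program bcast_sketch
      use mpi
      implicit none
      integer, parameter :: nx=10, ny=10, root=0
      real    :: cloud_field(nx,ny)
      logical :: readclouds
      integer :: rank, ierr

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)

      if (rank == root) then
        readclouds  = .true.   ! the reader decides this after reading the input file
        cloud_field = 1.0      ! stands in for the field read from disk
      end if

      ! No receiving rank returns from MPI_Bcast before it has the value; this is
      ! the implicit synchronisation the NOTE above relies on.
      call MPI_Bcast(readclouds, 1, MPI_LOGICAL, root, MPI_COMM_WORLD, ierr)

      ! The payload is broadcast only when the flag says it exists.
      if (readclouds) then
        call MPI_Bcast(cloud_field, nx*ny, MPI_REAL, root, MPI_COMM_WORLD, ierr)
      end if

      call MPI_Finalize(ierr)
    end program bcast_sketch
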
     
      !**********************************************************************

    - ! The non-reader processes need to know if cloud water was read.
    + ! The non-reader processes need to know if cloud water were read.
          call MPI_Bcast(readclouds_nest,maxnests,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
          if (mp_ierr /= 0) goto 600
     
            i=i+1
            if (mp_ierr /= 0) goto 600
    +
            call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
            if (mp_ierr /= 0) goto 600
     
            if (mp_ierr /= 0) goto 600
            i=i+1
    - ! 15
            call MPI_Isend(ps(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
            if (mp_ierr /= 0) goto 600
     
            if (mp_ierr /= 0) goto 600
            i=i+1
    - ! 25
            call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
            if (mp_ierr /= 0) goto 600
     
            if (readclouds) then
              i=i+1
    +         ! call MPI_Isend(icloud_stats(:,:,:,mind),d2s1*5,mp_sp,dest,tm1,&
    +         !      &MPI_COMM_WORLD,reqs(i),mp_ierr)
              call MPI_Isend(ctwc(:,:,mind),d2s1,mp_sp,dest,tm1,&
                   &MPI_COMM_WORLD,reqs(i),mp_ierr)
    +
              if (mp_ierr /= 0) goto 600
    +
    +         ! call MPI_Isend(clwc(:,:,:,mind),d3s1,mp_sp,dest,tm1,&
    +         !      &MPI_COMM_WORLD,reqs(i),mp_ierr)
    +         ! if (mp_ierr /= 0) goto 600
    +         ! i=i+1
    +
    +         ! call MPI_Isend(ciwc(:,:,:,mind),d3s1,mp_sp,dest,tm1,&
    +         !      &MPI_COMM_WORLD,reqs(i),mp_ierr)
    +         ! if (mp_ierr /= 0) goto 600
    +
            end if
          end do
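
In the send loop above, each destination gets its own run of slots in the request array reqs: the sender steps i once per MPI_Isend, the receiver steps j in the same order, and the conditional ctwc message adds one more slot on both ends only when readclouds is true. The sketch below reproduces that bookkeeping in a minimal form, assuming a per-destination block of nvar+1 request slots; nvar, fields, extra, have_extra and tag are placeholders, not the FLEXPART variables, and unused slots are pre-set to MPI_REQUEST_NULL so a single MPI_Waitall completes everything.

    ! Sketch only: per-destination blocks of a request array that must advance in
    ! lockstep on sender and receivers. nvar, fields, extra, have_extra, tag are
    ! placeholders, not FLEXPART names.
    program async_exchange_sketch
      use mpi
      implicit none
      integer, parameter :: n=1000, nvar=3, root=0, tag=1
      real    :: fields(n,nvar), extra(n)
      logical :: have_extra
      integer :: rank, nprocs, ierr, dest, i, m
      integer, allocatable :: reqs(:)

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)

      allocate(reqs((nvar+1)*nprocs))
      reqs   = MPI_REQUEST_NULL   ! unused slots count as complete in MPI_Waitall
      fields = 0.0
      extra  = 0.0

      ! Everyone must agree on whether the optional field exists (cf. readclouds).
      if (rank == root) have_extra = .true.
      call MPI_Bcast(have_extra, 1, MPI_LOGICAL, root, MPI_COMM_WORLD, ierr)

      if (rank == root) then
        ! Sender: one block of request slots per destination, advanced per message.
        do dest = 0, nprocs-1
          if (dest == root) cycle
          i = dest*(nvar+1)        ! base slot for this destination
          do m = 1, nvar
            i = i + 1
            call MPI_Isend(fields(:,m), n, MPI_REAL, dest, tag, &
                 &MPI_COMM_WORLD, reqs(i), ierr)
          end do
          if (have_extra) then     ! optional message: slot advanced on both ends
            i = i + 1
            call MPI_Isend(extra, n, MPI_REAL, dest, tag, &
                 &MPI_COMM_WORLD, reqs(i), ierr)
          end if
        end do
      else
        ! Receiver: posts matching receives into its own block, in the same order.
        i = rank*(nvar+1)
        do m = 1, nvar
          i = i + 1
          call MPI_Irecv(fields(:,m), n, MPI_REAL, root, MPI_ANY_TAG, &
               &MPI_COMM_WORLD, reqs(i), ierr)
        end do
        if (have_extra) then
          i = i + 1
          call MPI_Irecv(extra, n, MPI_REAL, root, MPI_ANY_TAG, &
               &MPI_COMM_WORLD, reqs(i), ierr)
        end if
      end if

      ! Computation could overlap with the transfers here; then complete them all.
      call MPI_Waitall(size(reqs), reqs, MPI_STATUSES_IGNORE, ierr)

      call MPI_Finalize(ierr)
    end program async_exchange_sketch
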
     
          if (mp_ierr /= 0) goto 600
          j=j+1
    +
          call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
               &MPI_COMM_WORLD,reqs(j),mp_ierr)
     
          if (mp_ierr /= 0) goto 600
          j=j+1
    +
          call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
               &MPI_COMM_WORLD,reqs(j),mp_ierr)
     
               &MPI_COMM_WORLD,reqs(j),mp_ierr)
          if (mp_ierr /= 0) goto 600
    +
          call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
               &MPI_COMM_WORLD,reqs(j),mp_ierr)
     
               &MPI_COMM_WORLD,reqs(j),mp_ierr)
          if (mp_ierr /= 0) goto 600
    +

      ! Post request for clwc. These data are possibly not sent, request must then be cancelled
     
          if (readclouds) then
            j=j+1
    +
    +       ! call MPI_Irecv(icloud_stats(:,:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
    +       !      &MPI_COMM_WORLD,reqs(j),mp_ierr)
            call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
                 &MPI_COMM_WORLD,reqs(j),mp_ierr)
            if (mp_ierr /= 0) goto 600
    +
    +       ! call MPI_Irecv(clwc(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    +       !      &MPI_COMM_WORLD,reqs(j),mp_ierr)
    +       ! if (mp_ierr /= 0) goto 600
    +       ! j=j+1
    +       ! call MPI_Irecv(ciwc(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    +       !      &MPI_COMM_WORLD,reqs(j),mp_ierr)
    +       ! if (mp_ierr /= 0) goto 600
    +
          end if

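
The receive above is posted speculatively: as the comment before it notes, the data are possibly not sent, and a pending request must then be cancelled. A minimal sketch of cancelling such an orphaned receive follows; buf, tag and the assumed sending rank are placeholders, and MPI_Test, MPI_Cancel and MPI_Wait are the standard calls for checking, cancelling and completing a request.

    ! Sketch only: cancelling a speculative receive whose matching send never arrives.
    ! buf, tag and the assumed sender (rank 0) are placeholders.
    program cancel_sketch
      use mpi
      implicit none
      integer, parameter :: n=100, tag=7
      real    :: buf(n)
      integer :: rank, ierr, req
      logical :: done

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)

      if (rank > 0) then
        ! Post the receive before knowing whether the data will actually be sent.
        call MPI_Irecv(buf, n, MPI_REAL, 0, tag, MPI_COMM_WORLD, req, ierr)

        ! ... later it turns out nothing was sent, so the request is still pending:
        call MPI_Test(req, done, MPI_STATUS_IGNORE, ierr)
        if (.not. done) then
          call MPI_Cancel(req, ierr)                   ! mark the pending receive for cancellation
          call MPI_Wait(req, MPI_STATUS_IGNORE, ierr)  ! complete and free the cancelled request
        end if
      end if

      call MPI_Finalize(ierr)
    end program cancel_sketch
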
     

      601 end subroutine mpif_gf_recv_vars_async
    -
    -
    -   subroutine mpif_gf_send_vars_nest_async(memstat)
    - !*******************************************************************************
    - ! DESCRIPTION
    - !   Distribute 'getfield' variables from reader process to all processes.
    - !   Called from timemanager by reader process only.
    - !   Version for nested wind fields
    - !
    - ! NOTE
    - !   This version uses asynchronious sends. The newest fields are sent in the
    - !   background, so calculations can continue while
    - !   MPI communications are performed.
    - !
    - !   The way the MPI tags/requests are sequenced means that this routine must
    - !   carefully match routine 'mpif_gf_recv_vars_async'
    - !
    - ! VARIABLES
    - !   memstat -- input, for resolving pointer to windfield index being read
    - !   mind    -- index where to place new fields
    - !
    - ! TODO
    - !   Some unused arrays are currently sent (uupoln,..)
    - !*******************************************************************************
    -     use com_mod
    -
    -     implicit none
    -
    -     integer, intent(in) :: memstat
    -     integer :: mind
    -     integer :: dest,i,k
    -
    - ! Common array sizes used for communications
    -     integer :: d3s1 = nxmaxn*nymaxn*nzmax
    -     integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
    -     integer :: d2s1 = nxmaxn*nymaxn
    -     integer :: d2s2 = nxmaxn*nymaxn*maxspec
    -
    - !*******************************************************************************
    -
    - ! At the time the send is posted, the reader process is one step further
    - ! in the permutation of memstat compared with the receiving processes
    -
    -     if (memstat.ge.32) then
    - ! last read was synchronous, to indices 1 and 2, 3 is free
    -       write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
    -            & memstat>=32 should never happen here."
    -       stop
    -     else if (memstat.eq.17) then
    - ! old fields on 1,2, send 3
    -       mind=3
    -     else if (memstat.eq.18) then
    - ! old fields on 2,3, send 1
    -       mind=1
    -     else if (memstat.eq.19) then
    - ! old fields on 3,1, send 2
    -       mind=2
    -     else
    -       write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
    -            & invalid memstat"
    -     end if
    -
    -     if (mp_dev_mode) then
    -       if (mp_pid.ne.id_read) then
    -         write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_nest_async'
    -       end if
    -     end if
    -
    -     if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_nest_async, memstat:', memstat
    -
    - ! Time for MPI communications
    -     if (mp_measure_time) call mpif_mtime('commtime',0)
    -
    - ! Loop over receiving processes, initiate data sending
    - !*****************************************************
    -
    -     do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
    - ! TODO: use mp_partgroup_np here
    -       if (dest.eq.id_read) cycle
    -       do k=1, numbnests
    -       i=dest*nvar_async
    -       call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       i=i+1
    -       if (mp_ierr /= 0) goto 600
    -       call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    - ! 15
    -       call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -       i=i+1
    -       call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    - ! 25
    -
    - ! Send cloud water if it exists. Increment counter always (as on receiving end)
    -       if (readclouds) then
    -         i=i+1
    -         call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,&
    -              &MPI_COMM_WORLD,reqs(i),mp_ierr)
    -         if (mp_ierr /= 0) goto 600
    -       end if
    -     end do
    -   end do
    -
    -     if (mp_measure_time) call mpif_mtime('commtime',1)
    -
    -     goto 601
    -
    - 600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> mp_ierr \= 0", mp_ierr
    -     stop
    -
    - 601 end subroutine mpif_gf_send_vars_nest_async
    -
    -
    -   subroutine mpif_gf_recv_vars_nest_async(memstat)
    - !*******************************************************************************
    - ! DESCRIPTION
    - !   Receive 'getfield' variables from reader process.
    - !   Called from timemanager by all processes except reader
    - !   Version for nested wind fields
    - !
    - ! NOTE
    - !   This version uses asynchronious communications.
    - !
    - ! VARIABLES
    - !   memstat -- input, used to resolve windfield index being received
    - !
    - !
    - !*******************************************************************************
    -     use com_mod
    -
    -     implicit none
    -
    -     integer, intent(in) :: memstat
    -     integer :: mind,j,k
    -
    - ! Common array sizes used for communications
    -     integer :: d3s1 = nxmaxn*nymaxn*nzmax
    -     integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
    -     integer :: d2s1 = nxmaxn*nymaxn
    -     integer :: d2s2 = nxmaxn*nymaxn*maxspec
    -
    - !*******************************************************************************
    -
    - ! At the time this immediate receive is posted, memstat is the state of
    - ! windfield indices at the previous/current time. From this, the future
    - ! state is deduced.
    -     if (memstat.eq.32) then
    - ! last read was synchronous to indices 1 and 2, 3 is free
    -       mind=3
    -     else if (memstat.eq.17) then
    - ! last read was asynchronous to index 3, 1 is free
    -       mind=1
    -     else if (memstat.eq.18) then
    - ! last read was asynchronous to index 1, 2 is free
    -       mind=2
    -     else if (memstat.eq.19) then
    - ! last read was asynchronous to index 2, 3 is free
    -       mind=3
    -     else
    - ! illegal state
    -       write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
    -       stop
    -     end if
    -
    -     if (mp_dev_mode) then
    -       if (mp_pid.eq.id_read) then
    -         write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async'
    -       end if
    -     end if
    -
    - ! Time for MPI communications
    -     if (mp_measure_time) call mpif_mtime('commtime',0)
    -
    -     if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat
    -
    - ! Initiate receiving of data
    - !***************************
    -
    -     do k=1, numbnests
    - ! Get MPI tags/requests for communications
    -     j=mp_pid*nvar_async
    -     call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -     j=j+1
    -     call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    -          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -     if (mp_ierr /= 0) goto 600
    -
    - ! Post request for clwc. These data are possibly not sent, request must then be cancelled
    - ! For now assume that data at all steps either have or do not have water
    -     if (readclouds) then
    -       j=j+1
    -       call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
    -            &MPI_COMM_WORLD,reqs(j),mp_ierr)
    -       if (mp_ierr /= 0) goto 600
    -     end if
    -   end do
    -
    -     if (mp_measure_time) call mpif_mtime('commtime',1)
    -
    -     goto 601
    -
    - 600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_nest_async> MPI ERROR ", mp_ierr
    -     stop
    -
    - 601 end subroutine mpif_gf_recv_vars_nest_async


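
Both removed routines resolve which of the three windfield memory slots is free from memstat, the state at the previous/current time, as the comments above spell out (32: synchronous read into slots 1 and 2, 3 free; 17/18/19: asynchronous read into slot 3/1/2, so slot 1/2/3 is free next). For reference, here is the receive-side mapping condensed into a small hypothetical helper, with the case meanings copied from those comments.

    ! Sketch only: the receive-side memstat -> free-slot mapping from the removed
    ! routine above, condensed into a hypothetical helper for reference.
    program memstat_sketch
      implicit none
      integer :: m
      do m = 17, 19
        print '(a,i0,a,i0)', 'memstat=', m, ' -> receive into slot ', free_slot(m)
      end do
    contains
      integer function free_slot(memstat)
        integer, intent(in) :: memstat
        select case (memstat)
        case (32)      ! last read was synchronous, to slots 1 and 2; 3 is free
          free_slot = 3
        case (17)      ! last read was asynchronous, to slot 3; 1 is free
          free_slot = 1
        case (18)      ! last read was asynchronous, to slot 1; 2 is free
          free_slot = 2
        case (19)      ! last read was asynchronous, to slot 2; 3 is free
          free_slot = 3
        case default   ! illegal state
          free_slot = -1
        end select
      end function free_slot
    end program memstat_sketch
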
     
      !   implicit synchronisation between all processes takes place here
      !
    - ! TODO
    - !   take into account nested wind fields
      !
      !*******************************************************************************
  • src/par_mod.f90

    r3b80e98 r18adf60  

        integer,parameter :: maxpart=1000000
    -   integer,parameter :: maxspec=2
    +   integer,parameter :: maxspec=6
        real,parameter :: minmass=0.0001

  • src/timemanager_mpi.f90

    r3b80e98 r18adf60  
              if (mp_dev_mode) write(*,*) 'Reader process: calling mpif_gf_send_vars_async'
              call mpif_gf_send_vars_async(memstat)
    -         if (numbnests>0) call mpif_gf_send_vars_nest_async(memstat)
            end if

     
              if (mp_dev_mode) write(*,*) 'Receiving process: calling mpif_gf_send_vars_async. PID: ', mp_pid
              call mpif_gf_recv_vars_async(memstat)
    -         if (numbnests>0) call mpif_gf_recv_vars_nest_async(memstat)
            end if
