Changeset 06f094f in flexpart.git for src/mpi_mod.f90
- Timestamp: May 11, 2016, 10:36:23 AM
- Branches: master, 10.4.1_pesei, GFS_025, bugfixes+enhancements, dev, release-10, release-10.4.1, scaling-bug, univie
- Children: ed82e08
- Parents: 94bb383, 3b80e98
- File: 1 edited (src/mpi_mod.f90)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to either parent individually. To see all changes relative to a given parent, view the diff against that parent (94bb383 or 3b80e98).
Legend:
- @@ -N +M @@ : hunk header; the hunk starts at line N of the old file and line M of the new file
- lines prefixed with "-" were removed
- lines prefixed with "+" were added
- unprefixed lines are unchanged context
src/mpi_mod.f90 (r62e65c7 -> r3b80e98)
@@ -862 +862 @@
 ! NOTE
 ! This subroutine distributes windfields read from the reader process to
-! all other processes. Usually one set of fields is transfered, but at
+! all other processes. Usually one set of fields are transfered, but at
 ! first timestep when there are no fields in memory, both are transfered.
 ! MPI_Bcast is used, so implicitly all processes are synchronized at this

@@ -926 +926 @@
 !**********************************************************************
 
-! The non-reader processes need to know if cloud water were read.
+! The non-reader processes need to know if cloud water was read.
   call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
   if (mp_ierr /= 0) goto 600

@@ -981 +981 @@
 ! cloud water/ice:
   if (readclouds) then
-!    call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
-!    if (mp_ierr /= 0) goto 600
     call MPI_Bcast(ctwc(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
     if (mp_ierr /= 0) goto 600
-!    call MPI_Bcast(clwc(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
-!    if (mp_ierr /= 0) goto 600
-!    call MPI_Bcast(ciwc(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
-!    if (mp_ierr /= 0) goto 600
   end if
 
@@ -1105 +1099 @@
 !**********************************************************************
 
-! The non-reader processes need to know if cloud water were read.
+! The non-reader processes need to know if cloud water was read.
   call MPI_Bcast(readclouds_nest,maxnests,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
   if (mp_ierr /= 0) goto 600

@@ -1315 +1309 @@
     i=i+1
     if (mp_ierr /= 0) goto 600
-
     call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     if (mp_ierr /= 0) goto 600

@@ -1322 +1315 @@
     if (mp_ierr /= 0) goto 600
     i=i+1
+! 15
     call MPI_Isend(ps(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     if (mp_ierr /= 0) goto 600

@@ -1352 +1346 @@
     if (mp_ierr /= 0) goto 600
     i=i+1
+! 25
     call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     if (mp_ierr /= 0) goto 600

@@ -1361 +1356 @@
     if (readclouds) then
       i=i+1
-!      call MPI_Isend(icloud_stats(:,:,:,mind),d2s1*5,mp_sp,dest,tm1,&
-!           &MPI_COMM_WORLD,reqs(i),mp_ierr)
       call MPI_Isend(ctwc(:,:,mind),d2s1,mp_sp,dest,tm1,&
            &MPI_COMM_WORLD,reqs(i),mp_ierr)
-
       if (mp_ierr /= 0) goto 600
-
-!      call MPI_Isend(clwc(:,:,:,mind),d3s1,mp_sp,dest,tm1,&
-!           &MPI_COMM_WORLD,reqs(i),mp_ierr)
-!      if (mp_ierr /= 0) goto 600
-!      i=i+1
-
-!      call MPI_Isend(ciwc(:,:,:,mind),d3s1,mp_sp,dest,tm1,&
-!           &MPI_COMM_WORLD,reqs(i),mp_ierr)
-!      if (mp_ierr /= 0) goto 600
-
     end if
   end do

@@ -1498 +1480 @@
   if (mp_ierr /= 0) goto 600
   j=j+1
-
   call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
        &MPI_COMM_WORLD,reqs(j),mp_ierr)

@@ -1511 +1492 @@
   if (mp_ierr /= 0) goto 600
   j=j+1
-
   call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
        &MPI_COMM_WORLD,reqs(j),mp_ierr)

@@ -1547 +1527 @@
        &MPI_COMM_WORLD,reqs(j),mp_ierr)
   if (mp_ierr /= 0) goto 600
-
   call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
        &MPI_COMM_WORLD,reqs(j),mp_ierr)

@@ -1567 +1546 @@
        &MPI_COMM_WORLD,reqs(j),mp_ierr)
   if (mp_ierr /= 0) goto 600
-
 
 ! Post request for clwc. These data are possibly not sent, request must then be cancelled

@@ -1573 +1551 @@
   if (readclouds) then
     j=j+1
-
-!    call MPI_Irecv(icloud_stats(:,:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
-!         &MPI_COMM_WORLD,reqs(j),mp_ierr)
     call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
          &MPI_COMM_WORLD,reqs(j),mp_ierr)
     if (mp_ierr /= 0) goto 600
-
-!    call MPI_Irecv(clwc(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
-!         &MPI_COMM_WORLD,reqs(j),mp_ierr)
-!    if (mp_ierr /= 0) goto 600
-!    j=j+1
-!    call MPI_Irecv(ciwc(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
-!         &MPI_COMM_WORLD,reqs(j),mp_ierr)
-!    if (mp_ierr /= 0) goto 600
-
   end if
 

@@ -1599 +1565 @@
 
 601 end subroutine mpif_gf_recv_vars_async
+
+
+  subroutine mpif_gf_send_vars_nest_async(memstat)
+!*******************************************************************************
+! DESCRIPTION
+!   Distribute 'getfield' variables from reader process to all processes.
+!   Called from timemanager by reader process only.
+!   Version for nested wind fields
+!
+! NOTE
+!   This version uses asynchronious sends. The newest fields are sent in the
+!   background, so calculations can continue while
+!   MPI communications are performed.
+!
+!   The way the MPI tags/requests are sequenced means that this routine must
+!   carefully match routine 'mpif_gf_recv_vars_async'
+!
+! VARIABLES
+!   memstat -- input, for resolving pointer to windfield index being read
+!   mind    -- index where to place new fields
+!
+! TODO
+!   Some unused arrays are currently sent (uupoln,..)
+!*******************************************************************************
+    use com_mod
+
+    implicit none
+
+    integer, intent(in) :: memstat
+    integer :: mind
+    integer :: dest,i,k
+
+! Common array sizes used for communications
+    integer :: d3s1 = nxmaxn*nymaxn*nzmax
+    integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
+    integer :: d2s1 = nxmaxn*nymaxn
+    integer :: d2s2 = nxmaxn*nymaxn*maxspec
+
+!*******************************************************************************
+
+! At the time the send is posted, the reader process is one step further
+! in the permutation of memstat compared with the receiving processes
+
+    if (memstat.ge.32) then
+! last read was synchronous, to indices 1 and 2, 3 is free
+      write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
+           & memstat>=32 should never happen here."
+      stop
+    else if (memstat.eq.17) then
+! old fields on 1,2, send 3
+      mind=3
+    else if (memstat.eq.18) then
+! old fields on 2,3, send 1
+      mind=1
+    else if (memstat.eq.19) then
+! old fields on 3,1, send 2
+      mind=2
+    else
+      write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
+           & invalid memstat"
+    end if
+
+    if (mp_dev_mode) then
+      if (mp_pid.ne.id_read) then
+        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_nest_async'
+      end if
+    end if
+
+    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_nest_async, memstat:', memstat
+
+! Time for MPI communications
+    if (mp_measure_time) call mpif_mtime('commtime',0)
+
+! Loop over receiving processes, initiate data sending
+!*****************************************************
+
+    do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
+! TODO: use mp_partgroup_np here
+      if (dest.eq.id_read) cycle
+      do k=1, numbnests
+        i=dest*nvar_async
+        call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        i=i+1
+        if (mp_ierr /= 0) goto 600
+        call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+! 15
+        call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+        i=i+1
+        call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+! 25
+
+! Send cloud water if it exists. Increment counter always (as on receiving end)
+        if (readclouds) then
+          i=i+1
+          call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,&
+               &MPI_COMM_WORLD,reqs(i),mp_ierr)
+          if (mp_ierr /= 0) goto 600
+        end if
+      end do
+    end do
+
+    if (mp_measure_time) call mpif_mtime('commtime',1)
+
+    goto 601
+
+600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> mp_ierr \= 0", mp_ierr
+    stop
+
+601 end subroutine mpif_gf_send_vars_nest_async
+
+
+  subroutine mpif_gf_recv_vars_nest_async(memstat)
+!*******************************************************************************
+! DESCRIPTION
+!   Receive 'getfield' variables from reader process.
+!   Called from timemanager by all processes except reader
+!   Version for nested wind fields
+!
+! NOTE
+!   This version uses asynchronious communications.
+!
+! VARIABLES
+!   memstat -- input, used to resolve windfield index being received
+!
+!
+!*******************************************************************************
+    use com_mod
+
+    implicit none
+
+    integer, intent(in) :: memstat
+    integer :: mind,j,k
+
+! Common array sizes used for communications
+    integer :: d3s1 = nxmaxn*nymaxn*nzmax
+    integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
+    integer :: d2s1 = nxmaxn*nymaxn
+    integer :: d2s2 = nxmaxn*nymaxn*maxspec
+
+!*******************************************************************************
+
+! At the time this immediate receive is posted, memstat is the state of
+! windfield indices at the previous/current time. From this, the future
+! state is deduced.
+    if (memstat.eq.32) then
+! last read was synchronous to indices 1 and 2, 3 is free
+      mind=3
+    else if (memstat.eq.17) then
+! last read was asynchronous to index 3, 1 is free
+      mind=1
+    else if (memstat.eq.18) then
+! last read was asynchronous to index 1, 2 is free
+      mind=2
+    else if (memstat.eq.19) then
+! last read was asynchronous to index 2, 3 is free
+      mind=3
+    else
+! illegal state
+      write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
+      stop
+    end if
+
+    if (mp_dev_mode) then
+      if (mp_pid.eq.id_read) then
+        write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async'
+      end if
+    end if
+
+! Time for MPI communications
+    if (mp_measure_time) call mpif_mtime('commtime',0)
+
+    if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat
+
+! Initiate receiving of data
+!***************************
+
+    do k=1, numbnests
+! Get MPI tags/requests for communications
+      j=mp_pid*nvar_async
+      call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      j=j+1
+      call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+           &MPI_COMM_WORLD,reqs(j),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+
+! Post request for clwc. These data are possibly not sent, request must then be cancelled
+! For now assume that data at all steps either have or do not have water
+      if (readclouds) then
+        j=j+1
+        call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
+             &MPI_COMM_WORLD,reqs(j),mp_ierr)
+        if (mp_ierr /= 0) goto 600
+      end if
+    end do
+
+    if (mp_measure_time) call mpif_mtime('commtime',1)
+
+    goto 601
+
+600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_nest_async> MPI ERROR ", mp_ierr
+    stop
+
+601 end subroutine mpif_gf_recv_vars_nest_async
 
 

@@ -1610 +1939 @@
 ! implicit synchronisation between all processes takes place here
 !
+! TODO
+!   take into account nested wind fields
 !
 !*******************************************************************************
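For orientation, a minimal sketch of how the two new nested async routines are intended to be paired from the time loop, based on the DESCRIPTION comments above. The call-site structure, the mp_pid/id_read role check, and the completion step are illustrative assumptions, not code from this changeset; the actual call sites and the request-completion logic live in timemanager and elsewhere in mpi_mod.

  ! Illustrative sketch only -- assumed call site, not part of changeset 06f094f.
  ! After the reader rank has read a new set of nested fields (memstat encodes which
  ! of the three time indices is free), it pushes them out in the background while
  ! the worker ranks post the matching receives.
  if (mp_pid.eq.id_read) then
    call mpif_gf_send_vars_nest_async(memstat)   ! reader: posts one MPI_Isend per nested field
  else
    call mpif_gf_recv_vars_nest_async(memstat)   ! workers: post the matching MPI_Irecv calls
  end if
  ! Computations on the previously transferred fields can continue here; before the
  ! new fields are used, all outstanding requests in reqs(:) must be completed
  ! (e.g. with MPI_Waitall), which is handled by a separate synchronisation routine.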