Changes in / [06f094f:94bb383] in flexpart.git
Location: src
Files: 3 edited
Legend:
  (no prefix)  Unmodified
  +            Added
  -            Removed
src/mpi_mod.f90 (r3b80e98 -> r62e65c7)

@@ old 862-866 / new 862-866 @@
  ! NOTE
  ! This subroutine distributes windfields read from the reader process to
- ! all other processes. Usually one set of fields are transfered, but at
+ ! all other processes. Usually one set of fields is transfered, but at
  ! first timestep when there are no fields in memory, both are transfered.
  ! MPI_Bcast is used, so implicitly all processes are synchronized at this

@@ old 926-930 / new 926-930 @@
  !**********************************************************************

- ! The non-reader processes need to know if cloud water was read.
+ ! The non-reader processes need to know if cloud water were read.
    call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600

@@ old 981-985 / new 981-991 @@
  ! cloud water/ice:
    if (readclouds) then
+ !    call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
+ !    if (mp_ierr /= 0) goto 600
      call MPI_Bcast(ctwc(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
      if (mp_ierr /= 0) goto 600
+ !    call MPI_Bcast(clwc(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
+ !    if (mp_ierr /= 0) goto 600
+ !    call MPI_Bcast(ciwc(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
+ !    if (mp_ierr /= 0) goto 600
    end if

@@ old 1099-1103 / new 1105-1109 @@
  !**********************************************************************

- ! The non-reader processes need to know if cloud water was read.
+ ! The non-reader processes need to know if cloud water were read.
    call MPI_Bcast(readclouds_nest,maxnests,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600

@@ old 1309-1312 / new 1315-1319 @@
      i=i+1
      if (mp_ierr /= 0) goto 600
+
      call MPI_Isend(cloudsh(:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
      if (mp_ierr /= 0) goto 600

@@ old 1315-1319 / new 1322-1325 @@
      if (mp_ierr /= 0) goto 600
      i=i+1
- ! 15
      call MPI_Isend(ps(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
      if (mp_ierr /= 0) goto 600

@@ old 1346-1350 / new 1352-1355 @@
      if (mp_ierr /= 0) goto 600
      i=i+1
- ! 25
      call MPI_Isend(tropopause(:,:,:,mind),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
      if (mp_ierr /= 0) goto 600

@@ old 1356-1362 / new 1361-1380 @@
      if (readclouds) then
        i=i+1
+ !      call MPI_Isend(icloud_stats(:,:,:,mind),d2s1*5,mp_sp,dest,tm1,&
+ !           &MPI_COMM_WORLD,reqs(i),mp_ierr)
        call MPI_Isend(ctwc(:,:,mind),d2s1,mp_sp,dest,tm1,&
             &MPI_COMM_WORLD,reqs(i),mp_ierr)
+
        if (mp_ierr /= 0) goto 600
+
+ !      call MPI_Isend(clwc(:,:,:,mind),d3s1,mp_sp,dest,tm1,&
+ !           &MPI_COMM_WORLD,reqs(i),mp_ierr)
+ !      if (mp_ierr /= 0) goto 600
+ !      i=i+1
+
+ !      call MPI_Isend(ciwc(:,:,:,mind),d3s1,mp_sp,dest,tm1,&
+ !           &MPI_COMM_WORLD,reqs(i),mp_ierr)
+ !      if (mp_ierr /= 0) goto 600
+
      end if
    end do

@@ old 1480-1483 / new 1498-1502 @@
    if (mp_ierr /= 0) goto 600
    j=j+1
+
    call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
         &MPI_COMM_WORLD,reqs(j),mp_ierr)

@@ old 1492-1495 / new 1511-1515 @@
    if (mp_ierr /= 0) goto 600
    j=j+1
+
    call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
         &MPI_COMM_WORLD,reqs(j),mp_ierr)

@@ old 1527-1530 / new 1547-1551 @@
         &MPI_COMM_WORLD,reqs(j),mp_ierr)
    if (mp_ierr /= 0) goto 600
+
    call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
         &MPI_COMM_WORLD,reqs(j),mp_ierr)

@@ old 1546-1549 / new 1567-1571 @@
         &MPI_COMM_WORLD,reqs(j),mp_ierr)
    if (mp_ierr /= 0) goto 600
+

  ! Post request for clwc. These data are possibly not sent, request must then be cancelled

@@ old 1551-1557 / new 1573-1591 @@
    if (readclouds) then
      j=j+1
+
+ !    call MPI_Irecv(icloud_stats(:,:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
+ !         &MPI_COMM_WORLD,reqs(j),mp_ierr)
      call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
      if (mp_ierr /= 0) goto 600
+
+ !    call MPI_Irecv(clwc(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+ !         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+ !    if (mp_ierr /= 0) goto 600
+ !    j=j+1
+ !    call MPI_Irecv(ciwc(:,:,:,mind),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+ !         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+ !    if (mp_ierr /= 0) goto 600
+
    end if

@@ old 1565-1931 / new 1599-1602 @@

  601 end subroutine mpif_gf_recv_vars_async
-
-
- subroutine mpif_gf_send_vars_nest_async(memstat)
- !*******************************************************************************
- ! DESCRIPTION
- !   Distribute 'getfield' variables from reader process to all processes.
- !   Called from timemanager by reader process only.
- !   Version for nested wind fields
- !
- ! NOTE
- !   This version uses asynchronious sends. The newest fields are sent in the
- !   background, so calculations can continue while
- !   MPI communications are performed.
- !
- !   The way the MPI tags/requests are sequenced means that this routine must
- !   carefully match routine 'mpif_gf_recv_vars_async'
- !
- ! VARIABLES
- !   memstat -- input, for resolving pointer to windfield index being read
- !   mind -- index where to place new fields
- !
- ! TODO
- !   Some unused arrays are currently sent (uupoln,..)
- !*******************************************************************************
-   use com_mod
-
-   implicit none
-
-   integer, intent(in) :: memstat
-   integer :: mind
-   integer :: dest,i,k
-
- ! Common array sizes used for communications
-   integer :: d3s1 = nxmaxn*nymaxn*nzmax
-   integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
-   integer :: d2s1 = nxmaxn*nymaxn
-   integer :: d2s2 = nxmaxn*nymaxn*maxspec
-
- !*******************************************************************************
-
- ! At the time the send is posted, the reader process is one step further
- ! in the permutation of memstat compared with the receiving processes
-
-   if (memstat.ge.32) then
- ! last read was synchronous, to indices 1 and 2, 3 is free
-     write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
-          & memstat>=32 should never happen here."
-     stop
-   else if (memstat.eq.17) then
- ! old fields on 1,2, send 3
-     mind=3
-   else if (memstat.eq.18) then
- ! old fields on 2,3, send 1
-     mind=1
-   else if (memstat.eq.19) then
- ! old fields on 3,1, send 2
-     mind=2
-   else
-     write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> ERROR: &
-          & invalid memstat"
-   end if
-
-   if (mp_dev_mode) then
-     if (mp_pid.ne.id_read) then
-       write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_send_vars_nest_async'
-     end if
-   end if
-
-   if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_nest_async, memstat:', memstat
-
- ! Time for MPI communications
-   if (mp_measure_time) call mpif_mtime('commtime',0)
-
- ! Loop over receiving processes, initiate data sending
- !*****************************************************
-
-   do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
- ! TODO: use mp_partgroup_np here
-     if (dest.eq.id_read) cycle
-     do k=1, numbnests
-       i=dest*nvar_async
-       call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       i=i+1
-       if (mp_ierr /= 0) goto 600
-       call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
- ! 15
-       call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-       i=i+1
-       call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
-       if (mp_ierr /= 0) goto 600
- ! 25
-
- ! Send cloud water if it exists. Increment counter always (as on receiving end)
-       if (readclouds) then
-         i=i+1
-         call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,&
-              &MPI_COMM_WORLD,reqs(i),mp_ierr)
-         if (mp_ierr /= 0) goto 600
-       end if
-     end do
-   end do
-
-   if (mp_measure_time) call mpif_mtime('commtime',1)
-
-   goto 601
-
- 600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_nest_async> mp_ierr \= 0", mp_ierr
-   stop
-
- 601 end subroutine mpif_gf_send_vars_nest_async
-
-
- subroutine mpif_gf_recv_vars_nest_async(memstat)
- !*******************************************************************************
- ! DESCRIPTION
- !   Receive 'getfield' variables from reader process.
- !   Called from timemanager by all processes except reader
- !   Version for nested wind fields
- !
- ! NOTE
- !   This version uses asynchronious communications.
- !
- ! VARIABLES
- !   memstat -- input, used to resolve windfield index being received
- !
- !
- !*******************************************************************************
-   use com_mod
-
-   implicit none
-
-   integer, intent(in) :: memstat
-   integer :: mind,j,k
-
- ! Common array sizes used for communications
-   integer :: d3s1 = nxmaxn*nymaxn*nzmax
-   integer :: d3s2 = nxmaxn*nymaxn*nuvzmax
-   integer :: d2s1 = nxmaxn*nymaxn
-   integer :: d2s2 = nxmaxn*nymaxn*maxspec
-
- !*******************************************************************************
-
- ! At the time this immediate receive is posted, memstat is the state of
- ! windfield indices at the previous/current time. From this, the future
- ! state is deduced.
-   if (memstat.eq.32) then
- ! last read was synchronous to indices 1 and 2, 3 is free
-     mind=3
-   else if (memstat.eq.17) then
- ! last read was asynchronous to index 3, 1 is free
-     mind=1
-   else if (memstat.eq.18) then
- ! last read was asynchronous to index 1, 2 is free
-     mind=2
-   else if (memstat.eq.19) then
- ! last read was asynchronous to index 2, 3 is free
-     mind=3
-   else
- ! illegal state
-     write(*,*) 'mpi_mod> FLEXPART ERROR: Illegal memstat value. Exiting.'
-     stop
-   end if
-
-   if (mp_dev_mode) then
-     if (mp_pid.eq.id_read) then
-       write(*,*) 'MPI_DEV_MODE: error calling mpif_gf_recv_vars_async'
-     end if
-   end if
-
- ! Time for MPI communications
-   if (mp_measure_time) call mpif_mtime('commtime',0)
-
-   if (mp_dev_mode) write(*,*) '## in mpif_gf_send_vars_async, memstat:', memstat
-
- ! Initiate receiving of data
- !***************************
-
-   do k=1, numbnests
- ! Get MPI tags/requests for communications
-     j=mp_pid*nvar_async
-     call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-     j=j+1
-     call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
-          &MPI_COMM_WORLD,reqs(j),mp_ierr)
-     if (mp_ierr /= 0) goto 600
-
- ! Post request for clwc. These data are possibly not sent, request must then be cancelled
- ! For now assume that data at all steps either have or do not have water
-     if (readclouds) then
-       j=j+1
-       call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
-            &MPI_COMM_WORLD,reqs(j),mp_ierr)
-       if (mp_ierr /= 0) goto 600
-     end if
-   end do
-
-   if (mp_measure_time) call mpif_mtime('commtime',1)
-
-   goto 601
-
- 600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_nest_async> MPI ERROR ", mp_ierr
-   stop
-
- 601 end subroutine mpif_gf_recv_vars_nest_async


@@ old 1939-1944 / new 1610-1613 @@
  ! implicit synchronisation between all processes takes place here
  !
- ! TODO
- ! take into account nested wind fields
  !
  !*******************************************************************************
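The asynchronous exchange shown above depends on the sender and receivers walking the same request table in lockstep: the reader posts one MPI_Isend per field starting at reqs(dest*nvar_async), and each receiving rank posts its MPI_Irecv calls in the same order starting at reqs(mp_pid*nvar_async), so MPI's non-overtaking rule pairs the messages up. The following stand-alone sketch is not FLEXPART code; the field names, nvar_async=2, the tag value, and the choice of rank 0 as reader are assumptions made only for illustration.

program async_field_exchange
  use mpi
  implicit none
  integer, parameter :: nvar_async = 2     ! fields sent per destination (illustrative)
  integer, parameter :: n = 1000           ! points per field (illustrative)
  integer :: ierr, rank, nprocs, dest, i, id_read
  real, dimension(n) :: field1, field2
  integer, allocatable :: reqs(:), stats(:,:)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
  id_read = 0                              ! rank acting as the "reader"

  if (rank == id_read) then
    field1 = 1.0; field2 = 2.0
    allocate(reqs(nvar_async*nprocs), stats(MPI_STATUS_SIZE, nvar_async*nprocs))
    reqs = MPI_REQUEST_NULL
    do dest = 0, nprocs-1
      if (dest == id_read) cycle
      i = dest*nvar_async + 1              ! first request slot for this destination
      call MPI_Isend(field1, n, MPI_REAL, dest, 1, MPI_COMM_WORLD, reqs(i),   ierr)
      call MPI_Isend(field2, n, MPI_REAL, dest, 1, MPI_COMM_WORLD, reqs(i+1), ierr)
    end do
    ! ... the reader could start reading the next windfield here ...
    call MPI_Waitall(nvar_async*nprocs, reqs, stats, ierr)
  else
    allocate(reqs(nvar_async), stats(MPI_STATUS_SIZE, nvar_async))
    ! Receives must be posted in the same order as the sends above
    call MPI_Irecv(field1, n, MPI_REAL, id_read, MPI_ANY_TAG, MPI_COMM_WORLD, reqs(1), ierr)
    call MPI_Irecv(field2, n, MPI_REAL, id_read, MPI_ANY_TAG, MPI_COMM_WORLD, reqs(2), ierr)
    ! ... particle calculations could proceed here until the new fields are needed ...
    call MPI_Waitall(nvar_async, reqs, stats, ierr)
  end if

  call MPI_Finalize(ierr)
end program async_field_exchange

Because the receiving side uses MPI_ANY_TAG, matching is determined purely by posting order per sender-receiver pair, which is why the NOTE blocks in the listing insist that the send and receive routines must stay strictly in step.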
src/par_mod.f90 (r3b80e98 -> r18adf60)

@@ old 187-191 / new 187-191 @@

    integer,parameter :: maxpart=1000000
-   integer,parameter :: maxspec=2
+   integer,parameter :: maxspec=6
    real,parameter :: minmass=0.0001

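Since maxspec is a compile-time parameter, raising it from 2 to 6 enlarges every species-dimensioned array and the MPI message lengths derived from them (for example d2s2 = nxmaxn*nymaxn*maxspec in the mpi_mod.f90 listing above), so all ranks must be built from the same par_mod.f90. A small, hedged illustration follows; it is not the real par_mod, and the grid dimensions and the vdep_example array are placeholders.

program species_dims_demo
  implicit none
  integer, parameter :: maxspec = 6                  ! was 2 before this changeset
  integer, parameter :: nxmaxn = 100, nymaxn = 100   ! placeholder grid dimensions
  integer, parameter :: d2s2 = nxmaxn*nymaxn*maxspec ! flattened count used as an MPI message length
  real :: vdep_example(nxmaxn, nymaxn, maxspec)      ! one 2-D slice per species
  vdep_example = 0.0
  print *, 'elements per species-dimensioned 2-D message (d2s2):', d2s2
end program species_dims_demo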
src/timemanager_mpi.f90 (r3b80e98 -> r18adf60)

@@ old 253-257 / new 253-256 @@
    if (mp_dev_mode) write(*,*) 'Reader process: calling mpif_gf_send_vars_async'
    call mpif_gf_send_vars_async(memstat)
-   if (numbnests>0) call mpif_gf_send_vars_nest_async(memstat)
  end if


@@ old 268-272 / new 267-270 @@
    if (mp_dev_mode) write(*,*) 'Receiving process: calling mpif_gf_send_vars_async. PID: ', mp_pid
    call mpif_gf_recv_vars_async(memstat)
-   if (numbnests>0) call mpif_gf_recv_vars_nest_async(memstat)
  end if

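In timemanager_mpi.f90 the send and receive routines are selected per rank: the dedicated reader (mp_pid equal to id_read) posts the asynchronous sends, every other rank posts the matching receives, and memstat is simply forwarded so both sides agree on which windfield slot is being filled. The stubs below stand in for mpif_gf_send_vars_async and mpif_gf_recv_vars_async; the memstat value and id_read=0 are illustrative assumptions, not FLEXPART defaults.

program reader_dispatch
  use mpi
  implicit none
  integer :: ierr, mp_pid, id_read, memstat

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, mp_pid, ierr)
  id_read = 0        ! rank assumed to be reserved for reading windfields
  memstat = 17       ! example memstat code (17/18/19/32 in the listings above)

  if (mp_pid == id_read) then
    call send_vars_async_stub(memstat)   ! reader distributes the freshly read fields
  else
    call recv_vars_async_stub(memstat)   ! all other ranks post matching receives
  end if

  call MPI_Finalize(ierr)

contains

  subroutine send_vars_async_stub(memstat)
    integer, intent(in) :: memstat
    print *, 'rank', mp_pid, '(reader) would post MPI_Isend calls, memstat =', memstat
  end subroutine send_vars_async_stub

  subroutine recv_vars_async_stub(memstat)
    integer, intent(in) :: memstat
    print *, 'rank', mp_pid, 'would post MPI_Irecv calls, memstat =', memstat
  end subroutine recv_vars_async_stub

end program reader_dispatch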