Changes in src/mpi_mod.f90 [4c64400:79abee9] in flexpart.git
File: src/mpi_mod.f90 (1 edited)

Legend: "old" is r4c64400, "new" is r79abee9. Unmodified lines are shown
without a prefix, lines added in the new revision are prefixed "+", and
lines removed from the old revision are prefixed "-". Hunk headers of the
form "@@ old N.. | new M.. @@" give the starting line of each hunk in the
two revisions; "..." marks omitted unchanged lines.
src/mpi_mod.f90
@@ old 90.. | new 90.. @@
 ! MPI tags/requests for send/receive operation
 integer :: tm1
-integer, parameter :: nvar_async=26
+integer, parameter :: nvar_async=26 !27 !29 :DBG:
 !integer, dimension(:), allocatable :: tags
 integer, dimension(:), allocatable :: reqs

-! Status array used for certain MPI operations (MPI_RECV)
-integer, dimension(MPI_STATUS_SIZE) :: mp_status

@@ old 152.. | new 149.. @@
 integer, private :: dat_lun

-! Temporary arrays for particles (allocated and deallocated as needed)
-integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, &
-     & itramem_tmp, itrasplit_tmp
-real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp
-real, allocatable, dimension(:) :: ztra1_tmp
-real, allocatable, dimension(:,:) :: xmass1_tmp
-
-! mp_redist_fract    Exchange particles between processes if relative difference
-!                    is larger. A good value is between 0.0 and 0.5
-! mp_maxpart_factor  Allocate more memory per process than strictly needed
-!                    (safety factor). Recommended value between 1.5 and 2.5
-! mp_min_redist      Do not redistribute particles if below this limit
-real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5
-integer, parameter :: mp_min_redist=100000

 contains

@@ old 214.. | new 195.. @@ (whitespace-only change to the TODO line)
 if (dep_prec==dp) then
   mp_cp = MPI_REAL8
 ! TODO: write info message for serial version as well
   if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields'
 else if (dep_prec==sp) then

@@ old 251.. | new 232.. @@
   write(*,FMT='(80("#"))')
 end if
-lmp_sync=.true.
+lmp_sync=.true. ! :DBG: eso fix this...

 end if

@@ old 330.. | new 311.. @@
 ! Set maxpart per process
-! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
-maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
+if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1
+maxpart_mpi=int(maxpart/mp_partgroup_np)+addmaxpart

 ! Do not allocate particle data arrays for readwind process

@@ old 340.. | new 321.. @@
 ! Set random seed for each non-root process
 if (mp_pid.gt.0) then
+! if (mp_pid.ge.0) then
+! call system_clock(s)
   s = 244
   mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
 end if
+if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed
 if (mp_dbg_mode) then
+! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20
   mp_seed=0
   if (lroot) write(*,*) 'MPI: setting seed=0'
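For reference, the two maxpart_mpi formulas in the hunk above behave quite
differently. A minimal stand-alone sketch (illustration only, with
hypothetical values: maxpart=1000001 particles over 4 processes) contrasts
the old safety-factor allocation with the new exact split:

    program maxpart_demo
      implicit none
      integer, parameter :: maxpart=1000001, np=4
      real,    parameter :: mp_maxpart_factor=1.5  ! value from the old revision
      integer :: mp_partid, addmaxpart, nold, nnew

      ! Old formula: the same oversized allocation on every process
      nold = int(mp_maxpart_factor*real(maxpart)/real(np))
      write(*,*) 'old formula:', nold, 'slots per process'

      ! New formula: exact split, remainder spread over the first ranks
      do mp_partid=0, np-1
        addmaxpart = 0
        if (mp_partid.lt.mod(maxpart,np)) addmaxpart = 1
        nnew = maxpart/np + addmaxpart
        write(*,*) 'new formula, rank', mp_partid, ':', nnew, 'slots'
      end do
    end program maxpart_demo

Here the old formula reserves 375000 slots on every process, headroom for
an uneven particle distribution, while the new one allocates exactly
250001 or 250000.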
@@ old 471.. | new 456.. @@
-subroutine mpif_send_part_properties(num_part)
-!***********************************************************************
-! Distribute particle properties from root to all processes.
-!
-! Used by init_domainfill_mpi
-!
-! Variables:
-!
-! num_part        input, number of particles per process (rounded up)
-!
-!***********************************************************************
+subroutine mpif_tm_send_vars
+!***********************************************************************
+! Distribute particle variables from pid0 to all processes.
+! Called from timemanager
+! *NOT IN USE* at the moment, but can be useful for debugging
+!
+!***********************************************************************
 use com_mod

 implicit none

-integer,intent(in) :: num_part
-integer :: i,jj, addone
-
-! Exit if too many particles
-if (num_part.gt.maxpart_mpi) then
-  write(*,*) '#####################################################'
-  write(*,*) '#### ERROR - TOTAL NUMBER OF PARTICLES REQUIRED  ####'
-  write(*,*) '#### EXCEEDS THE MAXIMUM ALLOWED NUMBER. REDUCE  ####'
-  write(*,*) '#### EITHER NUMBER OF PARTICLES PER RELEASE POINT####'
-  write(*,*) '#### OR INCREASE MAXPART.                        ####'
-  write(*,*) '#####################################################'
-  ! call MPI_FINALIZE(mp_ierr)
-  stop
-end if
+integer :: i

 ! Time for MPI communications
 !****************************
 if (mp_measure_time) call mpif_mtime('commtime',0)

-! Distribute variables, send from pid 0 to other processes (including itself)
-!****************************************************************************
-
-call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,&
-     &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
-if (mp_ierr /= 0) goto 600
-call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,&
-     &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
-if (mp_ierr /= 0) goto 600
-call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,&
-     &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
-if (mp_ierr /= 0) goto 600
-call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,&
-     &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
-if (mp_ierr /= 0) goto 600
-call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,&
-     &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
-if (mp_ierr /= 0) goto 600
-call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,&
-     &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
-if (mp_ierr /= 0) goto 600
-call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,&
-     &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
-if (mp_ierr /= 0) goto 600
-call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,&
-     &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
-if (mp_ierr /= 0) goto 600
-call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,&
-     &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
-if (mp_ierr /= 0) goto 600
-do i=1,nspec
-  call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),&
-       &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
-  if (mp_ierr /= 0) goto 600
-end do
-
-if (mp_measure_time) call mpif_mtime('commtime',1)
-
-goto 601
-
-600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
-stop
-
-! After the transfer of particles it is possible that the value of
-! "numpart" is larger than the actual number in use, so we reduce it if
-! there are invalid particles at the end of the arrays
-601 do i=num_part, 1, -1
-  if (itra1(i).eq.-999999999) then
-    numpart=numpart-1
-  else
-    exit
-  end if
-end do
-
-end subroutine mpif_send_part_properties
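The removed routine relies on MPI_SCATTER splitting one root-side array
into equal, contiguous chunks of num_part elements, delivered in rank
order (the root keeps the first chunk itself). A minimal self-contained
illustration, assuming the compiler provides the mpi module:

    program scatter_demo
      use mpi
      implicit none
      integer, parameter :: chunk=4
      integer :: ierr, rank, nproc, i
      integer, allocatable :: sendbuf(:)
      integer :: recvbuf(chunk)

      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)

      allocate(sendbuf(chunk*nproc))
      if (rank.eq.0) sendbuf = (/ (i, i=1, chunk*nproc) /)

      ! Rank r receives elements r*chunk+1 .. (r+1)*chunk of sendbuf
      call MPI_SCATTER(sendbuf, chunk, MPI_INTEGER, recvbuf, chunk, &
           & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
      write(*,*) 'rank', rank, 'got', recvbuf

      call MPI_FINALIZE(ierr)
    end program scatter_demo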
@@ old 565.. (removed; no counterpart in new) @@
-subroutine mpif_calculate_part_redist(itime)
-!***********************************************************************
-! Calculate number of particles to redistribute between processes. This routine
-! can be called at regular time intervals to keep the number of particles
-! on each process at an even level.
-!
-! First, all processes report their local "numpart" to each other, which is
-! stored in array "numpart_mpi(np)". This array is sorted from low to
-! high values, along with a corresponding process ID array "idx_arr(np)".
-! If the relative difference between the highest and lowest "numpart_mpi" value
-! is above a threshold, particles are transferred from process idx_arr(np-1)
-! to process idx_arr(0). Repeat for processes idx_arr(np-i) and idx_arr(i)
-! for all valid i.
-! Note: If np is an odd number, the process with the median
-! number of particles is left unchanged.
-!
-! VARIABLES
-! itime        input, current time
-!***********************************************************************
-use com_mod
-
-implicit none
-
-integer, intent(in) :: itime
-real :: pmin,z
-integer :: i,jj,nn, num_part=1,m,imin, num_trans
-logical :: first_iter
-integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr
-real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this
-
-! Exit if running with only 1 process
-!************************************************************************
-if (mp_np.eq.1) return
-
-! All processes exchange information on number of particles
-!****************************************************************************
-allocate(numparticles_mpi(0:mp_partgroup_np-1), &
-     &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
-
-call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, &
-     & 1, MPI_INTEGER, mp_comm_used, mp_ierr)
-
-! Sort from lowest to highest
-! Initial guess: correct order
-sorted(:) = numparticles_mpi(:)
-do i=0, mp_partgroup_np-1
-  idx_arr(i) = i
-end do
-
-! For each successive element in index array, see if a lower value exists
-do i=0, mp_partgroup_np-2
-  pmin=sorted(i)
-  imin=idx_arr(i)
-  m=i+1
-  do jj=m, mp_partgroup_np-1
-    if (pmin.le.sorted(jj)) cycle
-    z=pmin
-    pmin=sorted(jj)
-    sorted(jj)=z
-
-    nn=imin
-    imin=idx_arr(jj)
-    idx_arr(jj)=nn
-  end do
-  sorted(i)=pmin
-  idx_arr(i)=imin
-end do
-
-! For each pair of processes, transfer particles if the difference is above a
-! limit, and numpart at sending process large enough
-
-m=mp_partgroup_np-1 ! index for last sorted process (most particles)
-do i=0,mp_partgroup_np/2-1
-  num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
-  if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
-    if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&
-         & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then
-      ! DBG
-      ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi', &
-      !      &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi
-      ! DBG
-      call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
-    end if
-  end if
-  m=m-1
-end do
-
-deallocate(numparticles_mpi, idx_arr, sorted)
-
-end subroutine mpif_calculate_part_redist
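The balancing logic of the removed routine is easier to see in isolation:
sort process IDs by particle count, then pair the heaviest process with
the lightest and move half the difference whenever the imbalance exceeds
the threshold. A serial sketch with made-up counts and thresholds
(illustration only, not FLEXPART code):

    program redist_demo
      implicit none
      integer, parameter :: np=5
      real,    parameter :: redist_fract=0.2  ! stands in for mp_redist_fract
      integer, parameter :: min_redist=100    ! stands in for mp_min_redist
      integer :: counts(0:np-1), idx(0:np-1)
      integer :: i, jj, m, tmp, num_trans

      counts = (/ 900, 100, 500, 300, 700 /)
      idx = (/ (i, i=0, np-1) /)

      ! Sort indices by count, lowest first (selection sort, as above)
      do i=0, np-2
        do jj=i+1, np-1
          if (counts(idx(jj)) .lt. counts(idx(i))) then
            tmp=idx(i); idx(i)=idx(jj); idx(jj)=tmp
          end if
        end do
      end do

      ! Pair extremes and move half the difference
      m=np-1
      do i=0, np/2-1
        num_trans = counts(idx(m)) - counts(idx(i))
        if (counts(idx(m)).gt.min_redist .and. &
             & real(num_trans)/real(counts(idx(m))).gt.redist_fract) then
          counts(idx(m)) = counts(idx(m)) - num_trans/2
          counts(idx(i)) = counts(idx(i)) + num_trans/2
        end if
        m=m-1
      end do

      write(*,*) 'counts after leveling:', counts   ! all 500 here
    end program redist_demo

With an odd np the median process, idx(np/2), is never touched, matching
the note in the removed header.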
@@ old 658.. (removed; no counterpart in new) @@
-subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans)
-!***********************************************************************
-! Transfer particle properties between two arbitrary processes.
-!
-! VARIABLES
-!
-! itime           input, current time
-! src_proc        input, ID of source process
-! dest_proc       input, ID of destination process
-! num_trans       input, number of particles to transfer
-!
-!************************************************************************
-use com_mod !TODO: ,only: nclass etc
-
-implicit none
-
-integer, intent(in) :: itime, src_proc, dest_proc, num_trans
-integer :: ll, ul      ! lower and upper indices in arrays
-integer :: arr_sz      ! size of temporary arrays
-integer :: mtag        ! MPI message tag
-integer :: i, j, minpart, ipart, maxnumpart
-
-! Check for invalid input arguments
-!**********************************
-if (src_proc.eq.dest_proc) then
-  write(*,*) '<mpi_mod::mpif_redist_part>: Error: &
-       &src_proc.eq.dest_proc'
-  stop
-end if
-
-! Measure time for MPI communications
-!************************************
-if (mp_measure_time) call mpif_mtime('commtime',0)
-
-! Send particles
-!***************
-if (mp_partid.eq.src_proc) then
-  mtag = 2000
-
-! Array indices for the transferred particles
-  ll=numpart-num_trans+1
-  ul=numpart
-
-  if (mp_dev_mode) then
-    write(*,FMT='(72("#"))')
-    write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc
-    write(*,*) "numpart before: ", numpart
-  end if
-
-  call MPI_SEND(nclass(ll:ul),num_trans,&
-       & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr)
-
-  call MPI_SEND(npoint(ll:ul),num_trans,&
-       & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr)
-
-  call MPI_SEND(itra1(ll:ul),num_trans, &
-       & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr)
-
-  call MPI_SEND(idt(ll:ul),num_trans, &
-       & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr)
-
-  call MPI_SEND(itramem(ll:ul),num_trans, &
-       & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr)
-
-  call MPI_SEND(itrasplit(ll:ul),num_trans,&
-       & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr)
-
-  call MPI_SEND(xtra1(ll:ul),num_trans, &
-       & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr)
-
-  call MPI_SEND(ytra1(ll:ul),num_trans,&
-       & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr)
-
-  call MPI_SEND(ztra1(ll:ul),num_trans,&
-       & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr)
-
-  do j=1,nspec
-    call MPI_SEND(xmass1(ll:ul,j),num_trans,&
-         & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr)
-  end do
-
-! Terminate transferred particles, update numpart
-  itra1(ll:ul) = -999999999
-
-  numpart = numpart-num_trans
-
-  if (mp_dev_mode) then
-    write(*,*) "numpart after: ", numpart
-    write(*,FMT='(72("#"))')
-  end if
-
-else if (mp_partid.eq.dest_proc) then
-
-! Receive particles
-!******************
-  mtag = 2000
-! Array indices for the transferred particles
-  ll=numpart+1
-  ul=numpart+num_trans
-
-  if (mp_dev_mode) then
-    write(*,FMT='(72("#"))')
-    write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc
-    write(*,*) "numpart before: ", numpart
-  end if
-
-! We could receive the data directly at nclass(ll:ul) etc., but this leaves
-! vacant spaces at lower indices. Using temporary arrays instead.
-  arr_sz = ul-ll+1
-  allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),&
-       & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),&
-       & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),&
-       & xmass1_tmp(arr_sz, maxspec))
-
-  call MPI_RECV(nclass_tmp,num_trans,&
-       & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr)
-
-  call MPI_RECV(npoint_tmp,num_trans,&
-       & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr)
-
-  call MPI_RECV(itra1_tmp,num_trans, &
-       & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr)
-
-  call MPI_RECV(idt_tmp,num_trans, &
-       & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr)
-
-  call MPI_RECV(itramem_tmp,num_trans, &
-       & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr)
-
-  call MPI_RECV(itrasplit_tmp,num_trans,&
-       & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr)
-
-  call MPI_RECV(xtra1_tmp,num_trans, &
-       & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr)
-
-  call MPI_RECV(ytra1_tmp,num_trans,&
-       & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr)
-
-  call MPI_RECV(ztra1_tmp,num_trans,&
-       & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr)
-
-  do j=1,nspec
-    call MPI_RECV(xmass1_tmp(:,j),num_trans,&
-         & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr)
-  end do
-
-! This is the maximum value numpart can possibly have after the transfer
-  maxnumpart=numpart+num_trans
-
-! Search for vacant space and copy from temporary storage
-!********************************************************
-  minpart=1
-  do i=1, num_trans
-! Take into account that we may have transferred invalid particles
-    if (itra1_tmp(i).ne.itime) cycle
-    do ipart=minpart,maxnumpart
-      if (itra1(ipart).ne.itime) then
-        itra1(ipart) = itra1_tmp(i)
-        npoint(ipart) = npoint_tmp(i)
-        nclass(ipart) = nclass_tmp(i)
-        idt(ipart) = idt_tmp(i)
-        itramem(ipart) = itramem_tmp(i)
-        itrasplit(ipart) = itrasplit_tmp(i)
-        xtra1(ipart) = xtra1_tmp(i)
-        ytra1(ipart) = ytra1_tmp(i)
-        ztra1(ipart) = ztra1_tmp(i)
-        xmass1(ipart,:) = xmass1_tmp(i,:)
-        goto 200 ! Storage space has been found, stop searching
-      end if
-! :TODO: add check for if too many particles required
-    end do
-200 minpart=ipart+1
-  end do
-! Increase numpart, if necessary
-  numpart=max(numpart,ipart)
-
-  deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
-       &itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
-
-  if (mp_dev_mode) then
-    write(*,*) "numpart after: ", numpart
-    write(*,FMT='(72("#"))')
-  end if
-
-else
-! This routine should only be called by the two participating processes
-  write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part"
-  stop
-  return
-end if
-
-! Measure time for MPI communications
-!************************************
-if (mp_measure_time) call mpif_mtime('commtime',1)
-
-end subroutine mpif_redist_part
-
-
-subroutine mpif_tm_send_vars
-!***********************************************************************
-! Distribute particle variables from pid0 to all processes.
-! Called from timemanager
-! *NOT IN USE* at the moment, but can be useful for debugging
-!
-!***********************************************************************
-use com_mod
-
-implicit none
-
-integer :: i
-
-! Time for MPI communications
-!****************************
-if (mp_measure_time) call mpif_mtime('commtime',0)
-
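On the receiving side of the removed mpif_redist_part, transferred
particles are merged by a first-fit scan: any slot whose itra1 value
differs from the current time is treated as vacant and may be
overwritten, and minpart remembers where the scan stopped so filled slots
are never revisited. A reduced sketch of just that pattern (hypothetical
sizes; exit replaces the original goto 200):

    program slotfill_demo
      implicit none
      integer, parameter :: nmax=8, itime=100, invalid=-999999999
      integer :: itra1(nmax), incoming(3)
      integer :: i, ipart, minpart, numpart

      itra1 = (/ itime, invalid, itime, itime, invalid, itime, invalid, invalid /)
      incoming = (/ itime, itime, itime /)   ! three valid transferred particles
      numpart = 6

      minpart = 1
      do i=1, size(incoming)
        if (incoming(i).ne.itime) cycle      ! skip invalid particles
        do ipart=minpart, nmax
          if (itra1(ipart).ne.itime) then    ! vacant slot found
            itra1(ipart) = incoming(i)
            exit
          end if
        end do
        minpart = ipart+1                    ! resume after the slot just used
      end do
      numpart = max(numpart, ipart)          ! grow numpart if we filled past it

      write(*,*) 'itra1:', itra1
      write(*,*) 'numpart:', numpart
    end program slotfill_demo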
@@ old 873.. | new 473.. @@ (from here to the end of the subroutine the two
revisions differ only in the leading whitespace of continuation lines, so
each line is shown once)
 ! Distribute variables, send from pid 0 to other processes
 !**********************************************************************
 ...
 if (lroot) then
   call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
        &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
        &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
        &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
        &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
        &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600

 ! int2
   call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
        &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600

 ! real
   call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600

   call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600

   call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
        &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
        &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600

   do i=1,nspec
     call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,&
          &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     if (mp_ierr /= 0) goto 600
   end do

 ...
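In the root branch above, MPI_IN_PLACE is passed as the receive buffer:
the root's own chunk simply stays at the start of the send array instead
of being copied onto itself. A minimal sketch of that calling pattern
(illustration only; on non-root ranks the send argument is not
significant, which is why the code below, like the diff, reuses the same
array there):

    program inplace_demo
      use mpi
      implicit none
      integer, parameter :: chunk=2
      integer :: ierr, rank, nproc, i
      integer, allocatable :: buf(:)

      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)

      if (rank.eq.0) then
        allocate(buf(chunk*nproc))
        buf = (/ (10*i, i=1, chunk*nproc) /)
        ! Root: receive "in place"; buf(1:chunk) remains its own chunk
        call MPI_SCATTER(buf, chunk, MPI_INTEGER, MPI_IN_PLACE, chunk, &
             & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
      else
        allocate(buf(chunk))
        ! Non-root: first (send) argument is ignored here
        call MPI_SCATTER(buf, chunk, MPI_INTEGER, buf, chunk, &
             & MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
      end if
      write(*,*) 'rank', rank, 'chunk:', buf(1:chunk)
      call MPI_FINALIZE(ierr)
    end program inplace_demo

The else branch of the same hunk continues below.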
 ! integers
   call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
        &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
        &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
        &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
        &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
        &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600

 ! int2
   call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
        &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600

 ! reals
   call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600

   call MPI_SCATTER(us,numpart_mpi,mp_sp,us,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600

   call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
        &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
        &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600
   call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,&
        &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
   if (mp_ierr /= 0) goto 600

   do i=1,nspec
     call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),&
          &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     if (mp_ierr /= 0) goto 600
   end do

@@ old 1545.. | new 1145.. @@
 ! cloud water/ice:
-
-! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
-! if (mp_ierr /= 0) goto 600
-
-
+if (readclouds_nest(i)) then
+! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
+! if (mp_ierr /= 0) goto 600
+  call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
+  if (mp_ierr /= 0) goto 600
+end if

 ! 2D fields
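A side note on calls like MPI_Bcast(ctwcn(:,:,li:ui,i), ...): the actual
argument is a non-contiguous array section, and with the classic mpif.h /
use-mpi bindings (implicit interfaces or assumed-size dummies) the
compiler typically passes a packed temporary copy (copy-in/copy-out).
That is safe for blocking calls such as MPI_Bcast, at the cost of a
temporary. A tiny demonstration of the mechanism, independent of MPI:

    program section_demo
      implicit none
      real :: field(4,4,2)
      field = 1.0
      ! The section below is non-contiguous; an assumed-size dummy
      ! forces the compiler to pass a packed temporary copy.
      call assumed_size(field(:,2,:), size(field(:,2,:)))
    contains
      subroutine assumed_size(a, n)
        integer, intent(in) :: n
        real, intent(in) :: a(*)
        write(*,*) 'received', n, 'contiguous elements, a(1) =', a(1)
      end subroutine assumed_size
    end program section_demo

Such hidden temporaries would be unsafe for the nonblocking
MPI_Isend/MPI_Irecv calls later in this file, which is worth keeping in
mind when reading the slices passed there.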
@@ old 1798.. | new 1398.. @@ (whitespace-only change)
 integer :: d2s1 = nxmax*nymax
 integer :: d2s2 = nxmax*nymax*maxspec
 !integer :: d1_size1 = maxwf

 ! integer :: d3s1,d3s2,d2s1,d2s2

@@ old 2045.. | new 1645.. @@ (this block was re-indented; the removed and
added copies are otherwise identical, so it is shown once)
 if (dest.eq.id_read) cycle
 do k=1, numbnests
   i=dest*nvar_async
   call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   i=i+1
   if (mp_ierr /= 0) goto 600
   call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
 ! 15
   call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
   i=i+1
   call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
   if (mp_ierr /= 0) goto 600
 ! 25

 ! Send cloud water if it exists. Increment counter always (as on receiving end)
   if (readclouds) then
     i=i+1
     call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,&
          &MPI_COMM_WORLD,reqs(i),mp_ierr)
     if (mp_ierr /= 0) goto 600
   end if
 end do
 end do

 if (mp_measure_time) call mpif_mtime('commtime',1)
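The async exchange above gives every destination rank its own contiguous
slot range in the request array (i=dest*nvar_async, incremented once per
field), so all transfers can later be completed with one wait over reqs.
A stripped-down sketch of that bookkeeping (hypothetical field count and
sizes, illustration only):

    program isend_demo
      use mpi
      implicit none
      integer, parameter :: nvar=3, n=10
      integer :: ierr, rank, nproc, dest, i, v
      integer, allocatable :: reqs(:)
      real :: payload(n,nvar)

      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)

      allocate(reqs(0:nvar*nproc-1))
      reqs = MPI_REQUEST_NULL            ! unused slots complete immediately
      payload = real(rank)

      if (rank.eq.0) then
        do dest=1, nproc-1
          i = dest*nvar                  ! slot range for this destination
          do v=1, nvar
            call MPI_ISEND(payload(:,v), n, MPI_REAL, dest, v, &
                 & MPI_COMM_WORLD, reqs(i), ierr)
            i = i+1
          end do
        end do
        call MPI_WAITALL(nvar*nproc, reqs, MPI_STATUSES_IGNORE, ierr)
      else
        do v=1, nvar
          call MPI_RECV(payload(:,v), n, MPI_REAL, 0, v, &
               & MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
        end do
      end if
      write(*,*) 'rank', rank, 'payload(1,:) =', payload(1,:)
      call MPI_FINALIZE(ierr)
    end program isend_demo

Initializing unused entries to MPI_REQUEST_NULL keeps a single MPI_WAITALL
valid even for slots that were never posted.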
@@ old 2210.. | new 1810.. @@ (the old-side text of this hunk is not
reproduced on the changeset page; the replacement block from the new
revision follows)
 do k=1, numbnests
 ! Get MPI tags/requests for communications
+  j=mp_pid*nvar_async
+  call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600

 ! Post request for clwc. These data are possibly not sent, request must then be cancelled
 ! For now assume that data at all steps either have or do not have water
+  if (readclouds) then
+    j=j+1
+    call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+  end if
+end do

 if (mp_measure_time) call mpif_mtime('commtime',1)

@@ old 2340.. | new 1940.. @@
 !
 ! TODO
-! NB: take into account nested wind fields by using a separate
-! subroutine mpif_gf_request_nest (and separate request arrays for the
-! nested variables)
+! take into account nested wind fields
 !

@@ old 2751.. | new 2349.. @@
   write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',&
        & mp_conccalc_time_total
-! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
-!      & mp_vt_wtime_total
-! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
-!      & mp_vt_time_total
-! NB: the 'flush' function is possibly a gfortran-specific extension,
-! comment out if it gives problems
-! call flush()
+! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
+!      & mp_vt_wtime_total
+! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
+!      & mp_vt_time_total
+! NB: the 'flush' function is possibly a gfortran-specific extension
+call flush()
 end if
 end do

@@ old 2791.. | new 2388.. @@ (these routines were re-indented only; the
removed and added copies are otherwise identical, so they are shown once)
 end subroutine mpif_finalize


 subroutine get_lun(my_lun)
 !***********************************************************************
 ! get_lun:
 ...
 !***********************************************************************

 implicit none

 integer, intent(inout) :: my_lun
 integer, save :: free_lun=100
 logical :: exists, iopen

 !***********************************************************************

 loop1: do
   inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
   if (exists .and. .not.iopen) exit loop1
   free_lun = free_lun+1
 end do loop1
 my_lun = free_lun

 end subroutine get_lun


 subroutine write_data_dbg(array_in, array_name, tstep, ident)
 !***********************************************************************
 ! Write one-dimensional arrays to file (for debugging purposes)
 !***********************************************************************
 implicit none

 real, intent(in), dimension(:) :: array_in
 integer, intent(in) :: tstep
 integer :: lios
 character(LEN=*), intent(in) :: ident, array_name

 character(LEN=8) :: c_ts
 character(LEN=40) :: fn_1, fn_2

 !***********************************************************************

 write(c_ts, FMT='(I8.8,BZ)') tstep
 fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
 write(c_ts, FMT='(I2.2,BZ)') mp_np
 fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'

 call get_lun(dat_lun)
 open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
      FORM='UNFORMATTED', STATUS='REPLACE')
 write(UNIT=dat_lun, IOSTAT=lios) array_in
 close(UNIT=dat_lun)

 end subroutine write_data_dbg
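The get_lun helper above hand-rolls a free-unit search with INQUIRE. On
Fortran 2008 compilers the same effect is available through the NEWUNIT=
specifier, which hands back a guaranteed-unused (negative) unit number; a
minimal sketch of that alternative:

    program newunit_demo
      implicit none
      integer :: lun, lios
      real :: buf(3)
      buf = (/ 1.0, 2.0, 3.0 /)
      ! NEWUNIT picks a free unit itself; no INQUIRE loop needed
      open(NEWUNIT=lun, FILE='demo.dat', IOSTAT=lios, ACTION='WRITE', &
           FORM='UNFORMATTED', STATUS='REPLACE')
      write(UNIT=lun, IOSTAT=lios) buf
      close(UNIT=lun)
      write(*,*) 'wrote demo.dat via unit', lun
    end program newunit_demo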
@@ old 2902.. | new 2499.. @@ (whitespace-only changes on blank lines)
 clwc(:,:,:,li:ui)=0.0
 ciwc(:,:,:,li:ui)=0.0

 ! 2D fields
 ...
 tropopause(:,:,:,li:ui)=10000.
 oli(:,:,:,li:ui)=0.01

 end subroutine set_fields_synthetic