Changes in src/mpi_mod.f90 [79abee9:4c64400] in flexpart.git
src/mpi_mod.f90
r79abee9 r4c64400 90 90 ! MPI tags/requests for send/receive operation 91 91 integer :: tm1 92 integer, parameter :: nvar_async=26 !27 !29 :DBG:92 integer, parameter :: nvar_async=26 93 93 !integer, dimension(:), allocatable :: tags 94 94 integer, dimension(:), allocatable :: reqs 95 96 ! Status array used for certain MPI operations (MPI_RECV) 97 integer, dimension(MPI_STATUS_SIZE) :: mp_status 95 98 96 99 … … 149 152 integer, private :: dat_lun 150 153 154 ! Temporary arrays for particles (allocated and deallocated as needed) 155 integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, & 156 & itramem_tmp, itrasplit_tmp 157 real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp 158 real, allocatable, dimension(:) :: ztra1_tmp 159 real, allocatable, dimension(:,:) :: xmass1_tmp 160 161 ! mp_redist_fract Exchange particles between processes if relative difference 162 ! is larger. A good value is between 0.0 and 0.5 163 ! mp_maxpart_factor Allocate more memory per process than strictly needed 164 ! (safety factor). Recommended value between 1.5 and 2.5 165 ! mp_min_redist Do not redistribute particles if below this limit 166 real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5 167 integer,parameter :: mp_min_redist=100000 168 169 151 170 contains 152 171 … … 195 214 if (dep_prec==dp) then 196 215 mp_cp = MPI_REAL8 197 216 ! TODO: write info message for serial version as well 198 217 if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields' 199 218 else if (dep_prec==sp) then … … 232 251 write(*,FMT='(80("#"))') 233 252 end if 234 lmp_sync=.true. ! :DBG: eso fix this...253 lmp_sync=.true. 235 254 end if 236 255 … … 311 330 312 331 ! Set maxpart per process 313 if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1 314 maxpart_mpi=int(m axpart/mp_partgroup_np)+addmaxpart332 ! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution 333 maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np)) 315 334 316 335 ! Do not allocate particle data arrays for readwind process … … 321 340 ! Set random seed for each non-root process 322 341 if (mp_pid.gt.0) then 323 ! if (mp_pid.ge.0) then324 ! call system_clock(s)325 342 s = 244 326 343 mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729)) 327 344 end if 328 if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed329 345 if (mp_dbg_mode) then 330 ! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20331 346 mp_seed=0 332 347 if (lroot) write(*,*) 'MPI: setting seed=0' … … 454 469 455 470 471 subroutine mpif_send_part_properties(num_part) 472 !*********************************************************************** 473 ! Distribute particle properties from root to all processes. 474 ! 475 ! Used by init_domainfill_mpi 476 ! 477 ! Variables: 478 ! 479 ! num_part input, number of particles per process (rounded up) 480 ! 481 !*********************************************************************** 482 use com_mod 483 484 implicit none 485 486 integer,intent(in) :: num_part 487 integer :: i,jj, addone 488 489 ! Exit if too many particles 490 if (num_part.gt.maxpart_mpi) then 491 write(*,*) '#####################################################' 492 write(*,*) '#### ERROR - TOTAL NUMBER OF PARTICLES REQUIRED ####' 493 write(*,*) '#### EXCEEDS THE MAXIMUM ALLOWED NUMBER. REDUCE ####' 494 write(*,*) '#### EITHER NUMBER OF PARTICLES PER RELEASE POINT####' 495 write(*,*) '#### OR INCREASE MAXPART. 
####' 496 write(*,*) '#####################################################' 497 ! call MPI_FINALIZE(mp_ierr) 498 stop 499 end if 500 501 502 ! Time for MPI communications 503 !**************************** 504 if (mp_measure_time) call mpif_mtime('commtime',0) 505 506 ! Distribute variables, send from pid 0 to other processes (including itself) 507 !**************************************************************************** 508 509 call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,& 510 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 511 if (mp_ierr /= 0) goto 600 512 call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,& 513 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 514 if (mp_ierr /= 0) goto 600 515 call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,& 516 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 517 if (mp_ierr /= 0) goto 600 518 call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,& 519 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 520 if (mp_ierr /= 0) goto 600 521 call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,& 522 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 523 if (mp_ierr /= 0) goto 600 524 call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,& 525 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 526 if (mp_ierr /= 0) goto 600 527 call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,& 528 &num_part,mp_dp,id_root,mp_comm_used,mp_ierr) 529 if (mp_ierr /= 0) goto 600 530 call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,& 531 &num_part,mp_dp,id_root,mp_comm_used,mp_ierr) 532 if (mp_ierr /= 0) goto 600 533 call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,& 534 &num_part,mp_sp,id_root,mp_comm_used,mp_ierr) 535 if (mp_ierr /= 0) goto 600 536 do i=1,nspec 537 call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),& 538 &num_part,mp_sp,id_root,mp_comm_used,mp_ierr) 539 if (mp_ierr /= 0) goto 600 540 end do 541 542 if (mp_measure_time) call mpif_mtime('commtime',1) 543 544 goto 601 545 546 600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr 547 stop 548 549 ! After the transfer of particles it it possible that the value of 550 ! "numpart" is larger than the actual used, so we reduce it if there are 551 ! invalid particles at the end of the arrays 552 601 do i=num_part, 1, -1 553 if (itra1(i).eq.-999999999) then 554 numpart=numpart-1 555 else 556 exit 557 end if 558 end do 559 560 561 !601 end subroutine mpif_send_part_properties 562 end subroutine mpif_send_part_properties 563 564 565 subroutine mpif_calculate_part_redist(itime) 566 !*********************************************************************** 567 ! Calculate number of particles to redistribute between processes. This routine 568 ! can be called at regular time intervals to keep a level number of 569 ! particles on each process. 570 ! 571 ! First, all processes report their local "numpart" to each other, which is 572 ! stored in array "numpart_mpi(np)". This array is sorted from low to 573 ! high values, along with a corresponding process ID array "idx_arr(np)". 574 ! If the relative difference between the highest and lowest "numpart_mpi" value 575 ! is above a threshold, particles are transferred from process idx_arr(np-1) 576 ! to process idx_arr(0). Repeat for processes idx_arr(np-i) and idx_arr(i) 577 ! for all valid i. 578 ! Note: If np is an odd number, the process with the median 579 ! number of particles is left unchanged 580 ! 581 ! VARIABLES 582 ! 
itime input, current time 583 !*********************************************************************** 584 use com_mod 585 586 implicit none 587 588 integer, intent(in) :: itime 589 real :: pmin,z 590 integer :: i,jj,nn, num_part=1,m,imin, num_trans 591 logical :: first_iter 592 integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr 593 real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this 594 595 ! Exit if running with only 1 process 596 !************************************************************************ 597 if (mp_np.eq.1) return 598 599 ! All processes exchange information on number of particles 600 !**************************************************************************** 601 allocate(numparticles_mpi(0:mp_partgroup_np-1), & 602 &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1)) 603 604 call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, & 605 & 1, MPI_INTEGER, mp_comm_used, mp_ierr) 606 607 608 ! Sort from lowest to highest 609 ! Initial guess: correct order 610 sorted(:) = numparticles_mpi(:) 611 do i=0, mp_partgroup_np-1 612 idx_arr(i) = i 613 end do 614 615 ! For each successive element in index array, see if a lower value exists 616 do i=0, mp_partgroup_np-2 617 pmin=sorted(i) 618 imin=idx_arr(i) 619 m=i+1 620 do jj=m, mp_partgroup_np-1 621 if (pmin.le.sorted(jj)) cycle 622 z=pmin 623 pmin=sorted(jj) 624 sorted(jj)=z 625 626 nn=imin 627 imin=idx_arr(jj) 628 idx_arr(jj)=nn 629 end do 630 sorted(i)=pmin 631 idx_arr(i)=imin 632 end do 633 634 ! For each pair of processes, transfer particles if the difference is above a 635 ! limit, and numpart at sending process large enough 636 637 m=mp_partgroup_np-1 ! index for last sorted process (most particles) 638 do i=0,mp_partgroup_np/2-1 639 num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i)) 640 if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then 641 if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.& 642 & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then 643 ! DBG 644 ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi', & 645 ! &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi 646 ! DBG 647 call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2) 648 end if 649 end if 650 m=m-1 651 end do 652 653 deallocate(numparticles_mpi, idx_arr, sorted) 654 655 end subroutine mpif_calculate_part_redist 656 657 658 subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans) 659 !*********************************************************************** 660 ! Transfer particle properties between two arbitrary processes. 661 ! 662 ! VARIABLES 663 ! 664 ! itime input, current time 665 ! src_proc input, ID of source process 666 ! dest_proc input, ID of destination process 667 ! num_trans input, number of particles to transfer 668 ! 669 !************************************************************************ 670 use com_mod !TODO: ,only: nclass etc 671 672 implicit none 673 674 integer, intent(in) :: itime, src_proc, dest_proc, num_trans 675 integer :: ll, ul ! lower and upper indices in arrays 676 integer :: arr_sz ! size of temporary arrays 677 integer :: mtag ! MPI message tag 678 integer :: i, j, minpart, ipart, maxnumpart 679 680 ! Check for invalid input arguments 681 !********************************** 682 if (src_proc.eq.dest_proc) then 683 write(*,*) '<mpi_mod::mpif_redist_part>: Error: & 684 &src_proc.eq.dest_proc' 685 stop 686 end if 687 688 ! 
Measure time for MPI communications 689 !************************************ 690 if (mp_measure_time) call mpif_mtime('commtime',0) 691 692 ! Send particles 693 !*************** 694 if (mp_partid.eq.src_proc) then 695 mtag = 2000 696 697 ! Array indices for the transferred particles 698 ll=numpart-num_trans+1 699 ul=numpart 700 701 if (mp_dev_mode) then 702 write(*,FMT='(72("#"))') 703 write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc 704 write(*,*) "numpart before: ", numpart 705 end if 706 707 call MPI_SEND(nclass(ll:ul),num_trans,& 708 & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr) 709 710 call MPI_SEND(npoint(ll:ul),num_trans,& 711 & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr) 712 713 call MPI_SEND(itra1(ll:ul),num_trans, & 714 & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr) 715 716 call MPI_SEND(idt(ll:ul),num_trans, & 717 & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr) 718 719 call MPI_SEND(itramem(ll:ul),num_trans, & 720 & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr) 721 722 call MPI_SEND(itrasplit(ll:ul),num_trans,& 723 & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr) 724 725 call MPI_SEND(xtra1(ll:ul),num_trans, & 726 & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr) 727 728 call MPI_SEND(ytra1(ll:ul),num_trans,& 729 & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr) 730 731 call MPI_SEND(ztra1(ll:ul),num_trans,& 732 & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr) 733 734 do j=1,nspec 735 call MPI_SEND(xmass1(ll:ul,j),num_trans,& 736 & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr) 737 end do 738 739 ! Terminate transferred particles, update numpart 740 itra1(ll:ul) = -999999999 741 742 numpart = numpart-num_trans 743 744 if (mp_dev_mode) then 745 write(*,*) "numpart after: ", numpart 746 write(*,FMT='(72("#"))') 747 end if 748 749 else if (mp_partid.eq.dest_proc) then 750 751 ! Receive particles 752 !****************** 753 mtag = 2000 754 ! Array indices for the transferred particles 755 ll=numpart+1 756 ul=numpart+num_trans 757 758 if (mp_dev_mode) then 759 write(*,FMT='(72("#"))') 760 write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc 761 write(*,*) "numpart before: ", numpart 762 end if 763 764 ! We could receive the data directly at nclass(ll:ul) etc., but this leaves 765 ! vacant spaces at lower indices. Using temporary arrays instead. 
766 arr_sz = ul-ll+1 767 allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),& 768 & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),& 769 & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),& 770 & xmass1_tmp(arr_sz, maxspec)) 771 772 call MPI_RECV(nclass_tmp,num_trans,& 773 & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr) 774 775 call MPI_RECV(npoint_tmp,num_trans,& 776 & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr) 777 778 call MPI_RECV(itra1_tmp,num_trans, & 779 & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr) 780 781 call MPI_RECV(idt_tmp,num_trans, & 782 & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr) 783 784 call MPI_RECV(itramem_tmp,num_trans, & 785 & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr) 786 787 call MPI_RECV(itrasplit_tmp,num_trans,& 788 & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr) 789 790 call MPI_RECV(xtra1_tmp,num_trans, & 791 & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr) 792 793 call MPI_RECV(ytra1_tmp,num_trans,& 794 & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr) 795 796 call MPI_RECV(ztra1_tmp,num_trans,& 797 & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr) 798 799 do j=1,nspec 800 call MPI_RECV(xmass1_tmp(:,j),num_trans,& 801 & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr) 802 end do 803 804 ! This is the maximum value numpart can possibly have after the transfer 805 maxnumpart=numpart+num_trans 806 807 ! Search for vacant space and copy from temporary storage 808 !******************************************************** 809 minpart=1 810 do i=1, num_trans 811 ! Take into acount that we may have transferred invalid particles 812 if (itra1_tmp(i).ne.itime) cycle 813 do ipart=minpart,maxnumpart 814 if (itra1(ipart).ne.itime) then 815 itra1(ipart) = itra1_tmp(i) 816 npoint(ipart) = npoint_tmp(i) 817 nclass(ipart) = nclass_tmp(i) 818 idt(ipart) = idt_tmp(i) 819 itramem(ipart) = itramem_tmp(i) 820 itrasplit(ipart) = itrasplit_tmp(i) 821 xtra1(ipart) = xtra1_tmp(i) 822 ytra1(ipart) = ytra1_tmp(i) 823 ztra1(ipart) = ztra1_tmp(i) 824 xmass1(ipart,:) = xmass1_tmp(i,:) 825 goto 200 ! Storage space has been found, stop searching 826 end if 827 ! :TODO: add check for if too many particles requiried 828 end do 829 200 minpart=ipart+1 830 end do 831 ! Increase numpart, if necessary 832 numpart=max(numpart,ipart) 833 834 deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,& 835 &itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp) 836 837 if (mp_dev_mode) then 838 write(*,*) "numpart after: ", numpart 839 write(*,FMT='(72("#"))') 840 end if 841 842 else 843 ! This routine should only be called by the two participating processes 844 write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part" 845 stop 846 return 847 end if 848 849 ! 
Measure time for MPI communications 850 !************************************ 851 if (mp_measure_time) call mpif_mtime('commtime',1) 852 853 end subroutine mpif_redist_part 854 855 456 856 subroutine mpif_tm_send_vars 457 857 !*********************************************************************** … … 477 877 if (lroot) then 478 878 call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 479 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)879 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 480 880 if (mp_ierr /= 0) goto 600 481 881 call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 482 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)882 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 483 883 if (mp_ierr /= 0) goto 600 484 884 call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 485 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)885 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 486 886 if (mp_ierr /= 0) goto 600 487 887 call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 488 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)888 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 489 889 if (mp_ierr /= 0) goto 600 490 890 call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 491 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)891 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 492 892 if (mp_ierr /= 0) goto 600 493 893 494 894 ! int2 495 895 call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,& 496 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)896 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr) 497 897 if (mp_ierr /= 0) goto 600 498 898 499 899 ! real 500 900 call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,& 501 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)901 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 502 902 if (mp_ierr /= 0) goto 600 503 903 call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,& 504 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)904 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 505 905 if (mp_ierr /= 0) goto 600 506 906 call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,& 507 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)907 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 508 908 if (mp_ierr /= 0) goto 600 509 909 510 910 call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,& 511 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)911 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 512 912 if (mp_ierr /= 0) goto 600 513 913 call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,& 514 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)914 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 515 915 if (mp_ierr /= 0) goto 600 516 916 call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,& 517 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)917 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 518 918 if (mp_ierr /= 0) goto 600 519 919 520 920 call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,& 521 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)921 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 522 922 if (mp_ierr /= 0) goto 600 523 923 call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,& 524 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)924 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 525 925 if (mp_ierr /= 0) goto 600 526 926 call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,& 527 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)927 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 528 928 if 
(mp_ierr /= 0) goto 600 529 929 530 930 do i=1,nspec 531 931 call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,& 532 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)932 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 533 933 if (mp_ierr /= 0) goto 600 534 934 end do … … 537 937 ! integers 538 938 call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,& 539 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)939 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 540 940 if (mp_ierr /= 0) goto 600 541 941 call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,& 542 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)942 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 543 943 if (mp_ierr /= 0) goto 600 544 944 call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,& 545 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)945 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 546 946 if (mp_ierr /= 0) goto 600 547 947 call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,& 548 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)948 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 549 949 if (mp_ierr /= 0) goto 600 550 950 call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,& 551 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)951 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 552 952 if (mp_ierr /= 0) goto 600 553 953 554 954 ! int2 555 955 call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,& 556 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)956 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr) 557 957 if (mp_ierr /= 0) goto 600 558 958 559 959 ! reals 560 960 call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,& 561 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)961 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 562 962 if (mp_ierr /= 0) goto 600 563 963 call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,& 564 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)964 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 565 965 if (mp_ierr /= 0) goto 600 566 966 call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,& 567 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)967 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 568 968 if (mp_ierr /= 0) goto 600 569 969 570 970 call MPI_SCATTER(us,numpart_mpi,mp_sp,us,& 571 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)971 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 572 972 if (mp_ierr /= 0) goto 600 573 973 call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,& 574 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)974 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 575 975 if (mp_ierr /= 0) goto 600 576 976 call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,& 577 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)977 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 578 978 if (mp_ierr /= 0) goto 600 579 979 580 980 call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,& 581 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)981 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 582 982 if (mp_ierr /= 0) goto 600 583 983 call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,& 584 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)984 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 585 985 if (mp_ierr /= 0) goto 600 586 986 call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,& 587 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)987 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 588 988 if (mp_ierr /= 0) goto 600 589 989 590 990 do i=1,nspec 591 991 call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),& 592 & 
numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)992 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 593 993 if (mp_ierr /= 0) goto 600 594 994 end do … … 1145 1545 1146 1546 ! cloud water/ice: 1147 if (readclouds_nest(i)) then1148 1149 1150 call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)1151 if (mp_ierr /= 0) goto 6001152 end if1547 if (readclouds_nest(i)) then 1548 ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) 1549 ! if (mp_ierr /= 0) goto 600 1550 call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) 1551 if (mp_ierr /= 0) goto 600 1552 end if 1153 1553 1154 1554 ! 2D fields … … 1398 1798 integer :: d2s1 = nxmax*nymax 1399 1799 integer :: d2s2 = nxmax*nymax*maxspec 1400 1800 !integer :: d1_size1 = maxwf 1401 1801 1402 1802 ! integer :: d3s1,d3s2,d2s1,d2s2 … … 1645 2045 if (dest.eq.id_read) cycle 1646 2046 do k=1, numbnests 1647 i=dest*nvar_async1648 call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1649 if (mp_ierr /= 0) goto 6001650 i=i+11651 call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1652 if (mp_ierr /= 0) goto 6001653 i=i+11654 call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1655 if (mp_ierr /= 0) goto 6001656 i=i+11657 call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1658 if (mp_ierr /= 0) goto 6001659 i=i+11660 call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1661 if (mp_ierr /= 0) goto 6001662 i=i+11663 call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1664 if (mp_ierr /= 0) goto 6001665 i=i+11666 call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1667 if (mp_ierr /= 0) goto 6001668 i=i+11669 call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1670 if (mp_ierr /= 0) goto 6001671 i=i+11672 call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1673 if (mp_ierr /= 0) goto 6001674 i=i+11675 call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1676 if (mp_ierr /= 0) goto 6001677 i=i+11678 call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1679 i=i+11680 if (mp_ierr /= 0) goto 6001681 call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1682 if (mp_ierr /= 0) goto 6001683 i=i+11684 call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1685 if (mp_ierr /= 0) goto 6001686 i=i+11687 call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1688 if (mp_ierr /= 0) goto 6001689 i=i+11690 call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1691 if (mp_ierr /= 0) goto 6001692 i=i+12047 i=dest*nvar_async 2048 call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2049 if (mp_ierr /= 0) goto 600 2050 i=i+1 2051 call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2052 if (mp_ierr /= 0) goto 600 2053 i=i+1 2054 call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2055 if (mp_ierr /= 0) goto 600 2056 i=i+1 2057 call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2058 if (mp_ierr /= 0) goto 600 2059 i=i+1 2060 call 
MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2061 if (mp_ierr /= 0) goto 600 2062 i=i+1 2063 call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2064 if (mp_ierr /= 0) goto 600 2065 i=i+1 2066 call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2067 if (mp_ierr /= 0) goto 600 2068 i=i+1 2069 call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2070 if (mp_ierr /= 0) goto 600 2071 i=i+1 2072 call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2073 if (mp_ierr /= 0) goto 600 2074 i=i+1 2075 call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2076 if (mp_ierr /= 0) goto 600 2077 i=i+1 2078 call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2079 i=i+1 2080 if (mp_ierr /= 0) goto 600 2081 call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2082 if (mp_ierr /= 0) goto 600 2083 i=i+1 2084 call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2085 if (mp_ierr /= 0) goto 600 2086 i=i+1 2087 call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2088 if (mp_ierr /= 0) goto 600 2089 i=i+1 2090 call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2091 if (mp_ierr /= 0) goto 600 2092 i=i+1 1693 2093 ! 15 1694 call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1695 if (mp_ierr /= 0) goto 6001696 i=i+11697 call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1698 if (mp_ierr /= 0) goto 6001699 i=i+11700 call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1701 if (mp_ierr /= 0) goto 6001702 i=i+11703 call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1704 if (mp_ierr /= 0) goto 6001705 i=i+11706 call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1707 if (mp_ierr /= 0) goto 6001708 i=i+11709 call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1710 if (mp_ierr /= 0) goto 6001711 i=i+11712 call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1713 if (mp_ierr /= 0) goto 6001714 i=i+11715 call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1716 if (mp_ierr /= 0) goto 6001717 i=i+11718 call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1719 if (mp_ierr /= 0) goto 6001720 i=i+11721 call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1722 if (mp_ierr /= 0) goto 6002094 call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2095 if (mp_ierr /= 0) goto 600 2096 i=i+1 2097 call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2098 if (mp_ierr /= 0) goto 600 2099 i=i+1 2100 call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2101 if (mp_ierr /= 0) goto 600 2102 i=i+1 2103 call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2104 if (mp_ierr /= 0) goto 600 2105 i=i+1 2106 call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2107 if (mp_ierr /= 0) goto 600 2108 i=i+1 2109 call 
MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2110 if (mp_ierr /= 0) goto 600 2111 i=i+1 2112 call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2113 if (mp_ierr /= 0) goto 600 2114 i=i+1 2115 call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2116 if (mp_ierr /= 0) goto 600 2117 i=i+1 2118 call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2119 if (mp_ierr /= 0) goto 600 2120 i=i+1 2121 call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2122 if (mp_ierr /= 0) goto 600 1723 2123 ! 25 1724 2124 1725 2125 ! Send cloud water if it exists. Increment counter always (as on receiving end) 1726 if (readclouds) then 1727 i=i+1 1728 call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,& 1729 &MPI_COMM_WORLD,reqs(i),mp_ierr) 1730 if (mp_ierr /= 0) goto 600 1731 end if 2126 if (readclouds) then 2127 i=i+1 2128 call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,& 2129 &MPI_COMM_WORLD,reqs(i),mp_ierr) 2130 if (mp_ierr /= 0) goto 600 2131 end if 2132 end do 1732 2133 end do 1733 end do1734 2134 1735 2135 if (mp_measure_time) call mpif_mtime('commtime',1) … … 1810 2210 do k=1, numbnests 1811 2211 ! Get MPI tags/requests for communications 1812 j=mp_pid*nvar_async1813 call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1814 &MPI_COMM_WORLD,reqs(j),mp_ierr)1815 if (mp_ierr /= 0) goto 6001816 j=j+11817 call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1818 &MPI_COMM_WORLD,reqs(j),mp_ierr)1819 if (mp_ierr /= 0) goto 6001820 j=j+11821 call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1822 &MPI_COMM_WORLD,reqs(j),mp_ierr)1823 if (mp_ierr /= 0) goto 6001824 j=j+11825 call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1826 &MPI_COMM_WORLD,reqs(j),mp_ierr)1827 if (mp_ierr /= 0) goto 6001828 j=j+11829 call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1830 &MPI_COMM_WORLD,reqs(j),mp_ierr)1831 if (mp_ierr /= 0) goto 6001832 j=j+11833 call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1834 &MPI_COMM_WORLD,reqs(j),mp_ierr)1835 if (mp_ierr /= 0) goto 6001836 j=j+11837 call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&1838 &MPI_COMM_WORLD,reqs(j),mp_ierr)1839 if (mp_ierr /= 0) goto 6001840 j=j+11841 call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&1842 &MPI_COMM_WORLD,reqs(j),mp_ierr)1843 if (mp_ierr /= 0) goto 6001844 j=j+11845 call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1846 &MPI_COMM_WORLD,reqs(j),mp_ierr)1847 if (mp_ierr /= 0) goto 6001848 j=j+11849 call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1850 &MPI_COMM_WORLD,reqs(j),mp_ierr)1851 if (mp_ierr /= 0) goto 6001852 j=j+11853 call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&1854 &MPI_COMM_WORLD,reqs(j),mp_ierr)1855 if (mp_ierr /= 0) goto 6001856 j=j+11857 call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1858 &MPI_COMM_WORLD,reqs(j),mp_ierr)1859 if (mp_ierr /= 0) goto 6001860 j=j+11861 call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&1862 &MPI_COMM_WORLD,reqs(j),mp_ierr)1863 if (mp_ierr /= 0) goto 6001864 j=j+11865 call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1866 &MPI_COMM_WORLD,reqs(j),mp_ierr)1867 if (mp_ierr /= 0) goto 6001868 j=j+11869 call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1870 
&MPI_COMM_WORLD,reqs(j),mp_ierr)1871 if (mp_ierr /= 0) goto 6001872 j=j+11873 call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1874 &MPI_COMM_WORLD,reqs(j),mp_ierr)1875 if (mp_ierr /= 0) goto 6001876 j=j+11877 call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1878 &MPI_COMM_WORLD,reqs(j),mp_ierr)1879 if (mp_ierr /= 0) goto 6001880 j=j+11881 call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1882 &MPI_COMM_WORLD,reqs(j),mp_ierr)1883 if (mp_ierr /= 0) goto 6001884 j=j+11885 call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1886 &MPI_COMM_WORLD,reqs(j),mp_ierr)1887 if (mp_ierr /= 0) goto 6001888 j=j+11889 call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1890 &MPI_COMM_WORLD,reqs(j),mp_ierr)1891 if (mp_ierr /= 0) goto 6001892 call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1893 &MPI_COMM_WORLD,reqs(j),mp_ierr)1894 if (mp_ierr /= 0) goto 6001895 j=j+11896 call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1897 &MPI_COMM_WORLD,reqs(j),mp_ierr)1898 if (mp_ierr /= 0) goto 6001899 j=j+11900 call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1901 &MPI_COMM_WORLD,reqs(j),mp_ierr)1902 if (mp_ierr /= 0) goto 6001903 j=j+11904 call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1905 &MPI_COMM_WORLD,reqs(j),mp_ierr)1906 if (mp_ierr /= 0) goto 6001907 j=j+11908 call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1909 &MPI_COMM_WORLD,reqs(j),mp_ierr)1910 if (mp_ierr /= 0) goto 6002212 j=mp_pid*nvar_async 2213 call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2214 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2215 if (mp_ierr /= 0) goto 600 2216 j=j+1 2217 call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2218 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2219 if (mp_ierr /= 0) goto 600 2220 j=j+1 2221 call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2222 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2223 if (mp_ierr /= 0) goto 600 2224 j=j+1 2225 call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2226 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2227 if (mp_ierr /= 0) goto 600 2228 j=j+1 2229 call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2230 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2231 if (mp_ierr /= 0) goto 600 2232 j=j+1 2233 call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2234 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2235 if (mp_ierr /= 0) goto 600 2236 j=j+1 2237 call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,& 2238 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2239 if (mp_ierr /= 0) goto 600 2240 j=j+1 2241 call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,& 2242 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2243 if (mp_ierr /= 0) goto 600 2244 j=j+1 2245 call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2246 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2247 if (mp_ierr /= 0) goto 600 2248 j=j+1 2249 call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2250 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2251 if (mp_ierr /= 0) goto 600 2252 j=j+1 2253 call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,& 2254 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2255 if (mp_ierr /= 0) goto 600 2256 j=j+1 2257 call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2258 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2259 if (mp_ierr /= 0) goto 600 2260 j=j+1 2261 call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,& 2262 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2263 if 
(mp_ierr /= 0) goto 600 2264 j=j+1 2265 call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2266 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2267 if (mp_ierr /= 0) goto 600 2268 j=j+1 2269 call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2270 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2271 if (mp_ierr /= 0) goto 600 2272 j=j+1 2273 call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2274 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2275 if (mp_ierr /= 0) goto 600 2276 j=j+1 2277 call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2278 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2279 if (mp_ierr /= 0) goto 600 2280 j=j+1 2281 call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2282 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2283 if (mp_ierr /= 0) goto 600 2284 j=j+1 2285 call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2286 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2287 if (mp_ierr /= 0) goto 600 2288 j=j+1 2289 call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2290 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2291 if (mp_ierr /= 0) goto 600 2292 call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2293 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2294 if (mp_ierr /= 0) goto 600 2295 j=j+1 2296 call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2297 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2298 if (mp_ierr /= 0) goto 600 2299 j=j+1 2300 call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2301 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2302 if (mp_ierr /= 0) goto 600 2303 j=j+1 2304 call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2305 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2306 if (mp_ierr /= 0) goto 600 2307 j=j+1 2308 call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2309 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2310 if (mp_ierr /= 0) goto 600 1911 2311 1912 2312 ! Post request for clwc. These data are possibly not sent, request must then be cancelled 1913 2313 ! For now assume that data at all steps either have or do not have water 1914 if (readclouds) then1915 j=j+11916 call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&1917 &MPI_COMM_WORLD,reqs(j),mp_ierr)1918 if (mp_ierr /= 0) goto 6001919 end if1920 end do2314 if (readclouds) then 2315 j=j+1 2316 call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,& 2317 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2318 if (mp_ierr /= 0) goto 600 2319 end if 2320 end do 1921 2321 1922 2322 if (mp_measure_time) call mpif_mtime('commtime',1) … … 1940 2340 ! 1941 2341 ! TODO 1942 ! take into account nested wind fields 2342 ! NB: take into account nested wind fields by using a separate 2343 ! subroutine mpif_gf_request_nest (and separate request arrays for the 2344 ! nested variables) 1943 2345 ! 1944 2346 !******************************************************************************* … … 2349 2751 write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',& 2350 2752 & mp_conccalc_time_total 2351 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',& 2352 ! & mp_vt_wtime_total 2353 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',& 2354 ! & mp_vt_time_total 2355 ! NB: the 'flush' function is possibly a gfortran-specific extension 2356 call flush() 2753 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',& 2754 ! & mp_vt_wtime_total 2755 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',& 2756 ! & mp_vt_time_total 2757 ! 
NB: the 'flush' function is possibly a gfortran-specific extension, 2758 ! comment out if it gives problems 2759 ! call flush() 2357 2760 end if 2358 2761 end do … … 2388 2791 2389 2792 2390 2391 2392 2393 2793 end subroutine mpif_finalize 2794 2795 2796 subroutine get_lun(my_lun) 2394 2797 !*********************************************************************** 2395 2798 ! get_lun: … … 2397 2800 !*********************************************************************** 2398 2801 2399 2400 2401 2402 2403 2404 2405 !*********************************************************************** 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2802 implicit none 2803 2804 integer, intent(inout) :: my_lun 2805 integer, save :: free_lun=100 2806 logical :: exists, iopen 2807 2808 !*********************************************************************** 2809 2810 loop1: do 2811 inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen) 2812 if (exists .and. .not.iopen) exit loop1 2813 free_lun = free_lun+1 2814 end do loop1 2815 my_lun = free_lun 2816 2817 end subroutine get_lun 2818 2819 2820 subroutine write_data_dbg(array_in, array_name, tstep, ident) 2418 2821 !*********************************************************************** 2419 2822 ! Write one-dimensional arrays to file (for debugging purposes) 2420 2823 !*********************************************************************** 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 !*********************************************************************** 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2824 implicit none 2825 2826 real, intent(in), dimension(:) :: array_in 2827 integer, intent(in) :: tstep 2828 integer :: lios 2829 character(LEN=*), intent(in) :: ident, array_name 2830 2831 character(LEN=8) :: c_ts 2832 character(LEN=40) :: fn_1, fn_2 2833 2834 !*********************************************************************** 2835 2836 write(c_ts, FMT='(I8.8,BZ)') tstep 2837 fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident) 2838 write(c_ts, FMT='(I2.2,BZ)') mp_np 2839 fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat' 2840 2841 call get_lun(dat_lun) 2842 open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', & 2843 FORM='UNFORMATTED', STATUS='REPLACE') 2844 write(UNIT=dat_lun, IOSTAT=lios) array_in 2845 close(UNIT=dat_lun) 2846 2847 end subroutine write_data_dbg 2445 2848 2446 2849 … … 2499 2902 clwc(:,:,:,li:ui)=0.0 2500 2903 ciwc(:,:,:,li:ui)=0.0 2501 2904 2502 2905 ! 2D fields 2503 2906 … … 2516 2919 tropopause(:,:,:,li:ui)=10000. 2517 2920 oli(:,:,:,li:ui)=0.01 2518 2921 2519 2922 end subroutine set_fields_synthetic 2520 2923
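The changeset above adds dynamic particle redistribution between MPI processes, controlled by the new parameters mp_redist_fract, mp_maxpart_factor and mp_min_redist and implemented in mpif_calculate_part_redist / mpif_redist_part. The standalone sketch below is not part of the changeset; it only illustrates, under made-up particle counts, the sort-and-pair decision logic described in the header comment of mpif_calculate_part_redist (busiest process paired with idlest, half the difference moved when the relative imbalance exceeds mp_redist_fract and the sender holds more than mp_min_redist particles). The program name and input values are hypothetical.

! Standalone sketch (not part of the changeset): mimics the pairing logic of
! mpif_calculate_part_redist on a fixed, hypothetical set of per-process counts.
program redist_sketch
  implicit none
  integer, parameter :: np = 5                     ! hypothetical number of processes
  real,    parameter :: mp_redist_fract = 0.2      ! same threshold as in the changeset
  integer, parameter :: mp_min_redist   = 100000   ! same lower limit as in the changeset
  integer :: counts(0:np-1), idx(0:np-1)
  integer :: i, j, m, tmp, num_trans

  counts = (/ 900000, 120000, 450000, 80000, 300000 /)  ! hypothetical numpart per process

  ! Sort process indices by particle count, ascending (simple selection sort)
  idx = (/ (i, i=0, np-1) /)
  do i = 0, np-2
    m = i
    do j = i+1, np-1
      if (counts(idx(j)) < counts(idx(m))) m = j
    end do
    tmp = idx(i); idx(i) = idx(m); idx(m) = tmp
  end do

  ! Pair the fullest process with the emptiest and move half the difference
  ! when both redistribution criteria are met
  m = np-1
  do i = 0, np/2-1
    num_trans = counts(idx(m)) - counts(idx(i))
    if (counts(idx(m)) > mp_min_redist .and. &
        real(num_trans)/real(counts(idx(m))) > mp_redist_fract) then
      write(*,'(A,I0,A,I0,A,I0)') 'would move ', num_trans/2, &
           ' particles from process ', idx(m), ' to process ', idx(i)
    end if
    m = m-1
  end do
end program redist_sketch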