Changeset 861805a in flexpart.git for src/mpi_mod.f90
- Timestamp: Sep 6, 2016, 9:59:11 AM (8 years ago)
- Branches: master, 10.4.1_pesei, GFS_025, bugfixes+enhancements, dev, release-10, release-10.4.1, scaling-bug, univie
- Children: 16b61a5, 93786a1
- Parents: 0f7835d
- File: 1 edited
src/mpi_mod.f90
r0f7835d r861805a 90 90 ! MPI tags/requests for send/receive operation 91 91 integer :: tm1 92 integer, parameter :: nvar_async=26 !27 !29 :DBG:92 integer, parameter :: nvar_async=26 93 93 !integer, dimension(:), allocatable :: tags 94 94 integer, dimension(:), allocatable :: reqs 95 96 ! Status array used for certain MPI operations (MPI_RECV) 97 integer, dimension(MPI_STATUS_SIZE) :: mp_status 95 98 96 99 … … 119 122 logical, parameter :: mp_dev_mode = .false. 120 123 logical, parameter :: mp_dbg_out = .false. 121 logical, parameter :: mp_time_barrier=. true.124 logical, parameter :: mp_time_barrier=.false. 122 125 logical, parameter :: mp_measure_time=.false. 123 126 logical, parameter :: mp_exact_numpart=.true. … … 149 152 integer, private :: dat_lun 150 153 154 ! Temporary arrays for particles (allocated and deallocated as needed) 155 integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, & 156 & itramem_tmp, itrasplit_tmp 157 real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp 158 real, allocatable, dimension(:) :: ztra1_tmp 159 real, allocatable, dimension(:,:) :: xmass1_tmp 160 161 ! mp_redist_fract Exchange particles between processes if relative difference 162 ! is larger. A good value is between 0.0 and 0.5 163 ! mp_maxpart_factor Allocate more memory per process than strictly needed 164 ! (safety factor). Recommended value between 1.5 and 2.5 165 ! mp_min_redist Do not redistribute particles if below this limit 166 real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5 167 integer,parameter :: mp_min_redist=100000 168 169 151 170 contains 152 171 … … 195 214 if (dep_prec==dp) then 196 215 mp_cp = MPI_REAL8 197 216 ! TODO: write info message for serial version as well 198 217 if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields' 199 218 else if (dep_prec==sp) then … … 242 261 !********************************************************************** 243 262 244 ! id_read = min(mp_np-1, 1)245 263 id_read = mp_np-1 246 264 … … 311 329 312 330 ! Set maxpart per process 313 if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1 314 maxpart_mpi=int(m axpart/mp_partgroup_np)+addmaxpart331 ! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution 332 maxpart_mpi=int(mp_maxpart_factor*maxpart/mp_partgroup_np) 315 333 316 334 ! Do not allocate particle data arrays for readwind process … … 321 339 ! Set random seed for each non-root process 322 340 if (mp_pid.gt.0) then 323 ! if (mp_pid.ge.0) then324 ! call system_clock(s)325 341 s = 244 326 342 mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729)) 327 343 end if 328 if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed329 344 if (mp_dbg_mode) then 330 ! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20331 345 mp_seed=0 332 346 if (lroot) write(*,*) 'MPI: setting seed=0' … … 454 468 455 469 470 subroutine mpif_send_part_properties(num_part) 471 !*********************************************************************** 472 ! Distribute particle properties from root to all processes. 473 ! 474 ! Used by init_domainfill_mpi 475 ! 476 ! Variables: 477 ! 478 ! num_part input, number of particles per process (rounded up) 479 ! 480 !*********************************************************************** 481 use com_mod 482 483 implicit none 484 485 integer,intent(in) :: num_part 486 integer :: i,jj, addone 487 488 ! 
Time for MPI communications 489 !**************************** 490 if (mp_measure_time) call mpif_mtime('commtime',0) 491 492 ! Distribute variables, send from pid 0 to other processes (including itself) 493 !**************************************************************************** 494 495 call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,& 496 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 497 if (mp_ierr /= 0) goto 600 498 call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,& 499 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 500 if (mp_ierr /= 0) goto 600 501 call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,& 502 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 503 if (mp_ierr /= 0) goto 600 504 call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,& 505 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 506 if (mp_ierr /= 0) goto 600 507 call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,& 508 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 509 if (mp_ierr /= 0) goto 600 510 call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,& 511 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 512 if (mp_ierr /= 0) goto 600 513 call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,& 514 &num_part,mp_dp,id_root,mp_comm_used,mp_ierr) 515 if (mp_ierr /= 0) goto 600 516 call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,& 517 &num_part,mp_dp,id_root,mp_comm_used,mp_ierr) 518 if (mp_ierr /= 0) goto 600 519 call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,& 520 &num_part,mp_sp,id_root,mp_comm_used,mp_ierr) 521 if (mp_ierr /= 0) goto 600 522 do i=1,nspec 523 call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),& 524 &num_part,mp_sp,id_root,mp_comm_used,mp_ierr) 525 if (mp_ierr /= 0) goto 600 526 end do 527 528 if (mp_measure_time) call mpif_mtime('commtime',1) 529 write(*,*) "PID ", mp_partid, "ending MPI_Scatter operation" 530 531 goto 601 532 533 600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr 534 stop 535 536 ! After the transfer of particles it it possible that the value of 537 ! "numpart" is larger than the actual, so we reduce it if there are 538 ! invalid particles at the end of the arrays 539 601 do i=num_part, 1, -1 540 if (itra1(i).eq.-999999999) then 541 numpart=numpart-1 542 else 543 exit 544 end if 545 end do 546 547 548 !601 end subroutine mpif_send_part_properties 549 end subroutine mpif_send_part_properties 550 551 552 subroutine mpif_calculate_part_redist(itime) 553 !*********************************************************************** 554 ! Calculate number of particles to redistribute between processes. This routine 555 ! can be called at regular time intervals to keep a level number of 556 ! particles on each process. 557 ! 558 ! First, all processes report their local "numpart" to each other, which is 559 ! stored in array "numpart_mpi(np)". This array is sorted from low to 560 ! high values, along with a corresponding process ID array "idx_arr(np)". 561 ! If the relative difference between the highest and lowest "numpart_mpi" value 562 ! is above a threshold, particles are transferred from process idx_arr(np-1) 563 ! to process idx_arr(0). Repeat for processes idx_arr(np-i) and idx_arr(i) 564 ! for all valid i. 565 ! Note: If np is an odd number, the process with the median 566 ! number of particles is left unchanged 567 ! 568 ! VARIABLES 569 ! 
itime input, current time 570 !*********************************************************************** 571 use com_mod 572 573 implicit none 574 575 integer, intent(in) :: itime 576 real :: pmin,z 577 integer :: i,jj,nn, num_part=1,m,imin, num_trans 578 logical :: first_iter 579 integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr 580 real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this 581 582 ! Exit if running with only 1 process 583 !************************************************************************ 584 if (mp_np.eq.1) return 585 586 ! All processes exchange information on number of particles 587 !**************************************************************************** 588 allocate(numparticles_mpi(0:mp_partgroup_np-1), & 589 &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1)) 590 591 call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, & 592 & 1, MPI_INTEGER, mp_comm_used, mp_ierr) 593 594 595 ! Sort from lowest to highest 596 ! Initial guess: correct order 597 sorted(:) = numparticles_mpi(:) 598 do i=0, mp_partgroup_np-1 599 idx_arr(i) = i 600 end do 601 602 ! For each successive element in index array, see if a lower value exists 603 do i=0, mp_partgroup_np-2 604 pmin=sorted(i) 605 imin=idx_arr(i) 606 m=i+1 607 do jj=m, mp_partgroup_np-1 608 if (pmin.le.sorted(jj)) cycle 609 z=pmin 610 pmin=sorted(jj) 611 sorted(jj)=z 612 613 nn=imin 614 imin=idx_arr(jj) 615 idx_arr(jj)=nn 616 end do 617 sorted(i)=pmin 618 idx_arr(i)=imin 619 end do 620 621 ! For each pair of processes, transfer particles if the difference is above a 622 ! limit, and numpart at sending process large enough 623 624 m=mp_partgroup_np-1 ! index for last sorted process (most particles) 625 do i=0,mp_partgroup_np/2-1 626 num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i)) 627 if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then 628 if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.& 629 & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then 630 call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2) 631 end if 632 end if 633 m=m-1 634 end do 635 636 deallocate(numparticles_mpi, idx_arr, sorted) 637 638 end subroutine mpif_calculate_part_redist 639 640 641 subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans) 642 !*********************************************************************** 643 ! Transfer particle properties between two arbitrary processes. 644 ! 645 ! VARIABLES 646 ! 647 ! itime input, current time 648 ! src_proc input, ID of source process 649 ! dest_proc input, ID of destination process 650 ! num_trans input, number of particles to transfer 651 ! 652 !************************************************************************ 653 use com_mod !TODO: ,only: nclass etc 654 655 implicit none 656 657 integer, intent(in) :: itime, src_proc, dest_proc, num_trans 658 integer :: ll, ul ! lower and upper indices in arrays 659 integer :: arr_sz ! size of temporary arrays 660 integer :: mtag ! MPI message tag 661 integer :: i, j, minpart, ipart, maxnumpart 662 663 ! Measure time for MPI communications 664 !************************************ 665 if (mp_measure_time) call mpif_mtime('commtime',0) 666 667 ! Send particles 668 !*************** 669 if (mp_partid.eq.src_proc) then 670 mtag = 2000 671 672 ! Array indices for the transferred particles 673 ll=numpart-num_trans+1 674 ul=numpart 675 676 ! if (mp_dev_mode) then 677 ! write(*,FMT='(72("#"))') 678 ! 
write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc 679 ! write(*,*) "numpart before: ", numpart 680 ! end if 681 682 call MPI_SEND(nclass(ll:ul),num_trans,& 683 & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr) 684 685 call MPI_SEND(npoint(ll:ul),num_trans,& 686 & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr) 687 688 call MPI_SEND(itra1(ll:ul),num_trans, & 689 & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr) 690 691 call MPI_SEND(idt(ll:ul),num_trans, & 692 & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr) 693 694 call MPI_SEND(itramem(ll:ul),num_trans, & 695 & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr) 696 697 call MPI_SEND(itrasplit(ll:ul),num_trans,& 698 & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr) 699 700 call MPI_SEND(xtra1(ll:ul),num_trans, & 701 & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr) 702 703 call MPI_SEND(ytra1(ll:ul),num_trans,& 704 & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr) 705 706 call MPI_SEND(ztra1(ll:ul),num_trans,& 707 & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr) 708 709 do j=1,nspec 710 call MPI_SEND(xmass1(ll:ul,j),num_trans,& 711 & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr) 712 end do 713 714 ! Terminate transferred particles, update numpart 715 itra1(ll:ul) = -999999999 716 717 numpart = numpart-num_trans 718 719 ! if (mp_dev_mode) then 720 ! write(*,*) "numpart after: ", numpart 721 ! write(*,FMT='(72("#"))') 722 ! end if 723 724 else if (mp_partid.eq.dest_proc) then 725 726 ! Receive particles 727 !****************** 728 mtag = 2000 729 ! Array indices for the transferred particles 730 ll=numpart+1 731 ul=numpart+num_trans 732 733 ! if (mp_dev_mode) then 734 ! write(*,FMT='(72("#"))') 735 ! write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc 736 ! write(*,*) "numpart before: ", numpart 737 ! end if 738 739 ! We could receive the data directly at nclass(ll:ul) etc., but this leaves 740 ! vacant spaces at lower indices. Using temporary arrays instead. 741 arr_sz = ul-ll+1 742 allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),& 743 & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),& 744 & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),& 745 & xmass1_tmp(arr_sz, maxspec)) 746 747 call MPI_RECV(nclass_tmp,num_trans,& 748 & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr) 749 750 call MPI_RECV(npoint_tmp,num_trans,& 751 & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr) 752 753 call MPI_RECV(itra1_tmp,num_trans, & 754 & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr) 755 756 call MPI_RECV(idt_tmp,num_trans, & 757 & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr) 758 759 call MPI_RECV(itramem_tmp,num_trans, & 760 & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr) 761 762 call MPI_RECV(itrasplit_tmp,num_trans,& 763 & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr) 764 765 call MPI_RECV(xtra1_tmp,num_trans, & 766 & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr) 767 768 call MPI_RECV(ytra1_tmp,num_trans,& 769 & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr) 770 771 call MPI_RECV(ztra1_tmp,num_trans,& 772 & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr) 773 774 do j=1,nspec 775 call MPI_RECV(xmass1_tmp(:,j),num_trans,& 776 & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr) 777 end do 778 779 ! This is the maximum value numpart can possibly have after the transfer 780 maxnumpart=numpart+num_trans 781 782 ! 
Search for vacant space and copy from temporary storage 783 !******************************************************** 784 minpart=1 785 do i=1, num_trans 786 ! Take into acount that we may have transferred invalid particles 787 if (itra1_tmp(minpart).ne.itime) goto 200 788 do ipart=minpart,maxnumpart 789 if (itra1(ipart).ne.itime) then 790 itra1(ipart) = itra1_tmp(minpart) 791 npoint(ipart) = npoint_tmp(minpart) 792 nclass(ipart) = nclass_tmp(minpart) 793 idt(ipart) = idt_tmp(minpart) 794 itramem(ipart) = itramem_tmp(minpart) 795 itrasplit(ipart) = itrasplit_tmp(minpart) 796 xtra1(ipart) = xtra1_tmp(minpart) 797 ytra1(ipart) = ytra1_tmp(minpart) 798 ztra1(ipart) = ztra1_tmp(minpart) 799 xmass1(ipart,:) = xmass1_tmp(minpart,:) 800 ! Increase numpart, if necessary 801 numpart=max(numpart,ipart) 802 goto 200 ! Storage space has been found, stop searching 803 end if 804 end do 805 200 minpart=minpart+1 806 end do 807 808 deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,itrasplit_tmp,& 809 & xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp) 810 811 ! if (mp_dev_mode) then 812 ! write(*,*) "numpart after: ", numpart 813 ! write(*,FMT='(72("#"))') 814 ! end if 815 816 else 817 ! This routine should only be called by the two participating processes 818 write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part" 819 stop 820 return 821 end if 822 823 ! Measure time for MPI communications 824 !************************************ 825 if (mp_measure_time) call mpif_mtime('commtime',1) 826 827 end subroutine mpif_redist_part 828 829 456 830 subroutine mpif_tm_send_vars 457 831 !*********************************************************************** … … 477 851 if (lroot) then 478 852 call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 479 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)853 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 480 854 if (mp_ierr /= 0) goto 600 481 855 call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 482 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)856 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 483 857 if (mp_ierr /= 0) goto 600 484 858 call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 485 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)859 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 486 860 if (mp_ierr /= 0) goto 600 487 861 call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 488 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)862 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 489 863 if (mp_ierr /= 0) goto 600 490 864 call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 491 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)865 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 492 866 if (mp_ierr /= 0) goto 600 493 867 494 868 ! int2 495 869 call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,& 496 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)870 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr) 497 871 if (mp_ierr /= 0) goto 600 498 872 499 873 ! 
real 500 874 call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,& 501 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)875 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 502 876 if (mp_ierr /= 0) goto 600 503 877 call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,& 504 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)878 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 505 879 if (mp_ierr /= 0) goto 600 506 880 call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,& 507 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)881 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 508 882 if (mp_ierr /= 0) goto 600 509 883 510 884 call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,& 511 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)885 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 512 886 if (mp_ierr /= 0) goto 600 513 887 call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,& 514 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)888 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 515 889 if (mp_ierr /= 0) goto 600 516 890 call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,& 517 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)891 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 518 892 if (mp_ierr /= 0) goto 600 519 893 520 894 call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,& 521 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)895 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 522 896 if (mp_ierr /= 0) goto 600 523 897 call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,& 524 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)898 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 525 899 if (mp_ierr /= 0) goto 600 526 900 call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,& 527 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)901 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 528 902 if (mp_ierr /= 0) goto 600 529 903 530 904 do i=1,nspec 531 905 call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,& 532 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)906 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 533 907 if (mp_ierr /= 0) goto 600 534 908 end do … … 537 911 ! integers 538 912 call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,& 539 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)913 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 540 914 if (mp_ierr /= 0) goto 600 541 915 call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,& 542 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)916 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 543 917 if (mp_ierr /= 0) goto 600 544 918 call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,& 545 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)919 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 546 920 if (mp_ierr /= 0) goto 600 547 921 call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,& 548 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)922 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 549 923 if (mp_ierr /= 0) goto 600 550 924 call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,& 551 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)925 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 552 926 if (mp_ierr /= 0) goto 600 553 927 554 928 ! int2 555 929 call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,& 556 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)930 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr) 557 931 if (mp_ierr /= 0) goto 600 558 932 559 933 ! 
reals 560 934 call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,& 561 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)935 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 562 936 if (mp_ierr /= 0) goto 600 563 937 call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,& 564 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)938 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 565 939 if (mp_ierr /= 0) goto 600 566 940 call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,& 567 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)941 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 568 942 if (mp_ierr /= 0) goto 600 569 943 570 944 call MPI_SCATTER(us,numpart_mpi,mp_sp,us,& 571 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)945 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 572 946 if (mp_ierr /= 0) goto 600 573 947 call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,& 574 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)948 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 575 949 if (mp_ierr /= 0) goto 600 576 950 call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,& 577 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)951 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 578 952 if (mp_ierr /= 0) goto 600 579 953 580 954 call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,& 581 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)955 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 582 956 if (mp_ierr /= 0) goto 600 583 957 call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,& 584 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)958 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 585 959 if (mp_ierr /= 0) goto 600 586 960 call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,& 587 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)961 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 588 962 if (mp_ierr /= 0) goto 600 589 963 590 964 do i=1,nspec 591 965 call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),& 592 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)966 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 593 967 if (mp_ierr /= 0) goto 600 594 968 end do … … 1145 1519 1146 1520 ! cloud water/ice: 1147 if (readclouds_nest(i)) then1148 1149 1150 call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)1151 if (mp_ierr /= 0) goto 6001152 end if1521 if (readclouds_nest(i)) then 1522 ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) 1523 ! if (mp_ierr /= 0) goto 600 1524 call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) 1525 if (mp_ierr /= 0) goto 600 1526 end if 1153 1527 1154 1528 ! 2D fields … … 1398 1772 integer :: d2s1 = nxmax*nymax 1399 1773 integer :: d2s2 = nxmax*nymax*maxspec 1400 1774 !integer :: d1_size1 = maxwf 1401 1775 1402 1776 ! 
integer :: d3s1,d3s2,d2s1,d2s2 … … 1645 2019 if (dest.eq.id_read) cycle 1646 2020 do k=1, numbnests 1647 i=dest*nvar_async1648 call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1649 if (mp_ierr /= 0) goto 6001650 i=i+11651 call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1652 if (mp_ierr /= 0) goto 6001653 i=i+11654 call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1655 if (mp_ierr /= 0) goto 6001656 i=i+11657 call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1658 if (mp_ierr /= 0) goto 6001659 i=i+11660 call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1661 if (mp_ierr /= 0) goto 6001662 i=i+11663 call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1664 if (mp_ierr /= 0) goto 6001665 i=i+11666 call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1667 if (mp_ierr /= 0) goto 6001668 i=i+11669 call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1670 if (mp_ierr /= 0) goto 6001671 i=i+11672 call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1673 if (mp_ierr /= 0) goto 6001674 i=i+11675 call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1676 if (mp_ierr /= 0) goto 6001677 i=i+11678 call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1679 i=i+11680 if (mp_ierr /= 0) goto 6001681 call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1682 if (mp_ierr /= 0) goto 6001683 i=i+11684 call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1685 if (mp_ierr /= 0) goto 6001686 i=i+11687 call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1688 if (mp_ierr /= 0) goto 6001689 i=i+11690 call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1691 if (mp_ierr /= 0) goto 6001692 i=i+12021 i=dest*nvar_async 2022 call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2023 if (mp_ierr /= 0) goto 600 2024 i=i+1 2025 call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2026 if (mp_ierr /= 0) goto 600 2027 i=i+1 2028 call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2029 if (mp_ierr /= 0) goto 600 2030 i=i+1 2031 call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2032 if (mp_ierr /= 0) goto 600 2033 i=i+1 2034 call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2035 if (mp_ierr /= 0) goto 600 2036 i=i+1 2037 call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2038 if (mp_ierr /= 0) goto 600 2039 i=i+1 2040 call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2041 if (mp_ierr /= 0) goto 600 2042 i=i+1 2043 call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2044 if (mp_ierr /= 0) goto 600 2045 i=i+1 2046 call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2047 if (mp_ierr /= 0) goto 600 2048 i=i+1 2049 call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2050 if (mp_ierr /= 0) goto 600 2051 i=i+1 2052 call 
MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2053 i=i+1 2054 if (mp_ierr /= 0) goto 600 2055 call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2056 if (mp_ierr /= 0) goto 600 2057 i=i+1 2058 call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2059 if (mp_ierr /= 0) goto 600 2060 i=i+1 2061 call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2062 if (mp_ierr /= 0) goto 600 2063 i=i+1 2064 call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2065 if (mp_ierr /= 0) goto 600 2066 i=i+1 1693 2067 ! 15 1694 call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1695 if (mp_ierr /= 0) goto 6001696 i=i+11697 call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1698 if (mp_ierr /= 0) goto 6001699 i=i+11700 call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1701 if (mp_ierr /= 0) goto 6001702 i=i+11703 call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1704 if (mp_ierr /= 0) goto 6001705 i=i+11706 call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1707 if (mp_ierr /= 0) goto 6001708 i=i+11709 call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1710 if (mp_ierr /= 0) goto 6001711 i=i+11712 call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1713 if (mp_ierr /= 0) goto 6001714 i=i+11715 call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1716 if (mp_ierr /= 0) goto 6001717 i=i+11718 call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1719 if (mp_ierr /= 0) goto 6001720 i=i+11721 call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1722 if (mp_ierr /= 0) goto 6002068 call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2069 if (mp_ierr /= 0) goto 600 2070 i=i+1 2071 call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2072 if (mp_ierr /= 0) goto 600 2073 i=i+1 2074 call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2075 if (mp_ierr /= 0) goto 600 2076 i=i+1 2077 call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2078 if (mp_ierr /= 0) goto 600 2079 i=i+1 2080 call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2081 if (mp_ierr /= 0) goto 600 2082 i=i+1 2083 call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2084 if (mp_ierr /= 0) goto 600 2085 i=i+1 2086 call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2087 if (mp_ierr /= 0) goto 600 2088 i=i+1 2089 call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2090 if (mp_ierr /= 0) goto 600 2091 i=i+1 2092 call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2093 if (mp_ierr /= 0) goto 600 2094 i=i+1 2095 call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2096 if (mp_ierr /= 0) goto 600 1723 2097 ! 25 1724 2098 1725 2099 ! Send cloud water if it exists. 
Increment counter always (as on receiving end) 1726 if (readclouds) then 1727 i=i+1 1728 call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,& 1729 &MPI_COMM_WORLD,reqs(i),mp_ierr) 1730 if (mp_ierr /= 0) goto 600 1731 end if 2100 if (readclouds) then 2101 i=i+1 2102 call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,& 2103 &MPI_COMM_WORLD,reqs(i),mp_ierr) 2104 if (mp_ierr /= 0) goto 600 2105 end if 2106 end do 1732 2107 end do 1733 end do1734 2108 1735 2109 if (mp_measure_time) call mpif_mtime('commtime',1) … … 1810 2184 do k=1, numbnests 1811 2185 ! Get MPI tags/requests for communications 1812 j=mp_pid*nvar_async1813 call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1814 &MPI_COMM_WORLD,reqs(j),mp_ierr)1815 if (mp_ierr /= 0) goto 6001816 j=j+11817 call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1818 &MPI_COMM_WORLD,reqs(j),mp_ierr)1819 if (mp_ierr /= 0) goto 6001820 j=j+11821 call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1822 &MPI_COMM_WORLD,reqs(j),mp_ierr)1823 if (mp_ierr /= 0) goto 6001824 j=j+11825 call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1826 &MPI_COMM_WORLD,reqs(j),mp_ierr)1827 if (mp_ierr /= 0) goto 6001828 j=j+11829 call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1830 &MPI_COMM_WORLD,reqs(j),mp_ierr)1831 if (mp_ierr /= 0) goto 6001832 j=j+11833 call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1834 &MPI_COMM_WORLD,reqs(j),mp_ierr)1835 if (mp_ierr /= 0) goto 6001836 j=j+11837 call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&1838 &MPI_COMM_WORLD,reqs(j),mp_ierr)1839 if (mp_ierr /= 0) goto 6001840 j=j+11841 call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&1842 &MPI_COMM_WORLD,reqs(j),mp_ierr)1843 if (mp_ierr /= 0) goto 6001844 j=j+11845 call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1846 &MPI_COMM_WORLD,reqs(j),mp_ierr)1847 if (mp_ierr /= 0) goto 6001848 j=j+11849 call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1850 &MPI_COMM_WORLD,reqs(j),mp_ierr)1851 if (mp_ierr /= 0) goto 6001852 j=j+11853 call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&1854 &MPI_COMM_WORLD,reqs(j),mp_ierr)1855 if (mp_ierr /= 0) goto 6001856 j=j+11857 call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1858 &MPI_COMM_WORLD,reqs(j),mp_ierr)1859 if (mp_ierr /= 0) goto 6001860 j=j+11861 call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&1862 &MPI_COMM_WORLD,reqs(j),mp_ierr)1863 if (mp_ierr /= 0) goto 6001864 j=j+11865 call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1866 &MPI_COMM_WORLD,reqs(j),mp_ierr)1867 if (mp_ierr /= 0) goto 6001868 j=j+11869 call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1870 &MPI_COMM_WORLD,reqs(j),mp_ierr)1871 if (mp_ierr /= 0) goto 6001872 j=j+11873 call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1874 &MPI_COMM_WORLD,reqs(j),mp_ierr)1875 if (mp_ierr /= 0) goto 6001876 j=j+11877 call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1878 &MPI_COMM_WORLD,reqs(j),mp_ierr)1879 if (mp_ierr /= 0) goto 6001880 j=j+11881 call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1882 &MPI_COMM_WORLD,reqs(j),mp_ierr)1883 if (mp_ierr /= 0) goto 6001884 j=j+11885 call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1886 &MPI_COMM_WORLD,reqs(j),mp_ierr)1887 if (mp_ierr /= 0) goto 6001888 j=j+11889 call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1890 
&MPI_COMM_WORLD,reqs(j),mp_ierr)1891 if (mp_ierr /= 0) goto 6001892 call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1893 &MPI_COMM_WORLD,reqs(j),mp_ierr)1894 if (mp_ierr /= 0) goto 6001895 j=j+11896 call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1897 &MPI_COMM_WORLD,reqs(j),mp_ierr)1898 if (mp_ierr /= 0) goto 6001899 j=j+11900 call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1901 &MPI_COMM_WORLD,reqs(j),mp_ierr)1902 if (mp_ierr /= 0) goto 6001903 j=j+11904 call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1905 &MPI_COMM_WORLD,reqs(j),mp_ierr)1906 if (mp_ierr /= 0) goto 6001907 j=j+11908 call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1909 &MPI_COMM_WORLD,reqs(j),mp_ierr)1910 if (mp_ierr /= 0) goto 6002186 j=mp_pid*nvar_async 2187 call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2188 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2189 if (mp_ierr /= 0) goto 600 2190 j=j+1 2191 call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2192 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2193 if (mp_ierr /= 0) goto 600 2194 j=j+1 2195 call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2196 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2197 if (mp_ierr /= 0) goto 600 2198 j=j+1 2199 call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2200 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2201 if (mp_ierr /= 0) goto 600 2202 j=j+1 2203 call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2204 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2205 if (mp_ierr /= 0) goto 600 2206 j=j+1 2207 call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2208 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2209 if (mp_ierr /= 0) goto 600 2210 j=j+1 2211 call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,& 2212 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2213 if (mp_ierr /= 0) goto 600 2214 j=j+1 2215 call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,& 2216 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2217 if (mp_ierr /= 0) goto 600 2218 j=j+1 2219 call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2220 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2221 if (mp_ierr /= 0) goto 600 2222 j=j+1 2223 call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2224 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2225 if (mp_ierr /= 0) goto 600 2226 j=j+1 2227 call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,& 2228 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2229 if (mp_ierr /= 0) goto 600 2230 j=j+1 2231 call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2232 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2233 if (mp_ierr /= 0) goto 600 2234 j=j+1 2235 call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,& 2236 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2237 if (mp_ierr /= 0) goto 600 2238 j=j+1 2239 call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2240 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2241 if (mp_ierr /= 0) goto 600 2242 j=j+1 2243 call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2244 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2245 if (mp_ierr /= 0) goto 600 2246 j=j+1 2247 call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2248 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2249 if (mp_ierr /= 0) goto 600 2250 j=j+1 2251 call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2252 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2253 if (mp_ierr /= 0) goto 600 2254 j=j+1 2255 call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2256 &MPI_COMM_WORLD,reqs(j),mp_ierr) 
2257 if (mp_ierr /= 0) goto 600 2258 j=j+1 2259 call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2260 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2261 if (mp_ierr /= 0) goto 600 2262 j=j+1 2263 call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2264 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2265 if (mp_ierr /= 0) goto 600 2266 call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2267 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2268 if (mp_ierr /= 0) goto 600 2269 j=j+1 2270 call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2271 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2272 if (mp_ierr /= 0) goto 600 2273 j=j+1 2274 call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2275 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2276 if (mp_ierr /= 0) goto 600 2277 j=j+1 2278 call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2279 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2280 if (mp_ierr /= 0) goto 600 2281 j=j+1 2282 call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2283 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2284 if (mp_ierr /= 0) goto 600 1911 2285 1912 2286 ! Post request for clwc. These data are possibly not sent, request must then be cancelled 1913 2287 ! For now assume that data at all steps either have or do not have water 1914 if (readclouds) then1915 j=j+11916 call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&1917 &MPI_COMM_WORLD,reqs(j),mp_ierr)1918 if (mp_ierr /= 0) goto 6001919 end if1920 end do2288 if (readclouds) then 2289 j=j+1 2290 call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,& 2291 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2292 if (mp_ierr /= 0) goto 600 2293 end if 2294 end do 1921 2295 1922 2296 if (mp_measure_time) call mpif_mtime('commtime',1) … … 2349 2723 write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',& 2350 2724 & mp_conccalc_time_total 2351 2352 2353 2354 2725 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',& 2726 ! & mp_vt_wtime_total 2727 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',& 2728 ! & mp_vt_time_total 2355 2729 ! NB: the 'flush' function is possibly a gfortran-specific extension 2356 2730 call flush() … … 2388 2762 2389 2763 2390 2391 2392 2393 2764 end subroutine mpif_finalize 2765 2766 2767 subroutine get_lun(my_lun) 2394 2768 !*********************************************************************** 2395 2769 ! get_lun: … … 2397 2771 !*********************************************************************** 2398 2772 2399 2400 2401 2402 2403 2404 2405 !*********************************************************************** 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2773 implicit none 2774 2775 integer, intent(inout) :: my_lun 2776 integer, save :: free_lun=100 2777 logical :: exists, iopen 2778 2779 !*********************************************************************** 2780 2781 loop1: do 2782 inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen) 2783 if (exists .and. .not.iopen) exit loop1 2784 free_lun = free_lun+1 2785 end do loop1 2786 my_lun = free_lun 2787 2788 end subroutine get_lun 2789 2790 2791 subroutine write_data_dbg(array_in, array_name, tstep, ident) 2418 2792 !*********************************************************************** 2419 2793 ! 
Write one-dimensional arrays to file (for debugging purposes) 2420 2794 !*********************************************************************** 2421 implicit none 2422 2423 real, intent(in), dimension(:) :: array_in 2424 integer, intent(in) :: tstep 2425 integer :: lios 2426 character(LEN=*), intent(in) :: ident, array_name 2427 2428 character(LEN=8) :: c_ts 2429 character(LEN=40) :: fn_1, fn_2 2430 2431 !*********************************************************************** 2432 2433 write(c_ts, FMT='(I8.8,BZ)') tstep 2434 fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident) 2435 write(c_ts, FMT='(I2.2,BZ)') mp_np 2436 fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat' 2437 2438 call get_lun(dat_lun) 2439 open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', & 2440 FORM='UNFORMATTED', STATUS='REPLACE') 2441 write(UNIT=dat_lun, IOSTAT=lios) array_in 2442 close(UNIT=dat_lun) 2443 2444 end subroutine write_data_dbg 2795 implicit none 2796 2797 real, intent(in), dimension(:) :: array_in 2798 integer, intent(in) :: tstep 2799 integer :: lios 2800 character(LEN=*), intent(in) :: ident, array_name 2801 2802 character(LEN=8) :: c_ts 2803 character(LEN=40) :: fn_1, fn_2 2804 2805 !*********************************************************************** 2806 2807 write(c_ts, FMT='(I8.8,BZ)') tstep 2808 fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident) 2809 write(c_ts, FMT='(I2.2,BZ)') mp_np 2810 fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat' 2811 2812 call get_lun(dat_lun) 2813 open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', & 2814 FORM='UNFORMATTED', STATUS='REPLACE') 2815 write(UNIT=dat_lun, IOSTAT=lios) array_in 2816 close(UNIT=dat_lun) 2817 2818 end subroutine write_data_dbg 2819 2820 2821 subroutine set_fields_synthetic() 2822 !******************************************************************************* 2823 ! DESCRIPTION 2824 ! Set all meteorological fields to synthetic (usually constant/homogeneous) 2825 ! values. 2826 ! Used for validation and error-checking 2827 ! 2828 ! NOTE 2829 ! This version uses asynchronious communications. 2830 ! 2831 ! VARIABLES 2832 ! 2833 ! 2834 ! 2835 !******************************************************************************* 2836 use com_mod 2837 2838 implicit none 2839 2840 integer :: li=1, ui=2 ! wfmem indices (i.e, operate on entire field) 2841 2842 if (.not.lmp_sync) ui=3 2843 2844 2845 ! Variables transferred at initialization only 2846 !********************************************* 2847 ! readclouds=readclouds_ 2848 oro(:,:)=0.0 2849 excessoro(:,:)=0.0 2850 lsm(:,:)=0.0 2851 xlanduse(:,:,:)=0.0 2852 ! wftime 2853 ! numbwf 2854 ! nmixz 2855 ! height 2856 2857 ! Time-varying fields: 2858 uu(:,:,:,li:ui) = 10.0 2859 vv(:,:,:,li:ui) = 0.0 2860 uupol(:,:,:,li:ui) = 10.0 2861 vvpol(:,:,:,li:ui)=0.0 2862 ww(:,:,:,li:ui)=0. 2863 tt(:,:,:,li:ui)=300. 2864 rho(:,:,:,li:ui)=1.3 2865 drhodz(:,:,:,li:ui)=.0 2866 tth(:,:,:,li:ui)=0.0 2867 qvh(:,:,:,li:ui)=1.0 2868 qv(:,:,:,li:ui)=1.0 2869 2870 pv(:,:,:,li:ui)=1.0 2871 clouds(:,:,:,li:ui)=0 2872 2873 clwc(:,:,:,li:ui)=0.0 2874 ciwc(:,:,:,li:ui)=0.0 2875 2876 ! 2D fields 2877 2878 cloudsh(:,:,li:ui)=0 2879 vdep(:,:,:,li:ui)=0.0 2880 ps(:,:,:,li:ui)=1.0e5 2881 sd(:,:,:,li:ui)=0.0 2882 tcc(:,:,:,li:ui)=0.0 2883 tt2(:,:,:,li:ui)=300. 2884 td2(:,:,:,li:ui)=250. 2885 lsprec(:,:,:,li:ui)=0.0 2886 convprec(:,:,:,li:ui)=0.0 2887 ustar(:,:,:,li:ui)=1.0 2888 wstar(:,:,:,li:ui)=1.0 2889 hmix(:,:,:,li:ui)=10000. 
2890 tropopause(:,:,:,li:ui)=10000. 2891 oli(:,:,:,li:ui)=0.01 2892 2893 end subroutine set_fields_synthetic 2445 2894 2446 2895 end module mpi_mod
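
The load-balancing step added in this changeset (mpif_calculate_part_redist) can be hard to follow from the diff alone: each process reports its local numpart, process IDs are sorted by particle count, and the heaviest process is then paired with the lightest, the second heaviest with the second lightest, and so on, transferring half of the difference whenever the sender holds more than mp_min_redist particles and the relative imbalance exceeds mp_redist_fract. The following standalone sketch is not part of the changeset; it only mimics that decision step outside MPI, and the per-process particle counts are invented for illustration.

! Standalone sketch (not part of the changeset) of the pairing step used by
! mpif_calculate_part_redist. Particle counts below are hypothetical.
program redist_pairing_sketch
  implicit none
  integer, parameter :: np = 5                  ! number of particle processes
  integer, parameter :: mp_min_redist = 100000  ! same thresholds as the module
  real,    parameter :: mp_redist_fract = 0.2
  integer :: numparticles_mpi(0:np-1), idx_arr(0:np-1)
  integer :: i, jj, m, tmp, num_trans

  ! Hypothetical per-process particle counts (index = process ID)
  numparticles_mpi = (/ 900000, 120000, 400000, 50000, 700000 /)

  ! Sort process IDs by particle count, lowest first
  do i = 0, np-1
    idx_arr(i) = i
  end do
  do i = 0, np-2
    do jj = i+1, np-1
      if (numparticles_mpi(idx_arr(jj)) < numparticles_mpi(idx_arr(i))) then
        tmp = idx_arr(i); idx_arr(i) = idx_arr(jj); idx_arr(jj) = tmp
      end if
    end do
  end do

  ! Pair heaviest with lightest and report what would be transferred;
  ! with an odd np the median process is left unchanged
  m = np-1
  do i = 0, np/2-1
    num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
    if (numparticles_mpi(idx_arr(m)) > mp_min_redist .and. &
        real(num_trans)/real(numparticles_mpi(idx_arr(m))) > mp_redist_fract) then
      write(*,'(A,I0,A,I0,A,I0)') 'process ', idx_arr(m), ' would send ', &
           num_trans/2, ' particles to process ', idx_arr(i)
    else
      write(*,'(A,I0,A,I0)') 'no transfer between processes ', idx_arr(m), &
           ' and ', idx_arr(i)
    end if
    m = m-1
  end do
end program redist_pairing_sketch

In the actual module the chosen pairs then call mpif_redist_part, which sends the trailing num_trans/2 particles of the source process with point-to-point MPI_SEND/MPI_RECV and inserts them into vacant slots on the destination.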