Changes in / [d404d98:46706c7] in flexpart.git
- Location: src
- Files: 3 edited
src/mpi_mod.f90
r861805a r0f7835d 90 90 ! MPI tags/requests for send/receive operation 91 91 integer :: tm1 92 integer, parameter :: nvar_async=26 92 integer, parameter :: nvar_async=26 !27 !29 :DBG: 93 93 !integer, dimension(:), allocatable :: tags 94 94 integer, dimension(:), allocatable :: reqs 95 96 ! Status array used for certain MPI operations (MPI_RECV)97 integer, dimension(MPI_STATUS_SIZE) :: mp_status98 95 99 96 … … 122 119 logical, parameter :: mp_dev_mode = .false. 123 120 logical, parameter :: mp_dbg_out = .false. 124 logical, parameter :: mp_time_barrier=. false.121 logical, parameter :: mp_time_barrier=.true. 125 122 logical, parameter :: mp_measure_time=.false. 126 123 logical, parameter :: mp_exact_numpart=.true. … … 152 149 integer, private :: dat_lun 153 150 154 ! Temporary arrays for particles (allocated and deallocated as needed)155 integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, &156 & itramem_tmp, itrasplit_tmp157 real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp158 real, allocatable, dimension(:) :: ztra1_tmp159 real, allocatable, dimension(:,:) :: xmass1_tmp160 161 ! mp_redist_fract Exchange particles between processes if relative difference162 ! is larger. A good value is between 0.0 and 0.5163 ! mp_maxpart_factor Allocate more memory per process than strictly needed164 ! (safety factor). Recommended value between 1.5 and 2.5165 ! mp_min_redist Do not redistribute particles if below this limit166 real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5167 integer,parameter :: mp_min_redist=100000168 169 170 151 contains 171 152 … … 214 195 if (dep_prec==dp) then 215 196 mp_cp = MPI_REAL8 216 ! TODO: write info message for serial version as well197 ! TODO: write info message for serial version as well 217 198 if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields' 218 199 else if (dep_prec==sp) then … … 261 242 !********************************************************************** 262 243 244 ! id_read = min(mp_np-1, 1) 263 245 id_read = mp_np-1 264 246 … … 329 311 330 312 ! Set maxpart per process 331 ! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution 332 maxpart_mpi=int(m p_maxpart_factor*maxpart/mp_partgroup_np)313 if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1 314 maxpart_mpi=int(maxpart/mp_partgroup_np)+addmaxpart 333 315 334 316 ! Do not allocate particle data arrays for readwind process … … 339 321 ! Set random seed for each non-root process 340 322 if (mp_pid.gt.0) then 323 ! if (mp_pid.ge.0) then 324 ! call system_clock(s) 341 325 s = 244 342 326 mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729)) 343 327 end if 328 if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed 344 329 if (mp_dbg_mode) then 330 ! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20 345 331 mp_seed=0 346 332 if (lroot) write(*,*) 'MPI: setting seed=0' … … 468 454 469 455 470 subroutine mpif_send_part_properties(num_part) 471 !*********************************************************************** 472 ! Distribute particle properties from root to all processes. 473 ! 474 ! Used by init_domainfill_mpi 475 ! 476 ! Variables: 477 ! 478 ! num_part input, number of particles per process (rounded up) 456 subroutine mpif_tm_send_vars 457 !*********************************************************************** 458 ! Distribute particle variables from pid0 to all processes. 459 ! Called from timemanager 460 ! 
*NOT IN USE* at the moment, but can be useful for debugging 479 461 ! 480 462 !*********************************************************************** … … 483 465 implicit none 484 466 485 integer,intent(in) :: num_part 486 integer :: i,jj, addone 467 integer :: i 487 468 488 469 ! Time for MPI communications … … 490 471 if (mp_measure_time) call mpif_mtime('commtime',0) 491 472 492 ! Distribute variables, send from pid 0 to other processes (including itself)493 !****************************************************************************494 495 call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,&496 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)497 if (mp_ierr /= 0) goto 600498 call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,&499 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)500 if (mp_ierr /= 0) goto 600501 call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,&502 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)503 if (mp_ierr /= 0) goto 600504 call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,&505 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)506 if (mp_ierr /= 0) goto 600507 call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,&508 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)509 if (mp_ierr /= 0) goto 600510 call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,&511 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)512 if (mp_ierr /= 0) goto 600513 call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,&514 &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)515 if (mp_ierr /= 0) goto 600516 call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,&517 &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)518 if (mp_ierr /= 0) goto 600519 call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,&520 &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)521 if (mp_ierr /= 0) goto 600522 do i=1,nspec523 call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),&524 &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)525 if (mp_ierr /= 0) goto 600526 end do527 528 if (mp_measure_time) call mpif_mtime('commtime',1)529 write(*,*) "PID ", mp_partid, "ending MPI_Scatter operation"530 531 goto 601532 533 600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr534 stop535 536 ! After the transfer of particles it it possible that the value of537 ! "numpart" is larger than the actual, so we reduce it if there are538 ! invalid particles at the end of the arrays539 601 do i=num_part, 1, -1540 if (itra1(i).eq.-999999999) then541 numpart=numpart-1542 else543 exit544 end if545 end do546 547 548 !601 end subroutine mpif_send_part_properties549 end subroutine mpif_send_part_properties550 551 552 subroutine mpif_calculate_part_redist(itime)553 !***********************************************************************554 ! Calculate number of particles to redistribute between processes. This routine555 ! can be called at regular time intervals to keep a level number of556 ! particles on each process.557 !558 ! First, all processes report their local "numpart" to each other, which is559 ! stored in array "numpart_mpi(np)". This array is sorted from low to560 ! high values, along with a corresponding process ID array "idx_arr(np)".561 ! If the relative difference between the highest and lowest "numpart_mpi" value562 ! is above a threshold, particles are transferred from process idx_arr(np-1)563 ! to process idx_arr(0). Repeat for processes idx_arr(np-i) and idx_arr(i)564 ! for all valid i.565 ! Note: If np is an odd number, the process with the median566 ! number of particles is left unchanged567 !568 ! VARIABLES569 ! 
itime input, current time570 !***********************************************************************571 use com_mod572 573 implicit none574 575 integer, intent(in) :: itime576 real :: pmin,z577 integer :: i,jj,nn, num_part=1,m,imin, num_trans578 logical :: first_iter579 integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr580 real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this581 582 ! Exit if running with only 1 process583 !************************************************************************584 if (mp_np.eq.1) return585 586 ! All processes exchange information on number of particles587 !****************************************************************************588 allocate(numparticles_mpi(0:mp_partgroup_np-1), &589 &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))590 591 call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, &592 & 1, MPI_INTEGER, mp_comm_used, mp_ierr)593 594 595 ! Sort from lowest to highest596 ! Initial guess: correct order597 sorted(:) = numparticles_mpi(:)598 do i=0, mp_partgroup_np-1599 idx_arr(i) = i600 end do601 602 ! For each successive element in index array, see if a lower value exists603 do i=0, mp_partgroup_np-2604 pmin=sorted(i)605 imin=idx_arr(i)606 m=i+1607 do jj=m, mp_partgroup_np-1608 if (pmin.le.sorted(jj)) cycle609 z=pmin610 pmin=sorted(jj)611 sorted(jj)=z612 613 nn=imin614 imin=idx_arr(jj)615 idx_arr(jj)=nn616 end do617 sorted(i)=pmin618 idx_arr(i)=imin619 end do620 621 ! For each pair of processes, transfer particles if the difference is above a622 ! limit, and numpart at sending process large enough623 624 m=mp_partgroup_np-1 ! index for last sorted process (most particles)625 do i=0,mp_partgroup_np/2-1626 num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))627 if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then628 if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&629 & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then630 call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)631 end if632 end if633 m=m-1634 end do635 636 deallocate(numparticles_mpi, idx_arr, sorted)637 638 end subroutine mpif_calculate_part_redist639 640 641 subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans)642 !***********************************************************************643 ! Transfer particle properties between two arbitrary processes.644 !645 ! VARIABLES646 !647 ! itime input, current time648 ! src_proc input, ID of source process649 ! dest_proc input, ID of destination process650 ! num_trans input, number of particles to transfer651 !652 !************************************************************************653 use com_mod !TODO: ,only: nclass etc654 655 implicit none656 657 integer, intent(in) :: itime, src_proc, dest_proc, num_trans658 integer :: ll, ul ! lower and upper indices in arrays659 integer :: arr_sz ! size of temporary arrays660 integer :: mtag ! MPI message tag661 integer :: i, j, minpart, ipart, maxnumpart662 663 ! Measure time for MPI communications664 !************************************665 if (mp_measure_time) call mpif_mtime('commtime',0)666 667 ! Send particles668 !***************669 if (mp_partid.eq.src_proc) then670 mtag = 2000671 672 ! Array indices for the transferred particles673 ll=numpart-num_trans+1674 ul=numpart675 676 ! if (mp_dev_mode) then677 ! write(*,FMT='(72("#"))')678 ! write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc679 ! 
write(*,*) "numpart before: ", numpart680 ! end if681 682 call MPI_SEND(nclass(ll:ul),num_trans,&683 & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr)684 685 call MPI_SEND(npoint(ll:ul),num_trans,&686 & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr)687 688 call MPI_SEND(itra1(ll:ul),num_trans, &689 & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr)690 691 call MPI_SEND(idt(ll:ul),num_trans, &692 & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr)693 694 call MPI_SEND(itramem(ll:ul),num_trans, &695 & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr)696 697 call MPI_SEND(itrasplit(ll:ul),num_trans,&698 & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr)699 700 call MPI_SEND(xtra1(ll:ul),num_trans, &701 & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr)702 703 call MPI_SEND(ytra1(ll:ul),num_trans,&704 & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr)705 706 call MPI_SEND(ztra1(ll:ul),num_trans,&707 & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr)708 709 do j=1,nspec710 call MPI_SEND(xmass1(ll:ul,j),num_trans,&711 & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr)712 end do713 714 ! Terminate transferred particles, update numpart715 itra1(ll:ul) = -999999999716 717 numpart = numpart-num_trans718 719 ! if (mp_dev_mode) then720 ! write(*,*) "numpart after: ", numpart721 ! write(*,FMT='(72("#"))')722 ! end if723 724 else if (mp_partid.eq.dest_proc) then725 726 ! Receive particles727 !******************728 mtag = 2000729 ! Array indices for the transferred particles730 ll=numpart+1731 ul=numpart+num_trans732 733 ! if (mp_dev_mode) then734 ! write(*,FMT='(72("#"))')735 ! write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc736 ! write(*,*) "numpart before: ", numpart737 ! end if738 739 ! We could receive the data directly at nclass(ll:ul) etc., but this leaves740 ! vacant spaces at lower indices. Using temporary arrays instead.741 arr_sz = ul-ll+1742 allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),&743 & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),&744 & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),&745 & xmass1_tmp(arr_sz, maxspec))746 747 call MPI_RECV(nclass_tmp,num_trans,&748 & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr)749 750 call MPI_RECV(npoint_tmp,num_trans,&751 & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr)752 753 call MPI_RECV(itra1_tmp,num_trans, &754 & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr)755 756 call MPI_RECV(idt_tmp,num_trans, &757 & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr)758 759 call MPI_RECV(itramem_tmp,num_trans, &760 & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr)761 762 call MPI_RECV(itrasplit_tmp,num_trans,&763 & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr)764 765 call MPI_RECV(xtra1_tmp,num_trans, &766 & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr)767 768 call MPI_RECV(ytra1_tmp,num_trans,&769 & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr)770 771 call MPI_RECV(ztra1_tmp,num_trans,&772 & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr)773 774 do j=1,nspec775 call MPI_RECV(xmass1_tmp(:,j),num_trans,&776 & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr)777 end do778 779 ! This is the maximum value numpart can possibly have after the transfer780 maxnumpart=numpart+num_trans781 782 ! Search for vacant space and copy from temporary storage783 !********************************************************784 minpart=1785 do i=1, num_trans786 ! 
Take into acount that we may have transferred invalid particles787 if (itra1_tmp(minpart).ne.itime) goto 200788 do ipart=minpart,maxnumpart789 if (itra1(ipart).ne.itime) then790 itra1(ipart) = itra1_tmp(minpart)791 npoint(ipart) = npoint_tmp(minpart)792 nclass(ipart) = nclass_tmp(minpart)793 idt(ipart) = idt_tmp(minpart)794 itramem(ipart) = itramem_tmp(minpart)795 itrasplit(ipart) = itrasplit_tmp(minpart)796 xtra1(ipart) = xtra1_tmp(minpart)797 ytra1(ipart) = ytra1_tmp(minpart)798 ztra1(ipart) = ztra1_tmp(minpart)799 xmass1(ipart,:) = xmass1_tmp(minpart,:)800 ! Increase numpart, if necessary801 numpart=max(numpart,ipart)802 goto 200 ! Storage space has been found, stop searching803 end if804 end do805 200 minpart=minpart+1806 end do807 808 deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,itrasplit_tmp,&809 & xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)810 811 ! if (mp_dev_mode) then812 ! write(*,*) "numpart after: ", numpart813 ! write(*,FMT='(72("#"))')814 ! end if815 816 else817 ! This routine should only be called by the two participating processes818 write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part"819 stop820 return821 end if822 823 ! Measure time for MPI communications824 !************************************825 if (mp_measure_time) call mpif_mtime('commtime',1)826 827 end subroutine mpif_redist_part828 829 830 subroutine mpif_tm_send_vars831 !***********************************************************************832 ! Distribute particle variables from pid0 to all processes.833 ! Called from timemanager834 ! *NOT IN USE* at the moment, but can be useful for debugging835 !836 !***********************************************************************837 use com_mod838 839 implicit none840 841 integer :: i842 843 ! Time for MPI communications844 !****************************845 if (mp_measure_time) call mpif_mtime('commtime',0)846 847 473 ! Distribute variables, send from pid 0 to other processes 848 474 !********************************************************************** … … 851 477 if (lroot) then 852 478 call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 853 & 479 &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 854 480 if (mp_ierr /= 0) goto 600 855 481 call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 856 & 482 &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 857 483 if (mp_ierr /= 0) goto 600 858 484 call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 859 & 485 &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 860 486 if (mp_ierr /= 0) goto 600 861 487 call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 862 & 488 &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 863 489 if (mp_ierr /= 0) goto 600 864 490 call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 865 & 491 &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 866 492 if (mp_ierr /= 0) goto 600 867 493 868 494 ! int2 869 495 call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,& 870 & 496 &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr) 871 497 if (mp_ierr /= 0) goto 600 872 498 873 499 ! 
real 874 500 call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,& 875 & 501 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 876 502 if (mp_ierr /= 0) goto 600 877 503 call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,& 878 & 504 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 879 505 if (mp_ierr /= 0) goto 600 880 506 call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,& 881 & 507 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 882 508 if (mp_ierr /= 0) goto 600 883 509 884 510 call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,& 885 & 511 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 886 512 if (mp_ierr /= 0) goto 600 887 513 call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,& 888 & 514 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 889 515 if (mp_ierr /= 0) goto 600 890 516 call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,& 891 & 517 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 892 518 if (mp_ierr /= 0) goto 600 893 519 894 520 call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,& 895 & 521 &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 896 522 if (mp_ierr /= 0) goto 600 897 523 call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,& 898 & 524 &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 899 525 if (mp_ierr /= 0) goto 600 900 526 call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,& 901 & 527 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 902 528 if (mp_ierr /= 0) goto 600 903 529 904 530 do i=1,nspec 905 531 call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,& 906 & 532 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 907 533 if (mp_ierr /= 0) goto 600 908 534 end do … … 911 537 ! integers 912 538 call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,& 913 & 539 &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 914 540 if (mp_ierr /= 0) goto 600 915 541 call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,& 916 & 542 &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 917 543 if (mp_ierr /= 0) goto 600 918 544 call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,& 919 & 545 &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 920 546 if (mp_ierr /= 0) goto 600 921 547 call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,& 922 & 548 &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 923 549 if (mp_ierr /= 0) goto 600 924 550 call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,& 925 & 551 &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 926 552 if (mp_ierr /= 0) goto 600 927 553 928 554 ! int2 929 555 call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,& 930 & 556 &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr) 931 557 if (mp_ierr /= 0) goto 600 932 558 933 559 ! 
reals 934 560 call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,& 935 & 561 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 936 562 if (mp_ierr /= 0) goto 600 937 563 call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,& 938 & 564 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 939 565 if (mp_ierr /= 0) goto 600 940 566 call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,& 941 & 567 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 942 568 if (mp_ierr /= 0) goto 600 943 569 944 570 call MPI_SCATTER(us,numpart_mpi,mp_sp,us,& 945 & 571 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 946 572 if (mp_ierr /= 0) goto 600 947 573 call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,& 948 & 574 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 949 575 if (mp_ierr /= 0) goto 600 950 576 call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,& 951 & 577 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 952 578 if (mp_ierr /= 0) goto 600 953 579 954 580 call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,& 955 & 581 &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 956 582 if (mp_ierr /= 0) goto 600 957 583 call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,& 958 & 584 &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 959 585 if (mp_ierr /= 0) goto 600 960 586 call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,& 961 & 587 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 962 588 if (mp_ierr /= 0) goto 600 963 589 964 590 do i=1,nspec 965 591 call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),& 966 & 592 &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 967 593 if (mp_ierr /= 0) goto 600 968 594 end do … … 1519 1145 1520 1146 ! cloud water/ice: 1521 1522 ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)1523 ! if (mp_ierr /= 0) goto 6001524 1525 1526 1147 if (readclouds_nest(i)) then 1148 ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) 1149 ! if (mp_ierr /= 0) goto 600 1150 call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) 1151 if (mp_ierr /= 0) goto 600 1152 end if 1527 1153 1528 1154 ! 2D fields … … 1772 1398 integer :: d2s1 = nxmax*nymax 1773 1399 integer :: d2s2 = nxmax*nymax*maxspec 1774 !integer :: d1_size1 = maxwf1400 !integer :: d1_size1 = maxwf 1775 1401 1776 1402 ! 
integer :: d3s1,d3s2,d2s1,d2s2 … … 2019 1645 if (dest.eq.id_read) cycle 2020 1646 do k=1, numbnests 2021 i=dest*nvar_async 2022 call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2023 if (mp_ierr /= 0) goto 600 1647 i=dest*nvar_async 1648 call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1649 if (mp_ierr /= 0) goto 600 1650 i=i+1 1651 call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1652 if (mp_ierr /= 0) goto 600 1653 i=i+1 1654 call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1655 if (mp_ierr /= 0) goto 600 1656 i=i+1 1657 call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1658 if (mp_ierr /= 0) goto 600 1659 i=i+1 1660 call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1661 if (mp_ierr /= 0) goto 600 1662 i=i+1 1663 call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1664 if (mp_ierr /= 0) goto 600 1665 i=i+1 1666 call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1667 if (mp_ierr /= 0) goto 600 1668 i=i+1 1669 call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1670 if (mp_ierr /= 0) goto 600 1671 i=i+1 1672 call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1673 if (mp_ierr /= 0) goto 600 1674 i=i+1 1675 call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1676 if (mp_ierr /= 0) goto 600 1677 i=i+1 1678 call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1679 i=i+1 1680 if (mp_ierr /= 0) goto 600 1681 call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1682 if (mp_ierr /= 0) goto 600 1683 i=i+1 1684 call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1685 if (mp_ierr /= 0) goto 600 1686 i=i+1 1687 call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1688 if (mp_ierr /= 0) goto 600 1689 i=i+1 1690 call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1691 if (mp_ierr /= 0) goto 600 1692 i=i+1 1693 ! 
15 1694 call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1695 if (mp_ierr /= 0) goto 600 1696 i=i+1 1697 call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1698 if (mp_ierr /= 0) goto 600 1699 i=i+1 1700 call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1701 if (mp_ierr /= 0) goto 600 1702 i=i+1 1703 call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1704 if (mp_ierr /= 0) goto 600 1705 i=i+1 1706 call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1707 if (mp_ierr /= 0) goto 600 1708 i=i+1 1709 call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1710 if (mp_ierr /= 0) goto 600 1711 i=i+1 1712 call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1713 if (mp_ierr /= 0) goto 600 1714 i=i+1 1715 call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1716 if (mp_ierr /= 0) goto 600 1717 i=i+1 1718 call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1719 if (mp_ierr /= 0) goto 600 1720 i=i+1 1721 call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1722 if (mp_ierr /= 0) goto 600 1723 ! 25 1724 1725 ! Send cloud water if it exists. Increment counter always (as on receiving end) 1726 if (readclouds) then 2024 1727 i=i+1 2025 call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2026 if (mp_ierr /= 0) goto 600 2027 i=i+1 2028 call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 1728 call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,& 1729 &MPI_COMM_WORLD,reqs(i),mp_ierr) 2029 1730 if (mp_ierr /= 0) goto 600 2030 i=i+1 2031 call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2032 if (mp_ierr /= 0) goto 600 2033 i=i+1 2034 call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2035 if (mp_ierr /= 0) goto 600 2036 i=i+1 2037 call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2038 if (mp_ierr /= 0) goto 600 2039 i=i+1 2040 call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2041 if (mp_ierr /= 0) goto 600 2042 i=i+1 2043 call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2044 if (mp_ierr /= 0) goto 600 2045 i=i+1 2046 call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2047 if (mp_ierr /= 0) goto 600 2048 i=i+1 2049 call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2050 if (mp_ierr /= 0) goto 600 2051 i=i+1 2052 call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2053 i=i+1 2054 if (mp_ierr /= 0) goto 600 2055 call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2056 if (mp_ierr /= 0) goto 600 2057 i=i+1 2058 call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2059 if (mp_ierr /= 0) goto 600 2060 i=i+1 2061 call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2062 if (mp_ierr /= 0) goto 600 2063 i=i+1 2064 call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2065 if (mp_ierr /= 0) goto 600 2066 i=i+1 2067 ! 
15 2068 call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2069 if (mp_ierr /= 0) goto 600 2070 i=i+1 2071 call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2072 if (mp_ierr /= 0) goto 600 2073 i=i+1 2074 call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2075 if (mp_ierr /= 0) goto 600 2076 i=i+1 2077 call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2078 if (mp_ierr /= 0) goto 600 2079 i=i+1 2080 call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2081 if (mp_ierr /= 0) goto 600 2082 i=i+1 2083 call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2084 if (mp_ierr /= 0) goto 600 2085 i=i+1 2086 call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2087 if (mp_ierr /= 0) goto 600 2088 i=i+1 2089 call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2090 if (mp_ierr /= 0) goto 600 2091 i=i+1 2092 call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2093 if (mp_ierr /= 0) goto 600 2094 i=i+1 2095 call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2096 if (mp_ierr /= 0) goto 600 2097 ! 25 2098 2099 ! Send cloud water if it exists. Increment counter always (as on receiving end) 2100 if (readclouds) then 2101 i=i+1 2102 call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,& 2103 &MPI_COMM_WORLD,reqs(i),mp_ierr) 2104 if (mp_ierr /= 0) goto 600 2105 end if 2106 end do 1731 end if 2107 1732 end do 1733 end do 2108 1734 2109 1735 if (mp_measure_time) call mpif_mtime('commtime',1) … … 2184 1810 do k=1, numbnests 2185 1811 ! 
Get MPI tags/requests for communications 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 1812 j=mp_pid*nvar_async 1813 call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 1814 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1815 if (mp_ierr /= 0) goto 600 1816 j=j+1 1817 call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 1818 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1819 if (mp_ierr /= 0) goto 600 1820 j=j+1 1821 call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 1822 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1823 if (mp_ierr /= 0) goto 600 1824 j=j+1 1825 call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 1826 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1827 if (mp_ierr /= 0) goto 600 1828 j=j+1 1829 call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 1830 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1831 if (mp_ierr /= 0) goto 600 1832 j=j+1 1833 call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 1834 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1835 if (mp_ierr /= 0) goto 600 1836 j=j+1 1837 call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,& 1838 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1839 if (mp_ierr /= 0) goto 600 1840 j=j+1 1841 call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,& 1842 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1843 if (mp_ierr /= 0) goto 600 1844 j=j+1 1845 call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 1846 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1847 if (mp_ierr /= 0) goto 600 1848 j=j+1 1849 call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 1850 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1851 if (mp_ierr /= 0) goto 600 1852 j=j+1 1853 call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,& 1854 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1855 if (mp_ierr /= 0) goto 600 1856 j=j+1 1857 call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1858 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1859 if (mp_ierr /= 0) goto 600 1860 j=j+1 1861 call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,& 1862 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1863 if (mp_ierr /= 0) goto 600 1864 j=j+1 1865 call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1866 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1867 if (mp_ierr /= 0) goto 600 1868 j=j+1 1869 call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1870 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1871 if (mp_ierr /= 0) goto 600 1872 j=j+1 1873 call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1874 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1875 if (mp_ierr /= 0) goto 600 1876 j=j+1 1877 call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1878 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1879 if (mp_ierr /= 0) goto 600 1880 j=j+1 1881 call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1882 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1883 if (mp_ierr /= 0) goto 600 1884 j=j+1 1885 call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1886 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1887 if (mp_ierr /= 0) goto 600 1888 j=j+1 1889 call 
MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1890 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1891 if (mp_ierr /= 0) goto 600 1892 call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1893 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1894 if (mp_ierr /= 0) goto 600 1895 j=j+1 1896 call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1897 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1898 if (mp_ierr /= 0) goto 600 1899 j=j+1 1900 call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1901 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1902 if (mp_ierr /= 0) goto 600 1903 j=j+1 1904 call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1905 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1906 if (mp_ierr /= 0) goto 600 1907 j=j+1 1908 call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 1909 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1910 if (mp_ierr /= 0) goto 600 2285 1911 2286 1912 ! Post request for clwc. These data are possibly not sent, request must then be cancelled 2287 1913 ! For now assume that data at all steps either have or do not have water 2288 2289 2290 2291 2292 2293 2294 1914 if (readclouds) then 1915 j=j+1 1916 call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,& 1917 &MPI_COMM_WORLD,reqs(j),mp_ierr) 1918 if (mp_ierr /= 0) goto 600 1919 end if 1920 end do 2295 1921 2296 1922 if (mp_measure_time) call mpif_mtime('commtime',1) … … 2723 2349 write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',& 2724 2350 & mp_conccalc_time_total 2725 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&2726 ! & mp_vt_wtime_total2727 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&2728 ! & mp_vt_time_total2351 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',& 2352 ! & mp_vt_wtime_total 2353 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',& 2354 ! & mp_vt_time_total 2729 2355 ! NB: the 'flush' function is possibly a gfortran-specific extension 2730 2356 call flush() … … 2762 2388 2763 2389 2764 end subroutine mpif_finalize2765 2766 2767 subroutine get_lun(my_lun)2390 end subroutine mpif_finalize 2391 2392 2393 subroutine get_lun(my_lun) 2768 2394 !*********************************************************************** 2769 2395 ! get_lun: … … 2771 2397 !*********************************************************************** 2772 2398 2773 implicit none2774 2775 integer, intent(inout) :: my_lun2776 integer, save :: free_lun=1002777 logical :: exists, iopen2778 2779 !*********************************************************************** 2780 2781 loop1: do2782 inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)2783 if (exists .and. .not.iopen) exit loop12784 free_lun = free_lun+12785 end do loop12786 my_lun = free_lun2787 2788 end subroutine get_lun2789 2790 2791 subroutine write_data_dbg(array_in, array_name, tstep, ident)2399 implicit none 2400 2401 integer, intent(inout) :: my_lun 2402 integer, save :: free_lun=100 2403 logical :: exists, iopen 2404 2405 !*********************************************************************** 2406 2407 loop1: do 2408 inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen) 2409 if (exists .and. .not.iopen) exit loop1 2410 free_lun = free_lun+1 2411 end do loop1 2412 my_lun = free_lun 2413 2414 end subroutine get_lun 2415 2416 2417 subroutine write_data_dbg(array_in, array_name, tstep, ident) 2792 2418 !*********************************************************************** 2793 2419 ! 
Write one-dimensional arrays to file (for debugging purposes) 2794 2420 !*********************************************************************** 2795 implicit none 2796 2797 real, intent(in), dimension(:) :: array_in 2798 integer, intent(in) :: tstep 2799 integer :: lios 2800 character(LEN=*), intent(in) :: ident, array_name 2801 2802 character(LEN=8) :: c_ts 2803 character(LEN=40) :: fn_1, fn_2 2804 2805 !*********************************************************************** 2806 2807 write(c_ts, FMT='(I8.8,BZ)') tstep 2808 fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident) 2809 write(c_ts, FMT='(I2.2,BZ)') mp_np 2810 fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat' 2811 2812 call get_lun(dat_lun) 2813 open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', & 2814 FORM='UNFORMATTED', STATUS='REPLACE') 2815 write(UNIT=dat_lun, IOSTAT=lios) array_in 2816 close(UNIT=dat_lun) 2817 2818 end subroutine write_data_dbg 2819 2820 2821 subroutine set_fields_synthetic() 2822 !******************************************************************************* 2823 ! DESCRIPTION 2824 ! Set all meteorological fields to synthetic (usually constant/homogeneous) 2825 ! values. 2826 ! Used for validation and error-checking 2827 ! 2828 ! NOTE 2829 ! This version uses asynchronious communications. 2830 ! 2831 ! VARIABLES 2832 ! 2833 ! 2834 ! 2835 !******************************************************************************* 2836 use com_mod 2837 2838 implicit none 2839 2840 integer :: li=1, ui=2 ! wfmem indices (i.e, operate on entire field) 2841 2842 if (.not.lmp_sync) ui=3 2843 2844 2845 ! Variables transferred at initialization only 2846 !********************************************* 2847 ! readclouds=readclouds_ 2848 oro(:,:)=0.0 2849 excessoro(:,:)=0.0 2850 lsm(:,:)=0.0 2851 xlanduse(:,:,:)=0.0 2852 ! wftime 2853 ! numbwf 2854 ! nmixz 2855 ! height 2856 2857 ! Time-varying fields: 2858 uu(:,:,:,li:ui) = 10.0 2859 vv(:,:,:,li:ui) = 0.0 2860 uupol(:,:,:,li:ui) = 10.0 2861 vvpol(:,:,:,li:ui)=0.0 2862 ww(:,:,:,li:ui)=0. 2863 tt(:,:,:,li:ui)=300. 2864 rho(:,:,:,li:ui)=1.3 2865 drhodz(:,:,:,li:ui)=.0 2866 tth(:,:,:,li:ui)=0.0 2867 qvh(:,:,:,li:ui)=1.0 2868 qv(:,:,:,li:ui)=1.0 2869 2870 pv(:,:,:,li:ui)=1.0 2871 clouds(:,:,:,li:ui)=0 2872 2873 clwc(:,:,:,li:ui)=0.0 2874 ciwc(:,:,:,li:ui)=0.0 2875 2876 ! 2D fields 2877 2878 cloudsh(:,:,li:ui)=0 2879 vdep(:,:,:,li:ui)=0.0 2880 ps(:,:,:,li:ui)=1.0e5 2881 sd(:,:,:,li:ui)=0.0 2882 tcc(:,:,:,li:ui)=0.0 2883 tt2(:,:,:,li:ui)=300. 2884 td2(:,:,:,li:ui)=250. 2885 lsprec(:,:,:,li:ui)=0.0 2886 convprec(:,:,:,li:ui)=0.0 2887 ustar(:,:,:,li:ui)=1.0 2888 wstar(:,:,:,li:ui)=1.0 2889 hmix(:,:,:,li:ui)=10000. 2890 tropopause(:,:,:,li:ui)=10000. 
2891 oli(:,:,:,li:ui)=0.01 2892 2893 end subroutine set_fields_synthetic 2421 implicit none 2422 2423 real, intent(in), dimension(:) :: array_in 2424 integer, intent(in) :: tstep 2425 integer :: lios 2426 character(LEN=*), intent(in) :: ident, array_name 2427 2428 character(LEN=8) :: c_ts 2429 character(LEN=40) :: fn_1, fn_2 2430 2431 !*********************************************************************** 2432 2433 write(c_ts, FMT='(I8.8,BZ)') tstep 2434 fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident) 2435 write(c_ts, FMT='(I2.2,BZ)') mp_np 2436 fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat' 2437 2438 call get_lun(dat_lun) 2439 open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', & 2440 FORM='UNFORMATTED', STATUS='REPLACE') 2441 write(UNIT=dat_lun, IOSTAT=lios) array_in 2442 close(UNIT=dat_lun) 2443 2444 end subroutine write_data_dbg 2894 2445 2895 2446 end module mpi_mod -
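The mpi_mod.f90 changes above add dynamic load balancing: mpif_calculate_part_redist gathers every process's numpart with MPI_Allgather, sorts the process IDs by particle count, and pairs the most loaded rank with the least loaded one, transferring half the difference through mpif_redist_part whenever the relative imbalance exceeds mp_redist_fract and the sender holds more than mp_min_redist particles. Below is a minimal, serial sketch of that pairing decision only; the particle counts are hypothetical stand-ins for the MPI_Allgather result, and the sketch is an illustration rather than the module's actual code.

program redist_demo
  implicit none
  integer, parameter :: np = 5                   ! number of particle processes (hypothetical)
  real,    parameter :: mp_redist_fract = 0.2    ! threshold on relative imbalance (as in mpi_mod)
  integer, parameter :: mp_min_redist   = 100000 ! no redistribution below this count (as in mpi_mod)
  integer :: numparticles_mpi(0:np-1), idx_arr(0:np-1)
  integer :: i, j, m, tmp, num_trans

  ! Hypothetical per-process particle counts (gathered by MPI_Allgather in the module)
  numparticles_mpi = (/ 250000, 90000, 400000, 120000, 310000 /)

  ! Sort process IDs by ascending particle count (simple selection sort)
  do i = 0, np-1
    idx_arr(i) = i
  end do
  do i = 0, np-2
    do j = i+1, np-1
      if (numparticles_mpi(idx_arr(j)) .lt. numparticles_mpi(idx_arr(i))) then
        tmp = idx_arr(i); idx_arr(i) = idx_arr(j); idx_arr(j) = tmp
      end if
    end do
  end do

  ! Pair the most loaded rank with the least loaded one, working inwards.
  ! Transfer half the difference if the sender holds enough particles and the
  ! relative difference exceeds the threshold (the median rank is left
  ! unchanged when np is odd).
  m = np-1
  do i = 0, np/2-1
    num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
    if (numparticles_mpi(idx_arr(m)) .gt. mp_min_redist .and. &
        real(num_trans)/real(numparticles_mpi(idx_arr(m))) .gt. mp_redist_fract) then
      write(*,'(A,I0,A,I0,A,I0)') 'transfer ', num_trans/2, ' particles: rank ', &
          idx_arr(m), ' -> rank ', idx_arr(i)
    end if
    m = m-1
  end do
end program redist_demo

With the counts above, ranks 2 and 4 would each send half of their excess over ranks 1 and 3, which matches the behaviour described in the comment block of mpif_calculate_part_redist.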
src/releaseparticles_mpi.f90
r861805a r7e52e2e 21 21 22 22 subroutine releaseparticles(itime) 23 ! o24 !*****************************************************************************25 ! *26 ! This subroutine releases particles from the release locations. *27 ! *28 ! It searches for a "vacant" storage space and assigns all particle *29 ! information to that space. A space is vacant either when no particle *30 ! is yet assigned to it, or when it's particle is expired and, thus, *31 ! the storage space is made available to a new particle. *32 ! *33 ! Author: A. Stohl *34 ! *35 ! 29 June 2002 *36 ! *37 ! CHANGES *38 ! 12/2014 eso: MPI version *39 ! *40 !*****************************************************************************41 ! *42 ! Variables: *43 ! itime [s] current time *44 ! ireleasestart, ireleaseend start and end times of all releases *45 ! npart(maxpoint) number of particles to be released in total *46 ! numrel number of particles to be released during this time *47 ! step *48 ! addoneextra particle assigned to MPI process if *49 ! mod(npart(i),mp_partgroup_np) .ne. 0) *50 !*****************************************************************************23 ! o 24 !***************************************************************************** 25 ! * 26 ! This subroutine releases particles from the release locations. * 27 ! * 28 ! It searches for a "vacant" storage space and assigns all particle * 29 ! information to that space. A space is vacant either when no particle * 30 ! is yet assigned to it, or when it's particle is expired and, thus, * 31 ! the storage space is made available to a new particle. * 32 ! * 33 ! Author: A. Stohl * 34 ! * 35 ! 29 June 2002 * 36 ! * 37 ! CHANGES * 38 ! 12/2014 eso: MPI version * 39 ! * 40 !***************************************************************************** 41 ! * 42 ! Variables: * 43 ! itime [s] current time * 44 ! ireleasestart, ireleaseend start and end times of all releases * 45 ! npart(maxpoint) number of particles to be released in total * 46 ! numrel number of particles to be released during this time * 47 ! step * 48 ! addpart extra particle assigned to MPI process if * 49 ! mod(npart(i),mp_partgroup_np) .ne. 0) * 50 !***************************************************************************** 51 51 52 52 use point_mod … … 59 59 implicit none 60 60 61 !real xaux,yaux,zaux,ran1,rfraction,xmasssave(maxpoint)61 !real xaux,yaux,zaux,ran1,rfraction,xmasssave(maxpoint) 62 62 real :: xaux,yaux,zaux,rfraction 63 63 real :: topo,rhoaux(2),r,t,rhoout,ddx,ddy,rddx,rddy,p1,p2,p3,p4 … … 73 73 74 74 integer :: idummy = -7 75 !save idummy,xmasssave76 !data idummy/-7/,xmasssave/maxpoint*0./75 !save idummy,xmasssave 76 !data idummy/-7/,xmasssave/maxpoint*0./ 77 77 78 78 logical :: first_call=.true. 79 79 80 ! Use different seed for each process.81 !****************************************************************************80 ! Use different seed for each process. 81 !**************************************************************************** 82 82 if (first_call) then 83 83 idummy=idummy+mp_seed … … 87 87 mind2=memind(2) 88 88 89 ! Determine the actual date and time in Greenwich (i.e., UTC + correction for daylight savings time)90 !*****************************************************************************89 ! Determine the actual date and time in Greenwich (i.e., UTC + correction for daylight savings time) 90 !***************************************************************************** 91 91 92 92 julmonday=juldate(19000101,0) ! this is a Monday … … 97 97 98 98 99 ! 
For every release point, check whether we are in the release time interval100 !***************************************************************************99 ! For every release point, check whether we are in the release time interval 100 !*************************************************************************** 101 101 102 102 minpart=1 … … 105 105 (itime.le.ireleaseend(i))) then 106 106 107 ! Determine the local day and time108 !*********************************107 ! Determine the local day and time 108 !********************************* 109 109 110 110 xlonav=xlon0+(xpoint2(i)+xpoint1(i))/2.*dx ! longitude needed to determine local time … … 124 124 endif 125 125 126 ! Calculate a species- and time-dependent correction factor, distinguishing between127 ! area (those with release starting at surface) and point (release starting above surface) sources128 ! Also, calculate an average time correction factor (species independent)129 !*****************************************************************************126 ! Calculate a species- and time-dependent correction factor, distinguishing between 127 ! area (those with release starting at surface) and point (release starting above surface) sources 128 ! Also, calculate an average time correction factor (species independent) 129 !***************************************************************************** 130 130 average_timecorrect=0. 131 131 do k=1,nspec … … 139 139 average_timecorrect=average_timecorrect/real(nspec) 140 140 141 ! Determine number of particles to be released this time; at start and at end of release,142 ! only half the particles are released143 !*****************************************************************************141 ! Determine number of particles to be released this time; at start and at end of release, 142 ! only half the particles are released 143 !***************************************************************************** 144 144 145 145 if (ireleasestart(i).ne.ireleaseend(i)) then … … 149 149 (itime.eq.ireleaseend(i))) rfraction=rfraction/2. 150 150 151 ! Take the species-average time correction factor in order to scale the152 ! number of particles released this time153 ! Also scale by number of MPI processes154 !**********************************************************************151 ! Take the species-average time correction factor in order to scale the 152 ! number of particles released this time 153 ! Also scale by number of MPI processes 154 !********************************************************************** 155 155 156 156 rfraction=rfraction*average_timecorrect … … 158 158 rfraction=rfraction+xmasssave(i) ! number to be released at this time 159 159 160 ! number to be released for one process160 ! number to be released for one process 161 161 if (mp_partid.lt.mod(int(rfraction),mp_partgroup_np)) then 162 162 addone=1 … … 180 180 numrel=npart(i)/mp_partgroup_np+addone 181 181 endif 182 182 183 183 xaux=xpoint2(i)-xpoint1(i) 184 184 yaux=ypoint2(i)-ypoint1(i) … … 187 187 do ipart=minpart,maxpart_mpi ! search for free storage space 188 188 189 ! If a free storage space is found, attribute everything to this array element190 !*****************************************************************************189 ! If a free storage space is found, attribute everything to this array element 190 !***************************************************************************** 191 191 192 192 if (itra1(ipart).ne.itime) then 193 193 194 ! 
Particle coordinates are determined by using a random position within the release volume195 !*****************************************************************************196 197 ! Determine horizontal particle position198 !***************************************194 ! Particle coordinates are determined by using a random position within the release volume 195 !***************************************************************************** 196 197 ! Determine horizontal particle position 198 !*************************************** 199 199 200 200 xtra1(ipart)=xpoint1(i)+ran1(idummy)*xaux … … 207 207 ytra1(ipart)=ypoint1(i)+ran1(idummy)*yaux 208 208 209 ! Assign mass to particle: Total mass divided by total number of particles.210 ! Time variation has partly been taken into account already by a species-average211 ! correction factor, by which the number of particles released this time has been212 ! scaled. Adjust the mass per particle by the species-dependent time correction factor213 ! divided by the species-average one214 !*****************************************************************************209 ! Assign mass to particle: Total mass divided by total number of particles. 210 ! Time variation has partly been taken into account already by a species-average 211 ! correction factor, by which the number of particles released this time has been 212 ! scaled. Adjust the mass per particle by the species-dependent time correction factor 213 ! divided by the species-average one 214 !***************************************************************************** 215 215 do k=1,nspec 216 xmass1(ipart,k)=xmass(i,k)/real(npart(i)) &217 *timecorrect(k)/average_timecorrect218 ! write (*,*) 'xmass1: ',xmass1(ipart,k),ipart,k219 ! Assign certain properties to particle220 !**************************************216 xmass1(ipart,k)=xmass(i,k)/real(npart(i)) & 217 *timecorrect(k)/average_timecorrect 218 ! write (*,*) 'xmass1: ',xmass1(ipart,k),ipart,k 219 ! Assign certain properties to particle 220 !************************************** 221 221 end do 222 222 nclass(ipart)=min(int(ran1(idummy)*real(nclassunc))+1, & … … 234 234 235 235 236 ! Determine vertical particle position237 !*************************************236 ! Determine vertical particle position 237 !************************************* 238 238 239 239 ztra1(ipart)=zpoint1(i)+ran1(idummy)*zaux 240 240 241 ! Interpolation of topography and density242 !****************************************243 244 ! Determine the nest we are in245 !*****************************241 ! Interpolation of topography and density 242 !**************************************** 243 244 ! Determine the nest we are in 245 !***************************** 246 246 247 247 ngrid=0 … … 257 257 43 continue 258 258 259 ! Determine (nested) grid coordinates and auxiliary parameters used for interpolation260 !*****************************************************************************259 ! Determine (nested) grid coordinates and auxiliary parameters used for interpolation 260 !***************************************************************************** 261 261 262 262 if (ngrid.gt.0) then … … 294 294 endif 295 295 296 ! If starting height is in pressure coordinates, retrieve pressure profile and convert zpart1 to meters297 !*****************************************************************************296 ! 
If starting height is in pressure coordinates, retrieve pressure profile and convert zpart1 to meters 297 !***************************************************************************** 298 298 if (kindz(i).eq.3) then 299 299 presspart=ztra1(ipart) … … 337 337 endif 338 338 339 ! If release positions are given in meters above sea level, subtract the340 ! topography from the starting height341 !***********************************************************************339 ! If release positions are given in meters above sea level, subtract the 340 ! topography from the starting height 341 !*********************************************************************** 342 342 343 343 if (kindz(i).eq.2) ztra1(ipart)=ztra1(ipart)-topo … … 348 348 349 349 350 ! For special simulations, multiply particle concentration air density;351 ! Simply take the 2nd field in memory to do this (accurate enough)352 !***********************************************************************353 !AF IND_SOURCE switches between different units for concentrations at the source354 !Af NOTE that in backward simulations the release of particles takes place at the355 !Af receptor and the sampling at the source.356 !Af 1="mass"357 !Af 2="mass mixing ratio"358 !Af IND_RECEPTOR switches between different units for concentrations at the receptor359 !Af 1="mass"360 !Af 2="mass mixing ratio"361 362 !Af switches for the releasefile:363 !Af IND_REL = 1 : xmass * rho364 !Af IND_REL = 0 : xmass * 1365 366 !Af ind_rel is defined in readcommand.f350 ! For special simulations, multiply particle concentration air density; 351 ! Simply take the 2nd field in memory to do this (accurate enough) 352 !*********************************************************************** 353 !AF IND_SOURCE switches between different units for concentrations at the source 354 !Af NOTE that in backward simulations the release of particles takes place at the 355 !Af receptor and the sampling at the source. 356 !Af 1="mass" 357 !Af 2="mass mixing ratio" 358 !Af IND_RECEPTOR switches between different units for concentrations at the receptor 359 !Af 1="mass" 360 !Af 2="mass mixing ratio" 361 362 !Af switches for the releasefile: 363 !Af IND_REL = 1 : xmass * rho 364 !Af IND_REL = 0 : xmass * 1 365 366 !Af ind_rel is defined in readcommand.f 367 367 368 368 if (ind_rel .eq. 1) then 369 369 370 ! Interpolate the air density371 !****************************370 ! Interpolate the air density 371 !**************************** 372 372 373 373 do ii=2,nz … … 403 403 404 404 405 ! Multiply "mass" (i.e., mass mixing ratio in forward runs) with density406 !********************************************************************405 ! Multiply "mass" (i.e., mass mixing ratio in forward runs) with density 406 !******************************************************************** 407 407 408 408 do k=1,nspec … … 416 416 endif 417 417 end do 418 ! ESO TODO: Here we could use dynamic allocation and increase array sizes 419 if (ipart.gt.maxpart_mpi) goto 996 418 if (ipart.gt.maxpart) goto 996 420 419 421 420 34 minpart=ipart+1 422 421 end do 423 endif422 endif 424 423 end do 425 424 … … 427 426 return 428 427 429 996 continue428 996 continue 430 429 write(*,*) '#####################################################' 431 430 write(*,*) '#### FLEXPART MODEL SUBROUTINE RELEASEPARTICLES: ####' -
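The releaseparticles_mpi.f90 changes concern how the particles of one release point are split among the mp_partgroup_np particle processes: each rank releases npart(i)/mp_partgroup_np particles, and ranks whose ID is below mod(npart(i),mp_partgroup_np) release one extra (the addone/addpart variable), so no particles are lost to integer division. The following is a small serial sketch of that split with made-up numbers, not the routine itself.

program share_demo
  implicit none
  integer, parameter :: mp_partgroup_np = 4  ! number of particle processes (hypothetical)
  integer :: npart_i, mp_partid, addone, numrel, total

  npart_i = 10        ! particles requested for one release point (hypothetical)
  total   = 0
  do mp_partid = 0, mp_partgroup_np-1
    addone = 0
    ! Ranks below the remainder take one extra particle so none are lost
    if (mp_partid .lt. mod(npart_i, mp_partgroup_np)) addone = 1
    numrel = npart_i/mp_partgroup_np + addone
    write(*,'(A,I0,A,I0,A)') 'rank ', mp_partid, ' releases ', numrel, ' particles'
    total = total + numrel
  end do
  write(*,'(A,I0)') 'total released: ', total   ! equals npart_i
end program share_demo

For npart_i=10 on 4 ranks this prints 3, 3, 2, 2, summing back to 10.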
src/timemanager_mpi.f90
r861805a r0f7835d 104 104 105 105 logical :: reqv_state=.false. ! .true. if waiting for a MPI_Irecv to complete 106 integer :: j,ks,kp,l,n,itime=0,nstop,nstop1,memstat=0 106 integer :: j,ks,kp,l,n,itime=0,nstop,nstop1,memstat=0 !,mind 107 107 ! integer :: ksp 108 108 integer :: ip … … 155 155 156 156 do itime=0,ideltas,lsynctime 157 158 157 159 158 ! Computation of wet deposition, OH reaction and mass transfer … … 167 166 !******************************************************************** 168 167 169 if (mp_d bg_mode) write(*,*) 'myid, itime: ',mp_pid,itime168 if (mp_dev_mode) write(*,*) 'myid, itime: ',mp_pid,itime 170 169 171 170 if (WETDEP .and. itime .ne. 0 .and. numpart .gt. 0) then … … 275 274 276 275 if (mp_measure_time.and..not.(lmpreader.and.lmp_use_reader)) call mpif_mtime('getfields',1) 277 278 ! For validation and tests: call the function below to set all fields to simple279 ! homogeneous values280 ! if (mp_dev_mode) call set_fields_synthetic281 282 !*******************************************************************************283 276 284 277 if (lmpreader.and.nstop1.gt.1) stop 'NO METEO FIELDS AVAILABLE' … … 331 324 call releaseparticles(itime) 332 325 endif 333 334 335 ! Check if particles should be redistributed among processes336 !***********************************************************337 call mpif_calculate_part_redist(itime)338 326 339 327 … … 554 542 ! Decide whether to write an estimate of the number of particles released, 555 543 ! or exact number (require MPI reduce operation) 556 if (mp_d bg_mode) then544 if (mp_dev_mode) then 557 545 numpart_tot_mpi = numpart 558 546 else … … 561 549 562 550 if (mp_exact_numpart.and..not.(lmpreader.and.lmp_use_reader).and.& 563 &.not.mp_d bg_mode) then551 &.not.mp_dev_mode) then 564 552 call MPI_Reduce(numpart, numpart_tot_mpi, 1, MPI_INTEGER, MPI_SUM, id_root, & 565 553 & mp_comm_used, mp_ierr) … … 567 555 568 556 !CGZ-lifetime: output species lifetime 569 if (lroot.or.mp_d bg_mode) then557 if (lroot.or.mp_dev_mode) then 570 558 ! write(*,*) 'Overview species lifetime in days', & 571 559 ! real((species_lifetime(:,1)/species_lifetime(:,2))/real(3600.0*24.0)) … … 577 565 ! end if 578 566 end if 579 580 ! Write particles for all processes581 if (mp_dev_mode) write(*,*) "PID, itime, numpart", mp_pid,itime,numpart582 583 567 584 568 45 format(i13,' SECONDS SIMULATED: ',i13, ' PARTICLES: Uncertainty: ',3f7.3)
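In timemanager_mpi.f90 the reported particle total is either the local numpart (in mp_dev_mode) or, when mp_exact_numpart is .true., the sum of all processes' numpart obtained with MPI_Reduce onto the root. The sketch below shows only that reduction pattern; it uses MPI_COMM_WORLD and root rank 0 as stand-ins for mp_comm_used and id_root, and the local counts are invented for illustration.

program numpart_reduce_demo
  use mpi
  implicit none
  integer :: mp_ierr, mp_pid, numpart, numpart_tot_mpi

  call MPI_Init(mp_ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, mp_pid, mp_ierr)

  ! Hypothetical local particle count on this rank
  numpart = 1000 + 10*mp_pid

  ! Sum per-process counts onto the root, as done when mp_exact_numpart is .true.
  call MPI_Reduce(numpart, numpart_tot_mpi, 1, MPI_INTEGER, MPI_SUM, 0, &
      MPI_COMM_WORLD, mp_ierr)

  if (mp_pid .eq. 0) write(*,'(A,I0)') 'total particles: ', numpart_tot_mpi
  call MPI_Finalize(mp_ierr)
end program numpart_reduce_demo

Compile with an MPI Fortran wrapper (e.g. mpif90) and run with mpirun; only the root rank prints the reduced total.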