Changeset 861805a in flexpart.git
- Timestamp: Sep 6, 2016, 9:59:11 AM
- Branches: master, 10.4.1_pesei, GFS_025, bugfixes+enhancements, dev, release-10, release-10.4.1, scaling-bug, univie
- Children: 16b61a5, 93786a1
- Parents: 0f7835d
- Location: src
- Files: 3 edited
src/mpi_mod.f90
r0f7835d r861805a 90 90 ! MPI tags/requests for send/receive operation 91 91 integer :: tm1 92 integer, parameter :: nvar_async=26 !27 !29 :DBG:92 integer, parameter :: nvar_async=26 93 93 !integer, dimension(:), allocatable :: tags 94 94 integer, dimension(:), allocatable :: reqs 95 96 ! Status array used for certain MPI operations (MPI_RECV) 97 integer, dimension(MPI_STATUS_SIZE) :: mp_status 95 98 96 99 … … 119 122 logical, parameter :: mp_dev_mode = .false. 120 123 logical, parameter :: mp_dbg_out = .false. 121 logical, parameter :: mp_time_barrier=. true.124 logical, parameter :: mp_time_barrier=.false. 122 125 logical, parameter :: mp_measure_time=.false. 123 126 logical, parameter :: mp_exact_numpart=.true. … … 149 152 integer, private :: dat_lun 150 153 154 ! Temporary arrays for particles (allocated and deallocated as needed) 155 integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, & 156 & itramem_tmp, itrasplit_tmp 157 real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp 158 real, allocatable, dimension(:) :: ztra1_tmp 159 real, allocatable, dimension(:,:) :: xmass1_tmp 160 161 ! mp_redist_fract Exchange particles between processes if relative difference 162 ! is larger. A good value is between 0.0 and 0.5 163 ! mp_maxpart_factor Allocate more memory per process than strictly needed 164 ! (safety factor). Recommended value between 1.5 and 2.5 165 ! mp_min_redist Do not redistribute particles if below this limit 166 real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5 167 integer,parameter :: mp_min_redist=100000 168 169 151 170 contains 152 171 … … 195 214 if (dep_prec==dp) then 196 215 mp_cp = MPI_REAL8 197 216 ! TODO: write info message for serial version as well 198 217 if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields' 199 218 else if (dep_prec==sp) then … … 242 261 !********************************************************************** 243 262 244 ! id_read = min(mp_np-1, 1)245 263 id_read = mp_np-1 246 264 … … 311 329 312 330 ! Set maxpart per process 313 if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1 314 maxpart_mpi=int(m axpart/mp_partgroup_np)+addmaxpart331 ! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution 332 maxpart_mpi=int(mp_maxpart_factor*maxpart/mp_partgroup_np) 315 333 316 334 ! Do not allocate particle data arrays for readwind process … … 321 339 ! Set random seed for each non-root process 322 340 if (mp_pid.gt.0) then 323 ! if (mp_pid.ge.0) then324 ! call system_clock(s)325 341 s = 244 326 342 mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729)) 327 343 end if 328 if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed329 344 if (mp_dbg_mode) then 330 ! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20331 345 mp_seed=0 332 346 if (lroot) write(*,*) 'MPI: setting seed=0' … … 454 468 455 469 470 subroutine mpif_send_part_properties(num_part) 471 !*********************************************************************** 472 ! Distribute particle properties from root to all processes. 473 ! 474 ! Used by init_domainfill_mpi 475 ! 476 ! Variables: 477 ! 478 ! num_part input, number of particles per process (rounded up) 479 ! 480 !*********************************************************************** 481 use com_mod 482 483 implicit none 484 485 integer,intent(in) :: num_part 486 integer :: i,jj, addone 487 488 ! 
Time for MPI communications 489 !**************************** 490 if (mp_measure_time) call mpif_mtime('commtime',0) 491 492 ! Distribute variables, send from pid 0 to other processes (including itself) 493 !**************************************************************************** 494 495 call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,& 496 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 497 if (mp_ierr /= 0) goto 600 498 call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,& 499 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 500 if (mp_ierr /= 0) goto 600 501 call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,& 502 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 503 if (mp_ierr /= 0) goto 600 504 call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,& 505 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 506 if (mp_ierr /= 0) goto 600 507 call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,& 508 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 509 if (mp_ierr /= 0) goto 600 510 call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,& 511 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 512 if (mp_ierr /= 0) goto 600 513 call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,& 514 &num_part,mp_dp,id_root,mp_comm_used,mp_ierr) 515 if (mp_ierr /= 0) goto 600 516 call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,& 517 &num_part,mp_dp,id_root,mp_comm_used,mp_ierr) 518 if (mp_ierr /= 0) goto 600 519 call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,& 520 &num_part,mp_sp,id_root,mp_comm_used,mp_ierr) 521 if (mp_ierr /= 0) goto 600 522 do i=1,nspec 523 call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),& 524 &num_part,mp_sp,id_root,mp_comm_used,mp_ierr) 525 if (mp_ierr /= 0) goto 600 526 end do 527 528 if (mp_measure_time) call mpif_mtime('commtime',1) 529 write(*,*) "PID ", mp_partid, "ending MPI_Scatter operation" 530 531 goto 601 532 533 600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr 534 stop 535 536 ! After the transfer of particles it it possible that the value of 537 ! "numpart" is larger than the actual, so we reduce it if there are 538 ! invalid particles at the end of the arrays 539 601 do i=num_part, 1, -1 540 if (itra1(i).eq.-999999999) then 541 numpart=numpart-1 542 else 543 exit 544 end if 545 end do 546 547 548 !601 end subroutine mpif_send_part_properties 549 end subroutine mpif_send_part_properties 550 551 552 subroutine mpif_calculate_part_redist(itime) 553 !*********************************************************************** 554 ! Calculate number of particles to redistribute between processes. This routine 555 ! can be called at regular time intervals to keep a level number of 556 ! particles on each process. 557 ! 558 ! First, all processes report their local "numpart" to each other, which is 559 ! stored in array "numpart_mpi(np)". This array is sorted from low to 560 ! high values, along with a corresponding process ID array "idx_arr(np)". 561 ! If the relative difference between the highest and lowest "numpart_mpi" value 562 ! is above a threshold, particles are transferred from process idx_arr(np-1) 563 ! to process idx_arr(0). Repeat for processes idx_arr(np-i) and idx_arr(i) 564 ! for all valid i. 565 ! Note: If np is an odd number, the process with the median 566 ! number of particles is left unchanged 567 ! 568 ! VARIABLES 569 ! 
itime input, current time 570 !*********************************************************************** 571 use com_mod 572 573 implicit none 574 575 integer, intent(in) :: itime 576 real :: pmin,z 577 integer :: i,jj,nn, num_part=1,m,imin, num_trans 578 logical :: first_iter 579 integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr 580 real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this 581 582 ! Exit if running with only 1 process 583 !************************************************************************ 584 if (mp_np.eq.1) return 585 586 ! All processes exchange information on number of particles 587 !**************************************************************************** 588 allocate(numparticles_mpi(0:mp_partgroup_np-1), & 589 &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1)) 590 591 call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, & 592 & 1, MPI_INTEGER, mp_comm_used, mp_ierr) 593 594 595 ! Sort from lowest to highest 596 ! Initial guess: correct order 597 sorted(:) = numparticles_mpi(:) 598 do i=0, mp_partgroup_np-1 599 idx_arr(i) = i 600 end do 601 602 ! For each successive element in index array, see if a lower value exists 603 do i=0, mp_partgroup_np-2 604 pmin=sorted(i) 605 imin=idx_arr(i) 606 m=i+1 607 do jj=m, mp_partgroup_np-1 608 if (pmin.le.sorted(jj)) cycle 609 z=pmin 610 pmin=sorted(jj) 611 sorted(jj)=z 612 613 nn=imin 614 imin=idx_arr(jj) 615 idx_arr(jj)=nn 616 end do 617 sorted(i)=pmin 618 idx_arr(i)=imin 619 end do 620 621 ! For each pair of processes, transfer particles if the difference is above a 622 ! limit, and numpart at sending process large enough 623 624 m=mp_partgroup_np-1 ! index for last sorted process (most particles) 625 do i=0,mp_partgroup_np/2-1 626 num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i)) 627 if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then 628 if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.& 629 & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then 630 call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2) 631 end if 632 end if 633 m=m-1 634 end do 635 636 deallocate(numparticles_mpi, idx_arr, sorted) 637 638 end subroutine mpif_calculate_part_redist 639 640 641 subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans) 642 !*********************************************************************** 643 ! Transfer particle properties between two arbitrary processes. 644 ! 645 ! VARIABLES 646 ! 647 ! itime input, current time 648 ! src_proc input, ID of source process 649 ! dest_proc input, ID of destination process 650 ! num_trans input, number of particles to transfer 651 ! 652 !************************************************************************ 653 use com_mod !TODO: ,only: nclass etc 654 655 implicit none 656 657 integer, intent(in) :: itime, src_proc, dest_proc, num_trans 658 integer :: ll, ul ! lower and upper indices in arrays 659 integer :: arr_sz ! size of temporary arrays 660 integer :: mtag ! MPI message tag 661 integer :: i, j, minpart, ipart, maxnumpart 662 663 ! Measure time for MPI communications 664 !************************************ 665 if (mp_measure_time) call mpif_mtime('commtime',0) 666 667 ! Send particles 668 !*************** 669 if (mp_partid.eq.src_proc) then 670 mtag = 2000 671 672 ! Array indices for the transferred particles 673 ll=numpart-num_trans+1 674 ul=numpart 675 676 ! if (mp_dev_mode) then 677 ! write(*,FMT='(72("#"))') 678 ! 
write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc 679 ! write(*,*) "numpart before: ", numpart 680 ! end if 681 682 call MPI_SEND(nclass(ll:ul),num_trans,& 683 & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr) 684 685 call MPI_SEND(npoint(ll:ul),num_trans,& 686 & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr) 687 688 call MPI_SEND(itra1(ll:ul),num_trans, & 689 & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr) 690 691 call MPI_SEND(idt(ll:ul),num_trans, & 692 & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr) 693 694 call MPI_SEND(itramem(ll:ul),num_trans, & 695 & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr) 696 697 call MPI_SEND(itrasplit(ll:ul),num_trans,& 698 & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr) 699 700 call MPI_SEND(xtra1(ll:ul),num_trans, & 701 & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr) 702 703 call MPI_SEND(ytra1(ll:ul),num_trans,& 704 & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr) 705 706 call MPI_SEND(ztra1(ll:ul),num_trans,& 707 & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr) 708 709 do j=1,nspec 710 call MPI_SEND(xmass1(ll:ul,j),num_trans,& 711 & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr) 712 end do 713 714 ! Terminate transferred particles, update numpart 715 itra1(ll:ul) = -999999999 716 717 numpart = numpart-num_trans 718 719 ! if (mp_dev_mode) then 720 ! write(*,*) "numpart after: ", numpart 721 ! write(*,FMT='(72("#"))') 722 ! end if 723 724 else if (mp_partid.eq.dest_proc) then 725 726 ! Receive particles 727 !****************** 728 mtag = 2000 729 ! Array indices for the transferred particles 730 ll=numpart+1 731 ul=numpart+num_trans 732 733 ! if (mp_dev_mode) then 734 ! write(*,FMT='(72("#"))') 735 ! write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc 736 ! write(*,*) "numpart before: ", numpart 737 ! end if 738 739 ! We could receive the data directly at nclass(ll:ul) etc., but this leaves 740 ! vacant spaces at lower indices. Using temporary arrays instead. 741 arr_sz = ul-ll+1 742 allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),& 743 & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),& 744 & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),& 745 & xmass1_tmp(arr_sz, maxspec)) 746 747 call MPI_RECV(nclass_tmp,num_trans,& 748 & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr) 749 750 call MPI_RECV(npoint_tmp,num_trans,& 751 & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr) 752 753 call MPI_RECV(itra1_tmp,num_trans, & 754 & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr) 755 756 call MPI_RECV(idt_tmp,num_trans, & 757 & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr) 758 759 call MPI_RECV(itramem_tmp,num_trans, & 760 & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr) 761 762 call MPI_RECV(itrasplit_tmp,num_trans,& 763 & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr) 764 765 call MPI_RECV(xtra1_tmp,num_trans, & 766 & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr) 767 768 call MPI_RECV(ytra1_tmp,num_trans,& 769 & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr) 770 771 call MPI_RECV(ztra1_tmp,num_trans,& 772 & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr) 773 774 do j=1,nspec 775 call MPI_RECV(xmass1_tmp(:,j),num_trans,& 776 & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr) 777 end do 778 779 ! This is the maximum value numpart can possibly have after the transfer 780 maxnumpart=numpart+num_trans 781 782 ! 
Search for vacant space and copy from temporary storage 783 !******************************************************** 784 minpart=1 785 do i=1, num_trans 786 ! Take into acount that we may have transferred invalid particles 787 if (itra1_tmp(minpart).ne.itime) goto 200 788 do ipart=minpart,maxnumpart 789 if (itra1(ipart).ne.itime) then 790 itra1(ipart) = itra1_tmp(minpart) 791 npoint(ipart) = npoint_tmp(minpart) 792 nclass(ipart) = nclass_tmp(minpart) 793 idt(ipart) = idt_tmp(minpart) 794 itramem(ipart) = itramem_tmp(minpart) 795 itrasplit(ipart) = itrasplit_tmp(minpart) 796 xtra1(ipart) = xtra1_tmp(minpart) 797 ytra1(ipart) = ytra1_tmp(minpart) 798 ztra1(ipart) = ztra1_tmp(minpart) 799 xmass1(ipart,:) = xmass1_tmp(minpart,:) 800 ! Increase numpart, if necessary 801 numpart=max(numpart,ipart) 802 goto 200 ! Storage space has been found, stop searching 803 end if 804 end do 805 200 minpart=minpart+1 806 end do 807 808 deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,itrasplit_tmp,& 809 & xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp) 810 811 ! if (mp_dev_mode) then 812 ! write(*,*) "numpart after: ", numpart 813 ! write(*,FMT='(72("#"))') 814 ! end if 815 816 else 817 ! This routine should only be called by the two participating processes 818 write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part" 819 stop 820 return 821 end if 822 823 ! Measure time for MPI communications 824 !************************************ 825 if (mp_measure_time) call mpif_mtime('commtime',1) 826 827 end subroutine mpif_redist_part 828 829 456 830 subroutine mpif_tm_send_vars 457 831 !*********************************************************************** … … 477 851 if (lroot) then 478 852 call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 479 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)853 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 480 854 if (mp_ierr /= 0) goto 600 481 855 call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 482 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)856 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 483 857 if (mp_ierr /= 0) goto 600 484 858 call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 485 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)859 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 486 860 if (mp_ierr /= 0) goto 600 487 861 call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 488 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)862 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 489 863 if (mp_ierr /= 0) goto 600 490 864 call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 491 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)865 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 492 866 if (mp_ierr /= 0) goto 600 493 867 494 868 ! int2 495 869 call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,& 496 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)870 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr) 497 871 if (mp_ierr /= 0) goto 600 498 872 499 873 ! 
real 500 874 call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,& 501 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)875 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 502 876 if (mp_ierr /= 0) goto 600 503 877 call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,& 504 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)878 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 505 879 if (mp_ierr /= 0) goto 600 506 880 call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,& 507 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)881 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 508 882 if (mp_ierr /= 0) goto 600 509 883 510 884 call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,& 511 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)885 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 512 886 if (mp_ierr /= 0) goto 600 513 887 call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,& 514 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)888 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 515 889 if (mp_ierr /= 0) goto 600 516 890 call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,& 517 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)891 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 518 892 if (mp_ierr /= 0) goto 600 519 893 520 894 call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,& 521 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)895 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 522 896 if (mp_ierr /= 0) goto 600 523 897 call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,& 524 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)898 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 525 899 if (mp_ierr /= 0) goto 600 526 900 call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,& 527 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)901 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 528 902 if (mp_ierr /= 0) goto 600 529 903 530 904 do i=1,nspec 531 905 call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,& 532 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)906 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 533 907 if (mp_ierr /= 0) goto 600 534 908 end do … … 537 911 ! integers 538 912 call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,& 539 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)913 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 540 914 if (mp_ierr /= 0) goto 600 541 915 call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,& 542 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)916 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 543 917 if (mp_ierr /= 0) goto 600 544 918 call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,& 545 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)919 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 546 920 if (mp_ierr /= 0) goto 600 547 921 call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,& 548 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)922 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 549 923 if (mp_ierr /= 0) goto 600 550 924 call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,& 551 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)925 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 552 926 if (mp_ierr /= 0) goto 600 553 927 554 928 ! int2 555 929 call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,& 556 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)930 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr) 557 931 if (mp_ierr /= 0) goto 600 558 932 559 933 ! 
reals 560 934 call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,& 561 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)935 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 562 936 if (mp_ierr /= 0) goto 600 563 937 call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,& 564 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)938 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 565 939 if (mp_ierr /= 0) goto 600 566 940 call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,& 567 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)941 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 568 942 if (mp_ierr /= 0) goto 600 569 943 570 944 call MPI_SCATTER(us,numpart_mpi,mp_sp,us,& 571 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)945 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 572 946 if (mp_ierr /= 0) goto 600 573 947 call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,& 574 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)948 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 575 949 if (mp_ierr /= 0) goto 600 576 950 call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,& 577 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)951 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 578 952 if (mp_ierr /= 0) goto 600 579 953 580 954 call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,& 581 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)955 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 582 956 if (mp_ierr /= 0) goto 600 583 957 call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,& 584 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)958 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 585 959 if (mp_ierr /= 0) goto 600 586 960 call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,& 587 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)961 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 588 962 if (mp_ierr /= 0) goto 600 589 963 590 964 do i=1,nspec 591 965 call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),& 592 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)966 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 593 967 if (mp_ierr /= 0) goto 600 594 968 end do … … 1145 1519 1146 1520 ! cloud water/ice: 1147 if (readclouds_nest(i)) then1148 1149 1150 call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)1151 if (mp_ierr /= 0) goto 6001152 end if1521 if (readclouds_nest(i)) then 1522 ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) 1523 ! if (mp_ierr /= 0) goto 600 1524 call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) 1525 if (mp_ierr /= 0) goto 600 1526 end if 1153 1527 1154 1528 ! 2D fields … … 1398 1772 integer :: d2s1 = nxmax*nymax 1399 1773 integer :: d2s2 = nxmax*nymax*maxspec 1400 1774 !integer :: d1_size1 = maxwf 1401 1775 1402 1776 ! 
integer :: d3s1,d3s2,d2s1,d2s2 … … 1645 2019 if (dest.eq.id_read) cycle 1646 2020 do k=1, numbnests 1647 i=dest*nvar_async1648 call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1649 if (mp_ierr /= 0) goto 6001650 i=i+11651 call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1652 if (mp_ierr /= 0) goto 6001653 i=i+11654 call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1655 if (mp_ierr /= 0) goto 6001656 i=i+11657 call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1658 if (mp_ierr /= 0) goto 6001659 i=i+11660 call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1661 if (mp_ierr /= 0) goto 6001662 i=i+11663 call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1664 if (mp_ierr /= 0) goto 6001665 i=i+11666 call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1667 if (mp_ierr /= 0) goto 6001668 i=i+11669 call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1670 if (mp_ierr /= 0) goto 6001671 i=i+11672 call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1673 if (mp_ierr /= 0) goto 6001674 i=i+11675 call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1676 if (mp_ierr /= 0) goto 6001677 i=i+11678 call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1679 i=i+11680 if (mp_ierr /= 0) goto 6001681 call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1682 if (mp_ierr /= 0) goto 6001683 i=i+11684 call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1685 if (mp_ierr /= 0) goto 6001686 i=i+11687 call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1688 if (mp_ierr /= 0) goto 6001689 i=i+11690 call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1691 if (mp_ierr /= 0) goto 6001692 i=i+12021 i=dest*nvar_async 2022 call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2023 if (mp_ierr /= 0) goto 600 2024 i=i+1 2025 call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2026 if (mp_ierr /= 0) goto 600 2027 i=i+1 2028 call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2029 if (mp_ierr /= 0) goto 600 2030 i=i+1 2031 call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2032 if (mp_ierr /= 0) goto 600 2033 i=i+1 2034 call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2035 if (mp_ierr /= 0) goto 600 2036 i=i+1 2037 call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2038 if (mp_ierr /= 0) goto 600 2039 i=i+1 2040 call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2041 if (mp_ierr /= 0) goto 600 2042 i=i+1 2043 call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2044 if (mp_ierr /= 0) goto 600 2045 i=i+1 2046 call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2047 if (mp_ierr /= 0) goto 600 2048 i=i+1 2049 call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2050 if (mp_ierr /= 0) goto 600 2051 i=i+1 2052 call 
MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2053 i=i+1 2054 if (mp_ierr /= 0) goto 600 2055 call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2056 if (mp_ierr /= 0) goto 600 2057 i=i+1 2058 call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2059 if (mp_ierr /= 0) goto 600 2060 i=i+1 2061 call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2062 if (mp_ierr /= 0) goto 600 2063 i=i+1 2064 call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2065 if (mp_ierr /= 0) goto 600 2066 i=i+1 1693 2067 ! 15 1694 call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1695 if (mp_ierr /= 0) goto 6001696 i=i+11697 call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1698 if (mp_ierr /= 0) goto 6001699 i=i+11700 call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1701 if (mp_ierr /= 0) goto 6001702 i=i+11703 call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1704 if (mp_ierr /= 0) goto 6001705 i=i+11706 call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1707 if (mp_ierr /= 0) goto 6001708 i=i+11709 call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1710 if (mp_ierr /= 0) goto 6001711 i=i+11712 call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1713 if (mp_ierr /= 0) goto 6001714 i=i+11715 call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1716 if (mp_ierr /= 0) goto 6001717 i=i+11718 call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1719 if (mp_ierr /= 0) goto 6001720 i=i+11721 call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1722 if (mp_ierr /= 0) goto 6002068 call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2069 if (mp_ierr /= 0) goto 600 2070 i=i+1 2071 call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2072 if (mp_ierr /= 0) goto 600 2073 i=i+1 2074 call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2075 if (mp_ierr /= 0) goto 600 2076 i=i+1 2077 call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2078 if (mp_ierr /= 0) goto 600 2079 i=i+1 2080 call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2081 if (mp_ierr /= 0) goto 600 2082 i=i+1 2083 call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2084 if (mp_ierr /= 0) goto 600 2085 i=i+1 2086 call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2087 if (mp_ierr /= 0) goto 600 2088 i=i+1 2089 call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2090 if (mp_ierr /= 0) goto 600 2091 i=i+1 2092 call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2093 if (mp_ierr /= 0) goto 600 2094 i=i+1 2095 call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2096 if (mp_ierr /= 0) goto 600 1723 2097 ! 25 1724 2098 1725 2099 ! Send cloud water if it exists. 
Increment counter always (as on receiving end) 1726 if (readclouds) then 1727 i=i+1 1728 call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,& 1729 &MPI_COMM_WORLD,reqs(i),mp_ierr) 1730 if (mp_ierr /= 0) goto 600 1731 end if 2100 if (readclouds) then 2101 i=i+1 2102 call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,& 2103 &MPI_COMM_WORLD,reqs(i),mp_ierr) 2104 if (mp_ierr /= 0) goto 600 2105 end if 2106 end do 1732 2107 end do 1733 end do1734 2108 1735 2109 if (mp_measure_time) call mpif_mtime('commtime',1) … … 1810 2184 do k=1, numbnests 1811 2185 ! Get MPI tags/requests for communications 1812 j=mp_pid*nvar_async1813 call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1814 &MPI_COMM_WORLD,reqs(j),mp_ierr)1815 if (mp_ierr /= 0) goto 6001816 j=j+11817 call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1818 &MPI_COMM_WORLD,reqs(j),mp_ierr)1819 if (mp_ierr /= 0) goto 6001820 j=j+11821 call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1822 &MPI_COMM_WORLD,reqs(j),mp_ierr)1823 if (mp_ierr /= 0) goto 6001824 j=j+11825 call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1826 &MPI_COMM_WORLD,reqs(j),mp_ierr)1827 if (mp_ierr /= 0) goto 6001828 j=j+11829 call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1830 &MPI_COMM_WORLD,reqs(j),mp_ierr)1831 if (mp_ierr /= 0) goto 6001832 j=j+11833 call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1834 &MPI_COMM_WORLD,reqs(j),mp_ierr)1835 if (mp_ierr /= 0) goto 6001836 j=j+11837 call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&1838 &MPI_COMM_WORLD,reqs(j),mp_ierr)1839 if (mp_ierr /= 0) goto 6001840 j=j+11841 call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&1842 &MPI_COMM_WORLD,reqs(j),mp_ierr)1843 if (mp_ierr /= 0) goto 6001844 j=j+11845 call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1846 &MPI_COMM_WORLD,reqs(j),mp_ierr)1847 if (mp_ierr /= 0) goto 6001848 j=j+11849 call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1850 &MPI_COMM_WORLD,reqs(j),mp_ierr)1851 if (mp_ierr /= 0) goto 6001852 j=j+11853 call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&1854 &MPI_COMM_WORLD,reqs(j),mp_ierr)1855 if (mp_ierr /= 0) goto 6001856 j=j+11857 call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1858 &MPI_COMM_WORLD,reqs(j),mp_ierr)1859 if (mp_ierr /= 0) goto 6001860 j=j+11861 call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&1862 &MPI_COMM_WORLD,reqs(j),mp_ierr)1863 if (mp_ierr /= 0) goto 6001864 j=j+11865 call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1866 &MPI_COMM_WORLD,reqs(j),mp_ierr)1867 if (mp_ierr /= 0) goto 6001868 j=j+11869 call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1870 &MPI_COMM_WORLD,reqs(j),mp_ierr)1871 if (mp_ierr /= 0) goto 6001872 j=j+11873 call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1874 &MPI_COMM_WORLD,reqs(j),mp_ierr)1875 if (mp_ierr /= 0) goto 6001876 j=j+11877 call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1878 &MPI_COMM_WORLD,reqs(j),mp_ierr)1879 if (mp_ierr /= 0) goto 6001880 j=j+11881 call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1882 &MPI_COMM_WORLD,reqs(j),mp_ierr)1883 if (mp_ierr /= 0) goto 6001884 j=j+11885 call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1886 &MPI_COMM_WORLD,reqs(j),mp_ierr)1887 if (mp_ierr /= 0) goto 6001888 j=j+11889 call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1890 
&MPI_COMM_WORLD,reqs(j),mp_ierr)1891 if (mp_ierr /= 0) goto 6001892 call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1893 &MPI_COMM_WORLD,reqs(j),mp_ierr)1894 if (mp_ierr /= 0) goto 6001895 j=j+11896 call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1897 &MPI_COMM_WORLD,reqs(j),mp_ierr)1898 if (mp_ierr /= 0) goto 6001899 j=j+11900 call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1901 &MPI_COMM_WORLD,reqs(j),mp_ierr)1902 if (mp_ierr /= 0) goto 6001903 j=j+11904 call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1905 &MPI_COMM_WORLD,reqs(j),mp_ierr)1906 if (mp_ierr /= 0) goto 6001907 j=j+11908 call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1909 &MPI_COMM_WORLD,reqs(j),mp_ierr)1910 if (mp_ierr /= 0) goto 6002186 j=mp_pid*nvar_async 2187 call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2188 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2189 if (mp_ierr /= 0) goto 600 2190 j=j+1 2191 call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2192 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2193 if (mp_ierr /= 0) goto 600 2194 j=j+1 2195 call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2196 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2197 if (mp_ierr /= 0) goto 600 2198 j=j+1 2199 call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2200 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2201 if (mp_ierr /= 0) goto 600 2202 j=j+1 2203 call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2204 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2205 if (mp_ierr /= 0) goto 600 2206 j=j+1 2207 call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2208 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2209 if (mp_ierr /= 0) goto 600 2210 j=j+1 2211 call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,& 2212 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2213 if (mp_ierr /= 0) goto 600 2214 j=j+1 2215 call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,& 2216 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2217 if (mp_ierr /= 0) goto 600 2218 j=j+1 2219 call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2220 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2221 if (mp_ierr /= 0) goto 600 2222 j=j+1 2223 call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2224 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2225 if (mp_ierr /= 0) goto 600 2226 j=j+1 2227 call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,& 2228 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2229 if (mp_ierr /= 0) goto 600 2230 j=j+1 2231 call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2232 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2233 if (mp_ierr /= 0) goto 600 2234 j=j+1 2235 call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,& 2236 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2237 if (mp_ierr /= 0) goto 600 2238 j=j+1 2239 call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2240 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2241 if (mp_ierr /= 0) goto 600 2242 j=j+1 2243 call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2244 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2245 if (mp_ierr /= 0) goto 600 2246 j=j+1 2247 call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2248 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2249 if (mp_ierr /= 0) goto 600 2250 j=j+1 2251 call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2252 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2253 if (mp_ierr /= 0) goto 600 2254 j=j+1 2255 call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2256 &MPI_COMM_WORLD,reqs(j),mp_ierr) 
2257 if (mp_ierr /= 0) goto 600 2258 j=j+1 2259 call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2260 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2261 if (mp_ierr /= 0) goto 600 2262 j=j+1 2263 call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2264 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2265 if (mp_ierr /= 0) goto 600 2266 call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2267 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2268 if (mp_ierr /= 0) goto 600 2269 j=j+1 2270 call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2271 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2272 if (mp_ierr /= 0) goto 600 2273 j=j+1 2274 call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2275 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2276 if (mp_ierr /= 0) goto 600 2277 j=j+1 2278 call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2279 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2280 if (mp_ierr /= 0) goto 600 2281 j=j+1 2282 call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2283 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2284 if (mp_ierr /= 0) goto 600 1911 2285 1912 2286 ! Post request for clwc. These data are possibly not sent, request must then be cancelled 1913 2287 ! For now assume that data at all steps either have or do not have water 1914 if (readclouds) then1915 j=j+11916 call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&1917 &MPI_COMM_WORLD,reqs(j),mp_ierr)1918 if (mp_ierr /= 0) goto 6001919 end if1920 end do2288 if (readclouds) then 2289 j=j+1 2290 call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,& 2291 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2292 if (mp_ierr /= 0) goto 600 2293 end if 2294 end do 1921 2295 1922 2296 if (mp_measure_time) call mpif_mtime('commtime',1) … … 2349 2723 write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',& 2350 2724 & mp_conccalc_time_total 2351 2352 2353 2354 2725 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',& 2726 ! & mp_vt_wtime_total 2727 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',& 2728 ! & mp_vt_time_total 2355 2729 ! NB: the 'flush' function is possibly a gfortran-specific extension 2356 2730 call flush() … … 2388 2762 2389 2763 2390 2391 2392 2393 2764 end subroutine mpif_finalize 2765 2766 2767 subroutine get_lun(my_lun) 2394 2768 !*********************************************************************** 2395 2769 ! get_lun: … … 2397 2771 !*********************************************************************** 2398 2772 2399 2400 2401 2402 2403 2404 2405 !*********************************************************************** 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2773 implicit none 2774 2775 integer, intent(inout) :: my_lun 2776 integer, save :: free_lun=100 2777 logical :: exists, iopen 2778 2779 !*********************************************************************** 2780 2781 loop1: do 2782 inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen) 2783 if (exists .and. .not.iopen) exit loop1 2784 free_lun = free_lun+1 2785 end do loop1 2786 my_lun = free_lun 2787 2788 end subroutine get_lun 2789 2790 2791 subroutine write_data_dbg(array_in, array_name, tstep, ident) 2418 2792 !*********************************************************************** 2419 2793 ! 
Write one-dimensional arrays to file (for debugging purposes) 2420 2794 !*********************************************************************** 2421 implicit none 2422 2423 real, intent(in), dimension(:) :: array_in 2424 integer, intent(in) :: tstep 2425 integer :: lios 2426 character(LEN=*), intent(in) :: ident, array_name 2427 2428 character(LEN=8) :: c_ts 2429 character(LEN=40) :: fn_1, fn_2 2430 2431 !*********************************************************************** 2432 2433 write(c_ts, FMT='(I8.8,BZ)') tstep 2434 fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident) 2435 write(c_ts, FMT='(I2.2,BZ)') mp_np 2436 fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat' 2437 2438 call get_lun(dat_lun) 2439 open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', & 2440 FORM='UNFORMATTED', STATUS='REPLACE') 2441 write(UNIT=dat_lun, IOSTAT=lios) array_in 2442 close(UNIT=dat_lun) 2443 2444 end subroutine write_data_dbg 2795 implicit none 2796 2797 real, intent(in), dimension(:) :: array_in 2798 integer, intent(in) :: tstep 2799 integer :: lios 2800 character(LEN=*), intent(in) :: ident, array_name 2801 2802 character(LEN=8) :: c_ts 2803 character(LEN=40) :: fn_1, fn_2 2804 2805 !*********************************************************************** 2806 2807 write(c_ts, FMT='(I8.8,BZ)') tstep 2808 fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident) 2809 write(c_ts, FMT='(I2.2,BZ)') mp_np 2810 fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat' 2811 2812 call get_lun(dat_lun) 2813 open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', & 2814 FORM='UNFORMATTED', STATUS='REPLACE') 2815 write(UNIT=dat_lun, IOSTAT=lios) array_in 2816 close(UNIT=dat_lun) 2817 2818 end subroutine write_data_dbg 2819 2820 2821 subroutine set_fields_synthetic() 2822 !******************************************************************************* 2823 ! DESCRIPTION 2824 ! Set all meteorological fields to synthetic (usually constant/homogeneous) 2825 ! values. 2826 ! Used for validation and error-checking 2827 ! 2828 ! NOTE 2829 ! This version uses asynchronious communications. 2830 ! 2831 ! VARIABLES 2832 ! 2833 ! 2834 ! 2835 !******************************************************************************* 2836 use com_mod 2837 2838 implicit none 2839 2840 integer :: li=1, ui=2 ! wfmem indices (i.e, operate on entire field) 2841 2842 if (.not.lmp_sync) ui=3 2843 2844 2845 ! Variables transferred at initialization only 2846 !********************************************* 2847 ! readclouds=readclouds_ 2848 oro(:,:)=0.0 2849 excessoro(:,:)=0.0 2850 lsm(:,:)=0.0 2851 xlanduse(:,:,:)=0.0 2852 ! wftime 2853 ! numbwf 2854 ! nmixz 2855 ! height 2856 2857 ! Time-varying fields: 2858 uu(:,:,:,li:ui) = 10.0 2859 vv(:,:,:,li:ui) = 0.0 2860 uupol(:,:,:,li:ui) = 10.0 2861 vvpol(:,:,:,li:ui)=0.0 2862 ww(:,:,:,li:ui)=0. 2863 tt(:,:,:,li:ui)=300. 2864 rho(:,:,:,li:ui)=1.3 2865 drhodz(:,:,:,li:ui)=.0 2866 tth(:,:,:,li:ui)=0.0 2867 qvh(:,:,:,li:ui)=1.0 2868 qv(:,:,:,li:ui)=1.0 2869 2870 pv(:,:,:,li:ui)=1.0 2871 clouds(:,:,:,li:ui)=0 2872 2873 clwc(:,:,:,li:ui)=0.0 2874 ciwc(:,:,:,li:ui)=0.0 2875 2876 ! 2D fields 2877 2878 cloudsh(:,:,li:ui)=0 2879 vdep(:,:,:,li:ui)=0.0 2880 ps(:,:,:,li:ui)=1.0e5 2881 sd(:,:,:,li:ui)=0.0 2882 tcc(:,:,:,li:ui)=0.0 2883 tt2(:,:,:,li:ui)=300. 2884 td2(:,:,:,li:ui)=250. 2885 lsprec(:,:,:,li:ui)=0.0 2886 convprec(:,:,:,li:ui)=0.0 2887 ustar(:,:,:,li:ui)=1.0 2888 wstar(:,:,:,li:ui)=1.0 2889 hmix(:,:,:,li:ui)=10000. 
2890 tropopause(:,:,:,li:ui)=10000. 2891 oli(:,:,:,li:ui)=0.01 2892 2893 end subroutine set_fields_synthetic 2445 2894 2446 2895 end module mpi_mod -
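The central addition to mpi_mod.f90 is the load-balancing pair mpif_calculate_part_redist / mpif_redist_part: all processes exchange their local numpart, the counts are sorted, the most and least loaded processes are paired, and half the difference is transferred whenever the relative imbalance exceeds mp_redist_fract and the sender holds more than mp_min_redist particles. The toy program below is a minimal serial sketch of that decision logic only (it is not part of the changeset, performs no MPI communication, and uses invented particle counts):

```fortran
! Serial sketch of the pairing/threshold logic used by mpif_calculate_part_redist.
program redist_pairing_sketch
  implicit none
  integer, parameter :: np = 4                    ! number of particle processes (invented)
  real,    parameter :: mp_redist_fract = 0.2     ! same defaults as in the changeset
  integer, parameter :: mp_min_redist   = 100000
  integer :: numparticles_mpi(0:np-1), idx_arr(0:np-1)
  integer :: i, j, m, tmp, num_trans

  numparticles_mpi = (/ 500000, 120000, 90000, 310000 /)   ! invented per-process counts

  ! Sort process IDs by ascending particle count (simple selection sort)
  do i = 0, np-1
    idx_arr(i) = i
  end do
  do i = 0, np-2
    do j = i+1, np-1
      if (numparticles_mpi(idx_arr(j)) < numparticles_mpi(idx_arr(i))) then
        tmp = idx_arr(i)
        idx_arr(i) = idx_arr(j)
        idx_arr(j) = tmp
      end if
    end do
  end do

  ! Pair the most loaded with the least loaded, second most with second least, ...
  m = np - 1
  do i = 0, np/2 - 1
    num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
    if (numparticles_mpi(idx_arr(m)) > mp_min_redist .and. &
        real(num_trans)/real(numparticles_mpi(idx_arr(m))) > mp_redist_fract) then
      write(*,'(A,I0,A,I0,A,I0)') 'transfer ', num_trans/2, &
           ' particles from process ', idx_arr(m), ' to process ', idx_arr(i)
    end if
    m = m - 1
  end do
end program redist_pairing_sketch
```

With the counts above, the sketch proposes moving 205000 particles from process 0 to process 2 and 95000 from process 3 to process 1, which mirrors how the real routine only triggers mpif_redist_part for the two processes in each pair.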
src/releaseparticles_mpi.f90
r7e52e2e r861805a 21 21 22 22 subroutine releaseparticles(itime) 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 ! addpartextra particle assigned to MPI process if *49 50 23 ! o 24 !***************************************************************************** 25 ! * 26 ! This subroutine releases particles from the release locations. * 27 ! * 28 ! It searches for a "vacant" storage space and assigns all particle * 29 ! information to that space. A space is vacant either when no particle * 30 ! is yet assigned to it, or when it's particle is expired and, thus, * 31 ! the storage space is made available to a new particle. * 32 ! * 33 ! Author: A. Stohl * 34 ! * 35 ! 29 June 2002 * 36 ! * 37 ! CHANGES * 38 ! 12/2014 eso: MPI version * 39 ! * 40 !***************************************************************************** 41 ! * 42 ! Variables: * 43 ! itime [s] current time * 44 ! ireleasestart, ireleaseend start and end times of all releases * 45 ! npart(maxpoint) number of particles to be released in total * 46 ! numrel number of particles to be released during this time * 47 ! step * 48 ! addone extra particle assigned to MPI process if * 49 ! mod(npart(i),mp_partgroup_np) .ne. 0) * 50 !***************************************************************************** 51 51 52 52 use point_mod … … 59 59 implicit none 60 60 61 61 !real xaux,yaux,zaux,ran1,rfraction,xmasssave(maxpoint) 62 62 real :: xaux,yaux,zaux,rfraction 63 63 real :: topo,rhoaux(2),r,t,rhoout,ddx,ddy,rddx,rddy,p1,p2,p3,p4 … … 73 73 74 74 integer :: idummy = -7 75 76 75 !save idummy,xmasssave 76 !data idummy/-7/,xmasssave/maxpoint*0./ 77 77 78 78 logical :: first_call=.true. 79 79 80 81 80 ! Use different seed for each process. 81 !**************************************************************************** 82 82 if (first_call) then 83 83 idummy=idummy+mp_seed … … 87 87 mind2=memind(2) 88 88 89 90 89 ! Determine the actual date and time in Greenwich (i.e., UTC + correction for daylight savings time) 90 !***************************************************************************** 91 91 92 92 julmonday=juldate(19000101,0) ! this is a Monday … … 97 97 98 98 99 100 99 ! For every release point, check whether we are in the release time interval 100 !*************************************************************************** 101 101 102 102 minpart=1 … … 105 105 (itime.le.ireleaseend(i))) then 106 106 107 108 107 ! Determine the local day and time 108 !********************************* 109 109 110 110 xlonav=xlon0+(xpoint2(i)+xpoint1(i))/2.*dx ! longitude needed to determine local time … … 124 124 endif 125 125 126 127 128 129 126 ! Calculate a species- and time-dependent correction factor, distinguishing between 127 ! area (those with release starting at surface) and point (release starting above surface) sources 128 ! Also, calculate an average time correction factor (species independent) 129 !***************************************************************************** 130 130 average_timecorrect=0. 131 131 do k=1,nspec … … 139 139 average_timecorrect=average_timecorrect/real(nspec) 140 140 141 142 143 141 ! Determine number of particles to be released this time; at start and at end of release, 142 ! only half the particles are released 143 !***************************************************************************** 144 144 145 145 if (ireleasestart(i).ne.ireleaseend(i)) then … … 149 149 (itime.eq.ireleaseend(i))) rfraction=rfraction/2. 150 150 151 152 153 154 151 ! 
Take the species-average time correction factor in order to scale the 152 ! number of particles released this time 153 ! Also scale by number of MPI processes 154 !********************************************************************** 155 155 156 156 rfraction=rfraction*average_timecorrect … … 158 158 rfraction=rfraction+xmasssave(i) ! number to be released at this time 159 159 160 160 ! number to be released for one process 161 161 if (mp_partid.lt.mod(int(rfraction),mp_partgroup_np)) then 162 162 addone=1 … … 180 180 numrel=npart(i)/mp_partgroup_np+addone 181 181 endif 182 182 183 183 xaux=xpoint2(i)-xpoint1(i) 184 184 yaux=ypoint2(i)-ypoint1(i) … … 187 187 do ipart=minpart,maxpart_mpi ! search for free storage space 188 188 189 190 189 ! If a free storage space is found, attribute everything to this array element 190 !***************************************************************************** 191 191 192 192 if (itra1(ipart).ne.itime) then 193 193 194 195 196 197 198 194 ! Particle coordinates are determined by using a random position within the release volume 195 !***************************************************************************** 196 197 ! Determine horizontal particle position 198 !*************************************** 199 199 200 200 xtra1(ipart)=xpoint1(i)+ran1(idummy)*xaux … … 207 207 ytra1(ipart)=ypoint1(i)+ran1(idummy)*yaux 208 208 209 210 211 212 213 214 209 ! Assign mass to particle: Total mass divided by total number of particles. 210 ! Time variation has partly been taken into account already by a species-average 211 ! correction factor, by which the number of particles released this time has been 212 ! scaled. Adjust the mass per particle by the species-dependent time correction factor 213 ! divided by the species-average one 214 !***************************************************************************** 215 215 do k=1,nspec 216 217 218 219 220 216 xmass1(ipart,k)=xmass(i,k)/real(npart(i)) & 217 *timecorrect(k)/average_timecorrect 218 ! write (*,*) 'xmass1: ',xmass1(ipart,k),ipart,k 219 ! Assign certain properties to particle 220 !************************************** 221 221 end do 222 222 nclass(ipart)=min(int(ran1(idummy)*real(nclassunc))+1, & … … 234 234 235 235 236 237 236 ! Determine vertical particle position 237 !************************************* 238 238 239 239 ztra1(ipart)=zpoint1(i)+ran1(idummy)*zaux 240 240 241 242 243 244 245 241 ! Interpolation of topography and density 242 !**************************************** 243 244 ! Determine the nest we are in 245 !***************************** 246 246 247 247 ngrid=0 … … 257 257 43 continue 258 258 259 260 259 ! Determine (nested) grid coordinates and auxiliary parameters used for interpolation 260 !***************************************************************************** 261 261 262 262 if (ngrid.gt.0) then … … 294 294 endif 295 295 296 297 296 ! If starting height is in pressure coordinates, retrieve pressure profile and convert zpart1 to meters 297 !***************************************************************************** 298 298 if (kindz(i).eq.3) then 299 299 presspart=ztra1(ipart) … … 337 337 endif 338 338 339 340 341 339 ! If release positions are given in meters above sea level, subtract the 340 ! topography from the starting height 341 !*********************************************************************** 342 342 343 343 if (kindz(i).eq.2) ztra1(ipart)=ztra1(ipart)-topo … … 348 348 349 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 350 ! 
For special simulations, multiply particle concentration air density; 351 ! Simply take the 2nd field in memory to do this (accurate enough) 352 !*********************************************************************** 353 !AF IND_SOURCE switches between different units for concentrations at the source 354 !Af NOTE that in backward simulations the release of particles takes place at the 355 !Af receptor and the sampling at the source. 356 !Af 1="mass" 357 !Af 2="mass mixing ratio" 358 !Af IND_RECEPTOR switches between different units for concentrations at the receptor 359 !Af 1="mass" 360 !Af 2="mass mixing ratio" 361 362 !Af switches for the releasefile: 363 !Af IND_REL = 1 : xmass * rho 364 !Af IND_REL = 0 : xmass * 1 365 366 !Af ind_rel is defined in readcommand.f 367 367 368 368 if (ind_rel .eq. 1) then 369 369 370 371 370 ! Interpolate the air density 371 !**************************** 372 372 373 373 do ii=2,nz … … 403 403 404 404 405 406 405 ! Multiply "mass" (i.e., mass mixing ratio in forward runs) with density 406 !******************************************************************** 407 407 408 408 do k=1,nspec … … 416 416 endif 417 417 end do 418 if (ipart.gt.maxpart) goto 996 418 ! ESO TODO: Here we could use dynamic allocation and increase array sizes 419 if (ipart.gt.maxpart_mpi) goto 996 419 420 420 421 34 minpart=ipart+1 421 422 end do 422 423 endif 423 424 end do 424 425 … … 426 427 return 427 428 428 996 429 996 continue 429 430 write(*,*) '#####################################################' 430 431 write(*,*) '#### FLEXPART MODEL SUBROUTINE RELEASEPARTICLES: ####' -
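The updated header comments in releaseparticles_mpi.f90 describe how each release is split across the MPI processes: every process releases npart(i)/mp_partgroup_np particles, and processes with low enough mp_partid get one extra ("addone") so the per-process counts sum to npart(i). The following minimal serial sketch, with invented values (npart_i and mp_partgroup_np stand in for the variables in the diff), illustrates that split; it is an illustration, not code from the changeset:

```fortran
! Sketch of the per-process split of one release point's particles.
program release_split_sketch
  implicit none
  integer, parameter :: mp_partgroup_np = 3   ! number of particle processes (invented)
  integer :: npart_i, mp_partid, addone, numrel, total

  npart_i = 10        ! total particles for this release point (invented)
  total   = 0
  do mp_partid = 0, mp_partgroup_np-1
    if (mp_partid < mod(npart_i, mp_partgroup_np)) then
      addone = 1      ! first mod(npart_i, np) processes release one extra particle
    else
      addone = 0
    end if
    numrel = npart_i/mp_partgroup_np + addone
    write(*,'(A,I0,A,I0,A)') 'process ', mp_partid, ' releases ', numrel, ' particles'
    total = total + numrel
  end do
  write(*,'(A,I0)') 'sum over processes = ', total   ! equals npart_i
end program release_split_sketch
```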
src/timemanager_mpi.f90
r0f7835d r861805a 104 104 105 105 logical :: reqv_state=.false. ! .true. if waiting for a MPI_Irecv to complete 106 integer :: j,ks,kp,l,n,itime=0,nstop,nstop1,memstat=0 !,mind106 integer :: j,ks,kp,l,n,itime=0,nstop,nstop1,memstat=0 107 107 ! integer :: ksp 108 108 integer :: ip … … 155 155 156 156 do itime=0,ideltas,lsynctime 157 157 158 158 159 ! Computation of wet deposition, OH reaction and mass transfer … … 166 167 !******************************************************************** 167 168 168 if (mp_d ev_mode) write(*,*) 'myid, itime: ',mp_pid,itime169 if (mp_dbg_mode) write(*,*) 'myid, itime: ',mp_pid,itime 169 170 170 171 if (WETDEP .and. itime .ne. 0 .and. numpart .gt. 0) then … … 274 275 275 276 if (mp_measure_time.and..not.(lmpreader.and.lmp_use_reader)) call mpif_mtime('getfields',1) 277 278 ! For validation and tests: call the function below to set all fields to simple 279 ! homogeneous values 280 ! if (mp_dev_mode) call set_fields_synthetic 281 282 !******************************************************************************* 276 283 277 284 if (lmpreader.and.nstop1.gt.1) stop 'NO METEO FIELDS AVAILABLE' … … 324 331 call releaseparticles(itime) 325 332 endif 333 334 335 ! Check if particles should be redistributed among processes 336 !*********************************************************** 337 call mpif_calculate_part_redist(itime) 326 338 327 339 … … 542 554 ! Decide whether to write an estimate of the number of particles released, 543 555 ! or exact number (require MPI reduce operation) 544 if (mp_d ev_mode) then556 if (mp_dbg_mode) then 545 557 numpart_tot_mpi = numpart 546 558 else … … 549 561 550 562 if (mp_exact_numpart.and..not.(lmpreader.and.lmp_use_reader).and.& 551 &.not.mp_d ev_mode) then563 &.not.mp_dbg_mode) then 552 564 call MPI_Reduce(numpart, numpart_tot_mpi, 1, MPI_INTEGER, MPI_SUM, id_root, & 553 565 & mp_comm_used, mp_ierr) … … 555 567 556 568 !CGZ-lifetime: output species lifetime 557 if (lroot.or.mp_d ev_mode) then569 if (lroot.or.mp_dbg_mode) then 558 570 ! write(*,*) 'Overview species lifetime in days', & 559 571 ! real((species_lifetime(:,1)/species_lifetime(:,2))/real(3600.0*24.0)) … … 565 577 ! end if 566 578 end if 579 580 ! Write particles for all processes 581 if (mp_dev_mode) write(*,*) "PID, itime, numpart", mp_pid,itime,numpart 582 567 583 568 584 45 format(i13,' SECONDS SIMULATED: ',i13, ' PARTICLES: Uncertainty: ',3f7.3)
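In the timemanager changes, the exact global particle count written to the log is obtained with an MPI_Reduce over each process's local numpart (skipped when mp_dbg_mode is set). The standalone program below is a hedged sketch of that reduction in isolation; it assumes an MPI installation and uses MPI_COMM_WORLD in place of mp_comm_used, with an invented local count:

```fortran
! Sketch of the numpart reduction used for the "exact numpart" log line.
program numpart_reduce_sketch
  use mpi
  implicit none
  integer, parameter :: id_root = 0
  integer :: mp_ierr, mp_pid, numpart, numpart_tot_mpi

  call MPI_Init(mp_ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, mp_pid, mp_ierr)

  numpart = 1000*(mp_pid+1)      ! stand-in for the local particle count

  call MPI_Reduce(numpart, numpart_tot_mpi, 1, MPI_INTEGER, MPI_SUM, id_root, &
       MPI_COMM_WORLD, mp_ierr)

  if (mp_pid == id_root) write(*,'(A,I0)') 'total particles: ', numpart_tot_mpi

  call MPI_Finalize(mp_ierr)
end program numpart_reduce_sketch
```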