Changes in src/mpi_mod.f90 [79abee9:4c64400] in flexpart.git


  • src/mpi_mod.f90

    r79abee9 r4c64400  
    9090! MPI tags/requests for send/receive operation
    9191  integer :: tm1
    92   integer, parameter :: nvar_async=26 !27 !29 :DBG:
     92  integer, parameter :: nvar_async=26
    9393!integer, dimension(:), allocatable :: tags
    9494  integer, dimension(:), allocatable :: reqs
     95
     96! Status array used for certain MPI operations (MPI_RECV)
     97  integer, dimension(MPI_STATUS_SIZE) :: mp_status
    9598
    9699
     
    149152  integer, private :: dat_lun
    150153
     154! Temporary arrays for particles (allocated and deallocated as needed)
     155  integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, &
     156       & itramem_tmp, itrasplit_tmp
     157  real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp
     158  real, allocatable, dimension(:) :: ztra1_tmp
     159  real, allocatable, dimension(:,:) :: xmass1_tmp
     160
     161! mp_redist_fract        Exchange particles between processes if relative difference
     162!                        is larger. A good value is between 0.0 and 0.5
     163! mp_maxpart_factor      Allocate more memory per process than strictly needed
     164!                        (safety factor). Recommended value between 1.5 and 2.5
     165! mp_min_redist          Do not redistribute particles if below this limit
     166  real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5
     167  integer,parameter :: mp_min_redist=100000
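The three new parameters above feed a single test in mpif_calculate_part_redist further down: particles are moved only when the most heavily loaded process holds more than mp_min_redist of them and the relative imbalance exceeds mp_redist_fract. A minimal stand-alone sketch of that criterion (the function name and its arguments are illustrative, not part of the module):

    ! Sketch: should the most and least loaded processes exchange particles?
    ! Mirrors the test used in mpif_calculate_part_redist (illustrative only).
    logical function need_redistribution(numpart_max, numpart_min)
      implicit none
      integer, intent(in) :: numpart_max, numpart_min ! particle counts of the two processes
      integer :: num_trans
      real, parameter    :: mp_redist_fract=0.2
      integer, parameter :: mp_min_redist=100000

      num_trans = numpart_max - numpart_min
      need_redistribution = (numpart_max.gt.mp_min_redist) .and. &
           & (real(num_trans)/real(numpart_max).gt.mp_redist_fract)
    end function need_redistribution
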
     168
     169
    151170contains
    152171
     
    195214    if (dep_prec==dp) then
    196215      mp_cp = MPI_REAL8
    197       ! TODO: write info message for serial version as well
     216! TODO: write info message for serial version as well
    198217      if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields'
    199218    else if (dep_prec==sp) then
     
    232251        write(*,FMT='(80("#"))')
    233252      end if
    234       lmp_sync=.true. ! :DBG: eso fix this...
     253      lmp_sync=.true.
    235254    end if
    236255
     
    311330
    312331! Set maxpart per process
    313     if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1
    314     maxpart_mpi=int(maxpart/mp_partgroup_np)+addmaxpart
     332! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
     333    maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
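As an illustration of the new formula (the numbers are arbitrary): with maxpart = 10 000 000 and mp_partgroup_np = 4, each process now allocates int(1.5*10000000/4) = 3 750 000 particle slots instead of the former even split of 2 500 000, leaving headroom for an unbalanced particle distribution.
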
    315334
    316335! Do not allocate particle data arrays for readwind process
     
    321340! Set random seed for each non-root process
    322341    if (mp_pid.gt.0) then
    323 !    if (mp_pid.ge.0) then
    324 !      call system_clock(s)
    325342      s = 244
    326343      mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
    327344    end if
    328     if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed
    329345    if (mp_dbg_mode) then
    330 ! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20
    331346      mp_seed=0
    332347      if (lroot) write(*,*) 'MPI: setting seed=0'
     
    454469
    455470
     471  subroutine mpif_send_part_properties(num_part)
     472!***********************************************************************
     473! Distribute particle properties from root to all processes.
     474
     475! Used by init_domainfill_mpi
     476!
     477! Variables:
     478!
     479! num_part        input, number of particles per process (rounded up)
     480!
     481!***********************************************************************
     482    use com_mod
     483
     484    implicit none
     485
     486    integer,intent(in) :: num_part
     487    integer :: i,jj, addone
     488
     489! Exit if too many particles
     490    if (num_part.gt.maxpart_mpi) then
     491      write(*,*) '#####################################################'
     492      write(*,*) '#### ERROR - TOTAL NUMBER OF PARTICLES REQUIRED  ####'
     493      write(*,*) '#### EXCEEDS THE MAXIMUM ALLOWED NUMBER. REDUCE  ####'
     494      write(*,*) '#### EITHER NUMBER OF PARTICLES PER RELEASE POINT####'
     495      write(*,*) '#### OR INCREASE MAXPART.                        ####'
     496      write(*,*) '#####################################################'
     497!      call MPI_FINALIZE(mp_ierr)
     498      stop
     499    end if
     500
     501
     502! Time for MPI communications
     503!****************************
     504    if (mp_measure_time) call mpif_mtime('commtime',0)
     505
     506! Distribute variables, send from pid 0 to other processes (including itself)
     507!****************************************************************************
     508
     509    call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,&
     510         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     511    if (mp_ierr /= 0) goto 600
     512    call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,&
     513         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     514    if (mp_ierr /= 0) goto 600
     515    call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,&
     516         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     517    if (mp_ierr /= 0) goto 600
     518    call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,&
     519         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     520    if (mp_ierr /= 0) goto 600
     521    call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,&
     522         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     523    if (mp_ierr /= 0) goto 600
     524    call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,&
     525         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     526    if (mp_ierr /= 0) goto 600
     527    call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,&
     528         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
     529    if (mp_ierr /= 0) goto 600
     530    call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,&
     531         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
     532    if (mp_ierr /= 0) goto 600
     533    call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,&
     534         &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
     535    if (mp_ierr /= 0) goto 600
     536    do i=1,nspec
     537      call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),&
     538           &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
     539      if (mp_ierr /= 0) goto 600
     540    end do
     541
     542    if (mp_measure_time) call mpif_mtime('commtime',1)
     543
     544    goto 601
     545
     546600 write(*,*) "mpi_mod> mp_ierr /= 0", mp_ierr
     547    stop
     548
     549! After the transfer of particles it is possible that the value of
     550! "numpart" is larger than the actual number used, so we reduce it if
     551! there are invalid particles at the end of the arrays
     552601 do i=num_part, 1, -1
     553      if (itra1(i).eq.-999999999) then
     554        numpart=numpart-1
     555      else
     556        exit
     557      end if
     558    end do
     559
     560
     561!601 end subroutine mpif_send_part_properties
     562  end subroutine mpif_send_part_properties
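Every MPI_SCATTER above follows the same pattern: the root's *_tmp array holds mp_partgroup_np contiguous blocks of num_part elements, and each rank, root included, receives one block of num_part elements into its own particle array. A minimal self-contained toy program illustrating that pattern (not part of mpi_mod; all names are illustrative):

    program scatter_pattern
    ! Root holds nproc blocks of num_part values; every rank receives one block.
      use mpi
      implicit none
      integer, parameter :: num_part = 4
      integer :: ierr, rank, nproc, i
      integer, allocatable :: sendbuf(:), recvbuf(:)

      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)

      allocate(recvbuf(num_part))
      if (rank.eq.0) then
        allocate(sendbuf(num_part*nproc))
        sendbuf = [(i, i=1, num_part*nproc)]   ! stand-in for nclass_tmp etc.
      else
        allocate(sendbuf(1))                   ! dummy; ignored on non-root ranks
      end if

      call MPI_SCATTER(sendbuf, num_part, MPI_INTEGER, recvbuf, &
           & num_part, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)

      write(*,*) 'rank', rank, 'received', recvbuf
      call MPI_FINALIZE(ierr)
    end program scatter_pattern
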
     563
     564
     565  subroutine mpif_calculate_part_redist(itime)
     566!***********************************************************************
     567! Calculate number of particles to redistribute between processes. This routine
     568! can be called at regular time intervals to keep the number of
     569! particles on each process balanced.
     570!
     571! First, all processes report their local "numpart" to each other, which is
     572! stored in the array "numparticles_mpi(np)". This array is sorted from
     573! low to high values, along with a corresponding process ID array "idx_arr(np)".
     574! If the relative difference between the highest and lowest "numparticles_mpi"
     575! value is above a threshold, particles are transferred from process idx_arr(np-1)
     576! to process idx_arr(0). This is repeated for processes idx_arr(np-1-i) and idx_arr(i)
     577! for all valid i.
     578! Note: If np is an odd number, the process with the median
     579! number of particles is left unchanged
     580!
     581! VARIABLES
     582! itime        input, current time
     583!***********************************************************************
     584    use com_mod
     585
     586    implicit none
     587   
     588    integer, intent(in) :: itime
     589    real :: pmin,z
     590    integer :: i,jj,nn, num_part=1,m,imin, num_trans
     591    logical :: first_iter
     592    integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr
     593    real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this
     594
     595! Exit if running with only 1 process
     596!************************************************************************
     597    if (mp_np.eq.1) return
     598
     599! All processes exchange information on number of particles
     600!****************************************************************************
     601    allocate(numparticles_mpi(0:mp_partgroup_np-1), &
     602         &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
     603
     604    call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, &
     605         & 1, MPI_INTEGER, mp_comm_used, mp_ierr)
     606
     607
     608! Sort from lowest to highest
     609! Initial guess: correct order
     610    sorted(:) = numparticles_mpi(:)
     611    do i=0, mp_partgroup_np-1
     612      idx_arr(i) = i
     613    end do
     614
     615! For each successive element in index array, see if a lower value exists
     616    do i=0, mp_partgroup_np-2
     617      pmin=sorted(i)
     618      imin=idx_arr(i)
     619      m=i+1
     620      do jj=m, mp_partgroup_np-1
     621        if (pmin.le.sorted(jj)) cycle
     622        z=pmin
     623        pmin=sorted(jj)
     624        sorted(jj)=z
     625
     626        nn=imin
     627        imin=idx_arr(jj)
     628        idx_arr(jj)=nn
     629      end do
     630      sorted(i)=pmin
     631      idx_arr(i)=imin
     632    end do
     633
     634! For each pair of processes, transfer particles if the difference is above a
     635! limit, and numpart at the sending process is large enough
     636
     637    m=mp_partgroup_np-1 ! index for last sorted process (most particles)
     638    do i=0,mp_partgroup_np/2-1
     639      num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
     640      if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
     641        if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&
     642             & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then
     643! DBG
     644          ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi', &
     645          !      &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi
     646! DBG
     647          call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
     648        end if
     649      end if
     650      m=m-1
     651    end do
     652
     653    deallocate(numparticles_mpi, idx_arr, sorted)
     654
     655  end subroutine mpif_calculate_part_redist
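As an illustrative example of the pairing above (the numbers are arbitrary): with four processes holding 900 000, 100 000, 600 000 and 400 000 particles, the sorted pairing moves (900 000 - 100 000)/2 = 400 000 particles from the fullest to the emptiest process and (600 000 - 400 000)/2 = 100 000 from the second fullest to the second emptiest, leaving all four with 500 000. Both transfers pass the thresholds, since 900 000 and 600 000 exceed mp_min_redist and the relative differences (0.89 and 0.33) exceed mp_redist_fract.
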
     656
     657
     658  subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans)
     659!***********************************************************************
     660! Transfer particle properties between two arbitrary processes.
     661!
     662! VARIABLES
     663!
     664! itime           input, current time
     665! src_proc        input, ID of source process
     666! dest_proc       input, ID of destination process
     667! num_trans       input, number of particles to transfer
     668!
     669!************************************************************************
     670    use com_mod !TODO:  ,only: nclass etc
     671
     672    implicit none
     673
     674    integer, intent(in) :: itime, src_proc, dest_proc, num_trans
     675    integer :: ll, ul ! lower and upper indices in arrays
     676    integer :: arr_sz ! size of temporary arrays
     677    integer :: mtag   ! MPI message tag
     678    integer :: i, j, minpart, ipart, maxnumpart
     679 
     680! Check for invalid input arguments
     681!**********************************
     682 if (src_proc.eq.dest_proc) then
     683   write(*,*) '<mpi_mod::mpif_redist_part>: Error: &
     684        &src_proc.eq.dest_proc'
     685   stop
     686 end if
     687
     688! Measure time for MPI communications
     689!************************************
     690    if (mp_measure_time) call mpif_mtime('commtime',0)
     691
     692! Send particles
     693!***************
     694    if (mp_partid.eq.src_proc) then
     695      mtag = 2000
     696
     697! Array indices for the transferred particles
     698      ll=numpart-num_trans+1
     699      ul=numpart
     700
     701      if (mp_dev_mode) then
     702        write(*,FMT='(72("#"))')
     703        write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc
     704        write(*,*) "numpart before: ", numpart
     705      end if
     706
     707      call MPI_SEND(nclass(ll:ul),num_trans,&
     708           & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr)
     709
     710      call MPI_SEND(npoint(ll:ul),num_trans,&
     711           & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr)
     712
     713      call MPI_SEND(itra1(ll:ul),num_trans, &
     714           & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr)
     715
     716      call MPI_SEND(idt(ll:ul),num_trans, &
     717           & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr)
     718
     719      call MPI_SEND(itramem(ll:ul),num_trans, &
     720           & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr)
     721
     722      call MPI_SEND(itrasplit(ll:ul),num_trans,&
     723           & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr)
     724
     725      call MPI_SEND(xtra1(ll:ul),num_trans, &
     726           & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr)
     727
     728      call MPI_SEND(ytra1(ll:ul),num_trans,&
     729           & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr)
     730
     731      call MPI_SEND(ztra1(ll:ul),num_trans,&
     732           & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr)
     733
     734      do j=1,nspec
     735        call MPI_SEND(xmass1(ll:ul,j),num_trans,&
     736             & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr)
     737      end do
     738
     739! Terminate transferred particles, update numpart
     740      itra1(ll:ul) = -999999999
     741
     742      numpart = numpart-num_trans
     743
     744      if (mp_dev_mode) then
     745        write(*,*) "numpart after: ", numpart
     746        write(*,FMT='(72("#"))')
     747      end if
     748
     749    else if (mp_partid.eq.dest_proc) then
     750
     751! Receive particles
     752!******************
     753      mtag = 2000
     754      ! Array indices for the transferred particles
     755      ll=numpart+1
     756      ul=numpart+num_trans
     757
     758      if (mp_dev_mode) then
     759        write(*,FMT='(72("#"))')
     760        write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc
     761        write(*,*) "numpart before: ", numpart
     762      end if
     763
     764! We could receive the data directly at nclass(ll:ul) etc., but this leaves
     765! vacant spaces at lower indices. Using temporary arrays instead.
     766      arr_sz = ul-ll+1
     767      allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),&
     768           & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),&
     769           & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),&
     770           & xmass1_tmp(arr_sz, maxspec))
     771     
     772      call MPI_RECV(nclass_tmp,num_trans,&
     773           & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr)
     774
     775      call MPI_RECV(npoint_tmp,num_trans,&
     776           & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr)
     777
     778      call MPI_RECV(itra1_tmp,num_trans, &
     779           & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr)
     780
     781      call MPI_RECV(idt_tmp,num_trans, &
     782           & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr)
     783
     784      call MPI_RECV(itramem_tmp,num_trans, &
     785           & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr)
     786
     787      call MPI_RECV(itrasplit_tmp,num_trans,&
     788           & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr)
     789
     790      call MPI_RECV(xtra1_tmp,num_trans, &
     791           & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr)
     792
     793      call MPI_RECV(ytra1_tmp,num_trans,&
     794           & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr)
     795
     796      call MPI_RECV(ztra1_tmp,num_trans,&
     797           & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr)
     798
     799      do j=1,nspec
     800        call MPI_RECV(xmass1_tmp(:,j),num_trans,&
     801             & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr)
     802      end do
     803
     804! This is the maximum value numpart can possibly have after the transfer
     805      maxnumpart=numpart+num_trans
     806
     807! Search for vacant space and copy from temporary storage
     808!********************************************************
     809      minpart=1
     810      do i=1, num_trans
     811! Take into account that we may have transferred invalid particles
     812        if (itra1_tmp(i).ne.itime) cycle
     813        do ipart=minpart,maxnumpart
     814          if (itra1(ipart).ne.itime) then
     815            itra1(ipart) = itra1_tmp(i)
     816            npoint(ipart) = npoint_tmp(i)
     817            nclass(ipart) = nclass_tmp(i)
     818            idt(ipart) = idt_tmp(i)
     819            itramem(ipart) = itramem_tmp(i)
     820            itrasplit(ipart) = itrasplit_tmp(i)
     821            xtra1(ipart) = xtra1_tmp(i)
     822            ytra1(ipart) = ytra1_tmp(i)
     823            ztra1(ipart) = ztra1_tmp(i)
     824            xmass1(ipart,:) = xmass1_tmp(i,:)
     825            goto 200 ! Storage space has been found, stop searching
     826          end if
     827! :TODO: add check for the case where too many particles are required
     828        end do
     829200     minpart=ipart+1
     830      end do
     831! Increase numpart, if necessary
     832      numpart=max(numpart,ipart)
     833
     834      deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
     835           &itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
     836
     837      if (mp_dev_mode) then
     838        write(*,*) "numpart after: ", numpart
     839        write(*,FMT='(72("#"))')
     840      end if
     841
     842    else
     843! This routine should only be called by the two participating processes
     844      write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part"
     845      stop
     846      return
     847    end if
     848
     849! Measure time for MPI communications
     850!************************************
     851    if (mp_measure_time) call mpif_mtime('commtime',1)
     852
     853  end subroutine mpif_redist_part
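On both sides above, each particle property is sent with its own message tag, mtag+1 through mtag+(9+nspec), so the blocking sends and receives pair up unambiguously even though they share one communicator. A minimal self-contained sketch of one such tag-matched pair (illustrative only; needs at least two MPI processes):

    program tag_matched_pair
    ! One blocking send matched to one blocking receive via the message tag,
    ! analogous to one property transfer in mpif_redist_part.
      use mpi
      implicit none
      integer, parameter :: n = 5, mtag = 2000
      integer :: ierr, rank, buf(n)
      integer :: stat(MPI_STATUS_SIZE)

      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)

      if (rank.eq.0) then
        buf = 42
        call MPI_SEND(buf, n, MPI_INTEGER, 1, mtag+1, MPI_COMM_WORLD, ierr)
      else if (rank.eq.1) then
        call MPI_RECV(buf, n, MPI_INTEGER, 0, mtag+1, MPI_COMM_WORLD, stat, ierr)
        write(*,*) 'rank 1 received tag', stat(MPI_TAG), 'values', buf
      end if

      call MPI_FINALIZE(ierr)
    end program tag_matched_pair
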
     854
     855
    456856  subroutine mpif_tm_send_vars
    457857!***********************************************************************
     
    477877    if (lroot) then
    478878      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
    479            &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     879           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    480880      if (mp_ierr /= 0) goto 600
    481881      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
    482            &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     882           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    483883      if (mp_ierr /= 0) goto 600
    484884      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
    485            &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     885           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    486886      if (mp_ierr /= 0) goto 600
    487887      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
    488            &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     888           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    489889      if (mp_ierr /= 0) goto 600
    490890      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
    491            &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     891           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    492892      if (mp_ierr /= 0) goto 600
    493893
    494894! int2
    495895      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
    496            &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
     896           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
    497897      if (mp_ierr /= 0) goto 600
    498898
    499899! real
    500900      call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,&
    501            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     901           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    502902      if (mp_ierr /= 0) goto 600
    503903      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
    504            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     904           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    505905      if (mp_ierr /= 0) goto 600
    506906      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
    507            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     907           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    508908      if (mp_ierr /= 0) goto 600
    509909
    510910      call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,&
    511            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     911           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    512912      if (mp_ierr /= 0) goto 600
    513913      call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,&
    514            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     914           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    515915      if (mp_ierr /= 0) goto 600
    516916      call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,&
    517            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     917           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    518918      if (mp_ierr /= 0) goto 600
    519919
    520920      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
    521            &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
     921           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
    522922      if (mp_ierr /= 0) goto 600
    523923      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
    524            &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
     924           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
    525925      if (mp_ierr /= 0) goto 600
    526926      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,&
    527            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     927           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    528928      if (mp_ierr /= 0) goto 600
    529929
    530930      do i=1,nspec
    531931        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,&
    532              &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     932             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    533933        if (mp_ierr /= 0) goto 600
    534934      end do
     
    537937! integers
    538938      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
    539            &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     939           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    540940      if (mp_ierr /= 0) goto 600
    541941      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
    542            &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     942           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    543943      if (mp_ierr /= 0) goto 600
    544944      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
    545            &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     945           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    546946      if (mp_ierr /= 0) goto 600
    547947      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
    548            &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     948           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    549949      if (mp_ierr /= 0) goto 600
    550950      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
    551            &numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
     951           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    552952      if (mp_ierr /= 0) goto 600
    553953
    554954! int2
    555955      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
    556            &numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
     956           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
    557957      if (mp_ierr /= 0) goto 600
    558958
    559959! reals
    560960      call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,&
    561            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     961           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    562962      if (mp_ierr /= 0) goto 600
    563963      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,&
    564            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     964           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    565965      if (mp_ierr /= 0) goto 600
    566966      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,&
    567            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     967           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    568968      if (mp_ierr /= 0) goto 600
    569969
    570970      call MPI_SCATTER(us,numpart_mpi,mp_sp,us,&
    571            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     971           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    572972      if (mp_ierr /= 0) goto 600
    573973      call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,&
    574            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     974           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    575975      if (mp_ierr /= 0) goto 600
    576976      call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,&
    577            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     977           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    578978      if (mp_ierr /= 0) goto 600
    579979
    580980      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
    581            &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
     981           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
    582982      if (mp_ierr /= 0) goto 600
    583983      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
    584            &numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
     984           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
    585985      if (mp_ierr /= 0) goto 600
    586986      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,&
    587            &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     987           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    588988      if (mp_ierr /= 0) goto 600
    589989
    590990      do i=1,nspec
    591991        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),&
    592              &numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
     992             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
    593993        if (mp_ierr /= 0) goto 600
    594994      end do
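In the root branch above, MPI_IN_PLACE is passed as the receive buffer so the root's own block of each array stays where it already is, while the non-root branch passes the same array as both send and receive argument (the send buffer is ignored away from the root). A minimal self-contained toy program showing the in-place variant (not part of mpi_mod):

    program scatter_in_place
    ! Root scatters blocks of an array it already holds; MPI_IN_PLACE as the
    ! receive buffer on root leaves the root's own block untouched.
      use mpi
      implicit none
      integer, parameter :: blk = 3
      integer :: ierr, rank, nproc, i
      integer, allocatable :: buf(:)

      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)

      if (rank.eq.0) then
        allocate(buf(blk*nproc))
        buf = [(i, i=1, blk*nproc)]
        call MPI_SCATTER(buf, blk, MPI_INTEGER, MPI_IN_PLACE, &
             & blk, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
      else
        allocate(buf(blk))
        call MPI_SCATTER(buf, blk, MPI_INTEGER, buf, &
             & blk, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
      end if

      write(*,*) 'rank', rank, 'now holds', buf(1:blk)
      call MPI_FINALIZE(ierr)
    end program scatter_in_place
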
     
    11451545
    11461546! cloud water/ice:
    1147     if (readclouds_nest(i)) then
    1148       ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    1149       ! if (mp_ierr /= 0) goto 600
    1150       call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    1151       if (mp_ierr /= 0) goto 600
    1152     end if
     1547      if (readclouds_nest(i)) then
     1548! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
     1549! if (mp_ierr /= 0) goto 600
     1550        call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
     1551        if (mp_ierr /= 0) goto 600
     1552      end if
    11531553
    11541554! 2D fields
     
    13981798    integer :: d2s1 = nxmax*nymax
    13991799    integer :: d2s2 = nxmax*nymax*maxspec
    1400     !integer :: d1_size1 = maxwf
     1800!integer :: d1_size1 = maxwf
    14011801
    14021802!    integer :: d3s1,d3s2,d2s1,d2s2
     
    16452045      if (dest.eq.id_read) cycle
    16462046      do k=1, numbnests
    1647       i=dest*nvar_async
    1648       call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1649       if (mp_ierr /= 0) goto 600
    1650       i=i+1
    1651       call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1652       if (mp_ierr /= 0) goto 600
    1653       i=i+1
    1654       call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1655       if (mp_ierr /= 0) goto 600
    1656       i=i+1
    1657       call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1658       if (mp_ierr /= 0) goto 600
    1659       i=i+1
    1660       call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1661       if (mp_ierr /= 0) goto 600
    1662       i=i+1
    1663       call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1664       if (mp_ierr /= 0) goto 600
    1665       i=i+1
    1666       call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1667       if (mp_ierr /= 0) goto 600
    1668       i=i+1
    1669       call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1670       if (mp_ierr /= 0) goto 600
    1671       i=i+1
    1672       call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1673       if (mp_ierr /= 0) goto 600
    1674       i=i+1
    1675       call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1676       if (mp_ierr /= 0) goto 600
    1677       i=i+1
    1678       call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1679       i=i+1
    1680       if (mp_ierr /= 0) goto 600
    1681       call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1682       if (mp_ierr /= 0) goto 600
    1683       i=i+1
    1684       call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1685       if (mp_ierr /= 0) goto 600
    1686       i=i+1
    1687       call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1688       if (mp_ierr /= 0) goto 600
    1689       i=i+1
    1690       call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1691       if (mp_ierr /= 0) goto 600
    1692       i=i+1
     2047        i=dest*nvar_async
     2048        call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2049        if (mp_ierr /= 0) goto 600
     2050        i=i+1
     2051        call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2052        if (mp_ierr /= 0) goto 600
     2053        i=i+1
     2054        call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2055        if (mp_ierr /= 0) goto 600
     2056        i=i+1
     2057        call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2058        if (mp_ierr /= 0) goto 600
     2059        i=i+1
     2060        call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2061        if (mp_ierr /= 0) goto 600
     2062        i=i+1
     2063        call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2064        if (mp_ierr /= 0) goto 600
     2065        i=i+1
     2066        call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2067        if (mp_ierr /= 0) goto 600
     2068        i=i+1
     2069        call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2070        if (mp_ierr /= 0) goto 600
     2071        i=i+1
     2072        call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2073        if (mp_ierr /= 0) goto 600
     2074        i=i+1
     2075        call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2076        if (mp_ierr /= 0) goto 600
     2077        i=i+1
     2078        call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2079        i=i+1
     2080        if (mp_ierr /= 0) goto 600
     2081        call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2082        if (mp_ierr /= 0) goto 600
     2083        i=i+1
     2084        call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2085        if (mp_ierr /= 0) goto 600
     2086        i=i+1
     2087        call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2088        if (mp_ierr /= 0) goto 600
     2089        i=i+1
     2090        call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2091        if (mp_ierr /= 0) goto 600
     2092        i=i+1
    16932093! 15
    1694       call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1695       if (mp_ierr /= 0) goto 600
    1696       i=i+1
    1697       call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1698       if (mp_ierr /= 0) goto 600
    1699       i=i+1
    1700       call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1701       if (mp_ierr /= 0) goto 600
    1702       i=i+1
    1703       call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1704       if (mp_ierr /= 0) goto 600
    1705       i=i+1
    1706       call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1707       if (mp_ierr /= 0) goto 600
    1708       i=i+1
    1709       call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1710       if (mp_ierr /= 0) goto 600
    1711       i=i+1
    1712       call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1713       if (mp_ierr /= 0) goto 600
    1714       i=i+1
    1715       call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1716       if (mp_ierr /= 0) goto 600
    1717       i=i+1
    1718       call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1719       if (mp_ierr /= 0) goto 600
    1720       i=i+1
    1721       call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
    1722       if (mp_ierr /= 0) goto 600
     2094        call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2095        if (mp_ierr /= 0) goto 600
     2096        i=i+1
     2097        call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2098        if (mp_ierr /= 0) goto 600
     2099        i=i+1
     2100        call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2101        if (mp_ierr /= 0) goto 600
     2102        i=i+1
     2103        call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2104        if (mp_ierr /= 0) goto 600
     2105        i=i+1
     2106        call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2107        if (mp_ierr /= 0) goto 600
     2108        i=i+1
     2109        call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2110        if (mp_ierr /= 0) goto 600
     2111        i=i+1
     2112        call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2113        if (mp_ierr /= 0) goto 600
     2114        i=i+1
     2115        call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2116        if (mp_ierr /= 0) goto 600
     2117        i=i+1
     2118        call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2119        if (mp_ierr /= 0) goto 600
     2120        i=i+1
     2121        call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
     2122        if (mp_ierr /= 0) goto 600
    17232123! 25
    17242124
    17252125! Send cloud water if it exists. Increment counter always (as on receiving end)
    1726       if (readclouds) then
    1727         i=i+1
    1728         call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,&
    1729              &MPI_COMM_WORLD,reqs(i),mp_ierr)
    1730         if (mp_ierr /= 0) goto 600
    1731       end if
     2126        if (readclouds) then
     2127          i=i+1
     2128          call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,&
     2129               &MPI_COMM_WORLD,reqs(i),mp_ierr)
     2130          if (mp_ierr /= 0) goto 600
     2131        end if
     2132      end do
    17322133    end do
    1733   end do
    17342134
    17352135    if (mp_measure_time) call mpif_mtime('commtime',1)
     
    18102210    do k=1, numbnests
    18112211! Get MPI tags/requests for communications
    1812     j=mp_pid*nvar_async
    1813     call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    1814          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1815     if (mp_ierr /= 0) goto 600
    1816     j=j+1
    1817     call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    1818          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1819     if (mp_ierr /= 0) goto 600
    1820     j=j+1
    1821     call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    1822          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1823     if (mp_ierr /= 0) goto 600
    1824     j=j+1
    1825     call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    1826          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1827     if (mp_ierr /= 0) goto 600
    1828     j=j+1
    1829     call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    1830          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1831     if (mp_ierr /= 0) goto 600
    1832     j=j+1
    1833     call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    1834          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1835     if (mp_ierr /= 0) goto 600
    1836     j=j+1
    1837     call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
    1838          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1839     if (mp_ierr /= 0) goto 600
    1840     j=j+1
    1841     call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
    1842          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1843     if (mp_ierr /= 0) goto 600
    1844     j=j+1
    1845     call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    1846          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1847     if (mp_ierr /= 0) goto 600
    1848     j=j+1
    1849     call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
    1850          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1851     if (mp_ierr /= 0) goto 600
    1852     j=j+1
    1853     call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
    1854          &MPI_COMM_WORLD,reqs(j),mp_ierr)   
    1855     if (mp_ierr /= 0) goto 600
    1856     j=j+1
    1857     call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1858          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1859     if (mp_ierr /= 0) goto 600
    1860     j=j+1
    1861     call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
    1862          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1863     if (mp_ierr /= 0) goto 600
    1864     j=j+1
    1865     call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1866          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1867     if (mp_ierr /= 0) goto 600
    1868     j=j+1
    1869     call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1870          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1871     if (mp_ierr /= 0) goto 600
    1872     j=j+1
    1873     call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1874          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1875     if (mp_ierr /= 0) goto 600
    1876     j=j+1
    1877     call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1878          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1879     if (mp_ierr /= 0) goto 600
    1880     j=j+1
    1881     call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1882          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1883     if (mp_ierr /= 0) goto 600
    1884     j=j+1
    1885     call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1886          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1887     if (mp_ierr /= 0) goto 600
    1888     j=j+1
    1889     call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1890          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1891     if (mp_ierr /= 0) goto 600
    1892     call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1893          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1894     if (mp_ierr /= 0) goto 600
    1895     j=j+1
    1896     call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1897          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1898     if (mp_ierr /= 0) goto 600
    1899     j=j+1
    1900     call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1901          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1902     if (mp_ierr /= 0) goto 600
    1903     j=j+1
    1904     call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1905          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1906     if (mp_ierr /= 0) goto 600
    1907     j=j+1
    1908     call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
    1909          &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1910     if (mp_ierr /= 0) goto 600
     2212      j=mp_pid*nvar_async
     2213      call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
     2214           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2215      if (mp_ierr /= 0) goto 600
     2216      j=j+1
     2217      call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
     2218           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2219      if (mp_ierr /= 0) goto 600
     2220      j=j+1
     2221      call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
     2222           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2223      if (mp_ierr /= 0) goto 600
     2224      j=j+1
     2225      call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
     2226           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2227      if (mp_ierr /= 0) goto 600
     2228      j=j+1
     2229      call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
     2230           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2231      if (mp_ierr /= 0) goto 600
     2232      j=j+1
     2233      call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
     2234           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2235      if (mp_ierr /= 0) goto 600
     2236      j=j+1
     2237      call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
     2238           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2239      if (mp_ierr /= 0) goto 600
     2240      j=j+1
     2241      call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&
     2242           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2243      if (mp_ierr /= 0) goto 600
     2244      j=j+1
     2245      call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
     2246           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2247      if (mp_ierr /= 0) goto 600
     2248      j=j+1
     2249      call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&
     2250           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2251      if (mp_ierr /= 0) goto 600
     2252      j=j+1
     2253      call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
     2254           &MPI_COMM_WORLD,reqs(j),mp_ierr)   
     2255      if (mp_ierr /= 0) goto 600
     2256      j=j+1
     2257      call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2258           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2259      if (mp_ierr /= 0) goto 600
     2260      j=j+1
     2261      call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&
     2262           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2263      if (mp_ierr /= 0) goto 600
     2264      j=j+1
     2265      call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2266           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2267      if (mp_ierr /= 0) goto 600
     2268      j=j+1
     2269      call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2270           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2271      if (mp_ierr /= 0) goto 600
     2272      j=j+1
     2273      call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2274           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2275      if (mp_ierr /= 0) goto 600
     2276      j=j+1
     2277      call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2278           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2279      if (mp_ierr /= 0) goto 600
     2280      j=j+1
     2281      call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2282           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2283      if (mp_ierr /= 0) goto 600
     2284      j=j+1
     2285      call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2286           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2287      if (mp_ierr /= 0) goto 600
     2288      j=j+1
     2289      call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2290           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2291      if (mp_ierr /= 0) goto 600
     2292      call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2293           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2294      if (mp_ierr /= 0) goto 600
     2295      j=j+1
     2296      call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2297           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2298      if (mp_ierr /= 0) goto 600
     2299      j=j+1
     2300      call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2301           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2302      if (mp_ierr /= 0) goto 600
     2303      j=j+1
     2304      call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2305           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2306      if (mp_ierr /= 0) goto 600
     2307      j=j+1
     2308      call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
     2309           &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2310      if (mp_ierr /= 0) goto 600
    19112311
    19122312! Post request for clwc. These data are possibly not sent, request must then be cancelled
    19132313! For now assume that data at all steps either have or do not have water
    1914     if (readclouds) then
    1915       j=j+1
    1916       call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
    1917            &MPI_COMM_WORLD,reqs(j),mp_ierr)
    1918       if (mp_ierr /= 0) goto 600
    1919     end if
    1920   end do
     2314      if (readclouds) then
     2315        j=j+1
     2316        call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
     2317             &MPI_COMM_WORLD,reqs(j),mp_ierr)
     2318        if (mp_ierr /= 0) goto 600
     2319      end if
     2320    end do
    19212321
    19222322    if (mp_measure_time) call mpif_mtime('commtime',1)
     
    19402340!
    19412341! TODO
    1942 !   take into account nested wind fields
     2342!   NB: take into account nested wind fields by using a separate
     2343!   subroutine mpif_gf_request_nest (and separate request arrays for the
     2344!   nested variables)
    19432345!
    19442346!*******************************************************************************
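The non-blocking transfers above post one MPI_Isend or MPI_Irecv per field into the shared request array reqs, indexed from dest*nvar_async on the sending side and mp_pid*nvar_async on the receiving side; a later wait call (the request subroutine referred to in the comment above) completes them. A minimal self-contained sketch of that post-then-wait pattern (illustrative only; needs at least two MPI processes):

    program isend_waitall
    ! Post several non-blocking transfers into a request array, then complete
    ! them all with MPI_Waitall (the pattern used for the wind-field arrays).
      use mpi
      implicit none
      integer, parameter :: nvar = 3, n = 4
      integer :: ierr, rank, i
      integer :: reqs(nvar)
      integer :: stats(MPI_STATUS_SIZE, nvar)
      real :: fields(n, nvar)

      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)

      if (rank.eq.0) then
        fields = 1.0
        do i=1, nvar
          call MPI_Isend(fields(:,i), n, MPI_REAL, 1, i, MPI_COMM_WORLD, reqs(i), ierr)
        end do
      else if (rank.eq.1) then
        do i=1, nvar
          call MPI_Irecv(fields(:,i), n, MPI_REAL, 0, MPI_ANY_TAG, MPI_COMM_WORLD, reqs(i), ierr)
        end do
      end if

    ! Computation could overlap with communication here.

      if (rank.le.1) call MPI_Waitall(nvar, reqs, stats, ierr)
      call MPI_FINALIZE(ierr)
    end program isend_waitall
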
     
    23492751          write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',&
    23502752               & mp_conccalc_time_total
    2351           ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
    2352           !      & mp_vt_wtime_total
    2353           ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
    2354           !      & mp_vt_time_total
    2355 ! NB: the 'flush' function is possibly a gfortran-specific extension
    2356           call flush()
     2753! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',&
     2754!      & mp_vt_wtime_total
     2755! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
     2756!      & mp_vt_time_total
     2757! NB: the 'flush' function is possibly a gfortran-specific extension,
     2758! comment out if it gives problems
     2759!          call flush()
    23572760        end if
    23582761      end do
     
    23882791
    23892792
    2390     end subroutine mpif_finalize
    2391 
    2392 
    2393     subroutine get_lun(my_lun)
     2793  end subroutine mpif_finalize
     2794
     2795
     2796  subroutine get_lun(my_lun)
    23942797!***********************************************************************
    23952798! get_lun:
     
    23972800!***********************************************************************
    23982801
    2399       implicit none
    2400 
    2401       integer, intent(inout) :: my_lun
    2402       integer, save :: free_lun=100
    2403       logical :: exists, iopen
    2404 
    2405 !***********************************************************************
    2406 
    2407       loop1: do
    2408         inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
    2409         if (exists .and. .not.iopen) exit loop1
    2410         free_lun = free_lun+1
    2411       end do loop1
    2412       my_lun = free_lun
    2413 
    2414     end subroutine get_lun
    2415 
    2416 
    2417     subroutine write_data_dbg(array_in, array_name, tstep, ident)
     2802    implicit none
     2803
     2804    integer, intent(inout) :: my_lun
     2805    integer, save :: free_lun=100
     2806    logical :: exists, iopen
     2807
     2808!***********************************************************************
     2809
     2810    loop1: do
     2811      inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
     2812      if (exists .and. .not.iopen) exit loop1
     2813      free_lun = free_lun+1
     2814    end do loop1
     2815    my_lun = free_lun
     2816
     2817  end subroutine get_lun
     2818
     2819
     2820  subroutine write_data_dbg(array_in, array_name, tstep, ident)
    24182821!***********************************************************************
    24192822! Write one-dimensional arrays to file (for debugging purposes)
    24202823!***********************************************************************
    2421       implicit none
    2422 
    2423       real, intent(in), dimension(:) :: array_in
    2424       integer, intent(in) :: tstep
    2425       integer :: lios
    2426       character(LEN=*), intent(in) :: ident, array_name
    2427 
    2428       character(LEN=8) :: c_ts
    2429       character(LEN=40) :: fn_1, fn_2
    2430 
    2431 !***********************************************************************
    2432 
    2433       write(c_ts, FMT='(I8.8,BZ)') tstep
    2434       fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
    2435       write(c_ts, FMT='(I2.2,BZ)') mp_np
    2436       fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'
    2437 
    2438       call get_lun(dat_lun)
    2439       open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
    2440            FORM='UNFORMATTED', STATUS='REPLACE')
    2441       write(UNIT=dat_lun, IOSTAT=lios) array_in
    2442       close(UNIT=dat_lun)
    2443 
    2444     end subroutine write_data_dbg
     2824    implicit none
     2825
     2826    real, intent(in), dimension(:) :: array_in
     2827    integer, intent(in) :: tstep
     2828    integer :: lios
     2829    character(LEN=*), intent(in) :: ident, array_name
     2830
     2831    character(LEN=8) :: c_ts
     2832    character(LEN=40) :: fn_1, fn_2
     2833
     2834!***********************************************************************
     2835
     2836    write(c_ts, FMT='(I8.8,BZ)') tstep
     2837    fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
     2838    write(c_ts, FMT='(I2.2,BZ)') mp_np
     2839    fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'
     2840
     2841    call get_lun(dat_lun)
     2842    open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
     2843         FORM='UNFORMATTED', STATUS='REPLACE')
     2844    write(UNIT=dat_lun, IOSTAT=lios) array_in
     2845    close(UNIT=dat_lun)
     2846
     2847  end subroutine write_data_dbg
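A hedged usage sketch for the debug writer above (assumptions: write_data_dbg is accessible from outside mpi_mod, and the module's MPI setup routine, assumed here to be mpif_init, has been called so that mp_np is defined; names other than write_data_dbg and mpif_finalize are illustrative):

    program dbg_dump_example
    ! Hypothetical call of write_data_dbg: dumps a real array to an unformatted
    ! file named zsample-00000042-dbg-npNN.dat, where NN is the process count.
      use mpi_mod
      implicit none
      real :: zsample(100)

      call mpif_init                 ! assumed module setup routine (sets mp_np etc.)
      zsample(:) = 1.5
      call write_data_dbg(zsample, 'zsample', 42, 'dbg')
      call mpif_finalize             ! shown near the bottom of this changeset
    end program dbg_dump_example
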
    24452848
    24462849
     
    24992902    clwc(:,:,:,li:ui)=0.0
    25002903    ciwc(:,:,:,li:ui)=0.0
    2501  
     2904
    25022905! 2D fields
    25032906
     
    25162919    tropopause(:,:,:,li:ui)=10000.
    25172920    oli(:,:,:,li:ui)=0.01
    2518  
     2921
    25192922  end subroutine set_fields_synthetic
    25202923