Changes in src/timemanager_mpi.f90 [8ed5f11:ec7fc72] in flexpart.git


File: 1 edited

Legend:

    (no marker)  unmodified
    +            added
    -            removed
  • src/timemanager_mpi.f90

    (line numbers: left = r8ed5f11, right = ec7fc72)

      228 228      if (mp_measure_time.and..not.(lmpreader.and.lmp_use_reader)) call mpif_mtime('getfields',0)
      229 229  
    - 230      ! Two approaches to MPI getfields is implemented:
      231 230  ! Version 1 (lmp_sync=.true.) uses a read-ahead process where send/recv is done
      232 231  ! in sync at start of each new field time interval
    - 233      !
    - 234      ! Version 2 (lmp_sync=.false.) is for holding three fields in memory. Uses a
    - 235      ! read-ahead process where sending/receiving of the 3rd fields is done in
    - 236      ! the background in parallel with performing computations with fields 1&2
    - 237      !********************************************************************************
    - 238      
      239 232      if (lmp_sync.and.lmp_use_reader.and.memstat.gt.0) then
      240 233        call mpif_gf_send_vars(memstat)
      241 234        if (numbnests>0) call mpif_gf_send_vars_nest(memstat)
    - 242      ! Version 2  (lmp_sync=.false.) is also used whenever 2 new fields are
    +     235  ! Version 2  (lmp_sync=.false., see below) is also used whenever 2 new fields are
      243 236  ! read (as at first time step), in which case async send/recv is impossible.
      244 237      else if (.not.lmp_sync.and.lmp_use_reader.and.memstat.ge.32) then
      ...
      247 240      end if
      248 241  
    +     242  ! Version 2 (lmp_sync=.false.) is for holding three fields in memory. Uses a
    +     243  ! read-ahead process where sending/receiving of the 3rd fields is done in
    +     244  ! the background in parallel with performing computations with fields 1&2
    +     245  !********************************************************************************
      249 246      if (.not.lmp_sync) then
      250 247  
    - 251      ! Reader process:
    +     248  ! READER PROCESS:
      252 249        if (memstat.gt.0..and.memstat.lt.32.and.lmp_use_reader.and.lmpreader) then
      253 250          if (mp_dev_mode) write(*,*) 'Reader process: calling mpif_gf_send_vars_async'
      ...
      255 252        end if
      256 253  
    - 257      ! Completion check:
    +     254  ! COMPLETION CHECK:
      258 255  ! Issued at start of each new field period.
      259 256        if (memstat.ne.0.and.memstat.lt.32.and.lmp_use_reader) then
      ...
      261 258        end if
      262 259  
    - 263      ! Recveiving process(es):
    - 264      ! eso TODO: at this point we do not know if clwc/ciwc will be available
    - 265      ! at next time step. Issue receive request anyway, cancel at mpif_gf_request
    +     260  ! RECVEIVING PROCESS(ES):
    +     261        ! eso TODO: at this point we do not know if clwc/ciwc will be available
    +     262        ! at next time step. Issue receive request anyway, cancel at mpif_gf_request
      266 263        if (memstat.gt.0.and.lmp_use_reader.and..not.lmpreader) then
      267 264          if (mp_dev_mode) write(*,*) 'Receiving process: calling mpif_gf_send_vars_async. PID: ', mp_pid
      ...
      493 490            endif
      494 491            if (mp_measure_time) call mpif_mtime('iotime',1)
    - 495      
    - 496      ! :TODO: Correct calling of conc_surf above?
    - 497      
    - 498      !   call concoutput_surf(itime,outnum,gridtotalunc,wetgridtotalunc,drygridtotalunc)
    - 499      ! endif
      500 492  
      501 493            if (nested_output.eq.1) then
      ...
      694 686  
      695 687          if (mp_measure_time) call mpif_mtime('advance',0)
    - 696      !mp_advance_wtime_beg = mpi_wtime()
      697 688  
      698 689          call advance(itime,npoint(j),idt(j),uap(j),ucp(j),uzp(j), &
      ...
      701 692  
      702 693          if (mp_measure_time) call mpif_mtime('advance',1)
    - 703      
    - 704              ! mp_advance_wtime_end = mpi_wtime()
    - 705              ! mp_advance_wtime_total = mp_advance_wtime_total + (mp_advance_wtime_end - &
    - 706              !      & mp_advance_wtime_beg)
      707 694  
      708 695  
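
The comment block relocated by this changeset describes two exchange strategies for the meteorological fields: with lmp_sync=.true. the send/recv is done synchronously at the start of each field interval, while with lmp_sync=.false. three fields are kept in memory and the third is transferred in the background while computations run on the first two. The sketch below illustrates that non-blocking read-ahead pattern in general terms; it is not the FLEXPART code (mpif_gf_send_vars_async / mpif_gf_request are not shown in this changeset), and the array, tag, and rank names here are illustrative assumptions only.

  ! Minimal sketch of the "Version 2" idea: a reader rank posts a non-blocking
  ! send of the next field slot while a worker rank posts the matching receive,
  ! both overlap this with computations on the fields already in memory, and the
  ! transfer is completed with MPI_Wait before the new slot is used.
  ! All names (field, nslots, TAG_FIELD, reader_rank) are made up for illustration.
  program readahead_sketch
    use mpi
    implicit none
    integer, parameter :: nx = 10, ny = 10, nslots = 3, TAG_FIELD = 100
    integer, parameter :: reader_rank = 0
    real    :: field(nx, ny, nslots)
    integer :: ierr, myrank, nprocs, req, islot, istep

    call MPI_Init(ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD, myrank, ierr)
    call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
    req = MPI_REQUEST_NULL

    do istep = 1, 5
      islot = mod(istep, nslots) + 1        ! slot to be refilled in the background

      if (myrank == reader_rank) then
        field(:,:,islot) = real(istep)      ! stands in for reading a new wind field
        if (nprocs > 1) call MPI_Isend(field(:,:,islot), nx*ny, MPI_REAL, 1, &
             TAG_FIELD, MPI_COMM_WORLD, req, ierr)
      else if (myrank == 1) then
        call MPI_Irecv(field(:,:,islot), nx*ny, MPI_REAL, reader_rank, &
             TAG_FIELD, MPI_COMM_WORLD, req, ierr)
      end if

      ! ... computations with the two fields already in memory would go here ...

      ! completion check before the freshly transferred slot is needed
      if (req /= MPI_REQUEST_NULL) call MPI_Wait(req, MPI_STATUS_IGNORE, ierr)
    end do

    call MPI_Finalize(ierr)
  end program readahead_sketch

The MPI_Wait at the end of each iteration plays the role of the "completion check" comment in the diff: it guarantees the background transfer has finished before the third slot is touched in the next field period.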
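The hunks around the advance() call drop the hand-rolled mp_advance_wtime_* bookkeeping in favour of the existing mpif_mtime('advance',0) / mpif_mtime('advance',1) calls. The implementation of mpif_mtime is not part of this changeset, so the following is only a guess at the begin/end convention suggested by the 0/1 arguments; the real routine additionally takes a label ('getfields', 'iotime', 'advance', ...) to select among several counters, and all names below are hypothetical.

  ! Sketch of a begin/end wall-clock timer in the style of the mpif_mtime calls.
  module mtime_sketch_mod
    use mpi
    implicit none
    double precision :: advance_wtime_beg   = 0.d0
    double precision :: advance_wtime_total = 0.d0
  contains
    subroutine mtime_sketch(imode)
      integer, intent(in) :: imode
      if (imode == 0) then
        advance_wtime_beg = MPI_Wtime()                 ! imode=0: start of timed section
      else
        advance_wtime_total = advance_wtime_total + &   ! imode=1: end, accumulate elapsed time
             (MPI_Wtime() - advance_wtime_beg)
      end if
    end subroutine mtime_sketch
  end module mtime_sketch_mod

  program mtime_demo
    use mpi
    use mtime_sketch_mod
    implicit none
    integer :: ierr
    call MPI_Init(ierr)
    call mtime_sketch(0)
    ! ... timed work (e.g. the particle advance loop) would go here ...
    call mtime_sketch(1)
    print *, 'advance wall time:', advance_wtime_total
    call MPI_Finalize(ierr)
  end program mtime_demo

Centralising the MPI_Wtime calls behind one routine is what lets the commented-out mp_advance_wtime_* lines be deleted here without losing the timing information.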