Changeset 328fdf9 in flexpart.git
- Timestamp: Apr 28, 2019, 7:53:42 PM
- Branches: master, 10.4.1_pesei, GFS_025, bugfixes+enhancements, dev, release-10, release-10.4.1, scaling-bug
- Children: 0a94e13
- Parents: 77783e3
- Location: src
- Files: 5 edited
src/com_mod.f90
Declares a module-level receive buffer for receptor concentrations; it is allocated only when the code is built without MPI_IN_PLACE support (see mpi_mod.f90 below).

--- src/com_mod.f90@e9e0f06
+++ src/com_mod.f90@328fdf9
@@ -651,4 +651,5 @@
   real :: receptorarea(maxreceptor)
   real :: creceptor(maxreceptor,maxspec)
+  real, allocatable, dimension(:,:) :: creceptor0
   character(len=16) :: receptorname(maxreceptor)
   integer :: numreceptor
src/init_domainfill.f90
On restart (ipin = 1), the boundary-condition file is now read back only for runs that are not global domain-filling runs, since a global domain-filling restart takes everything from the particle dump instead.

--- src/init_domainfill.f90@b5127f9
+++ src/init_domainfill.f90@328fdf9
@@ -414,5 +414,5 @@
 !***************************************************************************
 
-  if (ipin.eq.1) then
+  if ((ipin.eq.1).and.(.not.gdomainfill)) then
     open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
          form='unformatted')
src/init_domainfill_mpi.f90
The MPI variant gets the same restart guards: a restarted global domain-filling run (gdomainfill with ipin /= 0) returns immediately, and the per-process particle count, the dummy receive arrays, the broadcast/scatter of particle properties, and the deallocation of the temporaries are all handled only on a fresh start (ipin = 0). The scatter now sends npart(1)/mp_partgroup_np properties per process instead of numpart, and the broadcast of xmassperparticle is dropped.

--- src/init_domainfill_mpi.f90@b5127f9
+++ src/init_domainfill_mpi.f90@328fdf9
@@ -110,4 +110,8 @@
   endif
 
+! Exit here if resuming a run from particle dump
+!***********************************************
+  if (gdomainfill.and.ipin.ne.0) return
+
 ! Do not release particles twice (i.e., not at both in the leftmost and rightmost
 ! grid cell) for a global domain
@@ -213,5 +217,4 @@
     colmass(ix,jy)=(pp(1)-pp(nz))/ga*gridarea(jy)
     colmasstotal=colmasstotal+colmass(ix,jy)
-
   end do
   end do
@@ -466,5 +469,5 @@
 
 ! eso TODO: only needed for root process
-  if (ipin.eq.1) then
+  if ((ipin.eq.1).and.(.not.gdomainfill)) then
     open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
          form='unformatted')
@@ -474,25 +477,31 @@
   endif
 
-  numpart = numpart/mp_partgroup_np
-  if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
+  if (ipin.eq.0) then
+    numpart = numpart/mp_partgroup_np
+    if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
+  end if
 
 else ! Allocate dummy arrays for receiving processes
+  if (ipin.eq.0) then
     allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),&
          & idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),&
         & xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),&
         & xmass1_tmp(nullsize, nullsize))
+  end if
 
 end if ! end if(lroot)
+
 
 
 ! Distribute particles to other processes (numpart is 'per-process', not total)
-  call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
-! eso TODO: xmassperparticle: not necessary to send
-  call MPI_Bcast(xmassperparticle, 1, mp_sp, id_root, mp_comm_used, mp_ierr)
-  call mpif_send_part_properties(numpart)
+! Only if not restarting from previous run
+  if (ipin.eq.0) then
+    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
+    call mpif_send_part_properties(npart(1)/mp_partgroup_np)
 
 ! Deallocate the temporary arrays used for all particles
-  deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
+    deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
      & itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
+  end if
 
 
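Taken together, the guards above mean that a restarted global domain-filling run creates no particles and skips the initial scatter entirely. A minimal, compilable sketch of that control flow (the program wrapper and write statements are illustrative only; the variable names follow the diff):

program domainfill_guard_sketch
  implicit none
  call init_sketch(gdomainfill=.true., ipin=1, lroot=.true.)  ! restart: returns at once
  call init_sketch(gdomainfill=.true., ipin=0, lroot=.true.)  ! fresh start: full init
contains
  subroutine init_sketch(gdomainfill, ipin, lroot)
    logical, intent(in) :: gdomainfill  ! global domain-filling run?
    integer, intent(in) :: ipin         ! /= 0: continue from a particle dump
    logical, intent(in) :: lroot        ! root MPI process?

    ! A restarted global domain-filling run gets all its particles from the
    ! dump file, so nothing is created or distributed here.
    if (gdomainfill .and. ipin /= 0) return

    if (lroot) write(*,*) 'root: create particles / read boundcond.bin'

    ! Only fresh starts broadcast and scatter the particle properties,
    ! mirroring the new "if (ipin.eq.0)" blocks in the diff above.
    if (ipin == 0) write(*,*) 'all ranks: distribute particle properties'
  end subroutine init_sketch
end program domainfill_guard_sketch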
src/mpi_mod.f90
This revision touches several independent areas: a new compile-time switch mp_rebalance gates particle redistribution; the unused readwind timing instrumentation is commented out; mpif_init now reports whether MPI_IN_PLACE reductions are compiled in and, if not, allocates the creceptor0 buffer declared in com_mod.f90; the receive count for the cloud-water fields ctwc/ctwcn is enlarged from d2s1 to d2s1*5; the receptor-concentration reduction moves inside the USE_MPIINPLACE branch, with a creceptor0-based fallback and added error checks on the other branch; and a misspelled loop bound (num_part) is corrected to numpart.

--- src/mpi_mod.f90@0ecc1fe
+++ src/mpi_mod.f90@328fdf9
@@ -125,5 +125,6 @@
 ! mp_time_barrier   Measure MPI barrier time
 ! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
 !                   to standard output (this does *not* otherwise affect the simulation)
+! mp_rebalance      Attempt to rebalance particle between processes
   logical, parameter :: mp_dbg_mode = .false.
   logical, parameter :: mp_dev_mode = .false.
@@ -132,4 +133,5 @@
   logical, parameter :: mp_measure_time=.false.
   logical, parameter :: mp_exact_numpart=.true.
+  logical, parameter :: mp_rebalance=.true.
 
 ! for measuring CPU/Wall time
@@ -144,6 +146,6 @@
   real(dp),private :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
   real(sp),private :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
-  real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
-  real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
+! real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
+! real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
   real(dp),private :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
   real(sp),private :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
@@ -190,6 +192,6 @@
 ! mp_np  number of running processes, decided at run-time
 !***********************************************************************
-  use par_mod, only: maxpart, numwfmem, dep_prec
-  use com_mod, only: mpi_mode, verbosity
+  use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
+  use com_mod, only: mpi_mode, verbosity, creceptor0
 
   implicit none
@@ -337,5 +339,5 @@
 
 ! Set maxpart per process
-! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
+! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
   maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
   if (mp_np == 1) maxpart_mpi = maxpart
@@ -365,3 +367,10 @@
   end if
 
+! Write whether MPI_IN_PLACE is used or not
+#ifdef USE_MPIINPLACE
+  if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
+#else
+  if (lroot) allocate(creceptor0(maxreceptor,maxspec))
+  if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
+#endif
   goto 101
@@ -559,3 +568,3 @@
 ! invalid particles at the end of the arrays
 
-601 do i=num_part, 1, -1
+601 do i=numpart, 1, -1
@@ -1961,5 +1970,5 @@
     if (readclouds) then
       j=j+1
-      call MPI_Irecv(ctwc(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+      call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
           &MPI_COMM_WORLD,reqs(j),mp_ierr)
       if (mp_ierr /= 0) goto 600
@@ -2326,5 +2335,5 @@
     if (readclouds) then
       j=j+1
-      call MPI_Irecv(ctwcn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
+      call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
          &MPI_COMM_WORLD,reqs(j),mp_ierr)
       if (mp_ierr /= 0) goto 600
@@ -2462,9 +2471,25 @@
   end if
 
+! Receptor concentrations
+  if (lroot) then
+    call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
+         & mp_comm_used,mp_ierr)
+    if (mp_ierr /= 0) goto 600
+  else
+    call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
+         & mp_comm_used,mp_ierr)
+  end if
+
 #else
 
   call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
        & mp_comm_used, mp_ierr)
+  if (mp_ierr /= 0) goto 600
   if (lroot) gridunc = gridunc0
+
+  call MPI_Reduce(creceptor, creceptor0,rcpt_size,mp_sp,MPI_SUM,id_root, &
+       & mp_comm_used,mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  if (lroot) creceptor = creceptor0
 
 #endif
@@ -2482,13 +2507,4 @@
   end if
 
-! Receptor concentrations
-  if (lroot) then
-    call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
-         & mp_comm_used,mp_ierr)
-    if (mp_ierr /= 0) goto 600
-  else
-    call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
-         & mp_comm_used,mp_ierr)
-  end if
 
   if (mp_measure_time) call mpif_mtime('commtime',1)
@@ -2700,17 +2716,17 @@
   end if
 
-  case ('readwind')
-    if (imode.eq.0) then
-      call cpu_time(mp_readwind_time_beg)
-      mp_readwind_wtime_beg = mpi_wtime()
-    else
-      call cpu_time(mp_readwind_time_end)
-      mp_readwind_wtime_end = mpi_wtime()
-
-      mp_readwind_time_total = mp_readwind_time_total + &
-           &(mp_readwind_time_end - mp_readwind_time_beg)
-      mp_readwind_wtime_total = mp_readwind_wtime_total + &
-           &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
-    end if
+! case ('readwind')
+!   if (imode.eq.0) then
+!     call cpu_time(mp_readwind_time_beg)
+!     mp_readwind_wtime_beg = mpi_wtime()
+!   else
+!     call cpu_time(mp_readwind_time_end)
+!     mp_readwind_wtime_end = mpi_wtime()
+!
+!     mp_readwind_time_total = mp_readwind_time_total + &
+!          &(mp_readwind_time_end - mp_readwind_time_beg)
+!     mp_readwind_wtime_total = mp_readwind_wtime_total + &
+!          &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
+!   end if
 
   case ('commtime')
@@ -2788,8 +2804,8 @@
     write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
         & mp_getfields_time_total
-    write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
-        & mp_readwind_wtime_total
-    write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
-        & mp_readwind_time_total
+!   write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
+!        & mp_readwind_wtime_total
+!   write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
+!        & mp_readwind_time_total
     write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
         & mp_io_wtime_total
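The USE_MPIINPLACE split above is the core of the changeset: with MPI_IN_PLACE the root rank sums the receptor concentrations directly into creceptor, while the fallback path reduces into the separate creceptor0 buffer added in com_mod.f90. The following self-contained sketch shows the two variants; the array names and sizes are placeholders, not the FLEXPART ones, and like mpi_mod.f90 itself it needs the preprocessor (e.g. mpifort -cpp, optionally -DUSE_MPIINPLACE):

! Build with e.g.:  mpifort -cpp [-DUSE_MPIINPLACE] reduce_sketch.f90
program reduce_sketch
  use mpi
  implicit none
  integer, parameter :: n = 8       ! placeholder for rcpt_size
  real :: recept(n)                 ! plays the role of creceptor
  real :: recept0(n)                ! plays the role of creceptor0
  integer :: ierr, rank

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  recept = real(rank+1)             ! each rank's local contribution

#ifdef USE_MPIINPLACE
  ! The root sums into its own buffer; the receive-buffer argument is
  ! only significant on the root, so other ranks pass a dummy array.
  if (rank == 0) then
    call MPI_Reduce(MPI_IN_PLACE, recept, n, MPI_REAL, MPI_SUM, 0, &
         & MPI_COMM_WORLD, ierr)
  else
    call MPI_Reduce(recept, recept0, n, MPI_REAL, MPI_SUM, 0, &
         & MPI_COMM_WORLD, ierr)
  end if
#else
  ! Fallback without MPI_IN_PLACE: reduce into a separate buffer and
  ! copy back on the root, which is the role creceptor0 plays here.
  call MPI_Reduce(recept, recept0, n, MPI_REAL, MPI_SUM, 0, &
       & MPI_COMM_WORLD, ierr)
  if (rank == 0) recept = recept0
#endif

  if (rank == 0) write(*,*) 'reduced first element:', recept(1)
  call MPI_Finalize(ierr)
end program reduce_sketch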
src/timemanager_mpi.f90
Particle redistribution between processes is now gated on the new mp_rebalance switch, and a stale TODO comment is dropped.

--- src/timemanager_mpi.f90@20963b1
+++ src/timemanager_mpi.f90@328fdf9
@@ -344,4 +344,4 @@
 ! Check if particles should be redistributed among processes
 !***********************************************************
-    call mpif_calculate_part_redist(itime)
+    if (mp_rebalance) call mpif_calculate_part_redist(itime)
 
@@ -532,5 +532,5 @@
     end if
 
-    else ! :TODO: check for zeroing in the netcdf module
+    else
       call concoutput_surf_nest(itime,outnum)
     end if
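mpif_calculate_part_redist is the load-balancing step that mp_rebalance switches on or off. As a purely hypothetical illustration of the imbalance such a step reacts to (this is not FLEXPART's algorithm; all names here are invented), one can gather the per-rank particle counts and report the spread:

program imbalance_sketch
  use mpi
  implicit none
  integer :: ierr, rank, nranks
  integer :: sendbuf(1)
  integer, allocatable :: counts(:)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nranks, ierr)
  allocate(counts(nranks))

  sendbuf(1) = 1000 + 500*rank   ! stand-in for this rank's particle count

  ! Let every rank see all counts, so each can judge the imbalance.
  call MPI_Allgather(sendbuf, 1, MPI_INTEGER, counts, 1, MPI_INTEGER, &
       & MPI_COMM_WORLD, ierr)

  if (rank == 0) write(*,'(A,F8.2)') 'max/min particle ratio: ', &
       & real(maxval(counts))/real(minval(counts))

  call MPI_Finalize(ierr)
end program imbalance_sketch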