Changes in / [d005a67:5d74ed9] in flexpart.git
Files:
- 3 deleted
- 16 edited
options/COMMAND
r0a94e13 → r6d73c4b

  IFINE= 4, ! Reduction for time step in vertical transport, used only if CTL>1
  IOUT= 1, ! Output type: [1]mass 2]pptv 3]1&2 4]plume 5]1&4, +8 for NetCDF output
- IPOUT= 0, ! Particle position output: 0]no 1]every output 2]only at end 3]time averaged
+ IPOUT= 0, ! Particle position output: 0]no 1]every output 2]only at end
  LSUBGRID= 0, ! Increase of ABL heights due to sub-grid scale orographic variations;[0]off 1]on
  LCONVECTION= 1, ! Switch for convection parameterization;0]off [1]on
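These switches are read from the COMMAND file as a Fortran namelist (see the readcommand.f90 hunk below, which drops IPOUTFAC from the namelist). As a minimal, self-contained sketch of how such a namelist can be read and validated — the unit number, file handling and the final range check are assumptions for illustration, not the FLEXPART implementation:

    program read_command_sketch
      implicit none
      integer :: ifine, iout, ipout, lsubgrid, lconvection, ios
      namelist /command/ ifine, iout, ipout, lsubgrid, lconvection
      ! Defaults mirror the values shown in the hunk above
      ifine=4; iout=1; ipout=0; lsubgrid=0; lconvection=1
      open(10, file='COMMAND', status='old', action='read', iostat=ios)
      if (ios.eq.0) then
        read(10, nml=command, iostat=ios)   ! keywords not present keep their defaults
        close(10)
      end if
      ! After this changeset only 0, 1 and 2 are accepted for IPOUT
      if (ipout.lt.0 .or. ipout.gt.2) stop 'IPOUT must be 0, 1 or 2'
      write(*,*) 'IPOUT =', ipout
    end program read_command_sketch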
src/FLEXPART.f90
r0a94e13 → r50958b8

  integer :: detectformat

+ ! Initialize arrays in com_mod
+ !*****************************
+ call com_mod_allocate_part(maxpart)
+
  ! Generate a large number of random numbers
  !******************************************
…
  endif

- ! Initialize arrays in com_mod
- !*****************************
- call com_mod_allocate_part(maxpart)
-
  ! Read the age classes to be used
  !********************************
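The hunk moves the call to com_mod_allocate_part(maxpart) up, so the particle arrays in com_mod exist before the later initialization code runs. A minimal sketch of that allocate-before-use pattern (module and variable names are illustrative only, not the real com_mod):

    module part_arrays_sketch
      implicit none
      real, allocatable :: ztra1(:)          ! stand-in for a particle array
    contains
      subroutine allocate_part(nmpart)
        integer, intent(in) :: nmpart
        if (.not.allocated(ztra1)) allocate(ztra1(nmpart))
      end subroutine allocate_part
    end module part_arrays_sketch

    program order_matters
      use part_arrays_sketch
      implicit none
      call allocate_part(1000)   ! allocation first ...
      ztra1(:) = 0.              ! ... only then may the arrays be touched
    end program order_matters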
src/FLEXPART_MPI.f90
r0c8c7f2 → r20963b1

  if (mp_measure_time) call mpif_mtime('flexpart',0)

+ ! Initialize arrays in com_mod
+ !*****************************
+
+ if(.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
+
  ! Generate a large number of random numbers
  !******************************************
…
  endif

- ! Initialize arrays in com_mod
- !*****************************
-
- if(.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
-
  ! Read the age classes to be used
…
  end if ! (mpif_pid == 0)

- if (mp_measure_time) call mpif_mtime('iotime', 1)
+ if (mp_measure_time) call mpif_mtime('iotime',0)

  if (verbosity.gt.0 .and. lroot) then
src/com_mod.f90
r0a94e13 → re9e0f06

  implicit none
-
-
  !****************************************************************
…
  real :: ctl,fine
- integer :: ifine,iout,ipout,ipin,iflux,mdomainfill,ipoutfac
+ integer :: ifine,iout,ipout,ipin,iflux,mdomainfill
  integer :: mquasilag,nested_output,ind_source,ind_receptor
  integer :: ind_rel,ind_samp,ioutputforeachrelease,linit_cond,surf_only
…
  ! iout      output options: 1 conc. output (ng/m3), 2 mixing ratio (pptv), 3 both
  ! ipout     particle dump options: 0 no, 1 every output interval, 2 only at end
- ! ipoutfac  increase particle dump interval by factor (default 1)
  ! ipin      read in particle positions from dumped file from a previous run
  ! fine      real(ifine)
…
  logical :: gdomainfill
+
  ! gdomainfill  .T., if domain-filling is global, .F. if not
…
  real :: receptorarea(maxreceptor)
  real :: creceptor(maxreceptor,maxspec)
- real, allocatable, dimension(:,:) :: creceptor0
  character(len=16) :: receptorname(maxreceptor)
  integer :: numreceptor
…
  real, allocatable, dimension(:,:) :: xmass1
  real, allocatable, dimension(:,:) :: xscav_frac1
-
- ! Variables used for writing out interval averages for partoutput
- !****************************************************************
-
- integer, allocatable, dimension(:) :: npart_av
- real, allocatable, dimension(:) :: part_av_cartx,part_av_carty,part_av_cartz,part_av_z,part_av_topo
- real, allocatable, dimension(:) :: part_av_pv,part_av_qv,part_av_tt,part_av_rho,part_av_tro,part_av_hmix
- real, allocatable, dimension(:) :: part_av_uu,part_av_vv,part_av_energy

  ! eso: Moved from timemanager
…
       & idt(nmpart),itramem(nmpart),itrasplit(nmpart),&
       & xtra1(nmpart),ytra1(nmpart),ztra1(nmpart),&
- & xmass1(nmpart, maxspec)) ! ,&
- ! & checklifetime(nmpart,maxspec), species_lifetime(maxspec,2))!CGZ-lifetime
-
- if (ipout.eq.3) then
-   allocate(npart_av(nmpart),part_av_cartx(nmpart),part_av_carty(nmpart),&
-        & part_av_cartz(nmpart),part_av_z(nmpart),part_av_topo(nmpart))
-   allocate(part_av_pv(nmpart),part_av_qv(nmpart),part_av_tt(nmpart),&
-        & part_av_rho(nmpart),part_av_tro(nmpart),part_av_hmix(nmpart))
-   allocate(part_av_uu(nmpart),part_av_vv(nmpart),part_av_energy(nmpart))
- end if
+ & xmass1(nmpart, maxspec),&
+ & checklifetime(nmpart,maxspec), species_lifetime(maxspec,2))!CGZ-lifetime

  allocate(uap(nmpart),ucp(nmpart),uzp(nmpart),us(nmpart),&
       & vs(nmpart),ws(nmpart),cbt(nmpart))

  end subroutine com_mod_allocate_part
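The removed allocation was guarded by the ipout switch, so the interval-average buffers only existed when time-averaged particle output (ipout=3, per the COMMAND comment above) was requested. A small sketch of that guarded-allocation idiom, with hypothetical routine and array names rather than the real com_mod interface:

    subroutine allocate_average_buffers(nmpart, ipout)
      implicit none
      integer, intent(in) :: nmpart, ipout
      real, allocatable, save :: part_av_z(:), part_av_topo(:)
      if (ipout.eq.3) then                        ! only for averaged particle output
        if (.not.allocated(part_av_z))    allocate(part_av_z(nmpart))
        if (.not.allocated(part_av_topo)) allocate(part_av_topo(nmpart))
      end if
    end subroutine allocate_average_buffers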
src/init_domainfill.f90
r0a94e13 → rb5127f9

    endif
  endif

- ! Exit here if resuming a run from particle dump
- !***********************************************
- if (gdomainfill.and.ipin.ne.0) return
-
  ! Do not release particles twice (i.e., not at both in the leftmost and rightmost
…
  !***************************************************************************

- if ((ipin.eq.1).and.(.not.gdomainfill)) then
+ if (ipin.eq.1) then
    open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
         form='unformatted')
src/init_domainfill_mpi.f90
r328fdf9 → rb5127f9

    endif
  endif

- ! Exit here if resuming a run from particle dump
- !***********************************************
- if (gdomainfill.and.ipin.ne.0) return
-
  ! Do not release particles twice (i.e., not at both in the leftmost and rightmost
  ! grid cell) for a global domain
…
        colmass(ix,jy)=(pp(1)-pp(nz))/ga*gridarea(jy)
        colmasstotal=colmasstotal+colmass(ix,jy)
+
      end do
    end do
…
  ! eso TODO: only needed for root process
- if ((ipin.eq.1).and.(.not.gdomainfill)) then
+ if (ipin.eq.1) then
    open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
         form='unformatted')
…
  endif

- if (ipin.eq.0) then
-   numpart = numpart/mp_partgroup_np
-   if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
- end if
-
- else ! Allocate dummy arrays for receiving processes
-   if (ipin.eq.0) then
-     allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),&
-          & idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),&
-          & xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),&
-          & xmass1_tmp(nullsize, nullsize))
-   end if
+ numpart = numpart/mp_partgroup_np
+ if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
+
+ else ! Allocate dummy arrays for receiving processes
+   allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),&
+        & idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),&
+        & xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),&
+        & xmass1_tmp(nullsize, nullsize))

  end if ! end if(lroot)

  ! Distribute particles to other processes (numpart is 'per-process', not total)
- ! Only if not restarting from previous run
- if (ipin.eq.0) then
-   call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
-   call mpif_send_part_properties(npart(1)/mp_partgroup_np)
+ call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
+ ! eso TODO: xmassperparticle: not necessary to send
+ call MPI_Bcast(xmassperparticle, 1, mp_sp, id_root, mp_comm_used, mp_ierr)
+ call mpif_send_part_properties(numpart)

  ! Deallocate the temporary arrays used for all particles
  deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
       & itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
- end if
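On the right-hand side of the last hunk the per-process particle count is broadcast unconditionally; the ipin restart guard is gone. The underlying operation is a plain root-to-all broadcast. A self-contained sketch (hypothetical stand-alone program and values, not FLEXPART code):

    program bcast_sketch
      use mpi
      implicit none
      integer :: numpart, ierr, rank
      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
      if (rank == 0) numpart = 100000                       ! root decides the count
      call MPI_Bcast(numpart, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
      ! every rank now agrees on numpart and can size its local particle arrays
      call MPI_Finalize(ierr)
    end program bcast_sketch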
src/makefile
r0a94e13 → r7123c70

  OBJECTS_SERIAL = \
          releaseparticles.o partoutput.o \
-         partoutput_average.o \
          conccalc.o \
          init_domainfill.o concoutput.o \
…
  ## For MPI version
  OBJECTS_MPI = releaseparticles_mpi.o partoutput_mpi.o \
-         partoutput_average_mpi.o conccalc_mpi.o \
+         conccalc_mpi.o \
          init_domainfill_mpi.o concoutput_mpi.o \
          timemanager_mpi.o FLEXPART_MPI.o \
…
          advance.o initialize.o \
          writeheader.o writeheader_txt.o \
-         partpos_average.o writeprecip.o \
+         writeprecip.o \
          writeheader_surf.o assignland.o\
          part0.o gethourlyOH.o\
…
  part0.o: par_mod.o
  partdep.o: par_mod.o
- partpos_average.o: com_mod.o par_mod.o
  partoutput.o: com_mod.o par_mod.o
- partoutput_average.o: com_mod.o par_mod.o
- partoutput_average_mpi.o: com_mod.o par_mod.o mpi_mod.o
  partoutput_mpi.o: com_mod.o mpi_mod.o par_mod.o
  partoutput_short.o: com_mod.o par_mod.o
src/mpi_mod.f90
r0c8c7f2 → r0ecc1fe

  ! Variables for MPI processes in the 'particle' group
  integer, allocatable, dimension(:) :: mp_partgroup_rank
- integer, allocatable, dimension(:) :: npart_per_process
  integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
…
  ! mp_time_barrier   Measure MPI barrier time
  ! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
- !                   to standard output (this does not otherwise affect the simulation)
+ !                   to standard output (this does *not* otherwise affect the simulation)
  logical, parameter :: mp_dbg_mode = .false.
  logical, parameter :: mp_dev_mode = .false.
…
  ! mp_np  number of running processes, decided at run-time
  !***********************************************************************
- use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
- use com_mod, only: mpi_mode, verbosity, creceptor0
+ use par_mod, only: maxpart, numwfmem, dep_prec
+ use com_mod, only: mpi_mode, verbosity

  implicit none
…
  ! Set maxpart per process
- ! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
+ ! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
  maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
  if (mp_np == 1) maxpart_mpi = maxpart
…
  end if

- ! Allocate array for number of particles per process
- allocate(npart_per_process(0:mp_partgroup_np-1))
-
- ! Write whether MPI_IN_PLACE is used or not
- #ifdef USE_MPIINPLACE
- if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
- #else
- if (lroot) allocate(creceptor0(maxreceptor,maxspec))
- if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
- #endif

  goto 101
…
  ! invalid particles at the end of the arrays

- 601 do i=numpart, 1, -1
+ 601 do i=num_part, 1, -1
    if (itra1(i).eq.-999999999) then
      numpart=numpart-1
…
  integer :: i,jj,nn, num_part=1,m,imin, num_trans
  logical :: first_iter
- integer,allocatable,dimension(:) :: idx_arr
+ integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr
  real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this
…
  ! All processes exchange information on number of particles
  !****************************************************************************
- allocate( idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
-
- call MPI_Allgather(numpart, 1, MPI_INTEGER, npart_per_process, &
+ allocate(numparticles_mpi(0:mp_partgroup_np-1), &
+      &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
+
+ call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, &
       & 1, MPI_INTEGER, mp_comm_used, mp_ierr)
…
  ! Sort from lowest to highest
  ! Initial guess: correct order
- sorted(:) = npart_per_process(:)
+ sorted(:) = numparticles_mpi(:)
  do i=0, mp_partgroup_np-1
    idx_arr(i) = i
  end do
-
- ! Do not rebalance particles for ipout=3
- if (ipout.eq.3) return
…
  m=mp_partgroup_np-1 ! index for last sorted process (most particles)
  do i=0,mp_partgroup_np/2-1
-   num_trans = npart_per_process(idx_arr(m)) - npart_per_process(idx_arr(i))
+   num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
    if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
-     if ( npart_per_process(idx_arr(m)).gt.mp_min_redist.and.&
-          & real(num_trans)/real(npart_per_process(idx_arr(m))).gt.mp_redist_fract) then
+     if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&
+          & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then
  ! DBG
- ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process', &
- ! &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process
+ ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi', &
+ ! &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi
  ! DBG
        call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
…
  end do

- deallocate( idx_arr, sorted)
+ deallocate(numparticles_mpi, idx_arr, sorted)

  end subroutine mpif_calculate_part_redist
…
  if (readclouds) then
    j=j+1
-   call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
+   call MPI_Irecv(ctwc(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
         &MPI_COMM_WORLD,reqs(j),mp_ierr)
    if (mp_ierr /= 0) goto 600
…
  if (readclouds) then
    j=j+1
-   call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
+   call MPI_Irecv(ctwcn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
         &MPI_COMM_WORLD,reqs(j),mp_ierr)
    if (mp_ierr /= 0) goto 600
…
  end if

- ! Receptor concentrations
+ #else
+
+   call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
+        & mp_comm_used, mp_ierr)
+   if (lroot) gridunc = gridunc0
+
+ #endif
+
+ if ((WETDEP).and.(ldirect.gt.0)) then
+   call MPI_Reduce(wetgridunc, wetgridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
+        & mp_comm_used, mp_ierr)
+   if (mp_ierr /= 0) goto 600
+ end if
+
+ if ((DRYDEP).and.(ldirect.gt.0)) then
+   call MPI_Reduce(drygridunc, drygridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
+        & mp_comm_used, mp_ierr)
+   if (mp_ierr /= 0) goto 600
+ end if
+
+ ! Receptor concentrations
  if (lroot) then
    call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
…
         & mp_comm_used,mp_ierr)
  end if
-
- #else
-
-   call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
-        & mp_comm_used, mp_ierr)
-   if (mp_ierr /= 0) goto 600
-   if (lroot) gridunc = gridunc0
-
-   call MPI_Reduce(creceptor, creceptor0,rcpt_size,mp_sp,MPI_SUM,id_root, &
-        & mp_comm_used,mp_ierr)
-   if (mp_ierr /= 0) goto 600
-   if (lroot) creceptor = creceptor0
-
- #endif
-
- if ((WETDEP).and.(ldirect.gt.0)) then
-   call MPI_Reduce(wetgridunc, wetgridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
-        & mp_comm_used, mp_ierr)
-   if (mp_ierr /= 0) goto 600
- end if
-
- if ((DRYDEP).and.(ldirect.gt.0)) then
-   call MPI_Reduce(drygridunc, drygridunc0, grid_size2d, mp_cp, MPI_SUM, id_root, &
-        & mp_comm_used, mp_ierr)
-   if (mp_ierr /= 0) goto 600
- end if

  if (mp_measure_time) call mpif_mtime('commtime',1)
…
  case ('readwind')
    if (imode.eq.0) then
      call cpu_time(mp_readwind_time_beg)
      mp_readwind_wtime_beg = mpi_wtime()
    else
      call cpu_time(mp_readwind_time_end)
      mp_readwind_wtime_end = mpi_wtime()

      mp_readwind_time_total = mp_readwind_time_total + &
           &(mp_readwind_time_end - mp_readwind_time_beg)
      mp_readwind_wtime_total = mp_readwind_wtime_total + &
           &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
    end if

  case ('commtime')
…
  write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
       & mp_getfields_time_total
- !write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
- !& mp_readwind_wtime_total
- !write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
- !& mp_readwind_time_total
+ write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
+      & mp_readwind_wtime_total
+ write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
+      & mp_readwind_time_total
  write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
       & mp_io_wtime_total
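The last hunk switches between two equivalent reduction styles: with USE_MPIINPLACE the root sums into its own array via MPI_IN_PLACE, otherwise the sums land in temporaries (gridunc0, creceptor0) that are copied back on the root. A self-contained sketch of both styles (hypothetical stand-alone program; array names are placeholders, not the FLEXPART fields):

    program reduce_sketch
      use mpi
      implicit none
      integer, parameter :: n = 4
      real :: grid(n), grid0(n), dummy(n)
      integer :: ierr, rank
      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)

      ! Style 1: in-place reduction, no extra receive buffer on the root
      grid(:) = real(rank+1)
      if (rank == 0) then
        call MPI_Reduce(MPI_IN_PLACE, grid, n, MPI_REAL, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
      else
        call MPI_Reduce(grid, dummy, n, MPI_REAL, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
      end if

      ! Style 2: reduce into a temporary and copy back on the root
      grid(:) = real(rank+1)
      call MPI_Reduce(grid, grid0, n, MPI_REAL, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
      if (rank == 0) grid = grid0

      call MPI_Finalize(ierr)
    end program reduce_sketch

The in-place variant avoids keeping a second full copy of the output array on the root; the buffered variant needs the gridunc0/creceptor0-style temporaries seen in the hunk.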
src/netcdf_output_mod.f90
r0a94e13 → r4ad96c5

  character(len=255), parameter :: institution = 'NILU'

- integer :: tpointer=0
+ integer :: tpointer
  character(len=255) :: ncfname, ncfnamen
src/par_mod.f90
r0a94e13 → r79e0349

  integer,parameter :: unitpath=1, unitcommand=1, unitageclasses=1, unitgrid=1
- integer,parameter :: unitavailab=1, unitreleases=88, unitpartout=93, unitpartout_average=105
+ integer,parameter :: unitavailab=1, unitreleases=88, unitpartout=93
  integer,parameter :: unitpartin=93, unitflux=98, unitouttraj=96
  integer,parameter :: unitvert=1, unitoro=1, unitpoin=1, unitreceptor=1
src/partoutput.f90
r0a94e13 → rd2a5a83

  !**************************************

- if (ipout.eq.1.or.ipout.eq.3) then
+ if (ipout.eq.1) then
    open(unitpartout,file=path(2)(1:length(2))//'partposit_'//adate// &
         atime,form='unformatted')
src/partoutput_mpi.f90
r0a94e13 → rd2a5a83

  !**************************************

- if (ipout.eq.1.or.ipout.eq.3) then
+ if (ipout.eq.1) then
    open(unitpartout,file=path(2)(1:length(2))//'partposit_'//adate// &
         atime,form='unformatted',status=file_stat,position='append')
src/readcommand.f90
r0a94e13 → r20963b1

  ! ipin      1 continue simulation with dumped particle data, 0 no           *
  ! ipout     0 no particle dump, 1 every output time, 3 only at end          *
- ! ipoutfac  increase particle dump interval by factor (default 1)           *
  ! itsplit [s]   time constant for particle splitting                        *
  ! loutaver [s]  concentration output is an average over loutaver            *
…
       iout, &
       ipout, &
-      ipoutfac, &
       lsubgrid, &
       lconvection, &
…
  iout=3
  ipout=0
- ipoutfac=1
  lsubgrid=1
  lconvection=1
…
  !****************************************************************

- if ((ipout.ne.0).and.(ipout.ne.1).and.(ipout.ne.2).and.(ipout.ne.3)) then
+ if ((ipout.ne.0).and.(ipout.ne.1).and.(ipout.ne.2)) then
    write(*,*) ' #### FLEXPART MODEL ERROR! FILE COMMAND:     #### '
-   write(*,*) ' #### IPOUT MUST BE 0, 1, 2 OR 3!             #### '
+   write(*,*) ' #### IPOUT MUST BE 1, 2 OR 3!                #### '
    stop
  endif
src/timemanager.f90
r0a94e13 → rc7d1052

  45 format(i13,' Seconds simulated: ',i13, ' Particles:    Uncertainty: ',3f7.3)
  46 format(' Simulated ',f7.1,' hours (',i13,' s), ',i13, ' particles')
- if (ipout.ge.1) then
-   if (mod(itime,ipoutfac*loutstep).eq.0) call partoutput(itime) ! dump particle positions
-   if (ipout.eq.3) call partoutput_average(itime) ! dump particle positions
- endif
+ if (ipout.ge.1) call partoutput(itime) ! dump particle positions
  loutnext=loutnext+loutstep
  loutstart=loutnext-loutaver/2
…
  ! write (*,*) 'advance: ',prob(1),xmass1(j,1),ztra1(j)

- ! Calculate average position for particle dump output
- !****************************************************
-
- if (ipout.eq.3) call partpos_average(itime,j)
-
  ! Calculate the gross fluxes across layer interfaces
  !***************************************************
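The removed branch dumps particle positions only when itime is a multiple of ipoutfac*loutstep, i.e. the normal output step stretched by the IPOUTFAC factor. A tiny sketch of that modulo test with made-up values:

    program interval_sketch
      implicit none
      integer :: itime, loutstep, ipoutfac
      loutstep = 3600        ! one output step per hour (illustrative)
      ipoutfac = 3           ! dump positions only every 3rd output step
      do itime = 0, 6*loutstep, loutstep
        if (mod(itime, ipoutfac*loutstep) .eq. 0) write(*,*) 'dump at itime =', itime
      end do
    end program interval_sketch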
src/timemanager_mpi.f90
r0c8c7f2 → r20963b1

  integer :: j,ks,kp,l,n,itime=0,nstop,nstop1,memstat=0
  ! integer :: ksp
- integer :: ip,irec
+ integer :: ip
  integer :: loutnext,loutstart,loutend
  integer :: ix,jy,ldeltat,itage,nage,idummy
…
  ! Measure time spent in timemanager
  if (mp_measure_time) call mpif_mtime('timemanager',0)
-
  ! First output for time 0
…
    end if

- else
+ else ! :TODO: check for zeroing in the netcdf module
    call concoutput_surf_nest(itime,outnum)
  end if
…
  46 format(' Simulated ',f7.1,' hours (',i13,' s), ',i13, ' particles')
  if (ipout.ge.1) then
-   if (mp_measure_time) call mpif_mtime('iotime',0)
-   irec=0
    do ip=0, mp_partgroup_np-1
-     if (ip.eq.mp_partid) then
-       if (mod(itime,ipoutfac*loutstep).eq.0) call partoutput(itime) ! dump particle positions
-       if (ipout.eq.3) call partoutput_average(itime,irec) ! dump particle positions
-     endif
-     if (ipout.eq.3) irec=irec+npart_per_process(ip)
+     if (ip.eq.mp_partid) call partoutput(itime) ! dump particle positions
      call mpif_mpi_barrier
    end do
-   if (mp_measure_time) call mpif_mtime('iotime',1)
  end if
…
  if (mp_measure_time) call mpif_mtime('advance',1)

- ! Calculate average position for particle dump output
- !****************************************************
-
- if (ipout.eq.3) call partpos_average(itime,j)
-
  ! Calculate the gross fluxes across layer interfaces
…
  do ip=0, mp_partgroup_np-1
    if (ip.eq.mp_partid) then
-     if (mp_dbg_mode) write(*,*) 'call partoutput(itime), proc, mp_partid',ip,mp_partid
+     !if (mp_dbg_mode) write(*,*) 'call partoutput(itime), proc, mp_partid',ip,mp_partid
      call partoutput(itime) ! dump particle positions
    end if
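In the MPI version the particle dump is serialized: each rank writes only on its turn, separated by barriers, so the dump file (opened with position='append' in partoutput_mpi.f90 above) grows in rank order rather than being written concurrently. A self-contained sketch of that pattern (hypothetical program, writing to standard output instead of the dump file):

    program serialized_output_sketch
      use mpi
      implicit none
      integer :: ierr, rank, nprocs, ip
      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
      do ip = 0, nprocs-1
        if (ip == rank) write(*,*) 'rank', rank, 'dumps its particles now'
        call MPI_Barrier(MPI_COMM_WORLD, ierr)   ! let rank ip finish before the next starts
      end do
      call MPI_Finalize(ierr)
    end program serialized_output_sketch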
src/verttransform_ecmwf.f90
r0a94e13 → r437c545

  use com_mod
  use cmapf_mod, only: cc2gll
+ ! use mpi_mod

  implicit none