Changeset d005a67 in flexpart.git
- Timestamp: May 21, 2019, 1:09:18 PM
- Branches: master, 10.4.1_pesei, GFS_025, bugfixes+enhancements, dev, release-10, release-10.4.1, scaling-bug
- Children: f963113, 0a98afe
- Parents: 5d74ed9 (diff), 0c8c7f2 (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Files: 3 added, 19 edited
Legend:
- Unchanged lines carry both an old and a new line number
- Removed lines carry only the old line number and a leading '-'
- Added lines carry only the new line number and a leading '+'
- Runs of '…' mark unchanged lines omitted between hunks
options/COMMAND
r2753a5c → r0a94e13

   19    19    IFINE= 4, ! Reduction for time step in vertical transport, used only if CTL>1
   20    20    IOUT= 1, ! Output type: [1]mass 2]pptv 3]1&2 4]plume 5]1&4, +8 for NetCDF output
   21         - IPOUT= 0, ! Particle position output: 0]no 1]every output 2]only at end
         21   + IPOUT= 0, ! Particle position output: 0]no 1]every output 2]only at end 3]time averaged
   22    22    LSUBGRID= 0, ! Increase of ABL heights due to sub-grid scale orographic variations;[0]off 1]on
   23    23    LCONVECTION= 1, ! Switch for convection parameterization;0]off [1]on
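The new IPOUT=3 option works together with the IPOUTFAC keyword introduced in readcommand.f90 further down. A minimal sketch of how the two combine, assuming the namelist form of options/COMMAND (illustrative values, all other keywords omitted):

    &COMMAND
     IOUT=     1,    ! mass concentration output
     IPOUT=    3,    ! particle position output, time averaged
     IPOUTFAC= 2,    ! dump positions only every 2nd output interval
     LOUTSTEP= 3600, ! gridded output every hour
     /

With these settings, gridded output is written every hour, while particle positions are written every two hours as interval averages.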
src/FLEXPART.f90
r2753a5c → r0a94e13

   68    68    integer :: detectformat
   69    69
   70         -
   71         -
   72         - ! Initialize arrays in com_mod
   73         - !*****************************
   74         - call com_mod_allocate_part(maxpart)
   75    70
   76         -
   77    71    ! Generate a large number of random numbers
   78    72    !******************************************
    …     …
  172   166    endif
  173   167
        168   + ! Initialize arrays in com_mod
        169   + !*****************************
        170   + call com_mod_allocate_part(maxpart)
        171   +
        172   +
  174   173    ! Read the age classes to be used
  175   174    !********************************
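A likely reason for moving the call, consistent with the com_mod.f90 hunk below: com_mod_allocate_part now conditionally allocates the ipout=3 averaging arrays, so ipout must already have been read from the COMMAND file when the routine runs. Placing the call after the input-reading block (and similarly in FLEXPART_MPI.f90) guarantees that.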
src/FLEXPART_MPI.f90
r20963b1 → r0c8c7f2

   77    77    if (mp_measure_time) call mpif_mtime('flexpart',0)
   78    78
   79         - ! Initialize arrays in com_mod
   80         - !*****************************
   81         -
   82         - if(.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
   83         -
   84    79
   85    80    ! Generate a large number of random numbers
   86    81    !******************************************
    …     …
  180   175    endif
  181   176
        177   + ! Initialize arrays in com_mod
        178   + !*****************************
        179   +
        180   + if(.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
        181   +
  182   182
  183   183    ! Read the age classes to be used
    …     …
  413   413    end if ! (mpif_pid == 0)
  414   414
  415         - if (mp_measure_time) call mpif_mtime('iotime', 0)
        415   + if (mp_measure_time) call mpif_mtime('iotime',1)
  416   416
  417   417    if (verbosity.gt.0 .and. lroot) then
src/com_mod.f90
re9e0f06 → r0a94e13

   18    18
   19    19    implicit none
         20   +
         21   +
   20    22
   21    23    !****************************************************************
    …     …
   69    71
   70    72    real :: ctl,fine
   71         - integer :: ifine,iout,ipout,ipin,iflux,mdomainfill
         73   + integer :: ifine,iout,ipout,ipin,iflux,mdomainfill,ipoutfac
   72    74    integer :: mquasilag,nested_output,ind_source,ind_receptor
   73    75    integer :: ind_rel,ind_samp,ioutputforeachrelease,linit_cond,surf_only
    …     …
   82    84    ! iout      output options: 1 conc. output (ng/m3), 2 mixing ratio (pptv), 3 both
   83    85    ! ipout     particle dump options: 0 no, 1 every output interval, 2 only at end
         86   + ! ipoutfac  increase particle dump interval by factor (default 1)
   84    87    ! ipin      read in particle positions from dumped file from a previous run
   85    88    ! fine      real(ifine)
    …     …
  128   131
  129   132    logical :: gdomainfill
  130         -
  131   133    ! gdomainfill .T., if domain-filling is global, .F. if not
  132   134
    …     …
  651   653    real :: receptorarea(maxreceptor)
  652   654    real :: creceptor(maxreceptor,maxspec)
        655   + real, allocatable, dimension(:,:) :: creceptor0
  653   656    character(len=16) :: receptorname(maxreceptor)
  654   657    integer :: numreceptor
    …     …
  673   676    real, allocatable, dimension(:,:) :: xmass1
  674   677    real, allocatable, dimension(:,:) :: xscav_frac1
        678   +
        679   + ! Variables used for writing out interval averages for partoutput
        680   + !****************************************************************
        681   +
        682   + integer, allocatable, dimension(:) :: npart_av
        683   + real, allocatable, dimension(:) :: part_av_cartx,part_av_carty,part_av_cartz,part_av_z,part_av_topo
        684   + real, allocatable, dimension(:) :: part_av_pv,part_av_qv,part_av_tt,part_av_rho,part_av_tro,part_av_hmix
        685   + real, allocatable, dimension(:) :: part_av_uu,part_av_vv,part_av_energy
  675   686
  676   687    ! eso: Moved from timemanager
    …     …
  780   791    & idt(nmpart),itramem(nmpart),itrasplit(nmpart),&
  781   792    & xtra1(nmpart),ytra1(nmpart),ztra1(nmpart),&
  782         - & xmass1(nmpart, maxspec),&
  783         - & checklifetime(nmpart,maxspec), species_lifetime(maxspec,2))!CGZ-lifetime
        793   + & xmass1(nmpart, maxspec)) ! ,&
        794   + ! & checklifetime(nmpart,maxspec), species_lifetime(maxspec,2))!CGZ-lifetime
        795   +
        796   + if (ipout.eq.3) then
        797   +   allocate(npart_av(nmpart),part_av_cartx(nmpart),part_av_carty(nmpart),&
        798   +        & part_av_cartz(nmpart),part_av_z(nmpart),part_av_topo(nmpart))
        799   +   allocate(part_av_pv(nmpart),part_av_qv(nmpart),part_av_tt(nmpart),&
        800   +        & part_av_rho(nmpart),part_av_tro(nmpart),part_av_hmix(nmpart))
        801   +   allocate(part_av_uu(nmpart),part_av_vv(nmpart),part_av_energy(nmpart))
        802   + end if
  784   803
  785   804
  786   805    allocate(uap(nmpart),ucp(nmpart),uzp(nmpart),us(nmpart),&
  787   806    & vs(nmpart),ws(nmpart),cbt(nmpart))
  788   807
  789   808    end subroutine com_mod_allocate_part
  790   809
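The part_av_* arrays hold per-particle running sums, and npart_av the corresponding sample counts. The routine that fills them (partpos_average.f90, one of the three added files) is not displayed in this merge view; the following is only a hypothetical sketch of the accumulate-then-divide pattern such interval averaging implies, shown for a single quantity:

    ! Hypothetical sketch, not FLEXPART's partpos_average.f90 (which this
    ! changeset adds but does not display): accumulate a running sum per
    ! particle at every sync step, then average at dump time.
    subroutine sketch_accumulate(j, zval)
      use com_mod, only: npart_av, part_av_z
      implicit none
      integer, intent(in) :: j    ! particle index
      real, intent(in)    :: zval ! value sampled at this time step
      npart_av(j)  = npart_av(j) + 1
      part_av_z(j) = part_av_z(j) + zval
    end subroutine sketch_accumulate

At output time (partoutput_average), the mean would then be part_av_z(j)/real(npart_av(j)), with sum and count reset to zero for the next interval.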
src/init_domainfill.f90
rb5127f9 → r0a94e13

   86    86    endif
   87    87    endif
         88   +
         89   + ! Exit here if resuming a run from particle dump
         90   + !***********************************************
         91   + if (gdomainfill.and.ipin.ne.0) return
   88    92
   89    93    ! Do not release particles twice (i.e., not at both in the leftmost and rightmost
    …     …
  414   418    !***************************************************************************
  415   419
  416         - if (ipin.eq.1) then
        420   + if ((ipin.eq.1).and.(.not.gdomainfill)) then
  417   421    open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
  418   422    form='unformatted')
src/init_domainfill_mpi.f90
rb5127f9 → r328fdf9

  110   110    endif
  111   111    endif
        112   + ! Exit here if resuming a run from particle dump
        113   + !***********************************************
        114   + if (gdomainfill.and.ipin.ne.0) return
        115   +
  112   116    ! Do not release particles twice (i.e., not at both in the leftmost and rightmost
  113   117    ! grid cell) for a global domain
    …     …
  213   217    colmass(ix,jy)=(pp(1)-pp(nz))/ga*gridarea(jy)
  214   218    colmasstotal=colmasstotal+colmass(ix,jy)
  215         -
  216   219    end do
  217   220    end do
    …     …
  466   469
  467   470    ! eso TODO: only needed for root process
  468         - if (ipin.eq.1) then
        471   + if ((ipin.eq.1).and.(.not.gdomainfill)) then
  469   472    open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
  470   473    form='unformatted')
    …     …
  474   477    endif
  475   478
  476         - numpart = numpart/mp_partgroup_np
  477         - if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
  478         -
  479         - else ! Allocate dummy arrays for receiving processes
  480         - allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),&
  481         - & idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),&
  482         - & xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),&
  483         - & xmass1_tmp(nullsize, nullsize))
        479   + if (ipin.eq.0) then
        480   +   numpart = numpart/mp_partgroup_np
        481   +   if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
        482   + end if
        483   +
        484   + else ! Allocate dummy arrays for receiving processes
        485   +   if (ipin.eq.0) then
        486   +     allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),&
        487   +          & idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),&
        488   +          & xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),&
        489   +          & xmass1_tmp(nullsize, nullsize))
        490   +   end if
  484   491
  485   492    end if ! end if(lroot)
        493   +
  486   494
  487   495
  488   496    ! Distribute particles to other processes (numpart is 'per-process', not total)
  489         - call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
  490         - ! eso TODO: xmassperparticle: not necessary to send
  491         - call MPI_Bcast(xmassperparticle, 1, mp_sp, id_root, mp_comm_used, mp_ierr)
  492         - call mpif_send_part_properties(numpart)
        497   + ! Only if not restarting from previous run
        498   + if (ipin.eq.0) then
        499   +   call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
        500   +   call mpif_send_part_properties(npart(1)/mp_partgroup_np)
  493   501
  494   502    ! Deallocate the temporary arrays used for all particles
  495         - deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
        503   +   deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
  496   504    & itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
        505   + end if
  497   506
  498   507
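The net effect of the new guards appears to be restart support for domain-filling runs: a global domain-filling run resumed from a particle dump (gdomainfill with ipin≠0) returns before creating any particles, and with ipin≠0 the per-process particle count, the initial scatter via mpif_send_part_properties, and the temporary-array cleanup are all skipped, since particle data are read from the dump file instead.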
src/makefile
r7123c70 → r0a94e13

  118   118    OBJECTS_SERIAL = \
  119   119    releaseparticles.o partoutput.o \
        120   + partoutput_average.o \
  120   121    conccalc.o \
  121   122    init_domainfill.o concoutput.o \
    …     …
  132   133    ## For MPI version
  133   134    OBJECTS_MPI = releaseparticles_mpi.o partoutput_mpi.o \
  134         - conccalc_mpi.o \
        135   + partoutput_average_mpi.o conccalc_mpi.o \
  135   136    init_domainfill_mpi.o concoutput_mpi.o \
  136   137    timemanager_mpi.o FLEXPART_MPI.o \
    …     …
  149   150    advance.o initialize.o \
  150   151    writeheader.o writeheader_txt.o \
  151         - writeprecip.o \
        152   + partpos_average.o writeprecip.o \
  152   153    writeheader_surf.o assignland.o\
  153   154    part0.o gethourlyOH.o\
    …     …
  348   349    part0.o: par_mod.o
  349   350    partdep.o: par_mod.o
        351   + partpos_average.o: com_mod.o par_mod.o
  350   352    partoutput.o: com_mod.o par_mod.o
        353   + partoutput_average.o: com_mod.o par_mod.o
        354   + partoutput_average_mpi.o: com_mod.o par_mod.o mpi_mod.o
  351   355    partoutput_mpi.o: com_mod.o mpi_mod.o par_mod.o
  352   356    partoutput_short.o: com_mod.o par_mod.o
src/mpi_mod.f90
r0ecc1fe → r0c8c7f2

   88    88    ! Variables for MPI processes in the 'particle' group
   89    89    integer, allocatable, dimension(:) :: mp_partgroup_rank
         90   + integer, allocatable, dimension(:) :: npart_per_process
   90    91    integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
    …     …
  125   126    ! mp_time_barrier   Measure MPI barrier time
  126   127    ! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
  127         - !                   to standard output (this does *not* otherwise affect the simulation)
        128   + !                   to standard output (this does not otherwise affect the simulation)
  128   129    logical, parameter :: mp_dbg_mode = .false.
  129   130    logical, parameter :: mp_dev_mode = .false.
    …     …
  190   191    ! mp_np       number of running processes, decided at run-time
  191   192    !***********************************************************************
  192         - use par_mod, only: maxpart, numwfmem, dep_prec
  193         - use com_mod, only: mpi_mode, verbosity
        193   + use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
        194   + use com_mod, only: mpi_mode, verbosity, creceptor0
  194   195
  195   196    implicit none
    …     …
  337   338
  338   339    ! Set maxpart per process
  339         - ! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
        340   + ! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
  340   341    maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
  341   342    if (mp_np == 1) maxpart_mpi = maxpart
    …     …
  365   366    end if
  366   367
        368   + ! Allocate array for number of particles per process
        369   + allocate(npart_per_process(0:mp_partgroup_np-1))
        370   +
        371   + ! Write whether MPI_IN_PLACE is used or not
        372   + #ifdef USE_MPIINPLACE
        373   + if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
        374   + #else
        375   + if (lroot) allocate(creceptor0(maxreceptor,maxspec))
        376   + if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
        377   + #endif
  367   378    goto 101
  368   379
    …     …
  559   570    ! invalid particles at the end of the arrays
  560   571
  561         - 601 do i=num_part, 1, -1
        572   + 601 do i=numpart, 1, -1
  562   573    if (itra1(i).eq.-999999999) then
  563   574    numpart=numpart-1
  564   575    else
    …     …
  598   609    integer :: i,jj,nn, num_part=1,m,imin, num_trans
  599   610    logical :: first_iter
  600         - integer,allocatable,dimension(:) :: numparticles_mpi,idx_arr
        611   + integer,allocatable,dimension(:) :: idx_arr
  601   612    real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this
    …     …
  607   618    ! All processes exchange information on number of particles
  608   619    !****************************************************************************
  609         - allocate(numparticles_mpi(0:mp_partgroup_np-1), &
  610         - &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
        620   + allocate( idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
  611   621
  612         - call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, &
        622   + call MPI_Allgather(numpart, 1, MPI_INTEGER, npart_per_process, &
  613   623    & 1, MPI_INTEGER, mp_comm_used, mp_ierr)
    …     …
  616   626    ! Sort from lowest to highest
  617   627    ! Initial guess: correct order
  618         - sorted(:) = numparticles_mpi(:)
        628   + sorted(:) = npart_per_process(:)
  619   629    do i=0, mp_partgroup_np-1
  620   630    idx_arr(i) = i
  621   631    end do
        632   +
        633   + ! Do not rebalance particles for ipout=3
        634   + if (ipout.eq.3) return
  622   635
  623   636    ! For each successive element in index array, see if a lower value exists
    …     …
  645   658    m=mp_partgroup_np-1 ! index for last sorted process (most particles)
  646   659    do i=0,mp_partgroup_np/2-1
  647         - num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
        660   + num_trans = npart_per_process(idx_arr(m)) - npart_per_process(idx_arr(i))
  648   661    if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
  649         - if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&
  650         - & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then
        662   + if ( npart_per_process(idx_arr(m)).gt.mp_min_redist.and.&
        663   + & real(num_trans)/real(npart_per_process(idx_arr(m))).gt.mp_redist_fract) then
  651   664    ! DBG
  652         - ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi', &
  653         - ! &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi
        665   + ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process', &
        666   + ! &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process
  654   667    ! DBG
  655   668    call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
    …     …
  659   672    end do
  660   673
  661         - deallocate( numparticles_mpi,idx_arr, sorted)
        674   + deallocate(idx_arr, sorted)
  662   675
  663   676    end subroutine mpif_calculate_part_redist
    …     …
 1961  1974    if (readclouds) then
 1962  1975    j=j+1
 1963         - call MPI_Irecv(ctwc(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
       1976   + call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
 1964  1977    &MPI_COMM_WORLD,reqs(j),mp_ierr)
 1965  1978    if (mp_ierr /= 0) goto 600
    …     …
 2326  2339    if (readclouds) then
 2327  2340    j=j+1
 2328         - call MPI_Irecv(ctwcn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
       2341   + call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
 2329  2342    &MPI_COMM_WORLD,reqs(j),mp_ierr)
 2330  2343    if (mp_ierr /= 0) goto 600
    …     …
 2462  2475    end if
 2463  2476
       2477   + ! Receptor concentrations
       2478   + if (lroot) then
       2479   + call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
       2480   + & mp_comm_used,mp_ierr)
       2481   + if (mp_ierr /= 0) goto 600
       2482   + else
       2483   + call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
       2484   + & mp_comm_used,mp_ierr)
       2485   + end if
       2486   +
 2464  2487
 2465  2488    #else
 2466  2489
 2467  2490    call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
 2468  2491    & mp_comm_used, mp_ierr)
       2492   + if (mp_ierr /= 0) goto 600
 2469  2493    if (lroot) gridunc = gridunc0
       2494   +
       2495   + call MPI_Reduce(creceptor, creceptor0,rcpt_size,mp_sp,MPI_SUM,id_root, &
       2496   + & mp_comm_used,mp_ierr)
       2497   + if (mp_ierr /= 0) goto 600
       2498   + if (lroot) creceptor = creceptor0
 2470  2499
 2471  2500    #endif
    …     …
 2482  2511    end if
 2483  2512
 2484         - ! Receptor concentrations
 2485         - if (lroot) then
 2486         - call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
 2487         - & mp_comm_used,mp_ierr)
 2488         - if (mp_ierr /= 0) goto 600
 2489         - else
 2490         - call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
 2491         - & mp_comm_used,mp_ierr)
 2492         - end if
 2493  2513
 2494  2514    if (mp_measure_time) call mpif_mtime('commtime',1)
    …     …
 2700  2720    end if
 2701  2721
 2702-2714    - (13 lines removed; their content is not recoverable from this view)
       2722   + case ('readwind')
       2723   +   if (imode.eq.0) then
       2724   +     call cpu_time(mp_readwind_time_beg)
       2725   +     mp_readwind_wtime_beg = mpi_wtime()
       2726   +   else
       2727   +     call cpu_time(mp_readwind_time_end)
       2728   +     mp_readwind_wtime_end = mpi_wtime()
       2729   +
       2730   +     mp_readwind_time_total = mp_readwind_time_total + &
       2731   +          &(mp_readwind_time_end - mp_readwind_time_beg)
       2732   +     mp_readwind_wtime_total = mp_readwind_wtime_total + &
       2733   +          &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
       2734   +   end if
 2715  2735
 2716  2736    case ('commtime')
    …     …
 2788  2808    write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
 2789  2809    & mp_getfields_time_total
 2790         - write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
 2791         - & mp_readwind_wtime_total
 2792         - write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
 2793         - & mp_readwind_time_total
       2810   + ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
       2811   + ! & mp_readwind_wtime_total
       2812   + ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
       2813   + ! & mp_readwind_time_total
 2794  2814    write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
 2795  2815    & mp_io_wtime_total
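Both reduction paths sum creceptor onto the root process; they differ only in whether the root reuses its send buffer. A minimal standalone sketch of the MPI_IN_PLACE pattern (not FLEXPART code; the call sites above use rcpt_size, mp_sp, id_root and mp_comm_used where this example uses literals):

    program inplace_reduce
      use mpi
      implicit none
      integer :: rank, ierr
      real :: buf(4), dummy(4)

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
      buf = real(rank+1)   ! each rank contributes its own values

      if (rank == 0) then
        ! Root receives the sum in place, overwriting its own contribution
        call MPI_Reduce(MPI_IN_PLACE, buf, 4, MPI_REAL, MPI_SUM, 0, &
             MPI_COMM_WORLD, ierr)
        write(*,*) 'reduced on root:', buf
      else
        ! On non-root ranks the receive buffer argument is ignored
        call MPI_Reduce(buf, dummy, 4, MPI_REAL, MPI_SUM, 0, &
             MPI_COMM_WORLD, ierr)
      end if

      call MPI_Finalize(ierr)
    end program inplace_reduce

Without MPI_IN_PLACE (the #else branch), a separate receive array of the same shape is needed (hence the new creceptor0, allocated on the root in mpif_init), and the result is copied back after the reduction.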
src/netcdf_output_mod.f90
r4ad96c5 → r0a94e13

   93    93    character(len=255), parameter :: institution = 'NILU'
   94    94
   95         - integer :: tpointer
         95   + integer :: tpointer=0
   96    96    character(len=255) :: ncfname, ncfnamen
   97    97
src/par_mod.f90
rdf96ea65 → r0a94e13

  280   280
  281   281    integer,parameter :: unitpath=1, unitcommand=1, unitageclasses=1, unitgrid=1
  282         - integer,parameter :: unitavailab=1, unitreleases=88, unitpartout=93
        282   + integer,parameter :: unitavailab=1, unitreleases=88, unitpartout=93, unitpartout_average=105
  283   283    integer,parameter :: unitpartin=93, unitflux=98, unitouttraj=96
  284   284    integer,parameter :: unitvert=1, unitoro=1, unitpoin=1, unitreceptor=1
src/partoutput.f90
rd2a5a83 → r0a94e13

   71    71    !**************************************
   72    72
   73         - if (ipout.eq.1) then
         73   + if (ipout.eq.1.or.ipout.eq.3) then
   74    74    open(unitpartout,file=path(2)(1:length(2))//'partposit_'//adate// &
   75    75    atime,form='unformatted')
src/partoutput_mpi.f90
rd2a5a83 → r0a94e13

   78    78    !**************************************
   79    79
   80         - if (ipout.eq.1) then
         80   + if (ipout.eq.1.or.ipout.eq.3) then
   81    81    open(unitpartout,file=path(2)(1:length(2))//'partposit_'//adate// &
   82    82    atime,form='unformatted',status=file_stat,position='append')
src/readcommand.f90
r20963b1 → r0a94e13

   50    50    ! ipin      1 continue simulation with dumped particle data, 0 no      *
   51    51    ! ipout     0 no particle dump, 1 every output time, 3 only at end     *
         52   + ! ipoutfac  increase particle dump interval by factor (default 1)      *
   52    53    ! itsplit [s] time constant for particle splitting                      *
   53    54    ! loutaver [s] concentration output is an average over loutaver         *
    …     …
   97    98    iout, &
   98    99    ipout, &
        100   + ipoutfac, &
   99   101    lsubgrid, &
  100   102    lconvection, &
    …     …
  129   131    iout=3
  130   132    ipout=0
        133   + ipoutfac=1
  131   134    lsubgrid=1
  132   135    lconvection=1
    …     …
  507   510    !****************************************************************
  508   511
  509         - if ((ipout.ne.0).and.(ipout.ne.1).and.(ipout.ne.2)) then
        512   + if ((ipout.ne.0).and.(ipout.ne.1).and.(ipout.ne.2).and.(ipout.ne.3)) then
  510   513    write(*,*) ' #### FLEXPART MODEL ERROR! FILE COMMAND: #### '
  511         - write(*,*) ' #### IPOUT MUST BE 1, 2 OR 3! #### '
        514   + write(*,*) ' #### IPOUT MUST BE 0, 1, 2 OR 3! #### '
  512   515    stop
  513   516    endif
src/timemanager.f90
rc7d1052 → r0a94e13

  451   451    45 format(i13,' Seconds simulated: ',i13, ' Particles: Uncertainty: ',3f7.3)
  452   452    46 format(' Simulated ',f7.1,' hours (',i13,' s), ',i13, ' particles')
  453         - if (ipout.ge.1) call partoutput(itime) ! dump particle positions
        453   + if (ipout.ge.1) then
        454   +   if (mod(itime,ipoutfac*loutstep).eq.0) call partoutput(itime) ! dump particle positions
        455   +   if (ipout.eq.3) call partoutput_average(itime) ! dump particle positions
        456   + endif
  454   457    loutnext=loutnext+loutstep
  455   458    loutstart=loutnext-loutaver/2
    …     …
  609   612    ! write (*,*) 'advance: ',prob(1),xmass1(j,1),ztra1(j)
  610   613
        614   + ! Calculate average position for particle dump output
        615   + !****************************************************
        616   +
        617   + if (ipout.eq.3) call partpos_average(itime,j)
        618   +
        619   +
  611   620
  612   621    ! Calculate the gross fluxes across layer interfaces
  613   622    !***************************************************
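A worked example of the new dump interval: itime at output times is always a multiple of loutstep, so with the default ipoutfac=1 the mod test passes at every output and the behaviour is unchanged. With loutstep=3600 s and ipoutfac=4, mod(itime,14400)=0 holds only at every fourth output time, thinning position dumps by a factor of four while gridded output is unaffected. The same logic appears in timemanager_mpi.f90 below.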
src/timemanager_mpi.f90
r20963b1 → r0c8c7f2

  113   113    integer :: j,ks,kp,l,n,itime=0,nstop,nstop1,memstat=0
  114   114    ! integer :: ksp
  115         - integer :: ip
        115   + integer :: ip,irec
  116   116    integer :: loutnext,loutstart,loutend
  117   117    integer :: ix,jy,ldeltat,itage,nage,idummy
    …     …
  129   129    ! Measure time spent in timemanager
  130   130    if (mp_measure_time) call mpif_mtime('timemanager',0)
  131   131
        132   +
  132   133    ! First output for time 0
    …     …
  532   533    end if
  533   534
  534         - else ! :TODO: check for zeroing in the netcdf module
        535   + else
  535   536    call concoutput_surf_nest(itime,outnum)
  536   537    end if
    …     …
  593   594    46 format(' Simulated ',f7.1,' hours (',i13,' s), ',i13, ' particles')
  594   595    if (ipout.ge.1) then
        596   +   if (mp_measure_time) call mpif_mtime('iotime',0)
        597   +   irec=0
  595   598    do ip=0, mp_partgroup_np-1
  596         -   if (ip.eq.mp_partid) call partoutput(itime) ! dump particle positions
        599   +   if (ip.eq.mp_partid) then
        600   +     if (mod(itime,ipoutfac*loutstep).eq.0) call partoutput(itime) ! dump particle positions
        601   +     if (ipout.eq.3) call partoutput_average(itime,irec) ! dump particle positions
        602   +   endif
        603   +   if (ipout.eq.3) irec=irec+npart_per_process(ip)
  597   604    call mpif_mpi_barrier
  598   605    end do
        606   +   if (mp_measure_time) call mpif_mtime('iotime',1)
  599   607    end if
    …     …
  757   765    if (mp_measure_time) call mpif_mtime('advance',1)
  758   766
        767   + ! Calculate average position for particle dump output
        768   + !****************************************************
        769   +
        770   + if (ipout.eq.3) call partpos_average(itime,j)
        771   +
  759   772
  760   773    ! Calculate the gross fluxes across layer interfaces
    …     …
  895   908    do ip=0, mp_partgroup_np-1
  896   909    if (ip.eq.mp_partid) then
  897         - !if (mp_dbg_mode) write(*,*) 'call partoutput(itime), proc, mp_partid',ip,mp_partid
        910   + if (mp_dbg_mode) write(*,*) 'call partoutput(itime), proc, mp_partid',ip,mp_partid
  898   911    call partoutput(itime) ! dump particle positions
  899   912    end if
src/verttransform_ecmwf.f90
r437c545 → rd005a67

   73    73    use com_mod
   74    74    use cmapf_mod, only: cc2gll
   75         - ! use mpi_mod
   76    75
   77    76    implicit none
src/readwind_gfs.f90
rdb91eb7 → r5d74ed9

   83    83
   84    84    ! NCEP
   85         - integer :: numpt,numpu,numpv,numpw,numprh
         85   + integer :: numpt,numpu,numpv,numpw,numprh,numpclwch
   86    86    real :: help, temp, ew
   87    87    real :: elev
    …     …
  134   134    numpw=0
  135   135    numprh=0
        136   + numpclwch=0
  136   137    ifield=0
  137   138    10 ifield=ifield+1
    …     …
  557   558    endif
  558   559    ! SEC & IP 12/2018 read GFS clouds
  559         - if(isec1(6).eq.153) then !! CLWCR Cloud liquid water content [kg/kg]
  560         - clwch(i,j,nlev_ec-k+2,n)=zsec4(nxfield*(ny-j-1)+i+1)
        560   + if((isec1(6).eq.153).and.(isec1(7).eq.100)) then !! CLWCR Cloud liquid water content [kg/kg]
        561   +   if((i.eq.0).and.(j.eq.0)) then
        562   +     do ii=1,nuvz
        563   +       if ((isec1(8)*100.0).eq.akz(ii)) numpclwch=ii
        564   +     end do
        565   +   endif
        566   +   help=zsec4(nxfield*(ny-j-1)+i+1)
        567   +   if(i.le.i180) then
        568   +     clwch(i179+i,j,numpclwch,n)=help
        569   +   else
        570   +     clwch(i-i181,j,numpclwch,n)=help
        571   +   endif
  561   572    readclouds=.true.
  562   573    sumclouds=.true.
        574   + ! readclouds=.false.
        575   + ! sumclouds=.false.
  563   576    endif
  564   577
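The rewritten block reads the cloud water field only from pressure-level records (isec1(7)=100), finds the matching model level by comparing isec1(8)*100 against the akz values, and applies the same longitude shift used for the other GFS fields instead of storing the raw 0..360 degree column order. A standalone sketch of that shift, assuming a one-degree global grid so that nxfield=360, i179=179, i180=180, i181=181 (illustrative values; in readwind_gfs.f90 they follow from the grid definition):

    program lonshift
      implicit none
      integer, parameter :: nxfield=360, i179=179, i180=180, i181=181
      real :: raw(0:nxfield-1), shifted(0:nxfield-1)
      integer :: i

      do i=0,nxfield-1
        raw(i)=real(i)            ! tag each cell with its source longitude (deg E)
      end do

      do i=0,nxfield-1
        if (i.le.i180) then
          shifted(i179+i)=raw(i)  ! 0..180E fill the right half of the target grid
        else
          shifted(i-i181)=raw(i)  ! 181..359E (i.e. 179W..1W) fill the left half
        end if
      end do

      ! prints 181.0 (=179W), 0.0 (=0E), 180.0 (=180E)
      write(*,*) shifted(0), shifted(i179), shifted(nxfield-1)
    end program lonshift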
src/releaseparticles.f90
r75a4ded → r7873bf7

  114   114    average_timecorrect=0.
  115   115    do k=1,nspec
  116         - if (zpoint1(i).gt.0.5) then ! point source
        116   + if(abs(xpoint2(i)-xpoint1(i)).lt.1.E-4.and.abs(ypoint2(i)-ypoint1(i)).lt.1.E-4) then
        117   + ! if (zpoint1(i).gt.0.5) then ! point source
  117   118    timecorrect(k)=point_hour(k,nhour)*point_dow(k,ndayofweek)
  118   119    else ! area source
src/verttransform_gfs.f90
rdb91eb7 → r437c545

  548   548    if ((lsp.gt.0.01).or.(convp.gt.0.01)) then ! cloud and precipitation
  549   549
  550         - do kz=nz,1,-1 !go Bottom up!
        550   + do kz=nz,2,-1 !go Bottom up!
  551   551    if (clw(ix,jy,kz,n).gt. 0) then ! is in cloud
  552   552    cloudsh(ix,jy,n)=cloudsh(ix,jy,n)+height(kz)-height(kz-1)
  553   553    clouds(ix,jy,kz,n)=1 ! is a cloud
  554   554    if (lsp.ge.convp) then
  555         - clouds(ix,jy,kz,n)=3 ! lsp in-cloud
        555   + clouds(ix,jy,kz,n)=3  ! lsp in-cloud
  556   556    else
  557   557    clouds(ix,jy,kz,n)=2 ! convp in-cloud
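The changed lower bound reads like an array-bounds fix: with kz=1 the accumulation line references height(kz-1)=height(0), one below the first element of the 1-based height array; stopping at kz=2 keeps the subscript in range.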