Changeset 0ecc1fe in flexpart.git

- Timestamp: Nov 30, 2017, 4:04:54 PM (6 years ago)
- Branches: master, 10.4.1_pesei, GFS_025, bugfixes+enhancements, dev, release-10, release-10.4.1, scaling-bug, univie
- Children: d2a5a83
- Parents: c2bd55e
- Location: src
- Files: 7 edited
src/interpol_rain.f90
--- src/interpol_rain.f90 (rc7e771d)
+++ src/interpol_rain.f90 (r0ecc1fe)
@@ -56,5 +56,5 @@
 ! ix,jy                x,y coordinates of lower left subgrid point      *
 ! level                level at which interpolation shall be done       *
-! memind(3)            points to the places of the wind fields          *
+! iwftouse             points to the place of the wind field            *
 ! nx,ny                actual field dimensions in x,y and z direction   *
 ! nxmax,nymax,nzmax    maximum field dimensions in x,y and z direction  *
src/interpol_rain_nests.f90
--- src/interpol_rain_nests.f90 (rdb712a8)
+++ src/interpol_rain_nests.f90 (r0ecc1fe)
@@ -21,5 +21,5 @@
 
 subroutine interpol_rain_nests(yy1,yy2,yy3,nxmaxn,nymaxn,nzmax, &
-  maxnests,ngrid,nxn,nyn,memind,xt,yt,level,itime1,itime2,itime, &
+  maxnests,ngrid,nxn,nyn,iwftouse,xt,yt,level,itime1,itime2,itime, &
   yint1,yint2,yint3)
 ! i i i i i i
@@ -60,5 +60,5 @@
 ! ix,jy                x,y coordinates of lower left subgrid point      *
 ! level                level at which interpolation shall be done       *
-! memind(3)            points to the places of the wind fields          *
+! iwftouse             points to the place of the wind field            *
 ! nx,ny                actual field dimensions in x,y and z direction   *
 ! nxmax,nymax,nzmax    maximum field dimensions in x,y and z direction  *
@@ -75,5 +75,5 @@
 
 integer :: maxnests,ngrid
-integer :: nxn(maxnests),nyn(maxnests),nxmaxn,nymaxn,nzmax,memind(numwfmem)
+integer :: nxn(maxnests),nyn(maxnests),nxmaxn,nymaxn,nzmax,iwftouse
 integer :: m,ix,jy,ixp,jyp,itime,itime1,itime2,level,indexh
 real :: yy1(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
@@ -125,19 +125,20 @@
 !***********************
 
-  do m=1,2
-    indexh=memind(m)
+! do m=1,2
+!  indexh=memind(m)
+  indexh=iwftouse
 
-  y1(m)=p1*yy1(ix ,jy ,level,indexh,ngrid) &
+  y1(1)=p1*yy1(ix ,jy ,level,indexh,ngrid) &
       + p2*yy1(ixp,jy ,level,indexh,ngrid) &
      + p3*yy1(ix ,jyp,level,indexh,ngrid) &
      + p4*yy1(ixp,jyp,level,indexh,ngrid)
-  y2(m)=p1*yy2(ix ,jy ,level,indexh,ngrid) &
+  y2(1)=p1*yy2(ix ,jy ,level,indexh,ngrid) &
      + p2*yy2(ixp,jy ,level,indexh,ngrid) &
      + p3*yy2(ix ,jyp,level,indexh,ngrid) &
      + p4*yy2(ixp,jyp,level,indexh,ngrid)
-  y3(m)=p1*yy3(ix ,jy ,level,indexh,ngrid) &
+  y3(1)=p1*yy3(ix ,jy ,level,indexh,ngrid) &
      + p2*yy3(ixp,jy ,level,indexh,ngrid) &
      + p3*yy3(ix ,jyp,level,indexh,ngrid) &
      + p4*yy3(ixp,jyp,level,indexh,ngrid)
-  end do
+! end do
 
@@ -147,12 +148,15 @@
 !************************************
 
-  dt1=real(itime-itime1)
-  dt2=real(itime2-itime)
-  dt=dt1+dt2
+! dt1=real(itime-itime1)
+! dt2=real(itime2-itime)
+! dt=dt1+dt2
 
-  yint1=(y1(1)*dt2+y1(2)*dt1)/dt
-  yint2=(y2(1)*dt2+y2(2)*dt1)/dt
-  yint3=(y3(1)*dt2+y3(2)*dt1)/dt
+! yint1=(y1(1)*dt2+y1(2)*dt1)/dt
+! yint2=(y2(1)*dt2+y2(2)*dt1)/dt
+! yint3=(y3(1)*dt2+y3(2)*dt1)/dt
 
+  yint1=y1(1)
+  yint2=y2(1)
+  yint3=y3(1)
 
 end subroutine interpol_rain_nests
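With this change the nested fields are no longer interpolated linearly in time between the two wind fields kept in memory; the routine now evaluates the bilinear interpolation only at the single wind-field index passed in as iwftouse, and only element 1 of y1/y2/y3 is filled and used. The standalone sketch below (illustrative only, not FLEXPART code; grid size, weights and times are made up) contrasts the old and the new scheme for one scalar field:

program single_time_sketch
  implicit none
  integer, parameter :: nx=3, ny=3, numwfmem=2
  real :: yy(0:nx-1,0:ny-1,numwfmem)   ! one gridded field at two wind-field times
  real :: p1,p2,p3,p4,ddx,ddy,yint
  real :: y_old(2),dt1,dt2,dt
  integer :: ix,jy,ixp,jyp,indexh,m
  integer :: itime1,itime2,itime,iwftouse

  yy(:,:,1)=1.0                         ! field valid at itime1
  yy(:,:,2)=3.0                         ! field valid at itime2
  ix=0; jy=0; ixp=ix+1; jyp=jy+1
  ddx=0.4; ddy=0.6                      ! fractional position inside the grid cell
  p1=(1.-ddx)*(1.-ddy); p2=ddx*(1.-ddy); p3=(1.-ddx)*ddy; p4=ddx*ddy

  ! Old scheme: bilinear interpolation at both time levels, then linear in time
  itime1=0; itime2=3600; itime=900
  do m=1,2
    indexh=m
    y_old(m)=p1*yy(ix,jy,indexh)+p2*yy(ixp,jy,indexh) &
            +p3*yy(ix,jyp,indexh)+p4*yy(ixp,jyp,indexh)
  end do
  dt1=real(itime-itime1); dt2=real(itime2-itime); dt=dt1+dt2
  yint=(y_old(1)*dt2+y_old(2)*dt1)/dt
  print *, 'old, time-interpolated value:  ', yint

  ! New scheme (this changeset): bilinear interpolation at one selected index only
  iwftouse=1
  indexh=iwftouse
  yint=p1*yy(ix,jy,indexh)+p2*yy(ixp,jy,indexh) &
      +p3*yy(ix,jyp,indexh)+p4*yy(ixp,jyp,indexh)
  print *, 'new, single wind-field value:  ', yint
end program single_time_sketch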
src/mpi_mod.f90
--- src/mpi_mod.f90 (rc2bd55e)
+++ src/mpi_mod.f90 (r0ecc1fe)
@@ -2536,4 +2536,5 @@
 !    if (mp_ierr /= 0) goto 600
 
+#ifdef USE_MPIINPLACE
 ! Using in-place reduction
     if (lroot) then
@@ -2544,5 +2545,14 @@
       call MPI_Reduce(griduncn, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
            & mp_comm_used, mp_ierr)
-    end if
+      if (mp_ierr /= 0) goto 600
+    end if
+
+#else
+    call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
+         & mp_comm_used, mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    if (lroot) griduncn = griduncn0
+
+#endif
 
     if ((WETDEP).and.(ldirect.gt.0)) then
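The two reduction paths selected by USE_MPIINPLACE can be illustrated with a toy program (a minimal sketch, not FLEXPART code; the names grid, grid0, rdummy and the array size are invented). With MPI_IN_PLACE the root rank sums directly into its own array, so no extra receive buffer is needed; without it, the sum lands in a separate buffer, the role griduncn0 plays above, and the root copies it back. Compile with preprocessing enabled (for example mpifort -cpp) to select either branch:

program reduce_sketch
  use mpi
  implicit none
  integer, parameter :: n = 4
  real :: grid(n), grid0(n), rdummy(1)
  integer :: ierr, myrank, id_root

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, myrank, ierr)
  id_root = 0
  grid = real(myrank + 1)     ! each rank holds its own partial sums

#ifdef USE_MPIINPLACE
  ! In-place variant: the root passes MPI_IN_PLACE as send buffer and the
  ! result overwrites its own grid; no separate receive buffer is required.
  if (myrank == id_root) then
    call MPI_Reduce(MPI_IN_PLACE, grid, n, MPI_REAL, MPI_SUM, id_root, &
         MPI_COMM_WORLD, ierr)
  else
    call MPI_Reduce(grid, rdummy, n, MPI_REAL, MPI_SUM, id_root, &
         MPI_COMM_WORLD, ierr)
  end if
#else
  ! Buffer variant: all ranks reduce into a separate array grid0 and the
  ! root copies it back, which is what griduncn0 is used for above.
  call MPI_Reduce(grid, grid0, n, MPI_REAL, MPI_SUM, id_root, &
       MPI_COMM_WORLD, ierr)
  if (myrank == id_root) grid = grid0
#endif

  if (myrank == id_root) print *, 'reduced grid:', grid
  call MPI_Finalize(ierr)
end program reduce_sketch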
src/netcdf_output_mod.f90
--- src/netcdf_output_mod.f90 (rc2bd55e)
+++ src/netcdf_output_mod.f90 (r0ecc1fe)
@@ -273,5 +273,4 @@
   character(len=3)       :: anspec
   CHARACTER              :: adate*8,atime*6,timeunit*32
-  ! ESO DBG: WHY IS THIS HARDCODED TO 1000?
   !REAL, DIMENSION(1000) :: coord
   real, allocatable, dimension(:) :: coord
src/outgrid_init.f90
--- src/outgrid_init.f90 (r6ecb30a)
+++ src/outgrid_init.f90 (r0ecc1fe)
@@ -210,33 +210,37 @@
   allocate(gridunc(0:numxgrid-1,0:numygrid-1,numzgrid,maxspec, &
        maxpointspec_act,nclassunc,maxageclass),stat=stat)
   if (stat.ne.0) write(*,*)'ERROR: could not allocate gridunc'
   if (ldirect.gt.0) then
     allocate(wetgridunc(0:numxgrid-1,0:numygrid-1,maxspec, &
          maxpointspec_act,nclassunc,maxageclass),stat=stat)
     if (stat.ne.0) write(*,*)'ERROR: could not allocate wetgridunc'
     allocate(drygridunc(0:numxgrid-1,0:numygrid-1,maxspec, &
          maxpointspec_act,nclassunc,maxageclass),stat=stat)
     if (stat.ne.0) write(*,*)'ERROR: could not allocate drygridunc'
   endif
 
+#ifdef USE_MPIINPLACE
+#else
 ! Extra field for totals at MPI root process
   if (lroot.and.mpi_mode.gt.0) then
-
-#ifdef USE_MPIINPLACE
-#else
-  ! If MPI_IN_PLACE option is not used in mpi_mod.f90::mpif_tm_reduce_grid(),
-  ! then an aux array is needed for parallel grid reduction
+! If MPI_IN_PLACE option is not used in mpi_mod.f90::mpif_tm_reduce_grid(),
+! then an aux array is needed for parallel grid reduction
     allocate(gridunc0(0:numxgrid-1,0:numygrid-1,numzgrid,maxspec, &
          maxpointspec_act,nclassunc,maxageclass),stat=stat)
     if (stat.ne.0) write(*,*)'ERROR: could not allocate gridunc0'
+  else if (.not.lroot.and.mpi_mode.gt.0) then
+    allocate(gridunc0(1,1,1,1,1,1,1),stat=stat)
+    if (stat.ne.0) write(*,*)'ERROR: could not allocate gridunc0'
+  end if
 #endif
-    if (ldirect.gt.0) then
-      allocate(wetgridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
-           maxpointspec_act,nclassunc,maxageclass),stat=stat)
-      if (stat.ne.0) write(*,*)'ERROR: could not allocate wetgridunc0'
-      allocate(drygridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
-           maxpointspec_act,nclassunc,maxageclass),stat=stat)
-      if (stat.ne.0) write(*,*)'ERROR: could not allocate drygridunc0'
-    endif
+! if (ldirect.gt.0) then
+  if (lroot.and.mpi_mode.gt.0) then
+    allocate(wetgridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
+         maxpointspec_act,nclassunc,maxageclass),stat=stat)
+    if (stat.ne.0) write(*,*)'ERROR: could not allocate wetgridunc0'
+    allocate(drygridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
+         maxpointspec_act,nclassunc,maxageclass),stat=stat)
+    if (stat.ne.0) write(*,*)'ERROR: could not allocate drygridunc0'
+
 ! allocate a dummy to avoid compilator complaints
   else if (.not.lroot.and.mpi_mode.gt.0) then
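The allocation logic above mirrors the reduction in mpi_mod.f90: when USE_MPIINPLACE is not defined, gridunc0 must exist on every rank because it is passed to MPI_Reduce, but only the root rank needs the full-size buffer, so the other ranks get a 1-element dummy and serial runs allocate nothing. A minimal sketch of that pattern, with hypothetical names and sizes:

program alloc_sketch
  implicit none
  real, allocatable :: recvbuf(:,:,:)   ! plays the role of gridunc0
  logical :: lroot
  integer :: mpi_mode, nx, ny, nz, stat

  lroot = .true.        ! would come from the MPI rank in the real code
  mpi_mode = 1          ! >0 means the MPI version is running
  nx = 10; ny = 10; nz = 5

  if (lroot .and. mpi_mode.gt.0) then
    allocate(recvbuf(nx,ny,nz),stat=stat)        ! full-size buffer on root
    if (stat.ne.0) write(*,*) 'ERROR: could not allocate reduction buffer'
  else if (.not.lroot .and. mpi_mode.gt.0) then
    allocate(recvbuf(1,1,1),stat=stat)           ! dummy, never read
    if (stat.ne.0) write(*,*) 'ERROR: could not allocate dummy buffer'
  end if

  if (allocated(recvbuf)) print *, 'buffer shape:', shape(recvbuf)
end program alloc_sketch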
src/outgrid_init_nest.f90
--- src/outgrid_init_nest.f90 (r8a65cb0)
+++ src/outgrid_init_nest.f90 (r0ecc1fe)
@@ -69,18 +69,27 @@
   endif
 
+#ifdef USE_MPIINPLACE
+#else
 ! Extra field for totals at MPI root process
   if (lroot.and.mpi_mode.gt.0) then
-!    allocate(griduncn0(0:numxgridn-1,0:numygridn-1,numzgrid,maxspec, &
-!         maxpointspec_act,nclassunc,maxageclass),stat=stat)
-!    if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
-
-    if (ldirect.gt.0) then
-      allocate(wetgriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
-           maxpointspec_act,nclassunc,maxageclass),stat=stat)
-      if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
-      allocate(drygriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
-           maxpointspec_act,nclassunc,maxageclass),stat=stat)
-      if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
-    endif
+! If MPI_IN_PLACE option is not used in mpi_mod.f90::mpif_tm_reduce_grid_nest(),
+! then an aux array is needed for parallel grid reduction
+    allocate(griduncn0(0:numxgridn-1,0:numygridn-1,numzgrid,maxspec, &
+         maxpointspec_act,nclassunc,maxageclass),stat=stat)
+    if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
+! allocate a dummy to avoid compilator complaints
+  else if (.not.lroot.and.mpi_mode.gt.0) then
+    allocate(griduncn0(1,1,1,1,1,1,1),stat=stat)
+  end if
+#endif
+! if (ldirect.gt.0) then
+  if (lroot.and.mpi_mode.gt.0) then
+    allocate(wetgriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
+         maxpointspec_act,nclassunc,maxageclass),stat=stat)
+    if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
+    allocate(drygriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
+         maxpointspec_act,nclassunc,maxageclass),stat=stat)
+    if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
+! endif
 ! allocate a dummy to avoid compilator complaints
   else if (.not.lroot.and.mpi_mode.gt.0) then
src/unc_mod.f90
--- src/unc_mod.f90 (r6ecb30a)
+++ src/unc_mod.f90 (r0ecc1fe)
@@ -38,4 +38,5 @@
 ! then an aux array is needed for parallel grid reduction
   real,allocatable, dimension (:,:,:,:,:,:,:) :: gridunc0
+  real,allocatable, dimension (:,:,:,:,:,:,:) :: griduncn0
 #endif
   real,allocatable, dimension (:,:,:,:,:,:,:) :: griduncn