Changes in [d1a8707:6985a98] in flexpart.git

Location: src

Files:
- 1 added
- 29 edited

Legend:
- Unmodified
- Added
- Removed
src/FLEXPART.f90  (diff r6896b17 → rc8fc724)

- The date in the version string is changed from 2017-01-01 to 2016-11-02:
      flexversion='Version '//trim(flexversion_major)//'.1beta (2016-11-02)'
- Before the particle trajectory calculation, the root process now reports
  whether the output kernel is used:
      if (lroot) then
        if (lnokernel) then
          write(*,*) "Concentrations are calculated without using kernel"
        else
          write(*,*) "Concentrations are calculated using kernel"
        end if
      end if
- The end-of-run scavenging statistics are reported per species,
  tot_blc_count and tot_inc_count having become per-species arrays (see
  com_mod.f90 below):
      do i=1,nspec
        write(*,*) '**********************************************'
        write(*,*) 'Scavenging statistics for species ', species(i), ':'
        write(*,*) 'Total number of occurences of below-cloud scavenging', &
             & tot_blc_count(i)
        write(*,*) 'Total number of occurences of in-cloud scavenging', &
             & tot_inc_count(i)
        write(*,*) '**********************************************'
      end do
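Taken together, the com_mod.f90 array declarations, the per-event increments
during the run, and the report loop above form one pattern. A minimal
self-contained sketch of it (the species names, the increment sites, and the
program wrapper are illustrative, not from the changeset):

    program scavenging_stats_demo
      implicit none
      integer, parameter :: nspec = 2                    ! illustrative
      ! 64-bit counters, one per species, as declared in com_mod.f90
      integer(selected_int_kind(16)), dimension(nspec) :: tot_blc_count = 0, &
           & tot_inc_count = 0
      character(len=6), dimension(nspec) :: species = (/'TRACER','I2-131'/)
      integer :: i

      ! During the run, each scavenging event increments the counter of the
      ! species concerned (hypothetical call sites):
      tot_blc_count(1) = tot_blc_count(1) + 1
      tot_inc_count(2) = tot_inc_count(2) + 1

      ! End-of-run report, one block per species, as added in FLEXPART.f90
      do i = 1, nspec
        write(*,*) 'Scavenging statistics for species ', species(i), ':'
        write(*,*) '  below-cloud scavenging events:', tot_blc_count(i)
        write(*,*) '  in-cloud scavenging events   :', tot_inc_count(i)
      end do
    end program scavenging_stats_demo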
src/FLEXPART_MPI.f90  (diff r79abee9 → rc8fc724)

- Indentation of the "Initialize mpi" comment block is fixed.
- Particle arrays are no longer allocated on a dedicated reader process:
      if(.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
- The version string is brought in line with the serial version:
      flexversion='Ver. '//trim(flexversion_major)//'.1beta MPI (2016-11-02)'
- The itra1 initialization loop is likewise skipped on a dedicated reader
  process:
      if (.not.(lmpreader.and.lmp_use_reader)) then
        do j=1, size(itra1) ! maxpart_mpi
          itra1(j)=-999999999
        end do
      end if
- For continuation of a previous run, the guard on readpartpositions is
  changed from (lmp_use_reader.and..not.lmpreader) to
  (.not.(lmpreader.and.lmp_use_reader)), so particle positions are also read
  when no dedicated reader process is in use; only the readwind process skips
  this step.
- As in the serial version, the root process reports whether the output
  kernel (lnokernel) is used.
- The scavenging-counter reductions sum the whole per-species arrays, the
  count argument changing from 1 to nspec:
      call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
  The matching non-root calls (skipped on the readwind process) are changed
  accordingly, as is the reduction of tot_inc_count.
- The root process prints the end-of-run scavenging statistics per species,
  as in FLEXPART.f90.
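The count change from 1 to nspec means the whole counter array is summed onto
the root in a single call. A minimal sketch of the MPI_IN_PLACE reduction
pattern used here, with MPI_COMM_WORLD standing in for mp_comm_used and a
dummy receive buffer standing in for the literal 0 that FLEXPART passes on
non-root ranks:

    program reduce_counters_demo
      use mpi
      implicit none
      integer, parameter :: nspec = 3
      integer(selected_int_kind(16)) :: tot_blc_count(nspec), dummy(1)
      integer :: rank, ierr

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
      tot_blc_count(:) = rank + 1   ! stand-in for per-process event counts

      if (rank == 0) then
        ! Root contributes its own values and receives the sums in place
        call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, nspec, MPI_INTEGER8, &
             & MPI_SUM, 0, MPI_COMM_WORLD, ierr)
        write(*,*) 'summed counters:', tot_blc_count
      else
        ! Non-root ranks only send; the receive buffer is ignored here
        call MPI_Reduce(tot_blc_count, dummy, nspec, MPI_INTEGER8, &
             & MPI_SUM, 0, MPI_COMM_WORLD, ierr)
      end if
      call MPI_Finalize(ierr)
    end program reduce_counters_demo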
src/boundcond_domainfill_mpi.f90  (diff r7999df47 → r16b61a5)

MPI rework of the domain-filling boundary condition. The header comment block
is re-indented and gains a change note:

    ! CHANGES
    ! 08/2016 eso: MPI version:
    !
    ! -Root process release particles and distributes to other processes.
    !  Temporary arrays are used, also for the non-root (receiving) processes.
    ! -The scheme can be improved by having all processes report numpart
    !  (keeping track of how many particles have left the domain), so that
    !  a proportional amount of new particles can be distributed (however
    !  we have a separate function called from timemanager that will
    !  redistribute particles among processes if there are imbalances)

Main changes:

- New variables: numpart_total, rel_counter, an allocatable
  numrel_mpi(0:mp_partgroup_np-1) holding the number of new releases per
  process, an MPI message tag mtag, and temporary release arrays sized
  tmp_size = int(maxpartfract*maxpart) with maxpartfract=0.1 ("increase
  maxpartfract if needed").
- Terminating trajectories that have left the domain is still done on all
  processes.
- Particle release at the western/eastern and southern/northern boundaries is
  now done by the root process only, into temporary arrays (itra1_tmp,
  npoint_tmp, nclass_tmp, idt_tmp, itramem_tmp, itrasplit_tmp, xtra1_tmp,
  ytra1_tmp, ztra1_tmp, xmass1_tmp), initialized as non-existent with
  itra1_tmp(:)=-999999999. The release loops are otherwise unchanged apart
  from operating on the _tmp arrays.
- The mass flux is no longer divided by the number of processes:
      fluxofmass=windx*rhox*boundarea*real(lsynctime)
- numpart_total (instead of numpart) tracks the number of new releases, and
  overflowing the temporary arrays stops the run ("too many particles
  required").
- numpart_total is broadcast with MPI_Bcast and split as evenly as possible
  over the particle processes:
      do i=0, mp_partgroup_np-1
        numrel_mpi(i) = numpart_total/mp_partgroup_np
        if (i.lt.mod(numpart_total,mp_partgroup_np)) numrel_mpi(i) = numrel_mpi(i) + 1
      end do
- Non-root processes allocate receive buffers of size numrel_mpi(mp_partid).
  The root then sends each process its slice of the temporary arrays with
  MPI_SEND, e.g.
      call MPI_SEND(nclass_tmp(rel_counter+1:rel_counter+numrel_mpi(i)),&
           &numrel_mpi(i),MPI_INTEGER,i,mtag+1*i,mp_comm_used,mp_ierr)
  using tags mtag+1*i through mtag+(9+j)*i; integer arrays are sent as
  MPI_INTEGER, x/y positions as mp_dp, z positions and the nspec mass arrays
  as mp_sp. Each receiving process posts the matching MPI_RECV calls; the
  root keeps its own share without communication.
- Each process then copies its new particles into vacant slots of the main
  particle arrays (searching for itra1(ipart).ne.itime and updating numpart);
  this section is independent of the redistribution scheme used.
- The boundcond.bin dump (root process only) is now bracketed by
  mpif_mtime('iotime',...) timing calls.
- The temporary arrays and numrel_mpi are deallocated at the end of the
  subroutine.
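The even split of numpart_total (base share for every rank, one extra for the
first mod(numpart_total, np) ranks) can be checked in isolation; a small
sketch with made-up sizes:

    program partition_demo
      implicit none
      integer, parameter :: np = 4          ! number of particle processes
      integer :: numpart_total, numrel_mpi(0:np-1), i

      numpart_total = 10                    ! example total of new releases

      ! Same scheme as in boundcond_domainfill_mpi.f90: base share for all,
      ! plus one extra for the first mod(numpart_total, np) ranks
      do i = 0, np-1
        numrel_mpi(i) = numpart_total/np
        if (i < mod(numpart_total, np)) numrel_mpi(i) = numrel_mpi(i) + 1
      end do

      write(*,*) 'per-rank shares:', numrel_mpi   ! prints 3 3 2 2
      write(*,*) 'check sum:      ', sum(numrel_mpi)
    end program partition_demo

Every rank computes the full table after the broadcast, so each one knows
both its own share, numrel_mpi(mp_partid), and the offsets of all slices the
root has to send.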
src/com_mod.f90  (diff r5888298)

- readclouds and sumclouds are now default-initialized:
      logical :: readclouds=.false.
      logical :: sumclouds=.false.
- The scavenging counters become per-species arrays:
      integer(selected_int_kind(16)), dimension(maxspec) :: tot_blc_count=0, &
           &tot_inc_count=0
- A per-species wet deposition switch is added alongside the existing dry
  deposition one:
      logical :: DEP,DRYDEP,DRYDEPSPEC(maxspec),WETDEP,WETDEPSPEC(maxspec),&
           & OHREA,ASSSPEC
  with the matching documentation line:
      ! WETDEPSPEC  .true., if wet deposition is switched on for that species
- Comment grammar fixed: "differences with MPI version is minor" becomes
  "differences with MPI version are minor".
src/conccalc.f90  (diff r9287c01)

- In the density profile interpolation, the hard-coded time slot of the first
  term is replaced by memind(2):
      rhoprof(ind-indz+1)=p1*rho(ix ,jy ,ind,memind(2)) &
           +p2*rho(ixp,jy ,ind,2) &
           +p3*rho(ix ,jyp,ind,2) &
           ...
  (the following terms still use the literal index 2).
- The test for direct attribution to a grid cell uses the new lnokernel
  switch instead of .not.usekernel:
      if (lnokernel.or.(itage.lt.10800).or.(xl.lt.0.5).or.(yl.lt.0.5).or. &
           (xl.gt.real(numxgrid-1)-0.5).or. &
           (yl.gt.real(numygrid-1)-0.5)) then ! no kernel, direct attribution to grid cell
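The memind fix matters because FLEXPART keeps two wind-field times in memory,
and memind maps the logical pair (1 = earlier, 2 = later) onto whichever
physical storage slots currently hold them; a literal slot index is only
correct while that mapping happens to be the identity. A toy illustration
(the 2-element field and its values are made up):

    program memind_demo
      implicit none
      real    :: rho_slot(2)  ! two physical storage slots for a field
      integer :: memind(2)    ! logical time (1=earlier, 2=later) -> slot

      rho_slot = (/ 1.25, 1.10 /)  ! slot 1: later field, slot 2: earlier
      memind   = (/ 2, 1 /)        ! mapping after a field rotation

      write(*,*) 'literal slot 2:', rho_slot(2)          ! earlier field (wrong)
      write(*,*) 'memind(2)     :', rho_slot(memind(2))  ! later field (right)
    end program memind_demo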
src/conccalc_mpi.f90  (diff r5f9d14a → r4c64400)

- The same lnokernel switch is added to the no-kernel test as in conccalc.f90:
      if (lnokernel.or.(itage.lt.10800).or.(xl.lt.0.5).or.(yl.lt.0.5).or. &
           (xl.gt.real(numxgrid-1)-0.5).or. &
           (yl.gt.real(numygrid-1)-0.5)) then ! no kernel, direct attribution to grid cell
src/concoutput.f90  (diff r54cbd6c → r16b61a5)

- The explicit nested loops that zeroed creceptor and gridunc element by
  element are commented out; the whole-array assignments that follow do the
  same work:
      creceptor(:,:)=0.
      gridunc(:,:,:,:,:,:,:)=0.
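The whole-array assignments are behavior-equivalent to the commented-out
loops; a minimal check with illustrative (much smaller) array shapes:

    program zeroing_demo
      implicit none
      real :: gridunc(2,2,2), gridunc_loop(2,2,2)
      integer :: ix, jy, kz

      gridunc = 5.
      gridunc_loop = 5.

      ! Explicit nested loops, as in the code that was commented out ...
      do jy = 1, 2
        do ix = 1, 2
          do kz = 1, 2
            gridunc_loop(ix,jy,kz) = 0.
          end do
        end do
      end do

      ! ... and the whole-array assignment that replaces them
      gridunc(:,:,:) = 0.

      write(*,*) 'identical results:', all(gridunc == gridunc_loop)
    end program zeroing_demo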
src/concoutput_nest_mpi.f90  (diff r38b7917 → r16b61a5)

- Leftover commented-out timing code after the call to mpif_mtime('iotime',0)
  is removed:
      ! call cpu_time(mp_root_time_beg)
      ! mp_root_wtime_beg = mpi_wtime()
      ! end if
src/concoutput_surf.f90  (diff r6a678e3 → r16b61a5)

- Whitespace-only cleanup: the header comment block and large parts of the
  subroutine body are re-indented; the statements themselves are unchanged.
- A "! Dry deposition" section comment is added before the dry-deposition
  sparse-matrix dump, matching the existing "! Wet deposition" one.
…
surf_only write only 1st layer 426 427 sp_count_i=0 428 sp_count_r=0 429 sp_fact=-1. 430 sp_zer=.true. 431 431 do kz=1,1 432 432 do jy=0,numygrid-1 … … 440 440 sp_fact=sp_fact*(-1.) 441 441 endif 442 443 444 445 446 447 448 449 450 451 442 sp_count_r=sp_count_r+1 443 sparse_dump_r(sp_count_r)= & 444 sp_fact* & 445 grid(ix,jy,kz)* & 446 factor3d(ix,jy,kz)/tot_mu(ks,kp) 447 ! if ((factor(ix,jy,kz)/tot_mu(ks,kp)).eq.0) 448 ! + write (*,*) factor(ix,jy,kz),tot_mu(ks,kp),ks,kp 449 sparse_dump_u(sp_count_r)= & 450 gridsigma(ix,jy,kz)* & 451 factor3d(ix,jy,kz)/tot_mu(ks,kp) 452 452 else ! concentration is zero 453 453 sp_zer=.true. 454 454 endif 455 end do456 end do457 end do458 write(unitoutgrid) sp_count_i459 write(unitoutgrid) (sparse_dump_i(i),i=1,sp_count_i)460 write(unitoutgrid) sp_count_r461 write(unitoutgrid) (sparse_dump_r(i),i=1,sp_count_r)455 end do 456 end do 457 end do 458 write(unitoutgrid) sp_count_i 459 write(unitoutgrid) (sparse_dump_i(i),i=1,sp_count_i) 460 write(unitoutgrid) sp_count_r 461 write(unitoutgrid) (sparse_dump_r(i),i=1,sp_count_r) 462 462 ! write(unitoutgrid) (sparse_dump_u(i),i=1,sp_count_r) 463 463 464 endif ! concentration output465 466 467 468 469 if ((iout.eq.2).or.(iout.eq.3)) then ! mixing ratio470 471 472 sp_count_i=0473 sp_count_r=0474 sp_fact=-1.475 sp_zer=.true.476 if ((ldirect.eq.1).and.(WETDEP)) then477 do jy=0,numygrid-1478 do ix=0,numxgrid-1464 endif ! concentration output 465 466 ! Mixing ratio output 467 !******************** 468 469 if ((iout.eq.2).or.(iout.eq.3)) then ! mixing ratio 470 471 ! Wet deposition 472 sp_count_i=0 473 sp_count_r=0 474 sp_fact=-1. 475 sp_zer=.true. 476 if ((ldirect.eq.1).and.(WETDEP)) then 477 do jy=0,numygrid-1 478 do ix=0,numxgrid-1 479 479 if (wetgrid(ix,jy).gt.smallnum) then 480 480 if (sp_zer.eqv..true.) then ! first non zero value … … 484 484 sp_zer=.false. 485 485 sp_fact=sp_fact*(-1.) 486 endif487 sp_count_r=sp_count_r+1488 sparse_dump_r(sp_count_r)= &489 sp_fact* &490 1.e12*wetgrid(ix,jy)/area(ix,jy)491 sparse_dump_u(sp_count_r)= &492 1.e12*wetgridsigma(ix,jy)/area(ix,jy)493 else ! concentration is zero486 endif 487 sp_count_r=sp_count_r+1 488 sparse_dump_r(sp_count_r)= & 489 sp_fact* & 490 1.e12*wetgrid(ix,jy)/area(ix,jy) 491 sparse_dump_u(sp_count_r)= & 492 1.e12*wetgridsigma(ix,jy)/area(ix,jy) 493 else ! concentration is zero 494 494 sp_zer=.true. 495 endif496 end do497 end do498 else499 sp_count_i=0500 sp_count_r=0501 endif502 write(unitoutgridppt) sp_count_i503 write(unitoutgridppt) (sparse_dump_i(i),i=1,sp_count_i)504 write(unitoutgridppt) sp_count_r505 write(unitoutgridppt) (sparse_dump_r(i),i=1,sp_count_r)495 endif 496 end do 497 end do 498 else 499 sp_count_i=0 500 sp_count_r=0 501 endif 502 write(unitoutgridppt) sp_count_i 503 write(unitoutgridppt) (sparse_dump_i(i),i=1,sp_count_i) 504 write(unitoutgridppt) sp_count_r 505 write(unitoutgridppt) (sparse_dump_r(i),i=1,sp_count_r) 506 506 ! write(unitoutgridppt) (sparse_dump_u(i),i=1,sp_count_r) 507 507 508 508 509 510 sp_count_i=0511 sp_count_r=0512 sp_fact=-1.513 sp_zer=.true.514 if ((ldirect.eq.1).and.(DRYDEP)) then515 do jy=0,numygrid-1516 do ix=0,numxgrid-1509 ! Dry deposition 510 sp_count_i=0 511 sp_count_r=0 512 sp_fact=-1. 513 sp_zer=.true. 514 if ((ldirect.eq.1).and.(DRYDEP)) then 515 do jy=0,numygrid-1 516 do ix=0,numxgrid-1 517 517 if (drygrid(ix,jy).gt.smallnum) then 518 518 if (sp_zer.eqv..true.) then ! first non zero value … … 522 522 sp_zer=.false. 
523 523 sp_fact=sp_fact*(-1) 524 endif525 sp_count_r=sp_count_r+1526 sparse_dump_r(sp_count_r)= &527 sp_fact* &528 1.e12*drygrid(ix,jy)/area(ix,jy)529 sparse_dump_u(sp_count_r)= &530 1.e12*drygridsigma(ix,jy)/area(ix,jy)531 else ! concentration is zero524 endif 525 sp_count_r=sp_count_r+1 526 sparse_dump_r(sp_count_r)= & 527 sp_fact* & 528 1.e12*drygrid(ix,jy)/area(ix,jy) 529 sparse_dump_u(sp_count_r)= & 530 1.e12*drygridsigma(ix,jy)/area(ix,jy) 531 else ! concentration is zero 532 532 sp_zer=.true. 533 endif534 end do535 end do536 else537 sp_count_i=0538 sp_count_r=0539 endif540 write(unitoutgridppt) sp_count_i541 write(unitoutgridppt) (sparse_dump_i(i),i=1,sp_count_i)542 write(unitoutgridppt) sp_count_r543 write(unitoutgridppt) (sparse_dump_r(i),i=1,sp_count_r)533 endif 534 end do 535 end do 536 else 537 sp_count_i=0 538 sp_count_r=0 539 endif 540 write(unitoutgridppt) sp_count_i 541 write(unitoutgridppt) (sparse_dump_i(i),i=1,sp_count_i) 542 write(unitoutgridppt) sp_count_r 543 write(unitoutgridppt) (sparse_dump_r(i),i=1,sp_count_r) 544 544 ! write(unitoutgridppt) (sparse_dump_u(i),i=1,sp_count_r) 545 545 546 546 547 548 549 550 551 sp_count_i=0552 sp_count_r=0553 sp_fact=-1.554 sp_zer=.true.547 ! Mixing ratios 548 549 ! surf_only write only 1st layer 550 551 sp_count_i=0 552 sp_count_r=0 553 sp_fact=-1. 554 sp_zer=.true. 555 555 do kz=1,1 556 556 do jy=0,numygrid-1 … … 563 563 sp_zer=.false. 564 564 sp_fact=sp_fact*(-1.) 565 endif566 sp_count_r=sp_count_r+1567 sparse_dump_r(sp_count_r)= &568 sp_fact* &569 1.e12*grid(ix,jy,kz) &570 /volume(ix,jy,kz)/outnum* &571 weightair/weightmolar(ks)/densityoutgrid(ix,jy,kz)572 sparse_dump_u(sp_count_r)= &573 1.e12*gridsigma(ix,jy,kz)/volume(ix,jy,kz)/ &574 outnum*weightair/weightmolar(ks)/ &575 densityoutgrid(ix,jy,kz)576 else ! concentration is zero565 endif 566 sp_count_r=sp_count_r+1 567 sparse_dump_r(sp_count_r)= & 568 sp_fact* & 569 1.e12*grid(ix,jy,kz) & 570 /volume(ix,jy,kz)/outnum* & 571 weightair/weightmolar(ks)/densityoutgrid(ix,jy,kz) 572 sparse_dump_u(sp_count_r)= & 573 1.e12*gridsigma(ix,jy,kz)/volume(ix,jy,kz)/ & 574 outnum*weightair/weightmolar(ks)/ & 575 densityoutgrid(ix,jy,kz) 576 else ! concentration is zero 577 577 sp_zer=.true. 578 endif578 endif 579 579 end do 580 580 end do 581 581 end do 582 write(unitoutgridppt) sp_count_i583 write(unitoutgridppt) (sparse_dump_i(i),i=1,sp_count_i)584 write(unitoutgridppt) sp_count_r585 write(unitoutgridppt) (sparse_dump_r(i),i=1,sp_count_r)582 write(unitoutgridppt) sp_count_i 583 write(unitoutgridppt) (sparse_dump_i(i),i=1,sp_count_i) 584 write(unitoutgridppt) sp_count_r 585 write(unitoutgridppt) (sparse_dump_r(i),i=1,sp_count_r) 586 586 ! write(unitoutgridppt) (sparse_dump_u(i),i=1,sp_count_r) 587 587 588 endif ! output for ppt589 590 end do591 end do588 endif ! output for ppt 589 590 end do 591 end do 592 592 593 593 close(unitoutgridppt) … … 602 602 drygridtotal 603 603 604 ! Dump of receptor concentrations 605 606 if (numreceptor.gt.0 .and. (iout.eq.2 .or. iout.eq.3) ) then 607 write(unitoutreceptppt) itime 608 do ks=1,nspec 609 write(unitoutreceptppt) (1.e12*creceptor(i,ks)/outnum* & 610 weightair/weightmolar(ks)/densityoutrecept(i),i=1,numreceptor) 604 ! Dump of receptor concentrations 605 606 if (numreceptor.gt.0 .and. (iout.eq.2 .or. iout.eq.3) ) then 607 write(unitoutreceptppt) itime 608 do ks=1,nspec 609 write(unitoutreceptppt) (1.e12*creceptor(i,ks)/outnum* & 610 weightair/weightmolar(ks)/densityoutrecept(i),i=1,numreceptor) 611 end do 612 endif 613 614 ! 
Dump of receptor concentrations 615 616 if (numreceptor.gt.0) then 617 write(unitoutrecept) itime 618 do ks=1,nspec 619 write(unitoutrecept) (1.e12*creceptor(i,ks)/outnum, & 620 i=1,numreceptor) 621 end do 622 endif 623 624 625 626 ! Reinitialization of grid 627 !************************* 628 629 do ks=1,nspec 630 do kp=1,maxpointspec_act 631 do i=1,numreceptor 632 creceptor(i,ks)=0. 611 633 end do 612 endif 613 614 ! Dump of receptor concentrations 615 616 if (numreceptor.gt.0) then 617 write(unitoutrecept) itime 618 do ks=1,nspec 619 write(unitoutrecept) (1.e12*creceptor(i,ks)/outnum, & 620 i=1,numreceptor) 621 end do 622 endif 623 624 625 626 ! Reinitialization of grid 627 !************************* 628 629 do ks=1,nspec 630 do kp=1,maxpointspec_act 631 do i=1,numreceptor 632 creceptor(i,ks)=0. 633 end do 634 do jy=0,numygrid-1 635 do ix=0,numxgrid-1 636 do l=1,nclassunc 637 do nage=1,nageclass 638 do kz=1,numzgrid 639 gridunc(ix,jy,kz,ks,kp,l,nage)=0. 634 do jy=0,numygrid-1 635 do ix=0,numxgrid-1 636 do l=1,nclassunc 637 do nage=1,nageclass 638 do kz=1,numzgrid 639 gridunc(ix,jy,kz,ks,kp,l,nage)=0. 640 end do 640 641 end do 641 642 end do … … 644 645 end do 645 646 end do 646 end do647 647 648 648 -
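Note on the sparse output format used by the concoutput_surf hunks above: only nonzero cells are written; first the count of positions and the linearized index of the first cell of each contiguous nonzero run, then the count of values and the values themselves, with the sign flipped at the start of every run so a reader can recover the run boundaries. A minimal, self-contained sketch of that encoding (the routine name and interface are illustrative, not part of FLEXPART):

  ! Sketch of the run-length sparse encoding used by concoutput_surf.
  ! encode_sparse is a hypothetical name; the sign of the stored values
  ! alternates between runs of nonzero cells, and sp_i holds the
  ! linearized index (ix+jy*nx) of the first cell of each run.
  subroutine encode_sparse(grid2d,nx,ny,smallnum,sp_i,n_i,sp_r,n_r)
    implicit none
    integer, intent(in) :: nx,ny
    real, intent(in) :: grid2d(0:nx-1,0:ny-1), smallnum
    integer, intent(out) :: sp_i(nx*ny), n_i, n_r
    real, intent(out) :: sp_r(nx*ny)
    integer :: ix,jy
    real :: sp_fact
    logical :: sp_zer
    n_i=0; n_r=0; sp_fact=-1.; sp_zer=.true.
    do jy=0,ny-1
      do ix=0,nx-1
        if (grid2d(ix,jy).gt.smallnum) then
          if (sp_zer) then              ! first nonzero value of a new run
            n_i=n_i+1
            sp_i(n_i)=ix+jy*nx          ! start position of the run
            sp_zer=.false.
            sp_fact=sp_fact*(-1.)       ! flip sign to mark the run boundary
          endif
          n_r=n_r+1
          sp_r(n_r)=sp_fact*grid2d(ix,jy)
        else
          sp_zer=.true.                 ! run has ended
        endif
      end do
    end do
  end subroutine encode_sparse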
src/drydepokernel.f90
r1c0d5e6 r1c0d5e6 40 40 ! * 41 41 !***************************************************************************** 42 ! Changes: 43 ! eso 10/2016: Added option to disregard kernel 44 ! 45 !***************************************************************************** 46 42 47 43 48 use unc_mod … … 47 52 implicit none 48 53 49 real :: x,y,deposit(maxspec),ddx,ddy,xl,yl,wx,wy,w 54 real(dep_prec), dimension(maxspec) :: deposit 55 real :: x,y,ddx,ddy,xl,yl,wx,wy,w 50 56 integer :: ix,jy,ixp,jyp,ks,nunc,nage,kp 51 57 … … 74 80 endif 75 81 82 ! If no kernel is used, direct attribution to grid cell 83 !****************************************************** 84 85 if (lnokernel) then 86 do ks=1,nspec 87 if ((abs(deposit(ks)).gt.0).and.DRYDEPSPEC(ks)) then 88 if ((ix.ge.0).and.(jy.ge.0).and.(ix.le.numxgrid-1).and. & 89 (jy.le.numygrid-1)) then 90 drygridunc(ix,jy,ks,kp,nunc,nage)= & 91 drygridunc(ix,jy,ks,kp,nunc,nage)+deposit(ks) 92 end if 93 end if 94 end do 95 else ! use kernel 96 76 97 77 98 ! Determine mass fractions for four grid points 78 99 !********************************************** 79 100 do ks=1,nspec 80 101 81 102 if ((abs(deposit(ks)).gt.0).and.DRYDEPSPEC(ks)) then 82 103 83 104 if (.not.usekernel) then … … 87 108 if ((ix.ge.0).and.(jy.ge.0).and.(ix.le.numxgrid-1).and. & 88 109 (jy.le.numygrid-1)) then 89 w=wx*wy90 drygridunc(ix,jy,ks,kp,nunc,nage)= &110 w=wx*wy 111 drygridunc(ix,jy,ks,kp,nunc,nage)= & 91 112 drygridunc(ix,jy,ks,kp,nunc,nage)+deposit(ks)*w 92 113 endif … … 116 137 endif ! deposit>0 117 138 118 end do 139 end do 140 end if 119 141 120 142 end subroutine drydepokernel -
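Note on the lnokernel branch added above: each deposit is attributed either entirely to the grid cell containing the particle, or, in the kernel case, to the four surrounding cells with bilinear weights built from the fractional position. A simplified sketch of the two modes (illustrative only; the actual routine derives its wx/wy weights with a half-cell offset and guards all grid bounds):

  ! Sketch of the two attribution modes toggled by lnokernel.
  ! attribute_deposit is a hypothetical name; bounds checks are omitted
  ! and the bilinear weights are a simplification of the wx/wy logic.
  subroutine attribute_deposit(x,y,dep,grid,nx,ny,nokernel)
    implicit none
    integer, intent(in) :: nx,ny
    real, intent(in) :: x,y,dep
    logical, intent(in) :: nokernel
    real, intent(inout) :: grid(0:nx-1,0:ny-1)
    integer :: ix,jy
    real :: ddx,ddy
    ix=int(x); jy=int(y)                ! cell containing the particle
    if (nokernel) then
      grid(ix,jy)=grid(ix,jy)+dep       ! direct attribution, no smoothing
    else
      ddx=x-real(ix)                    ! fractional position in the cell
      ddy=y-real(jy)
      grid(ix  ,jy  )=grid(ix  ,jy  )+dep*(1.-ddx)*(1.-ddy)
      grid(ix+1,jy  )=grid(ix+1,jy  )+dep*ddx*(1.-ddy)
      grid(ix  ,jy+1)=grid(ix  ,jy+1)+dep*(1.-ddx)*ddy
      grid(ix+1,jy+1)=grid(ix+1,jy+1)+dep*ddx*ddy
    endif
  end subroutine attribute_deposit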
src/drydepokernel_nest.f90
re200b7a r4c64400 50 50 implicit none 51 51 52 real :: x,y,deposit(maxspec),ddx,ddy,xl,yl,wx,wy,w 52 real(dep_prec), dimension(maxspec) :: deposit 53 real :: x,y,ddx,ddy,xl,yl,wx,wy,w 53 54 integer :: ix,jy,ixp,jyp,ks,kp,nunc,nage 54 55 -
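Both kernel routines now take their deposit array in the configurable kind dep_prec, so deposition fields can be compiled in single or double precision without touching the numerics. A minimal sketch of such a compile-time precision switch (illustrative; the actual definitions live in par_mod and may differ in wording):

  ! Sketch of a compile-time precision switch like dep_prec.
  module prec_sketch
    implicit none
    integer, parameter :: sp = kind(1.0)     ! single precision kind
    integer, parameter :: dp = kind(1.0d0)   ! double precision kind
    integer, parameter :: dep_prec = sp      ! kind used for deposition fields
  end module prec_sketch

  program deposit_kind
    use prec_sketch
    implicit none
    real(dep_prec) :: deposit(10)            ! follows dep_prec everywhere
    deposit(:) = 0.0_dep_prec
    print *, 'deposit kind:', kind(deposit)
  end program deposit_kind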
src/get_wetscav.f90
rc7e771d rc7e771d 72 72 integer(kind=1) :: clouds_v 73 73 integer :: ks, kp 74 integer :: inc_count, blc_count 74 integer(selected_int_kind(16)), dimension(nspec) :: blc_count, inc_count 75 75 76 ! integer :: n1,n2, icbot,ictop, indcloud !TEST 76 77 real :: S_i, act_temp, cl, cle ! in cloud scavenging … … 237 238 if ((dquer(ks).le.0.).and.(weta_gas(ks).gt.0..or.wetb_gas(ks).gt.0.)) then 238 239 ! if (weta(ks).gt.0. .or. wetb(ks).gt.0.) then 239 blc_count =blc_count+1240 blc_count(ks)=blc_count(ks)+1 240 241 wetscav=weta_gas(ks)*prec(1)**wetb_gas(ks) 241 242 … … 243 244 !********************************************************************************* 244 245 else if ((dquer(ks).gt.0.).and.(crain_aero(ks).gt.0..or.csnow_aero(ks).gt.0.)) then 245 blc_count =blc_count+1246 blc_count(ks)=blc_count(ks)+1 246 247 247 248 !NIK 17.02.2015 … … 283 284 ! given in species file, or if gas and positive Henry's constant 284 285 if ((ccn_aero(ks).gt.0. .or. in_aero(ks).gt.0.).or.(henry(ks).gt.0.and.dquer(ks).le.0)) then 285 inc_count =inc_count+1286 inc_count(ks)=inc_count(ks)+1 286 287 ! write(*,*) 'Incloud: ',inc_count 287 288 ! if negative coefficients (turned off) set to zero for use in equation -
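The scavenging counters above become per-species arrays of kind selected_int_kind(16), i.e. at least 64-bit integers, so they cannot overflow when accumulated over long runs. For gases, the below-cloud scavenging coefficient itself follows wetscav = weta_gas * prec**wetb_gas, exactly as in the hunk above. A small worked sketch (the coefficient values below are placeholders, not taken from any SPECIES file):

  ! Sketch of the gas below-cloud scavenging coefficient from get_wetscav,
  ! with a 64-bit event counter as introduced above.
  program wetscav_sketch
    implicit none
    integer, parameter :: i8 = selected_int_kind(16) ! >= 16 decimal digits
    integer(i8) :: blc_count
    real :: weta_gas, wetb_gas, prec, wetscav
    weta_gas = 1.0e-4            ! placeholder A coefficient
    wetb_gas = 0.62              ! placeholder B exponent
    prec     = 2.0               ! placeholder precipitation rate
    blc_count = 0
    blc_count = blc_count + 1    ! one below-cloud scavenging event
    wetscav = weta_gas*prec**wetb_gas
    print *, 'wetscav:', wetscav, ' events:', blc_count
  end program wetscav_sketch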
src/gfs_mod.f90
r62e65c7 r4c64400 42 42 !********************************************* 43 43 44 integer,parameter :: nxmax=721,nymax=361,nuvzmax=64,nwzmax=64,nzmax=64 44 ! integer,parameter :: nxmax=721,nymax=361,nuvzmax=64,nwzmax=64,nzmax=64 45 integer,parameter :: nxmax=361,nymax=181,nuvzmax=64,nwzmax=64,nzmax=64 45 46 integer,parameter :: nxshift=0 ! for GFS or FNL 46 47 -
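Dropping the maximum GFS grid from 721x361 (0.5 degree) to 361x181 (1 degree) cuts every statically sized wind field to roughly a quarter of its former size. A back-of-envelope sketch of the effect for a single 4-byte 3-D field (time levels and the many other fields are ignored here):

  ! Sketch: memory for one real*4 field at the two grid sizes in gfs_mod.
  program gridmem_sketch
    implicit none
    integer, parameter :: old_nx=721, old_ny=361
    integer, parameter :: new_nx=361, new_ny=181
    integer, parameter :: nz=64, bytes=4
    print *, 'old field (MB):', real(old_nx)*real(old_ny)*nz*bytes/1.e6 ! ~66.6
    print *, 'new field (MB):', real(new_nx)*real(new_ny)*nz*bytes/1.e6 ! ~16.7
  end program gridmem_sketch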
src/gridcheck_gfs.f90
r5f9d14a r4c64400 421 421 !******************** 422 422 423 write(*,*) 424 write(*,*) 425 write(*,'(a,2i7)') 'Vertical levels in NCEP data: ', & 426 nuvz,nwz 427 write(*,*) 428 write(*,'(a)') 'Mother domain:' 429 write(*,'(a,f10.2,a1,f10.2,a,f10.2)') ' Longitude range: ', & 430 xlon0,' to ',xlon0+(nx-1)*dx,' Grid distance: ',dx 431 write(*,'(a,f10.2,a1,f10.2,a,f10.2)') ' Latitude range : ', & 432 ylat0,' to ',ylat0+(ny-1)*dy,' Grid distance: ',dy 433 write(*,*) 434 423 if (lroot) then 424 write(*,*) 425 write(*,*) 426 write(*,'(a,2i7)') 'Vertical levels in NCEP data: ', & 427 nuvz,nwz 428 write(*,*) 429 write(*,'(a)') 'Mother domain:' 430 write(*,'(a,f10.2,a1,f10.2,a,f10.2)') ' Longitude range: ', & 431 xlon0,' to ',xlon0+(nx-1)*dx,' Grid distance: ',dx 432 write(*,'(a,f10.2,a1,f10.2,a,f10.2)') ' Latitude range : ', & 433 ylat0,' to ',ylat0+(ny-1)*dy,' Grid distance: ',dy 434 write(*,*) 435 end if 435 436 436 437 ! CALCULATE VERTICAL DISCRETIZATION OF ECMWF MODEL -
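Guarding the domain summary with "if (lroot)" is the usual way to print a message once instead of once per MPI rank. A minimal sketch of the pattern (com_mod derives lroot from the process rank in much this way):

  ! Sketch of root-only output in an MPI program, as applied to
  ! gridcheck_gfs above.
  program lroot_sketch
    use mpi
    implicit none
    integer :: ierr, rank
    logical :: lroot
    call MPI_Init(ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
    lroot = (rank == 0)
    if (lroot) write(*,*) 'Mother domain: printed once, not per rank'
    call MPI_Finalize(ierr)
  end program lroot_sketch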
src/init_domainfill_mpi.f90
r7999df47 r16b61a5 47 47 ! * 48 48 !***************************************************************************** 49 ! MPI version: 50 ! 51 ! -Root process allocates temporary arrays holding properties for 52 ! all particles in the simulation 53 ! -An index array is used to assign 1st particle to 1st process, 2nd particle 54 ! to 2nd process and so on so that they are evenly distributed geographically 55 ! -Initialization for domain-filling is done as in the serial code 56 ! -Root process distributes particles evenly to other processes 57 ! 58 !***************************************************************************** 49 59 50 60 use point_mod … 56 66 implicit none 57 67 58 integer :: j,ix,jy,kz,ncolumn,numparttot 68 ! ncolumn_mpi,numparttot_mpi ncolumn,numparttot per process 69 integer :: j,ix,jy,kz,ncolumn,numparttot,ncolumn_mpi,numparttot_mpi, arr_size 59 70 real :: gridarea(0:nymax-1),pp(nzmax),ylat,ylatp,ylatm,hzone 60 71 real :: cosfactm,cosfactp,deltacol,dz1,dz2,dz,pnew,fractus … 66 77 67 78 integer :: idummy = -11 79 integer,allocatable,dimension(:) :: idx ! index array 80 integer :: stride 81 integer, parameter :: nullsize=0 68 82 logical :: first_call=.true. 69 83 70 ! Use different seed for each process 84 ! Use different seed for each process ! TODO: not needed anymore 71 85 if (first_call) then 72 86 idummy=idummy+mp_seed … 102 116 103 117 118 ! This section is only done by the root process 119 !******************************************* 120 if (lroot) then 121 ! Arrays for particles to be released. Add a small number to npart(1) in case of 122 ! round-off errors 123 arr_size = npart(1) + mp_np 124 allocate(itra1_tmp(arr_size),npoint_tmp(arr_size),nclass_tmp(arr_size),& 125 & idt_tmp(arr_size),itramem_tmp(arr_size),itrasplit_tmp(arr_size),& 126 & xtra1_tmp(arr_size),ytra1_tmp(arr_size),ztra1_tmp(arr_size),& 127 & xmass1_tmp(arr_size, maxspec)) 128 129 ! Index array for particles. This is to avoid having particles 130 ! near edges of domain all on one process. 131 !**************************************************************************** 132 allocate(idx(npart(1))) 133 stride = npart(1)/mp_partgroup_np 134 135 jj=0 136 do j=1, stride 137 do i=0, mp_partgroup_np-1 138 jj = jj+1 139 if (jj.gt.npart(1)) exit 140 idx(jj) = i*stride+j 141 end do 142 end do 143 144 ! Add extra indices if npart(1) not evenly divisible by number of processes 145 do i=1, mod(npart(1),mp_partgroup_np) 146 jj = jj+1 147 if (jj.gt.npart(1)) exit 148 idx(jj) = jj 149 end do 150 151 ! Initialize all particles as non-existent 152 itra1_tmp(:)=-999999999 153 104 154 ! Calculate area of grid cell with formula M=2*pi*R*h*dx/360, 105 155 ! see Netz, Formeln der Mathematik, 5. Auflage (1983), p.90 106 156 !************************************************************ 107 157 108 do jy=ny_sn(1),ny_sn(2) ! loop about latitudes 109 ylat=ylat0+real(jy)*dy 110 ylatp=ylat+0.5*dy 111 ylatm=ylat-0.5*dy 112 if ((ylatm.lt.0).and.(ylatp.gt.0.)) then 113 hzone=1./dyconst 114 else 158 do jy=ny_sn(1),ny_sn(2) ! loop about latitudes 159 ylat=ylat0+real(jy)*dy 160 ylatp=ylat+0.5*dy 161 ylatm=ylat-0.5*dy 162 if ((ylatm.lt.0).and.(ylatp.gt.0.)) then 163 hzone=1./dyconst 164 else 165 cosfactp=cos(ylatp*pih)*r_earth 166 cosfactm=cos(ylatm*pih)*r_earth 167 if (cosfactp.lt.cosfactm) then 168 hzone=sqrt(r_earth**2-cosfactp**2)- & 169 sqrt(r_earth**2-cosfactm**2) 170 else 171 hzone=sqrt(r_earth**2-cosfactm**2)- & 172 sqrt(r_earth**2-cosfactp**2) 173 endif 174 endif 175 gridarea(jy)=2.*pi*r_earth*hzone*dx/360.
176 end do 177 178 ! Do the same for the south pole 179 180 if (sglobal) then 181 ylat=ylat0 182 ylatp=ylat+0.5*dy 183 ylatm=ylat 184 cosfactm=0. 115 185 cosfactp=cos(ylatp*pih)*r_earth 186 hzone=sqrt(r_earth**2-cosfactm**2)- & 187 sqrt(r_earth**2-cosfactp**2) 188 gridarea(0)=2.*pi*r_earth*hzone*dx/360. 189 endif 190 191 ! Do the same for the north pole 192 193 if (nglobal) then 194 ylat=ylat0+real(nymin1)*dy 195 ylatp=ylat 196 ylatm=ylat-0.5*dy 197 cosfactp=0. 116 198 cosfactm=cos(ylatm*pih)*r_earth 117 if (cosfactp.lt.cosfactm) then 118 hzone=sqrt(r_earth**2-cosfactp**2)- & 119 sqrt(r_earth**2-cosfactm**2) 120 else 121 hzone=sqrt(r_earth**2-cosfactm**2)- & 122 sqrt(r_earth**2-cosfactp**2) 123 endif 199 hzone=sqrt(r_earth**2-cosfactp**2)- & 200 sqrt(r_earth**2-cosfactm**2) 201 gridarea(nymin1)=2.*pi*r_earth*hzone*dx/360. 124 202 endif 125 gridarea(jy)=2.*pi*r_earth*hzone*dx/360.126 end do127 128 ! Do the same for the south pole129 130 if (sglobal) then131 ylat=ylat0132 ylatp=ylat+0.5*dy133 ylatm=ylat134 cosfactm=0.135 cosfactp=cos(ylatp*pih)*r_earth136 hzone=sqrt(r_earth**2-cosfactm**2)- &137 sqrt(r_earth**2-cosfactp**2)138 gridarea(0)=2.*pi*r_earth*hzone*dx/360.139 endif140 141 ! Do the same for the north pole142 143 if (nglobal) then144 ylat=ylat0+real(nymin1)*dy145 ylatp=ylat146 ylatm=ylat-0.5*dy147 cosfactp=0.148 cosfactm=cos(ylatm*pih)*r_earth149 hzone=sqrt(r_earth**2-cosfactp**2)- &150 sqrt(r_earth**2-cosfactm**2)151 gridarea(nymin1)=2.*pi*r_earth*hzone*dx/360.152 endif153 203 154 204 … … 156 206 !********************************************************************* 157 207 158 colmasstotal=0. 159 do jy=ny_sn(1),ny_sn(2) ! loop about latitudes 160 do ix=nx_we(1),nx_we(2) ! loop about longitudes 161 pp(1)=rho(ix,jy,1,1)*r_air*tt(ix,jy,1,1) 162 pp(nz)=rho(ix,jy,nz,1)*r_air*tt(ix,jy,nz,1) 163 ! Each MPI process is assigned an equal share of particles 164 colmass(ix,jy)=(pp(1)-pp(nz))/ga*gridarea(jy)/mp_partgroup_np 165 colmasstotal=colmasstotal+colmass(ix,jy) 166 167 end do 168 end do 169 170 if (lroot) write(*,*) 'Atm. mass: ',colmasstotal 171 172 173 if (ipin.eq.0) numpart=0 208 colmasstotal=0. 209 do jy=ny_sn(1),ny_sn(2) ! loop about latitudes 210 do ix=nx_we(1),nx_we(2) ! loop about longitudes 211 pp(1)=rho(ix,jy,1,1)*r_air*tt(ix,jy,1,1) 212 pp(nz)=rho(ix,jy,nz,1)*r_air*tt(ix,jy,nz,1) 213 colmass(ix,jy)=(pp(1)-pp(nz))/ga*gridarea(jy) 214 colmasstotal=colmasstotal+colmass(ix,jy) 215 216 end do 217 end do 218 219 write(*,*) 'Atm. mass: ',colmasstotal 220 221 222 if (ipin.eq.0) numpart=0 174 223 175 224 ! Determine the particle positions 176 225 !********************************* 177 226 178 numparttot=0179 numcolumn=0180 do jy=ny_sn(1),ny_sn(2) ! loop about latitudes181 ylat=ylat0+real(jy)*dy182 do ix=nx_we(1),nx_we(2) ! loop about longitudes183 ncolumn=nint(0.999*real(npart(1)/mp_partgroup_np)*colmass(ix,jy)/ &184 colmasstotal)185 if (ncolumn.eq.0) goto 30186 if (ncolumn.gt.numcolumn) numcolumn=ncolumn227 numparttot=0 228 numcolumn=0 229 do jy=ny_sn(1),ny_sn(2) ! loop about latitudes 230 ylat=ylat0+real(jy)*dy 231 do ix=nx_we(1),nx_we(2) ! loop about longitudes 232 ncolumn=nint(0.999*real(npart(1))*colmass(ix,jy)/ & 233 colmasstotal) 234 if (ncolumn.eq.0) goto 30 235 if (ncolumn.gt.numcolumn) numcolumn=ncolumn 187 236 188 237 ! 
Calculate pressure at the altitudes of model surfaces, using the air density … … 190 239 !***************************************************************************** 191 240 192 do kz=1,nz193 pp(kz)=rho(ix,jy,kz,1)*r_air*tt(ix,jy,kz,1)194 end do195 196 197 deltacol=(pp(1)-pp(nz))/real(ncolumn)198 pnew=pp(1)+deltacol/2.199 jj=0200 do j=1,ncolumn201 jj=jj+1241 do kz=1,nz 242 pp(kz)=rho(ix,jy,kz,1)*r_air*tt(ix,jy,kz,1) 243 end do 244 245 246 deltacol=(pp(1)-pp(nz))/real(ncolumn) 247 pnew=pp(1)+deltacol/2. 248 jj=0 249 do j=1,ncolumn 250 jj=jj+1 202 251 203 252 … … 208 257 209 258 210 if (ncolumn.gt.20) then211 pnew=pnew-deltacol212 else213 pnew=pp(1)-ran1(idummy)*(pp(1)-pp(nz))214 endif215 216 do kz=1,nz-1217 if ((pp(kz).ge.pnew).and.(pp(kz+1).lt.pnew)) then218 dz1=pp(kz)-pnew219 dz2=pnew-pp(kz+1)220 dz=1./(dz1+dz2)259 if (ncolumn.gt.20) then 260 pnew=pnew-deltacol 261 else 262 pnew=pp(1)-ran1(idummy)*(pp(1)-pp(nz)) 263 endif 264 265 do kz=1,nz-1 266 if ((pp(kz).ge.pnew).and.(pp(kz+1).lt.pnew)) then 267 dz1=pp(kz)-pnew 268 dz2=pnew-pp(kz+1) 269 dz=1./(dz1+dz2) 221 270 222 271 ! Assign particle position … … 224 273 ! Do the following steps only if particles are not read in from previous model run 225 274 !***************************************************************************** 226 if (ipin.eq.0) then227 xtra1(numpart+jj)=real(ix)-0.5+ran1(idummy)228 if (ix.eq.0) xtra1(numpart+jj)=ran1(idummy)229 if (ix.eq.nxmin1) xtra1(numpart+jj)= &230 real(nxmin1)-ran1(idummy)231 ytra1(numpart+jj)=real(jy)-0.5+ran1(idummy)232 ztra1(numpart+jj)=(height(kz)*dz2+height(kz+1)*dz1)*dz233 if (ztra1(numpart+jj).gt.height(nz)-0.5) &234 ztra1(numpart+jj)=height(nz)-0.5275 if (ipin.eq.0) then 276 xtra1_tmp(idx(numpart+jj))=real(ix)-0.5+ran1(idummy) 277 if (ix.eq.0) xtra1_tmp(idx(numpart+jj))=ran1(idummy) 278 if (ix.eq.nxmin1) xtra1_tmp(idx(numpart+jj))= & 279 real(nxmin1)-ran1(idummy) 280 ytra1_tmp(idx(numpart+jj))=real(jy)-0.5+ran1(idummy) 281 ztra1_tmp(idx(numpart+jj))=(height(kz)*dz2+height(kz+1)*dz1)*dz 282 if (ztra1_tmp(idx(numpart+jj)).gt.height(nz)-0.5) & 283 ztra1_tmp(idx(numpart+jj))=height(nz)-0.5 235 284 236 285 237 286 ! Interpolate PV to the particle position 238 287 !**************************************** 239 ixm=int(xtra1(numpart+jj))240 jym=int(ytra1(numpart+jj))241 ixp=ixm+1242 jyp=jym+1243 ddx=xtra1(numpart+jj)-real(ixm)244 ddy=ytra1(numpart+jj)-real(jym)245 rddx=1.-ddx246 rddy=1.-ddy247 p1=rddx*rddy248 p2=ddx*rddy249 p3=rddx*ddy250 p4=ddx*ddy251 do i=2,nz252 if (height(i).gt.ztra1(numpart+jj)) then253 indzm=i-1254 indzp=i255 goto 6256 endif257 end do258 6 continue259 dz1=ztra1(numpart+jj)-height(indzm)260 dz2=height(indzp)-ztra1(numpart+jj)261 dz=1./(dz1+dz2)262 do in=1,2263 indzh=indzm+in-1264 y1(in)=p1*pv(ixm,jym,indzh,1) &265 +p2*pv(ixp,jym,indzh,1) &266 +p3*pv(ixm,jyp,indzh,1) &267 +p4*pv(ixp,jyp,indzh,1)268 end do269 pvpart=(dz2*y1(1)+dz1*y1(2))*dz270 if (ylat.lt.0.) 
pvpart=-1.*pvpart288 ixm=int(xtra1_tmp(idx(numpart+jj))) 289 jym=int(ytra1_tmp(idx(numpart+jj))) 290 ixp=ixm+1 291 jyp=jym+1 292 ddx=xtra1_tmp(idx(numpart+jj))-real(ixm) 293 ddy=ytra1_tmp(idx(numpart+jj))-real(jym) 294 rddx=1.-ddx 295 rddy=1.-ddy 296 p1=rddx*rddy 297 p2=ddx*rddy 298 p3=rddx*ddy 299 p4=ddx*ddy 300 do i=2,nz 301 if (height(i).gt.ztra1_tmp(idx(numpart+jj))) then 302 indzm=i-1 303 indzp=i 304 goto 6 305 endif 306 end do 307 6 continue 308 dz1=ztra1_tmp(idx(numpart+jj))-height(indzm) 309 dz2=height(indzp)-ztra1_tmp(idx(numpart+jj)) 310 dz=1./(dz1+dz2) 311 do in=1,2 312 indzh=indzm+in-1 313 y1(in)=p1*pv(ixm,jym,indzh,1) & 314 +p2*pv(ixp,jym,indzh,1) & 315 +p3*pv(ixm,jyp,indzh,1) & 316 +p4*pv(ixp,jyp,indzh,1) 317 end do 318 pvpart=(dz2*y1(1)+dz1*y1(2))*dz 319 if (ylat.lt.0.) pvpart=-1.*pvpart 271 320 272 321 … … 274 323 !***************************************************************************** 275 324 276 if (((ztra1(numpart+jj).gt.3000.).and. &277 (pvpart.gt.pvcrit)).or.(mdomainfill.eq.1)) then325 if (((ztra1_tmp(idx(numpart+jj)).gt.3000.).and. & 326 (pvpart.gt.pvcrit)).or.(mdomainfill.eq.1)) then 278 327 279 328 ! Assign certain properties to the particle 280 329 !****************************************** 281 nclass(numpart+jj)=min(int(ran1(idummy)* & 282 real(nclassunc))+1,nclassunc) 283 numparticlecount=numparticlecount+1 284 npoint(numpart+jj)=numparticlecount 285 idt(numpart+jj)=mintime 286 itra1(numpart+jj)=0 287 itramem(numpart+jj)=0 288 itrasplit(numpart+jj)=itra1(numpart+jj)+ldirect* & 289 itsplit 290 xmass1(numpart+jj,1)=colmass(ix,jy)/real(ncolumn) 291 if (mdomainfill.eq.2) xmass1(numpart+jj,1)= & 292 xmass1(numpart+jj,1)*pvpart*48./29.*ozonescale/10.**9 293 else 294 jj=jj-1 330 nclass_tmp(idx(numpart+jj))=min(int(ran1(idummy)* & 331 real(nclassunc))+1,nclassunc) 332 numparticlecount=numparticlecount+1 333 npoint_tmp(idx(numpart+jj))=numparticlecount 334 idt_tmp(idx(numpart+jj))=mintime 335 itra1_tmp(idx(numpart+jj))=0 336 itramem_tmp(idx(numpart+jj))=0 337 itrasplit_tmp(idx(numpart+jj))=itra1_tmp(idx(numpart+jj))+ldirect* & 338 itsplit 339 xmass1_tmp(idx(numpart+jj),1)=colmass(ix,jy)/real(ncolumn) 340 if (mdomainfill.eq.2) xmass1_tmp(idx(numpart+jj),1)= & 341 xmass1_tmp(idx(numpart+jj),1)*pvpart*48./29.*ozonescale/10.**9 342 else 343 jj=jj-1 344 endif 295 345 endif 296 346 endif 297 end if347 end do 298 348 end do 349 numparttot=numparttot+ncolumn 350 if (ipin.eq.0) numpart=numpart+jj 351 30 continue 299 352 end do 300 numparttot=numparttot+ncolumn 301 if (ipin.eq.0) numpart=numpart+jj 302 30 continue 303 end do 304 end do 353 end do 305 354 306 355 … … 308 357 !***************************************************** 309 358 310 if (numpart.gt.maxpart) then 311 write(*,*) 'numpart too large: change source in init_atm_mass.f' 312 write(*,*) 'numpart: ',numpart,' maxpart: ',maxpart 313 endif 314 315 316 xmassperparticle=colmasstotal/real(numparttot) 359 ! ESO :TODO: this warning need to be moved further up, else out-of-bounds error earlier 360 if (numpart.gt.maxpart) then 361 write(*,*) 'numpart too large: change source in init_atm_mass.f' 362 write(*,*) 'numpart: ',numpart,' maxpart: ',maxpart 363 endif 364 365 366 xmassperparticle=colmasstotal/real(numparttot) 317 367 318 368 … … 320 370 !*********************************************** 321 371 322 do j=1,numpart 323 if ((xtra1(j).lt.0.).or.(xtra1(j).ge.real(nxmin1)).or. & 324 (ytra1(j).lt.0.).or.(ytra1(j).ge.real(nymin1))) then 325 itra1(j)=-999999999 326 endif 327 end do 372 ! 
do j=1,numpart 373 do j=1,npoint(1) 374 if ((xtra1_tmp(j).lt.0.).or.(xtra1_tmp(j).ge.real(nxmin1)).or. & 375 (ytra1_tmp(j).lt.0.).or.(ytra1_tmp(j).ge.real(nymin1))) then 376 itra1_tmp(j)=-999999999 377 endif 378 end do 328 379 329 380 … … 340 391 !**************************************************************************** 341 392 342 fractus=real(numcolumn)/real(nz)343 write(*,*) 'Total number of particles at model start: ',numpart*mp_partgroup_np344 write(*,*) 'Maximum number of particles per column: ',numcolumn*mp_partgroup_np345 write(*,*) 'If ',fractus*mp_partgroup_np,' <1, better use more particles'346 fractus=sqrt(max(fractus,1.))/2.347 348 do jy=ny_sn(1),ny_sn(2) ! loop about latitudes349 do ix=nx_we(1),nx_we(2) ! loop about longitudes350 ncolumn=nint(0.999/fractus*real(npart(1)/mp_partgroup_np)*colmass(ix,jy) &351 /colmasstotal)352 if (ncolumn.gt.maxcolumn) stop 'maxcolumn too small'353 if (ncolumn.eq.0) goto 80393 fractus=real(numcolumn)/real(nz) 394 write(*,*) 'Total number of particles at model start: ',numpart 395 write(*,*) 'Maximum number of particles per column: ',numcolumn 396 write(*,*) 'If ',fractus,' <1, better use more particles' 397 fractus=sqrt(max(fractus,1.))/2. 398 399 do jy=ny_sn(1),ny_sn(2) ! loop about latitudes 400 do ix=nx_we(1),nx_we(2) ! loop about longitudes 401 ncolumn=nint(0.999/fractus*real(npart(1))*colmass(ix,jy) & 402 /colmasstotal) 403 if (ncolumn.gt.maxcolumn) stop 'maxcolumn too small' 404 if (ncolumn.eq.0) goto 80 354 405 355 406 … … 359 410 !************************************************************************ 360 411 361 if (ix.eq.nx_we(1)) numcolumn_we(1,jy)=ncolumn362 if (ix.eq.nx_we(2)) numcolumn_we(2,jy)=ncolumn363 if (jy.eq.ny_sn(1)) numcolumn_sn(1,ix)=ncolumn364 if (jy.eq.ny_sn(2)) numcolumn_sn(2,ix)=ncolumn412 if (ix.eq.nx_we(1)) numcolumn_we(1,jy)=ncolumn 413 if (ix.eq.nx_we(2)) numcolumn_we(2,jy)=ncolumn 414 if (jy.eq.ny_sn(1)) numcolumn_sn(1,ix)=ncolumn 415 if (jy.eq.ny_sn(2)) numcolumn_sn(2,ix)=ncolumn 365 416 366 417 ! Calculate pressure at the altitudes of model surfaces, using the air density … … 368 419 !***************************************************************************** 369 420 370 do kz=1,nz371 pp(kz)=rho(ix,jy,kz,1)*r_air*tt(ix,jy,kz,1)372 end do421 do kz=1,nz 422 pp(kz)=rho(ix,jy,kz,1)*r_air*tt(ix,jy,kz,1) 423 end do 373 424 374 425 ! Determine the reference starting altitudes 375 426 !******************************************* 376 427 377 deltacol=(pp(1)-pp(nz))/real(ncolumn)378 pnew=pp(1)+deltacol/2.379 do j=1,ncolumn380 pnew=pnew-deltacol381 do kz=1,nz-1382 if ((pp(kz).ge.pnew).and.(pp(kz+1).lt.pnew)) then383 dz1=pp(kz)-pnew384 dz2=pnew-pp(kz+1)385 dz=1./(dz1+dz2)386 zposition=(height(kz)*dz2+height(kz+1)*dz1)*dz387 if (zposition.gt.height(nz)-0.5) zposition=height(nz)-0.5428 deltacol=(pp(1)-pp(nz))/real(ncolumn) 429 pnew=pp(1)+deltacol/2. 430 do j=1,ncolumn 431 pnew=pnew-deltacol 432 do kz=1,nz-1 433 if ((pp(kz).ge.pnew).and.(pp(kz+1).lt.pnew)) then 434 dz1=pp(kz)-pnew 435 dz2=pnew-pp(kz+1) 436 dz=1./(dz1+dz2) 437 zposition=(height(kz)*dz2+height(kz+1)*dz1)*dz 438 if (zposition.gt.height(nz)-0.5) zposition=height(nz)-0.5 388 439 389 440 ! 
Memorize vertical positions where particles are introduced … … 391 442 !*********************************************************** 392 443 393 if (ix.eq.nx_we(1)) zcolumn_we(1,jy,j)=zposition394 if (ix.eq.nx_we(2)) zcolumn_we(2,jy,j)=zposition395 if (jy.eq.ny_sn(1)) zcolumn_sn(1,ix,j)=zposition396 if (jy.eq.ny_sn(2)) zcolumn_sn(2,ix,j)=zposition444 if (ix.eq.nx_we(1)) zcolumn_we(1,jy,j)=zposition 445 if (ix.eq.nx_we(2)) zcolumn_we(2,jy,j)=zposition 446 if (jy.eq.ny_sn(1)) zcolumn_sn(1,ix,j)=zposition 447 if (jy.eq.ny_sn(2)) zcolumn_sn(2,ix,j)=zposition 397 448 398 449 ! Initialize mass that has accumulated at boundary to zero 399 450 !********************************************************* 400 451 401 acc_mass_we(1,jy,j)=0. 402 acc_mass_we(2,jy,j)=0. 403 acc_mass_sn(1,jy,j)=0. 404 acc_mass_sn(2,jy,j)=0. 405 endif 452 acc_mass_we(1,jy,j)=0. 453 acc_mass_we(2,jy,j)=0. 454 acc_mass_sn(1,jy,j)=0. 455 acc_mass_sn(2,jy,j)=0. 456 endif 457 end do 406 458 end do 459 80 continue 407 460 end do 408 80 continue 409 end do 410 end do 461 end do 411 462 412 463 ! If particles shall be read in to continue an existing run, … … 415 466 !*************************************************************************** 416 467 417 ! :TODO: eso: parallelize 418 if (ipin.eq.1) then 419 open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', & 420 form='unformatted') 421 read(unitboundcond) numcolumn_we,numcolumn_sn, & 422 zcolumn_we,zcolumn_sn,acc_mass_we,acc_mass_sn 423 close(unitboundcond) 424 endif 425 426 468 ! eso TODO: only needed for root process 469 if (ipin.eq.1) then 470 open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', & 471 form='unformatted') 472 read(unitboundcond) numcolumn_we,numcolumn_sn, & 473 zcolumn_we,zcolumn_sn,acc_mass_we,acc_mass_sn 474 close(unitboundcond) 475 endif 476 477 numpart = npart(1)/mp_partgroup_np 478 if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1 479 480 else ! Allocate dummy arrays for receiving processes 481 allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),& 482 & idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),& 483 & xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),& 484 & xmass1_tmp(nullsize, nullsize)) 485 486 end if ! end if(lroot) 487 488 489 ! Distribute particles to other processes (numpart is 'per-process', not total) 490 call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr) 491 ! eso TODO: xmassperparticle: not necessary to send 492 call MPI_Bcast(xmassperparticle, 1, mp_sp, id_root, mp_comm_used, mp_ierr) 493 call mpif_send_part_properties(numpart) 494 495 ! Deallocate the temporary arrays used for all particles 496 deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,& 497 & itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp) 427 498 428 499 -
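The idx array built at the top of init_domainfill_mpi deals particle slots out round-robin, slot j of process i getting global index i*stride+j, so that particles initialized near each other geographically end up on different processes. A self-contained sketch of the same assignment with small numbers:

  ! Sketch of the stride-based index assignment from init_domainfill_mpi.
  program idx_sketch
    implicit none
    integer, parameter :: npart=10, nproc=3
    integer :: idx(npart)
    integer :: i, j, jj, stride
    stride = npart/nproc
    jj = 0
    do j=1, stride
      do i=0, nproc-1
        jj = jj+1
        if (jj.gt.npart) exit
        idx(jj) = i*stride+j        ! slot j of process i
      end do
    end do
    do i=1, mod(npart,nproc)        ! leftovers when npart/nproc is not exact
      jj = jj+1
      if (jj.gt.npart) exit
      idx(jj) = jj
    end do
    print *, idx                    ! prints 1 4 7 2 5 8 3 6 9 10
  end program idx_sketch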
src/makefile
rd1a8707 rd1a8707 423 423 par_mod.o point_mod.o unc_mod.o xmass_mod.o 424 424 timemanager_mpi.o: com_mod.o flux_mod.o mpi_mod.o oh_mod.o outg_mod.o \ 425 par_mod.o point_mod.o unc_mod.o xmass_mod.o 425 par_mod.o point_mod.o unc_mod.o xmass_mod.o netcdf_output_mod.o 426 426 unc_mod.o: par_mod.o 427 427 verttransform.o: cmapf_mod.o com_mod.o par_mod.o -
src/mean_mod.f90
r6a678e3 r4c64400 29 29 module procedure mean_sp 30 30 module procedure mean_dp 31 module procedure mean_mixed_prec 31 module procedure mean_mixed_dss 32 module procedure mean_mixed_dsd 33 32 34 end interface mean 33 35 … … 64 66 ! real(sp),parameter :: eps=1.0e-30 65 67 68 integer,intent(in) :: number 66 69 real(sp), intent(in) :: x_sp(number) 67 70 real(sp), intent(out) ::xm,xs 68 integer,intent(in) :: number69 71 real(sp) :: xl,xq,xaux 70 72 real(sp),parameter :: eps=1.0e-30 … … 116 118 implicit none 117 119 120 integer,intent(in) :: number 118 121 real(dp), intent(in) :: x_dp(number) 119 122 real(dp), intent(out) ::xm,xs 120 integer,intent(in) :: number121 123 real(dp) :: xl,xq,xaux 122 124 real(dp),parameter :: eps=1.0e-30 … … 142 144 end subroutine mean_dp 143 145 144 subroutine mean_mixed_ prec(x_dp,xm,xs,number)145 146 !***************************************************************************** 147 ! * 148 ! This subroutine calculates mean and standard deviation of a given element.* 149 ! * 150 ! AUTHOR: Andreas Stohl, 25 January 1994 * 151 ! * 152 ! Mixed precision version ESO 2016 (dp in put, sp output)*146 subroutine mean_mixed_dss(x_dp,xm,xs,number) 147 148 !***************************************************************************** 149 ! * 150 ! This subroutine calculates mean and standard deviation of a given element.* 151 ! * 152 ! AUTHOR: Andreas Stohl, 25 January 1994 * 153 ! * 154 ! Mixed precision version ESO 2016 (dp in, sp out, sp out) * 153 155 !***************************************************************************** 154 156 ! * … … 168 170 implicit none 169 171 172 integer,intent(in) :: number 170 173 real(dp), intent(in) :: x_dp(number) 171 174 real(sp), intent(out) ::xm,xs 172 integer,intent(in) :: number173 175 real(sp) :: xl,xq,xaux 174 176 real(sp),parameter :: eps=1.0e-30 … … 192 194 endif 193 195 194 end subroutine mean_mixed_prec 196 end subroutine mean_mixed_dss 197 198 subroutine mean_mixed_dsd(x_dp,xm,xs_dp,number) 199 200 !***************************************************************************** 201 ! * 202 ! This subroutine calculates mean and standard deviation of a given element.* 203 ! * 204 ! AUTHOR: Andreas Stohl, 25 January 1994 * 205 ! * 206 ! Mixed precision version ESO 2016 (dp in, sp out, dp out) * 207 !***************************************************************************** 208 ! * 209 ! Variables: * 210 ! x_dp(number) field of input data * 211 ! xm mean * 212 ! xs_dp standard deviation * 213 ! number number of elements of field x_dp * 214 ! * 215 ! Constants: * 216 ! eps tiny number * 217 ! * 218 !***************************************************************************** 219 220 use par_mod, only: sp,dp 221 222 implicit none 223 224 integer,intent(in) :: number 225 real(dp), intent(in) :: x_dp(number) 226 real(sp), intent(out) ::xm 227 real(dp), intent(out) ::xs_dp 228 real(dp) :: xl,xq,xaux 229 real(dp),parameter :: eps=1.0e-30_dp 230 integer :: i 231 232 xl=0._dp 233 xq=0._dp 234 do i=1,number 235 xl=xl+x_dp(i) 236 xq=xq+x_dp(i)*x_dp(i) 237 end do 238 239 xm=xl/real(number,kind=sp) 240 241 xaux=xq-xl*xl/real(number,kind=dp) 242 243 if (xaux.lt.eps) then 244 xs_dp=0._dp 245 else 246 xs_dp=sqrt(xaux/real(number-1,kind=dp)) 247 endif 248 249 end subroutine mean_mixed_dsd 250 195 251 end module mean_mod -
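All four specific procedures behind the generic mean interface compute the same statistics in one pass over the data: the mean from the running sum, and the sample standard deviation as sqrt((sum(x**2) - sum(x)**2/n)/(n-1)), with the eps guard catching tiny negative values caused by round-off. A compact single-precision sketch of the shared logic (only the argument kinds differ between the four variants):

  ! Sketch of the sum/sum-of-squares statistics shared by all mean_mod
  ! variants (mean_sp, mean_dp, mean_mixed_dss, mean_mixed_dsd).
  subroutine mean_sketch(x, xm, xs, n)
    implicit none
    integer, intent(in) :: n
    real, intent(in)  :: x(n)
    real, intent(out) :: xm, xs
    real, parameter :: eps=1.0e-30
    real :: xl, xq, xaux
    integer :: i
    xl=0.
    xq=0.
    do i=1,n
      xl=xl+x(i)             ! running sum
      xq=xq+x(i)*x(i)        ! running sum of squares
    end do
    xm=xl/real(n)
    xaux=xq-xl*xl/real(n)    ! (n-1) times the sample variance
    if (xaux.lt.eps) then
      xs=0.                  ! guard against negative round-off
    else
      xs=sqrt(xaux/real(n-1))
    endif
  end subroutine mean_sketch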
src/mpi_mod.f90
r79abee9 r4c64400 90 90 ! MPI tags/requests for send/receive operation 91 91 integer :: tm1 92 integer, parameter :: nvar_async=26 !27 !29 :DBG:92 integer, parameter :: nvar_async=26 93 93 !integer, dimension(:), allocatable :: tags 94 94 integer, dimension(:), allocatable :: reqs 95 96 ! Status array used for certain MPI operations (MPI_RECV) 97 integer, dimension(MPI_STATUS_SIZE) :: mp_status 95 98 96 99 … … 149 152 integer, private :: dat_lun 150 153 154 ! Temporary arrays for particles (allocated and deallocated as needed) 155 integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, & 156 & itramem_tmp, itrasplit_tmp 157 real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp 158 real, allocatable, dimension(:) :: ztra1_tmp 159 real, allocatable, dimension(:,:) :: xmass1_tmp 160 161 ! mp_redist_fract Exchange particles between processes if relative difference 162 ! is larger. A good value is between 0.0 and 0.5 163 ! mp_maxpart_factor Allocate more memory per process than strictly needed 164 ! (safety factor). Recommended value between 1.5 and 2.5 165 ! mp_min_redist Do not redistribute particles if below this limit 166 real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5 167 integer,parameter :: mp_min_redist=100000 168 169 151 170 contains 152 171 … … 195 214 if (dep_prec==dp) then 196 215 mp_cp = MPI_REAL8 197 216 ! TODO: write info message for serial version as well 198 217 if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields' 199 218 else if (dep_prec==sp) then … … 232 251 write(*,FMT='(80("#"))') 233 252 end if 234 lmp_sync=.true. ! :DBG: eso fix this...253 lmp_sync=.true. 235 254 end if 236 255 … … 311 330 312 331 ! Set maxpart per process 313 if (mp_partid.lt.mod(maxpart,mp_partgroup_np)) addmaxpart=1 314 maxpart_mpi=int(m axpart/mp_partgroup_np)+addmaxpart332 ! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution 333 maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np)) 315 334 316 335 ! Do not allocate particle data arrays for readwind process … … 321 340 ! Set random seed for each non-root process 322 341 if (mp_pid.gt.0) then 323 ! if (mp_pid.ge.0) then324 ! call system_clock(s)325 342 s = 244 326 343 mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729)) 327 344 end if 328 if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed329 345 if (mp_dbg_mode) then 330 ! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20331 346 mp_seed=0 332 347 if (lroot) write(*,*) 'MPI: setting seed=0' … … 454 469 455 470 471 subroutine mpif_send_part_properties(num_part) 472 !*********************************************************************** 473 ! Distribute particle properties from root to all processes. 474 ! 475 ! Used by init_domainfill_mpi 476 ! 477 ! Variables: 478 ! 479 ! num_part input, number of particles per process (rounded up) 480 ! 481 !*********************************************************************** 482 use com_mod 483 484 implicit none 485 486 integer,intent(in) :: num_part 487 integer :: i,jj, addone 488 489 ! Exit if too many particles 490 if (num_part.gt.maxpart_mpi) then 491 write(*,*) '#####################################################' 492 write(*,*) '#### ERROR - TOTAL NUMBER OF PARTICLES REQUIRED ####' 493 write(*,*) '#### EXCEEDS THE MAXIMUM ALLOWED NUMBER. REDUCE ####' 494 write(*,*) '#### EITHER NUMBER OF PARTICLES PER RELEASE POINT####' 495 write(*,*) '#### OR INCREASE MAXPART. 
####' 496 write(*,*) '#####################################################' 497 ! call MPI_FINALIZE(mp_ierr) 498 stop 499 end if 500 501 502 ! Time for MPI communications 503 !**************************** 504 if (mp_measure_time) call mpif_mtime('commtime',0) 505 506 ! Distribute variables, send from pid 0 to other processes (including itself) 507 !**************************************************************************** 508 509 call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,& 510 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 511 if (mp_ierr /= 0) goto 600 512 call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,& 513 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 514 if (mp_ierr /= 0) goto 600 515 call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,& 516 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 517 if (mp_ierr /= 0) goto 600 518 call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,& 519 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 520 if (mp_ierr /= 0) goto 600 521 call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,& 522 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 523 if (mp_ierr /= 0) goto 600 524 call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,& 525 &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 526 if (mp_ierr /= 0) goto 600 527 call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,& 528 &num_part,mp_dp,id_root,mp_comm_used,mp_ierr) 529 if (mp_ierr /= 0) goto 600 530 call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,& 531 &num_part,mp_dp,id_root,mp_comm_used,mp_ierr) 532 if (mp_ierr /= 0) goto 600 533 call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,& 534 &num_part,mp_sp,id_root,mp_comm_used,mp_ierr) 535 if (mp_ierr /= 0) goto 600 536 do i=1,nspec 537 call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),& 538 &num_part,mp_sp,id_root,mp_comm_used,mp_ierr) 539 if (mp_ierr /= 0) goto 600 540 end do 541 542 if (mp_measure_time) call mpif_mtime('commtime',1) 543 544 goto 601 545 546 600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr 547 stop 548 549 ! After the transfer of particles it is possible that the value of 550 ! "numpart" is larger than the number actually used, so we reduce it if there are 551 ! invalid particles at the end of the arrays 552 601 do i=num_part, 1, -1 553 if (itra1(i).eq.-999999999) then 554 numpart=numpart-1 555 else 556 exit 557 end if 558 end do 559 560 561 !601 end subroutine mpif_send_part_properties 562 end subroutine mpif_send_part_properties 563 564 565 subroutine mpif_calculate_part_redist(itime) 566 !*********************************************************************** 567 ! Calculate number of particles to redistribute between processes. This routine 568 ! can be called at regular time intervals to keep a balanced number of 569 ! particles on each process. 570 ! 571 ! First, all processes report their local "numpart" to each other, which is 572 ! stored in array "numpart_mpi(np)". This array is sorted from low to 573 ! high values, along with a corresponding process ID array "idx_arr(np)". 574 ! If the relative difference between the highest and lowest "numpart_mpi" value 575 ! is above a threshold, particles are transferred from process idx_arr(np-1) 576 ! to process idx_arr(0). Repeat for processes idx_arr(np-i) and idx_arr(i) 577 ! for all valid i. 578 ! Note: If np is an odd number, the process with the median 579 ! number of particles is left unchanged 580 ! 581 ! VARIABLES 582 !
itime input, current time 583 !*********************************************************************** 584 use com_mod 585 586 implicit none 587 588 integer, intent(in) :: itime 589 real :: pmin,z 590 integer :: i,jj,nn, num_part=1,m,imin, num_trans 591 logical :: first_iter 592 integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr 593 real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this 594 595 ! Exit if running with only 1 process 596 !************************************************************************ 597 if (mp_np.eq.1) return 598 599 ! All processes exchange information on number of particles 600 !**************************************************************************** 601 allocate(numparticles_mpi(0:mp_partgroup_np-1), & 602 &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1)) 603 604 call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, & 605 & 1, MPI_INTEGER, mp_comm_used, mp_ierr) 606 607 608 ! Sort from lowest to highest 609 ! Initial guess: correct order 610 sorted(:) = numparticles_mpi(:) 611 do i=0, mp_partgroup_np-1 612 idx_arr(i) = i 613 end do 614 615 ! For each successive element in index array, see if a lower value exists 616 do i=0, mp_partgroup_np-2 617 pmin=sorted(i) 618 imin=idx_arr(i) 619 m=i+1 620 do jj=m, mp_partgroup_np-1 621 if (pmin.le.sorted(jj)) cycle 622 z=pmin 623 pmin=sorted(jj) 624 sorted(jj)=z 625 626 nn=imin 627 imin=idx_arr(jj) 628 idx_arr(jj)=nn 629 end do 630 sorted(i)=pmin 631 idx_arr(i)=imin 632 end do 633 634 ! For each pair of processes, transfer particles if the difference is above a 635 ! limit, and numpart at sending process large enough 636 637 m=mp_partgroup_np-1 ! index for last sorted process (most particles) 638 do i=0,mp_partgroup_np/2-1 639 num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i)) 640 if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then 641 if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.& 642 & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then 643 ! DBG 644 ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi', & 645 ! &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi 646 ! DBG 647 call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2) 648 end if 649 end if 650 m=m-1 651 end do 652 653 deallocate(numparticles_mpi, idx_arr, sorted) 654 655 end subroutine mpif_calculate_part_redist 656 657 658 subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans) 659 !*********************************************************************** 660 ! Transfer particle properties between two arbitrary processes. 661 ! 662 ! VARIABLES 663 ! 664 ! itime input, current time 665 ! src_proc input, ID of source process 666 ! dest_proc input, ID of destination process 667 ! num_trans input, number of particles to transfer 668 ! 669 !************************************************************************ 670 use com_mod !TODO: ,only: nclass etc 671 672 implicit none 673 674 integer, intent(in) :: itime, src_proc, dest_proc, num_trans 675 integer :: ll, ul ! lower and upper indices in arrays 676 integer :: arr_sz ! size of temporary arrays 677 integer :: mtag ! MPI message tag 678 integer :: i, j, minpart, ipart, maxnumpart 679 680 ! Check for invalid input arguments 681 !********************************** 682 if (src_proc.eq.dest_proc) then 683 write(*,*) '<mpi_mod::mpif_redist_part>: Error: & 684 &src_proc.eq.dest_proc' 685 stop 686 end if 687 688 ! 
Measure time for MPI communications 689 !************************************ 690 if (mp_measure_time) call mpif_mtime('commtime',0) 691 692 ! Send particles 693 !*************** 694 if (mp_partid.eq.src_proc) then 695 mtag = 2000 696 697 ! Array indices for the transferred particles 698 ll=numpart-num_trans+1 699 ul=numpart 700 701 if (mp_dev_mode) then 702 write(*,FMT='(72("#"))') 703 write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc 704 write(*,*) "numpart before: ", numpart 705 end if 706 707 call MPI_SEND(nclass(ll:ul),num_trans,& 708 & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr) 709 710 call MPI_SEND(npoint(ll:ul),num_trans,& 711 & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr) 712 713 call MPI_SEND(itra1(ll:ul),num_trans, & 714 & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr) 715 716 call MPI_SEND(idt(ll:ul),num_trans, & 717 & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr) 718 719 call MPI_SEND(itramem(ll:ul),num_trans, & 720 & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr) 721 722 call MPI_SEND(itrasplit(ll:ul),num_trans,& 723 & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr) 724 725 call MPI_SEND(xtra1(ll:ul),num_trans, & 726 & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr) 727 728 call MPI_SEND(ytra1(ll:ul),num_trans,& 729 & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr) 730 731 call MPI_SEND(ztra1(ll:ul),num_trans,& 732 & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr) 733 734 do j=1,nspec 735 call MPI_SEND(xmass1(ll:ul,j),num_trans,& 736 & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr) 737 end do 738 739 ! Terminate transferred particles, update numpart 740 itra1(ll:ul) = -999999999 741 742 numpart = numpart-num_trans 743 744 if (mp_dev_mode) then 745 write(*,*) "numpart after: ", numpart 746 write(*,FMT='(72("#"))') 747 end if 748 749 else if (mp_partid.eq.dest_proc) then 750 751 ! Receive particles 752 !****************** 753 mtag = 2000 754 ! Array indices for the transferred particles 755 ll=numpart+1 756 ul=numpart+num_trans 757 758 if (mp_dev_mode) then 759 write(*,FMT='(72("#"))') 760 write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc 761 write(*,*) "numpart before: ", numpart 762 end if 763 764 ! We could receive the data directly at nclass(ll:ul) etc., but this leaves 765 ! vacant spaces at lower indices. Using temporary arrays instead. 
766 arr_sz = ul-ll+1 767 allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),& 768 & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),& 769 & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),& 770 & xmass1_tmp(arr_sz, maxspec)) 771 772 call MPI_RECV(nclass_tmp,num_trans,& 773 & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr) 774 775 call MPI_RECV(npoint_tmp,num_trans,& 776 & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr) 777 778 call MPI_RECV(itra1_tmp,num_trans, & 779 & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr) 780 781 call MPI_RECV(idt_tmp,num_trans, & 782 & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr) 783 784 call MPI_RECV(itramem_tmp,num_trans, & 785 & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr) 786 787 call MPI_RECV(itrasplit_tmp,num_trans,& 788 & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr) 789 790 call MPI_RECV(xtra1_tmp,num_trans, & 791 & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr) 792 793 call MPI_RECV(ytra1_tmp,num_trans,& 794 & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr) 795 796 call MPI_RECV(ztra1_tmp,num_trans,& 797 & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr) 798 799 do j=1,nspec 800 call MPI_RECV(xmass1_tmp(:,j),num_trans,& 801 & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr) 802 end do 803 804 ! This is the maximum value numpart can possibly have after the transfer 805 maxnumpart=numpart+num_trans 806 807 ! Search for vacant space and copy from temporary storage 808 !******************************************************** 809 minpart=1 810 do i=1, num_trans 811 ! Take into account that we may have transferred invalid particles 812 if (itra1_tmp(i).ne.itime) cycle 813 do ipart=minpart,maxnumpart 814 if (itra1(ipart).ne.itime) then 815 itra1(ipart) = itra1_tmp(i) 816 npoint(ipart) = npoint_tmp(i) 817 nclass(ipart) = nclass_tmp(i) 818 idt(ipart) = idt_tmp(i) 819 itramem(ipart) = itramem_tmp(i) 820 itrasplit(ipart) = itrasplit_tmp(i) 821 xtra1(ipart) = xtra1_tmp(i) 822 ytra1(ipart) = ytra1_tmp(i) 823 ztra1(ipart) = ztra1_tmp(i) 824 xmass1(ipart,:) = xmass1_tmp(i,:) 825 goto 200 ! Storage space has been found, stop searching 826 end if 827 ! :TODO: add a check for whether too many particles are required 828 end do 829 200 minpart=ipart+1 830 end do 831 ! Increase numpart, if necessary 832 numpart=max(numpart,ipart) 833 834 deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,& 835 &itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp) 836 837 if (mp_dev_mode) then 838 write(*,*) "numpart after: ", numpart 839 write(*,FMT='(72("#"))') 840 end if 841 842 else 843 ! This routine should only be called by the two participating processes 844 write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part" 845 stop 846 return 847 end if 848 849 !
Measure time for MPI communications 850 !************************************ 851 if (mp_measure_time) call mpif_mtime('commtime',1) 852 853 end subroutine mpif_redist_part 854 855 456 856 subroutine mpif_tm_send_vars 457 857 !*********************************************************************** … … 477 877 if (lroot) then 478 878 call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 479 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)879 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 480 880 if (mp_ierr /= 0) goto 600 481 881 call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 482 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)882 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 483 883 if (mp_ierr /= 0) goto 600 484 884 call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 485 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)885 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 486 886 if (mp_ierr /= 0) goto 600 487 887 call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 488 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)888 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 489 889 if (mp_ierr /= 0) goto 600 490 890 call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,& 491 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)891 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 492 892 if (mp_ierr /= 0) goto 600 493 893 494 894 ! int2 495 895 call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,& 496 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)896 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr) 497 897 if (mp_ierr /= 0) goto 600 498 898 499 899 ! real 500 900 call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,& 501 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)901 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 502 902 if (mp_ierr /= 0) goto 600 503 903 call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,& 504 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)904 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 505 905 if (mp_ierr /= 0) goto 600 506 906 call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,& 507 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)907 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 508 908 if (mp_ierr /= 0) goto 600 509 909 510 910 call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,& 511 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)911 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 512 912 if (mp_ierr /= 0) goto 600 513 913 call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,& 514 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)914 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 515 915 if (mp_ierr /= 0) goto 600 516 916 call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,& 517 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)917 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 518 918 if (mp_ierr /= 0) goto 600 519 919 520 920 call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,& 521 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)921 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 522 922 if (mp_ierr /= 0) goto 600 523 923 call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,& 524 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)924 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 525 925 if (mp_ierr /= 0) goto 600 526 926 call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,& 527 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)927 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 528 928 if 
(mp_ierr /= 0) goto 600 529 929 530 930 do i=1,nspec 531 931 call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,& 532 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)932 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 533 933 if (mp_ierr /= 0) goto 600 534 934 end do … … 537 937 ! integers 538 938 call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,& 539 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)939 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 540 940 if (mp_ierr /= 0) goto 600 541 941 call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,& 542 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)942 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 543 943 if (mp_ierr /= 0) goto 600 544 944 call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,& 545 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)945 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 546 946 if (mp_ierr /= 0) goto 600 547 947 call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,& 548 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)948 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 549 949 if (mp_ierr /= 0) goto 600 550 950 call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,& 551 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)951 & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr) 552 952 if (mp_ierr /= 0) goto 600 553 953 554 954 ! int2 555 955 call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,& 556 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)956 & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr) 557 957 if (mp_ierr /= 0) goto 600 558 958 559 959 ! reals 560 960 call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,& 561 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)961 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 562 962 if (mp_ierr /= 0) goto 600 563 963 call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,& 564 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)964 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 565 965 if (mp_ierr /= 0) goto 600 566 966 call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,& 567 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)967 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 568 968 if (mp_ierr /= 0) goto 600 569 969 570 970 call MPI_SCATTER(us,numpart_mpi,mp_sp,us,& 571 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)971 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 572 972 if (mp_ierr /= 0) goto 600 573 973 call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,& 574 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)974 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 575 975 if (mp_ierr /= 0) goto 600 576 976 call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,& 577 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)977 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 578 978 if (mp_ierr /= 0) goto 600 579 979 580 980 call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,& 581 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)981 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 582 982 if (mp_ierr /= 0) goto 600 583 983 call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,& 584 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)984 & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr) 585 985 if (mp_ierr /= 0) goto 600 586 986 call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,& 587 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)987 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 588 988 if (mp_ierr /= 0) goto 600 589 989 590 990 do i=1,nspec 591 991 call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),& 592 & 
numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)992 & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr) 593 993 if (mp_ierr /= 0) goto 600 594 994 end do … … 1145 1545 1146 1546 ! cloud water/ice: 1147 if (readclouds_nest(i)) then1148 1149 1150 call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)1151 if (mp_ierr /= 0) goto 6001152 end if1547 if (readclouds_nest(i)) then 1548 ! call MPI_Bcast(icloud_stats(:,:,:,li:ui),d2s1*5,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) 1549 ! if (mp_ierr /= 0) goto 600 1550 call MPI_Bcast(ctwcn(:,:,li:ui,i),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr) 1551 if (mp_ierr /= 0) goto 600 1552 end if 1153 1553 1154 1554 ! 2D fields … … 1398 1798 integer :: d2s1 = nxmax*nymax 1399 1799 integer :: d2s2 = nxmax*nymax*maxspec 1400 1800 !integer :: d1_size1 = maxwf 1401 1801 1402 1802 ! integer :: d3s1,d3s2,d2s1,d2s2 … … 1645 2045 if (dest.eq.id_read) cycle 1646 2046 do k=1, numbnests 1647 i=dest*nvar_async1648 call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1649 if (mp_ierr /= 0) goto 6001650 i=i+11651 call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1652 if (mp_ierr /= 0) goto 6001653 i=i+11654 call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1655 if (mp_ierr /= 0) goto 6001656 i=i+11657 call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1658 if (mp_ierr /= 0) goto 6001659 i=i+11660 call MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1661 if (mp_ierr /= 0) goto 6001662 i=i+11663 call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1664 if (mp_ierr /= 0) goto 6001665 i=i+11666 call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1667 if (mp_ierr /= 0) goto 6001668 i=i+11669 call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1670 if (mp_ierr /= 0) goto 6001671 i=i+11672 call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1673 if (mp_ierr /= 0) goto 6001674 i=i+11675 call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1676 if (mp_ierr /= 0) goto 6001677 i=i+11678 call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1679 i=i+11680 if (mp_ierr /= 0) goto 6001681 call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1682 if (mp_ierr /= 0) goto 6001683 i=i+11684 call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1685 if (mp_ierr /= 0) goto 6001686 i=i+11687 call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1688 if (mp_ierr /= 0) goto 6001689 i=i+11690 call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1691 if (mp_ierr /= 0) goto 6001692 i=i+12047 i=dest*nvar_async 2048 call MPI_Isend(uun(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2049 if (mp_ierr /= 0) goto 600 2050 i=i+1 2051 call MPI_Isend(vvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2052 if (mp_ierr /= 0) goto 600 2053 i=i+1 2054 call MPI_Isend(wwn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2055 if (mp_ierr /= 0) goto 600 2056 i=i+1 2057 call MPI_Isend(ttn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2058 if (mp_ierr /= 0) goto 600 2059 i=i+1 2060 call 
MPI_Isend(rhon(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2061 if (mp_ierr /= 0) goto 600 2062 i=i+1 2063 call MPI_Isend(drhodzn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2064 if (mp_ierr /= 0) goto 600 2065 i=i+1 2066 call MPI_Isend(tthn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2067 if (mp_ierr /= 0) goto 600 2068 i=i+1 2069 call MPI_Isend(qvhn(:,:,:,mind,k),d3s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2070 if (mp_ierr /= 0) goto 600 2071 i=i+1 2072 call MPI_Isend(qvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2073 if (mp_ierr /= 0) goto 600 2074 i=i+1 2075 call MPI_Isend(pvn(:,:,:,mind,k),d3s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2076 if (mp_ierr /= 0) goto 600 2077 i=i+1 2078 call MPI_Isend(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2079 i=i+1 2080 if (mp_ierr /= 0) goto 600 2081 call MPI_Isend(cloudshn(:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2082 if (mp_ierr /= 0) goto 600 2083 i=i+1 2084 call MPI_Isend(vdepn(:,:,:,mind,k),d2s2,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2085 if (mp_ierr /= 0) goto 600 2086 i=i+1 2087 call MPI_Isend(psn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2088 if (mp_ierr /= 0) goto 600 2089 i=i+1 2090 call MPI_Isend(sdn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2091 if (mp_ierr /= 0) goto 600 2092 i=i+1 1693 2093 ! 15 1694 call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1695 if (mp_ierr /= 0) goto 6001696 i=i+11697 call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1698 if (mp_ierr /= 0) goto 6001699 i=i+11700 call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1701 if (mp_ierr /= 0) goto 6001702 i=i+11703 call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1704 if (mp_ierr /= 0) goto 6001705 i=i+11706 call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1707 if (mp_ierr /= 0) goto 6001708 i=i+11709 call MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1710 if (mp_ierr /= 0) goto 6001711 i=i+11712 call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1713 if (mp_ierr /= 0) goto 6001714 i=i+11715 call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1716 if (mp_ierr /= 0) goto 6001717 i=i+11718 call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1719 if (mp_ierr /= 0) goto 6001720 i=i+11721 call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)1722 if (mp_ierr /= 0) goto 6002094 call MPI_Isend(tccn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2095 if (mp_ierr /= 0) goto 600 2096 i=i+1 2097 call MPI_Isend(tt2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2098 if (mp_ierr /= 0) goto 600 2099 i=i+1 2100 call MPI_Isend(td2n(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2101 if (mp_ierr /= 0) goto 600 2102 i=i+1 2103 call MPI_Isend(lsprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2104 if (mp_ierr /= 0) goto 600 2105 i=i+1 2106 call MPI_Isend(convprecn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2107 if (mp_ierr /= 0) goto 600 2108 i=i+1 2109 call 
MPI_Isend(ustarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2110 if (mp_ierr /= 0) goto 600 2111 i=i+1 2112 call MPI_Isend(wstarn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2113 if (mp_ierr /= 0) goto 600 2114 i=i+1 2115 call MPI_Isend(hmixn(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2116 if (mp_ierr /= 0) goto 600 2117 i=i+1 2118 call MPI_Isend(tropopausen(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2119 if (mp_ierr /= 0) goto 600 2120 i=i+1 2121 call MPI_Isend(olin(:,:,:,mind,k),d2s1,mp_sp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr) 2122 if (mp_ierr /= 0) goto 600 1723 2123 ! 25 1724 2124 1725 2125 ! Send cloud water if it exists. Increment counter always (as on receiving end) 1726 if (readclouds) then 1727 i=i+1 1728 call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,& 1729 &MPI_COMM_WORLD,reqs(i),mp_ierr) 1730 if (mp_ierr /= 0) goto 600 1731 end if 2126 if (readclouds) then 2127 i=i+1 2128 call MPI_Isend(ctwcn(:,:,mind,k),d2s1,mp_sp,dest,tm1,& 2129 &MPI_COMM_WORLD,reqs(i),mp_ierr) 2130 if (mp_ierr /= 0) goto 600 2131 end if 2132 end do 1732 2133 end do 1733 end do1734 2134 1735 2135 if (mp_measure_time) call mpif_mtime('commtime',1) … … 1810 2210 do k=1, numbnests 1811 2211 ! Get MPI tags/requests for communications 1812 j=mp_pid*nvar_async1813 call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1814 &MPI_COMM_WORLD,reqs(j),mp_ierr)1815 if (mp_ierr /= 0) goto 6001816 j=j+11817 call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1818 &MPI_COMM_WORLD,reqs(j),mp_ierr)1819 if (mp_ierr /= 0) goto 6001820 j=j+11821 call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1822 &MPI_COMM_WORLD,reqs(j),mp_ierr)1823 if (mp_ierr /= 0) goto 6001824 j=j+11825 call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1826 &MPI_COMM_WORLD,reqs(j),mp_ierr)1827 if (mp_ierr /= 0) goto 6001828 j=j+11829 call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1830 &MPI_COMM_WORLD,reqs(j),mp_ierr)1831 if (mp_ierr /= 0) goto 6001832 j=j+11833 call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1834 &MPI_COMM_WORLD,reqs(j),mp_ierr)1835 if (mp_ierr /= 0) goto 6001836 j=j+11837 call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&1838 &MPI_COMM_WORLD,reqs(j),mp_ierr)1839 if (mp_ierr /= 0) goto 6001840 j=j+11841 call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,&1842 &MPI_COMM_WORLD,reqs(j),mp_ierr)1843 if (mp_ierr /= 0) goto 6001844 j=j+11845 call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1846 &MPI_COMM_WORLD,reqs(j),mp_ierr)1847 if (mp_ierr /= 0) goto 6001848 j=j+11849 call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,&1850 &MPI_COMM_WORLD,reqs(j),mp_ierr)1851 if (mp_ierr /= 0) goto 6001852 j=j+11853 call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&1854 &MPI_COMM_WORLD,reqs(j),mp_ierr)1855 if (mp_ierr /= 0) goto 6001856 j=j+11857 call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1858 &MPI_COMM_WORLD,reqs(j),mp_ierr)1859 if (mp_ierr /= 0) goto 6001860 j=j+11861 call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,&1862 &MPI_COMM_WORLD,reqs(j),mp_ierr)1863 if (mp_ierr /= 0) goto 6001864 j=j+11865 call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1866 &MPI_COMM_WORLD,reqs(j),mp_ierr)1867 if (mp_ierr /= 0) goto 6001868 j=j+11869 call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1870 
&MPI_COMM_WORLD,reqs(j),mp_ierr)1871 if (mp_ierr /= 0) goto 6001872 j=j+11873 call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1874 &MPI_COMM_WORLD,reqs(j),mp_ierr)1875 if (mp_ierr /= 0) goto 6001876 j=j+11877 call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1878 &MPI_COMM_WORLD,reqs(j),mp_ierr)1879 if (mp_ierr /= 0) goto 6001880 j=j+11881 call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1882 &MPI_COMM_WORLD,reqs(j),mp_ierr)1883 if (mp_ierr /= 0) goto 6001884 j=j+11885 call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1886 &MPI_COMM_WORLD,reqs(j),mp_ierr)1887 if (mp_ierr /= 0) goto 6001888 j=j+11889 call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1890 &MPI_COMM_WORLD,reqs(j),mp_ierr)1891 if (mp_ierr /= 0) goto 6001892 call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1893 &MPI_COMM_WORLD,reqs(j),mp_ierr)1894 if (mp_ierr /= 0) goto 6001895 j=j+11896 call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1897 &MPI_COMM_WORLD,reqs(j),mp_ierr)1898 if (mp_ierr /= 0) goto 6001899 j=j+11900 call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1901 &MPI_COMM_WORLD,reqs(j),mp_ierr)1902 if (mp_ierr /= 0) goto 6001903 j=j+11904 call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1905 &MPI_COMM_WORLD,reqs(j),mp_ierr)1906 if (mp_ierr /= 0) goto 6001907 j=j+11908 call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&1909 &MPI_COMM_WORLD,reqs(j),mp_ierr)1910 if (mp_ierr /= 0) goto 6002212 j=mp_pid*nvar_async 2213 call MPI_Irecv(uun(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2214 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2215 if (mp_ierr /= 0) goto 600 2216 j=j+1 2217 call MPI_Irecv(vvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2218 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2219 if (mp_ierr /= 0) goto 600 2220 j=j+1 2221 call MPI_Irecv(wwn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2222 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2223 if (mp_ierr /= 0) goto 600 2224 j=j+1 2225 call MPI_Irecv(ttn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2226 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2227 if (mp_ierr /= 0) goto 600 2228 j=j+1 2229 call MPI_Irecv(rhon(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2230 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2231 if (mp_ierr /= 0) goto 600 2232 j=j+1 2233 call MPI_Irecv(drhodzn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2234 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2235 if (mp_ierr /= 0) goto 600 2236 j=j+1 2237 call MPI_Irecv(tthn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,& 2238 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2239 if (mp_ierr /= 0) goto 600 2240 j=j+1 2241 call MPI_Irecv(qvhn(:,:,:,mind,k),d3s2,mp_sp,id_read,MPI_ANY_TAG,& 2242 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2243 if (mp_ierr /= 0) goto 600 2244 j=j+1 2245 call MPI_Irecv(qvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2246 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2247 if (mp_ierr /= 0) goto 600 2248 j=j+1 2249 call MPI_Irecv(pvn(:,:,:,mind,k),d3s1,mp_sp,id_read,MPI_ANY_TAG,& 2250 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2251 if (mp_ierr /= 0) goto 600 2252 j=j+1 2253 call MPI_Irecv(cloudsn(:,:,:,mind,k),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,& 2254 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2255 if (mp_ierr /= 0) goto 600 2256 j=j+1 2257 call MPI_Irecv(cloudshn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2258 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2259 if (mp_ierr /= 0) goto 600 2260 j=j+1 2261 call MPI_Irecv(vdepn(:,:,:,mind,k),d2s2,mp_sp,id_read,MPI_ANY_TAG,& 2262 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2263 if 
(mp_ierr /= 0) goto 600 2264 j=j+1 2265 call MPI_Irecv(psn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2266 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2267 if (mp_ierr /= 0) goto 600 2268 j=j+1 2269 call MPI_Irecv(sdn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2270 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2271 if (mp_ierr /= 0) goto 600 2272 j=j+1 2273 call MPI_Irecv(tccn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2274 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2275 if (mp_ierr /= 0) goto 600 2276 j=j+1 2277 call MPI_Irecv(tt2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2278 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2279 if (mp_ierr /= 0) goto 600 2280 j=j+1 2281 call MPI_Irecv(td2n(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2282 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2283 if (mp_ierr /= 0) goto 600 2284 j=j+1 2285 call MPI_Irecv(lsprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2286 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2287 if (mp_ierr /= 0) goto 600 2288 j=j+1 2289 call MPI_Irecv(convprecn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2290 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2291 if (mp_ierr /= 0) goto 600 2292 call MPI_Irecv(ustarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2293 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2294 if (mp_ierr /= 0) goto 600 2295 j=j+1 2296 call MPI_Irecv(wstarn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2297 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2298 if (mp_ierr /= 0) goto 600 2299 j=j+1 2300 call MPI_Irecv(hmixn(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2301 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2302 if (mp_ierr /= 0) goto 600 2303 j=j+1 2304 call MPI_Irecv(tropopausen(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2305 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2306 if (mp_ierr /= 0) goto 600 2307 j=j+1 2308 call MPI_Irecv(olin(:,:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,& 2309 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2310 if (mp_ierr /= 0) goto 600 1911 2311 1912 2312 ! Post request for clwc. These data are possibly not sent, request must then be cancelled 1913 2313 ! For now assume that data at all steps either have or do not have water 1914 if (readclouds) then1915 j=j+11916 call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&1917 &MPI_COMM_WORLD,reqs(j),mp_ierr)1918 if (mp_ierr /= 0) goto 6001919 end if1920 end do2314 if (readclouds) then 2315 j=j+1 2316 call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,& 2317 &MPI_COMM_WORLD,reqs(j),mp_ierr) 2318 if (mp_ierr /= 0) goto 600 2319 end if 2320 end do 1921 2321 1922 2322 if (mp_measure_time) call mpif_mtime('commtime',1) … … 1940 2340 ! 1941 2341 ! TODO 1942 ! take into account nested wind fields 2342 ! NB: take into account nested wind fields by using a separate 2343 ! subroutine mpif_gf_request_nest (and separate request arrays for the 2344 ! nested variables) 1943 2345 ! 1944 2346 !******************************************************************************* … … 2349 2751 write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR CONCCALC:',& 2350 2752 & mp_conccalc_time_total 2351 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',& 2352 ! & mp_vt_wtime_total 2353 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',& 2354 ! & mp_vt_time_total 2355 ! NB: the 'flush' function is possibly a gfortran-specific extension 2356 call flush() 2753 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR VERTTRANSFORM:',& 2754 ! & mp_vt_wtime_total 2755 ! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',& 2756 ! & mp_vt_time_total 2757 ! 
NB: the 'flush' function is possibly a gfortran-specific extension, 2758 ! comment out if it gives problems 2759 ! call flush() 2357 2760 end if 2358 2761 end do … … 2388 2791 2389 2792 2390 2391 2392 2393 2793 end subroutine mpif_finalize 2794 2795 2796 subroutine get_lun(my_lun) 2394 2797 !*********************************************************************** 2395 2798 ! get_lun: … … 2397 2800 !*********************************************************************** 2398 2801 2399 2400 2401 2402 2403 2404 2405 !*********************************************************************** 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2802 implicit none 2803 2804 integer, intent(inout) :: my_lun 2805 integer, save :: free_lun=100 2806 logical :: exists, iopen 2807 2808 !*********************************************************************** 2809 2810 loop1: do 2811 inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen) 2812 if (exists .and. .not.iopen) exit loop1 2813 free_lun = free_lun+1 2814 end do loop1 2815 my_lun = free_lun 2816 2817 end subroutine get_lun 2818 2819 2820 subroutine write_data_dbg(array_in, array_name, tstep, ident) 2418 2821 !*********************************************************************** 2419 2822 ! Write one-dimensional arrays to file (for debugging purposes) 2420 2823 !*********************************************************************** 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 !*********************************************************************** 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2824 implicit none 2825 2826 real, intent(in), dimension(:) :: array_in 2827 integer, intent(in) :: tstep 2828 integer :: lios 2829 character(LEN=*), intent(in) :: ident, array_name 2830 2831 character(LEN=8) :: c_ts 2832 character(LEN=40) :: fn_1, fn_2 2833 2834 !*********************************************************************** 2835 2836 write(c_ts, FMT='(I8.8,BZ)') tstep 2837 fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident) 2838 write(c_ts, FMT='(I2.2,BZ)') mp_np 2839 fn_2= trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat' 2840 2841 call get_lun(dat_lun) 2842 open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', & 2843 FORM='UNFORMATTED', STATUS='REPLACE') 2844 write(UNIT=dat_lun, IOSTAT=lios) array_in 2845 close(UNIT=dat_lun) 2846 2847 end subroutine write_data_dbg 2445 2848 2446 2849 … … 2499 2902 clwc(:,:,:,li:ui)=0.0 2500 2903 ciwc(:,:,:,li:ui)=0.0 2501 2904 2502 2905 ! 2D fields 2503 2906 … … 2516 2919 tropopause(:,:,:,li:ui)=10000. 2517 2920 oli(:,:,:,li:ui)=0.01 2518 2921 2519 2922 end subroutine set_fields_synthetic 2520 2923 -
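The particle redistribution added above (mpif_redist_part) copies received particles into "vacant" slots, i.e. array elements whose time stamp itra1(ipart) differs from the current time itime. Below is a minimal, self-contained sketch of that vacant-slot search, without the MPI transfer and with hypothetical values (maxpart=8, num_trans=3, itime=100 are illustration assumptions, not the model's settings):

  program vacant_slot_demo
    implicit none
    integer, parameter :: maxpart   = 8          ! hypothetical storage size
    integer, parameter :: num_trans = 3          ! hypothetical number received
    integer, parameter :: itime     = 100
    integer :: itra1(maxpart), itra1_tmp(num_trans)
    integer :: i, ipart, minpart, numpart

    itra1(:) = -999999999                        ! sentinel marks a vacant slot
    itra1(1) = itime                             ! one live particle already stored
    numpart  = 1
    itra1_tmp = [itime, itime, -999999999]       ! last received particle is invalid

    minpart = 1
    do i = 1, num_trans
      if (itra1_tmp(i) /= itime) cycle           ! skip invalid particles
      do ipart = minpart, maxpart
        if (itra1(ipart) /= itime) then          ! vacant slot found
          itra1(ipart) = itra1_tmp(i)            ! (copy the remaining fields here too)
          exit
        end if
      end do
      minpart = ipart+1                          ! resume the search past this slot
    end do
    numpart = max(numpart, ipart)                ! grow the count if slots were appended

    write(*,*) 'live particles:', count(itra1 == itime), ' numpart:', numpart
  end program vacant_slot_demo

As the :TODO: note in the diff points out, the search does not yet guard against running past the end of the particle arrays; the sketch shares that limitation.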
src/netcdf_output_mod.f90
rf28aa0a r4c64400 19 19 !********************************************************************** 20 20 21 21 22 !***************************************************************************** 22 23 ! * … … 24 25 ! residence time and wet and dry deposition output. * 25 26 ! * 26 ! - writeheader_netcdf generates file including all information previously 27 ! - writeheader_netcdf generates file including all information previously * 27 28 ! stored in separate header files * 28 ! - concoutput_netcdf write concentration output and wet and dry deposition 29 ! - concoutput_netcdf write concentration output and wet and dry deposition * 29 30 ! * 30 31 ! Author: D. Brunner * … … 893 894 gridsigmatotal=0. 894 895 gridtotalunc=0. 895 wetgridtotal=0. 896 wetgridsigmatotal=0. 897 wetgridtotalunc=0. 898 drygridtotal=0. 899 drygridsigmatotal=0. 900 drygridtotalunc=0. 896 wetgridtotal=0._dep_prec 897 wetgridsigmatotal=0._dep_prec 898 wetgridtotalunc=0._dep_prec 899 drygridtotal=0._dep_prec 900 drygridsigmatotal=0._dep_prec 901 drygridtotalunc=0._dep_prec 901 902 902 903 do ks=1,nspec … … 922 923 wetgridsigma(ix,jy),nclassunc) 923 924 ! Multiply by number of classes to get total concentration 924 wetgrid(ix,jy)=wetgrid(ix,jy)*real(nclassunc,kind= dep_prec)925 wetgrid(ix,jy)=wetgrid(ix,jy)*real(nclassunc,kind=sp) 925 926 wetgridtotal=wetgridtotal+wetgrid(ix,jy) 926 927 ! Calculate standard deviation of the mean … … 946 947 drygridsigma(ix,jy),nclassunc) 947 948 ! Multiply by number of classes to get total concentration 948 drygrid(ix,jy)=drygrid(ix,jy)*real(nclassunc )949 drygrid(ix,jy)=drygrid(ix,jy)*real(nclassunc,kind=sp) 949 950 drygridtotal=drygridtotal+drygrid(ix,jy) 950 951 ! Calculate standard deviation of the mean 951 952 drygridsigma(ix,jy)= & 952 953 drygridsigma(ix,jy)* & 953 sqrt(real(nclassunc ))954 sqrt(real(nclassunc, kind=dep_prec)) 954 955 drygridsigmatotal=drygridsigmatotal+ & 955 956 drygridsigma(ix,jy) … … 1054 1055 if (wetgridtotal.gt.0.) wetgridtotalunc=wetgridsigmatotal/ & 1055 1056 wetgridtotal 1056 if (drygridtotal.gt.0.) drygridtotalunc= drygridsigmatotal/ &1057 drygridtotal 1057 if (drygridtotal.gt.0.) drygridtotalunc=real(drygridsigmatotal/ & 1058 drygridtotal, kind=dep_prec) 1058 1059 1059 1060 ! Dump of receptor concentrations … … 1298 1299 wetgridsigma(ix,jy)= & 1299 1300 wetgridsigma(ix,jy)* & 1300 sqrt(real(nclassunc ))1301 sqrt(real(nclassunc,kind=dep_prec)) 1301 1302 endif 1302 1303 … … 1319 1320 drygridsigma(ix,jy)= & 1320 1321 drygridsigma(ix,jy)* & 1321 sqrt(real(nclassunc ))1322 sqrt(real(nclassunc,kind=dep_prec)) 1322 1323 endif 1323 1324 -
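Most of the edits in this file make the mixed-precision arithmetic explicit: accumulators take the deposition precision dep_prec, literals are kind-suffixed (0._dep_prec), and conversions spell out their target kind with real(x, kind=...). A minimal sketch of the pattern follows; the kind setting and the nclassunc value are illustration assumptions, not the model's configuration:

  program dep_prec_demo
    implicit none
    integer, parameter :: sp = kind(1.0)
    integer, parameter :: dep_prec = sp        ! deposition precision, as set in par_mod.f90
    integer, parameter :: nclassunc = 10       ! hypothetical number of classes
    real(sp)       :: wetgrid
    real(dep_prec) :: wetgridtotal, wetgridsigmatotal, wetgridtotalunc

    wetgridtotal      = 0._dep_prec            ! kind-matched literals...
    wetgridsigmatotal = 0._dep_prec
    wetgridtotalunc   = 0._dep_prec

    wetgrid = 1.5_sp*real(nclassunc,kind=sp)   ! ...and explicit-kind conversions
    wetgridtotal      = wetgridtotal+wetgrid
    wetgridsigmatotal = wetgridsigmatotal &
         & +0.2_dep_prec*sqrt(real(nclassunc,kind=dep_prec))
    if (wetgridtotal.gt.0.) wetgridtotalunc = wetgridsigmatotal/wetgridtotal
    write(*,*) 'relative uncertainty:', wetgridtotalunc
  end program dep_prec_demo

Writing the kinds out removes silent implicit conversions, so changing dep_prec in one place changes the whole deposition pipeline consistently.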
src/outg_mod.f90
r6a678e3 r4c64400 22 22 module outg_mod 23 23 24 use par_mod, only: dep_prec 24 use par_mod, only: dep_prec, sp 25 25 26 26 implicit none … … 39 39 real,allocatable, dimension (:,:,:) :: factor3d 40 40 real,allocatable, dimension (:,:,:) :: grid 41 real( dep_prec),allocatable, dimension (:,:) :: wetgrid42 real( dep_prec),allocatable, dimension (:,:) :: drygrid41 real(sp),allocatable, dimension (:,:) :: wetgrid 42 real(sp),allocatable, dimension (:,:) :: drygrid 43 43 real,allocatable, dimension (:,:,:) :: gridsigma 44 44 real(dep_prec),allocatable, dimension (:,:) :: drygridsigma -
src/par_mod.f90
rd1a8707 rd1a8707 58 58 59 59 integer,parameter :: dep_prec=sp 60 61 !**************************************************************** 62 ! Set to T to disable use of kernel for concentrations/deposition 63 !**************************************************************** 64 65 logical, parameter :: lnokernel=.false. 60 66 61 67 !*********************************************************** … … 186 192 !************************************************** 187 193 188 integer,parameter :: maxpart=20000000 189 integer,parameter :: maxspec=2 194 integer,parameter :: maxpart=10000000 195 integer,parameter :: maxspec=4 196 190 197 real,parameter :: minmass=0.0001 191 198 -
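The new lnokernel switch is a compile-time parameter: the compiler can discard the unused branch entirely, but flipping it requires recompilation. A trivial sketch of the pattern (the print statements stand in for the two attribution paths):

  program kernel_switch_demo
    implicit none
    logical, parameter :: lnokernel=.false.   ! set to .true. to disable the kernel, as in par_mod.f90

    if (lnokernel) then
      write(*,*) 'direct attribution to the nearest grid cell'
    else
      write(*,*) 'kernel-weighted attribution to surrounding cells'
    end if
  end program kernel_switch_demo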
src/readavailable.f90
rdb712a8 r4c64400 236 236 do i=2,numbwf 237 237 idiff=abs(wftime(i)-wftime(i-1)) 238 if (idiff.gt.idiffmax ) then238 if (idiff.gt.idiffmax.and.lroot) then 239 239 write(*,*) 'FLEXPART WARNING: TIME DIFFERENCE BETWEEN TWO' 240 240 write(*,*) 'WIND FIELDS IS TOO BIG FOR TRANSPORT CALCULATION.& 241 241 &' 242 242 write(*,*) 'THEREFORE, TRAJECTORIES HAVE TO BE SKIPPED.' 243 else if (idiff.gt.idiffnorm ) then243 else if (idiff.gt.idiffnorm.and.lroot) then 244 244 write(*,*) 'FLEXPART WARNING: TIME DIFFERENCE BETWEEN TWO' 245 245 write(*,*) 'WIND FIELDS IS BIG. THIS MAY CAUSE A DEGRADATION' -
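The added .and.lroot guards make these warnings appear once instead of once per MPI rank. A minimal sketch of the guard pattern (variable names follow the style above; the warning text is shortened):

  program root_only_warning
    use mpi
    implicit none
    integer :: mp_ierr, mp_pid
    logical :: lroot

    call MPI_INIT(mp_ierr)
    call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)
    lroot = (mp_pid.eq.0)                     ! only rank 0 reports

    if (lroot) write(*,*) 'FLEXPART WARNING: TIME DIFFERENCE BETWEEN TWO WIND FIELDS IS BIG.'

    call MPI_FINALIZE(mp_ierr)
  end program root_only_warning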
src/readcommand.f90
r8ee24a5 r8ee24a5 170 170 if (index(line,'LDIRECT') .eq. 0) then 171 171 old = .false. 172 write(*,*) 'COMMAND in old short format, please update to namelist format' 172 if (lroot) write(*,*) 'COMMAND in old short format, & 173 &please update to namelist format' 173 174 else 174 175 old = .true. 175 write(*,*) 'COMMAND in old long format, please update to namelist format' 176 if (lroot) write(*,*) 'COMMAND in old long format, & 177 &please update to namelist format' 176 178 endif 177 179 rewind(unitcommand) -
src/readreleases.f90
r8ee24a5 rc8fc724 218 218 rewind(unitreleases) 219 219 220 if (nspec.gt.maxspec) goto 994 221 220 222 ! allocate arrays of matching size for number of species (namelist output) 221 223 deallocate(mass) … … 274 276 do i=1,maxspec 275 277 DRYDEPSPEC(i)=.false. 278 WETDEPSPEC(i)=.false. 276 279 end do 277 280 … … 367 370 &(dquer(i).gt.0. .and. (crain_aero(i) .gt. 0. .or. csnow_aero(i).gt.0.))) then 368 371 WETDEP=.true. 372 WETDEPSPEC(i)=.true. 369 373 if (lroot) then 370 374 write (*,*) ' Below-cloud scavenging: ON' … … 378 382 if (dquer(i).gt.0..and.(ccn_aero(i).gt.0. .or. in_aero(i).gt.0.)) then 379 383 WETDEP=.true. 384 WETDEPSPEC(i)=.true. 380 385 if (lroot) then 381 386 write (*,*) ' In-cloud scavenging: ON' … … 397 402 endif 398 403 399 end do 404 end do ! end loop over species 400 405 401 406 if (WETDEP.or.DRYDEP) DEP=.true. … … 588 593 endif 589 594 595 590 596 ! Determine the release rate (particles per second) and total number 591 597 ! of particles released during the simulation … … 647 653 endif 648 654 655 if (lroot) then 656 write(*,FMT='(A,ES14.7)') ' Total mass released:', sum(xmass(1:numpoint,1:nspec)) 657 end if 658 649 659 return 650 660 -
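The new diagnostic at the end sums the release masses over all release points and species. A standalone sketch of that sum, with hypothetical sizes and masses (numpoint=2, nspec=2 and the values are illustration assumptions):

  program total_mass_demo
    implicit none
    integer, parameter :: numpoint=2, nspec=2                      ! hypothetical sizes
    real :: xmass(numpoint,nspec)

    xmass = reshape([1.0e3, 2.0e3, 0.5e3, 0.5e3], shape(xmass))    ! hypothetical masses
    write(*,FMT='(A,ES14.7)') ' Total mass released:', sum(xmass(1:numpoint,1:nspec))
  end program total_mass_demo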
src/releaseparticles_mpi.f90
r7e52e2e r16b61a5 21 21 22 22 subroutine releaseparticles(itime) 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 ! addpartextra particle assigned to MPI process if *49 50 23 ! o 24 !***************************************************************************** 25 ! * 26 ! This subroutine releases particles from the release locations. * 27 ! * 28 ! It searches for a "vacant" storage space and assigns all particle * 29 ! information to that space. A space is vacant either when no particle * 30 ! is yet assigned to it, or when it's particle is expired and, thus, * 31 ! the storage space is made available to a new particle. * 32 ! * 33 ! Author: A. Stohl * 34 ! * 35 ! 29 June 2002 * 36 ! * 37 ! CHANGES * 38 ! 12/2014 eso: MPI version * 39 ! * 40 !***************************************************************************** 41 ! * 42 ! Variables: * 43 ! itime [s] current time * 44 ! ireleasestart, ireleaseend start and end times of all releases * 45 ! npart(maxpoint) number of particles to be released in total * 46 ! numrel number of particles to be released during this time * 47 ! step * 48 ! addone extra particle assigned to MPI process if * 49 ! mod(npart(i),mp_partgroup_np) .ne. 0) * 50 !***************************************************************************** 51 51 52 52 use point_mod … … 59 59 implicit none 60 60 61 61 !real xaux,yaux,zaux,ran1,rfraction,xmasssave(maxpoint) 62 62 real :: xaux,yaux,zaux,rfraction 63 63 real :: topo,rhoaux(2),r,t,rhoout,ddx,ddy,rddx,rddy,p1,p2,p3,p4 … … 73 73 74 74 integer :: idummy = -7 75 76 75 !save idummy,xmasssave 76 !data idummy/-7/,xmasssave/maxpoint*0./ 77 77 78 78 logical :: first_call=.true. 79 79 80 81 80 ! Use different seed for each process. 81 !**************************************************************************** 82 82 if (first_call) then 83 83 idummy=idummy+mp_seed … … 87 87 mind2=memind(2) 88 88 89 90 89 ! Determine the actual date and time in Greenwich (i.e., UTC + correction for daylight savings time) 90 !***************************************************************************** 91 91 92 92 julmonday=juldate(19000101,0) ! this is a Monday … … 97 97 98 98 99 100 99 ! For every release point, check whether we are in the release time interval 100 !*************************************************************************** 101 101 102 102 minpart=1 … … 105 105 (itime.le.ireleaseend(i))) then 106 106 107 108 107 ! Determine the local day and time 108 !********************************* 109 109 110 110 xlonav=xlon0+(xpoint2(i)+xpoint1(i))/2.*dx ! longitude needed to determine local time … … 124 124 endif 125 125 126 127 128 129 126 ! Calculate a species- and time-dependent correction factor, distinguishing between 127 ! area (those with release starting at surface) and point (release starting above surface) sources 128 ! Also, calculate an average time correction factor (species independent) 129 !***************************************************************************** 130 130 average_timecorrect=0. 131 131 do k=1,nspec … … 139 139 average_timecorrect=average_timecorrect/real(nspec) 140 140 141 142 143 141 ! Determine number of particles to be released this time; at start and at end of release, 142 ! only half the particles are released 143 !***************************************************************************** 144 144 145 145 if (ireleasestart(i).ne.ireleaseend(i)) then … … 149 149 (itime.eq.ireleaseend(i))) rfraction=rfraction/2. 150 150 151 152 153 154 151 ! 
Take the species-average time correction factor in order to scale the 152 ! number of particles released this time 153 ! Also scale by number of MPI processes 154 !********************************************************************** 155 155 156 156 rfraction=rfraction*average_timecorrect … … 158 158 rfraction=rfraction+xmasssave(i) ! number to be released at this time 159 159 160 160 ! number to be released for one process 161 161 if (mp_partid.lt.mod(int(rfraction),mp_partgroup_np)) then 162 162 addone=1 … … 180 180 numrel=npart(i)/mp_partgroup_np+addone 181 181 endif 182 182 183 183 xaux=xpoint2(i)-xpoint1(i) 184 184 yaux=ypoint2(i)-ypoint1(i) … … 187 187 do ipart=minpart,maxpart_mpi ! search for free storage space 188 188 189 190 189 ! If a free storage space is found, attribute everything to this array element 190 !***************************************************************************** 191 191 192 192 if (itra1(ipart).ne.itime) then 193 193 194 195 196 197 198 194 ! Particle coordinates are determined by using a random position within the release volume 195 !***************************************************************************** 196 197 ! Determine horizontal particle position 198 !*************************************** 199 199 200 200 xtra1(ipart)=xpoint1(i)+ran1(idummy)*xaux … … 207 207 ytra1(ipart)=ypoint1(i)+ran1(idummy)*yaux 208 208 209 210 211 212 213 214 209 ! Assign mass to particle: Total mass divided by total number of particles. 210 ! Time variation has partly been taken into account already by a species-average 211 ! correction factor, by which the number of particles released this time has been 212 ! scaled. Adjust the mass per particle by the species-dependent time correction factor 213 ! divided by the species-average one 214 !***************************************************************************** 215 215 do k=1,nspec 216 217 218 219 220 216 xmass1(ipart,k)=xmass(i,k)/real(npart(i)) & 217 *timecorrect(k)/average_timecorrect 218 ! write (*,*) 'xmass1: ',xmass1(ipart,k),ipart,k 219 ! Assign certain properties to particle 220 !************************************** 221 221 end do 222 222 nclass(ipart)=min(int(ran1(idummy)*real(nclassunc))+1, & … … 234 234 235 235 236 237 236 ! Determine vertical particle position 237 !************************************* 238 238 239 239 ztra1(ipart)=zpoint1(i)+ran1(idummy)*zaux 240 240 241 242 243 244 245 241 ! Interpolation of topography and density 242 !**************************************** 243 244 ! Determine the nest we are in 245 !***************************** 246 246 247 247 ngrid=0 … … 257 257 43 continue 258 258 259 260 259 ! Determine (nested) grid coordinates and auxiliary parameters used for interpolation 260 !***************************************************************************** 261 261 262 262 if (ngrid.gt.0) then … … 294 294 endif 295 295 296 297 296 ! If starting height is in pressure coordinates, retrieve pressure profile and convert zpart1 to meters 297 !***************************************************************************** 298 298 if (kindz(i).eq.3) then 299 299 presspart=ztra1(ipart) … … 337 337 endif 338 338 339 340 341 339 ! If release positions are given in meters above sea level, subtract the 340 ! topography from the starting height 341 !*********************************************************************** 342 342 343 343 if (kindz(i).eq.2) ztra1(ipart)=ztra1(ipart)-topo … … 348 348 349 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 350 ! 
For special simulations, multiply particle concentration air density; 351 ! Simply take the 2nd field in memory to do this (accurate enough) 352 !*********************************************************************** 353 !AF IND_SOURCE switches between different units for concentrations at the source 354 !Af NOTE that in backward simulations the release of particles takes place at the 355 !Af receptor and the sampling at the source. 356 !Af 1="mass" 357 !Af 2="mass mixing ratio" 358 !Af IND_RECEPTOR switches between different units for concentrations at the receptor 359 !Af 1="mass" 360 !Af 2="mass mixing ratio" 361 362 !Af switches for the releasefile: 363 !Af IND_REL = 1 : xmass * rho 364 !Af IND_REL = 0 : xmass * 1 365 366 !Af ind_rel is defined in readcommand.f 367 367 368 368 if (ind_rel .eq. 1) then 369 369 370 371 370 ! Interpolate the air density 371 !**************************** 372 372 373 373 do ii=2,nz … … 403 403 404 404 405 406 405 ! Multiply "mass" (i.e., mass mixing ratio in forward runs) with density 406 !******************************************************************** 407 407 408 408 do k=1,nspec … … 416 416 endif 417 417 end do 418 if (ipart.gt.maxpart ) goto 996418 if (ipart.gt.maxpart_mpi) goto 996 419 419 420 420 34 minpart=ipart+1 421 421 end do 422 422 endif 423 423 end do 424 424 … … 426 426 return 427 427 428 996 428 996 continue 429 429 write(*,*) '#####################################################' 430 430 write(*,*) '#### FLEXPART MODEL SUBROUTINE RELEASEPARTICLES: ####' -
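The "addone" logic named in the header comment spreads the remainder of npart(i)/mp_partgroup_np over the lowest-numbered ranks, so the per-rank release counts sum exactly to npart(i). A serial sketch of that split (4 ranks and 10 particles are illustration values):

  program addone_demo
    implicit none
    integer, parameter :: mp_partgroup_np = 4    ! hypothetical number of ranks
    integer, parameter :: npart = 10             ! hypothetical particles to release
    integer :: mp_partid, addone, numrel, total

    total = 0
    do mp_partid = 0, mp_partgroup_np-1
      if (mp_partid.lt.mod(npart,mp_partgroup_np)) then
        addone = 1                               ! this rank takes one extra particle
      else
        addone = 0
      end if
      numrel = npart/mp_partgroup_np+addone
      write(*,*) 'rank', mp_partid, 'releases', numrel
      total = total+numrel
    end do
    write(*,*) 'sum over ranks:', total          ! equals npart
  end program addone_demo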
src/timemanager_mpi.f90
r3b80e98 r16b61a5 104 104 105 105 logical :: reqv_state=.false. ! .true. if waiting for a MPI_Irecv to complete 106 integer :: j,ks,kp,l,n,itime=0,nstop,nstop1,memstat=0 !,mind106 integer :: j,ks,kp,l,n,itime=0,nstop,nstop1,memstat=0 107 107 ! integer :: ksp 108 108 integer :: ip … … 155 155 156 156 do itime=0,ideltas,lsynctime 157 157 158 158 159 ! Computation of wet deposition, OH reaction and mass transfer … … 166 167 !******************************************************************** 167 168 168 if (mp_d ev_mode) write(*,*) 'myid, itime: ',mp_pid,itime169 if (mp_dbg_mode) write(*,*) 'myid, itime: ',mp_pid,itime 169 170 170 171 if (WETDEP .and. itime .ne. 0 .and. numpart .gt. 0) then … … 330 331 call releaseparticles(itime) 331 332 endif 333 334 335 ! Check if particles should be redistributed among processes 336 !*********************************************************** 337 call mpif_calculate_part_redist(itime) 332 338 333 339 … … 548 554 ! Decide whether to write an estimate of the number of particles released, 549 555 ! or exact number (require MPI reduce operation) 550 if (mp_d ev_mode) then556 if (mp_dbg_mode) then 551 557 numpart_tot_mpi = numpart 552 558 else … … 555 561 556 562 if (mp_exact_numpart.and..not.(lmpreader.and.lmp_use_reader).and.& 557 &.not.mp_d ev_mode) then563 &.not.mp_dbg_mode) then 558 564 call MPI_Reduce(numpart, numpart_tot_mpi, 1, MPI_INTEGER, MPI_SUM, id_root, & 559 565 & mp_comm_used, mp_ierr) … … 561 567 562 568 !CGZ-lifetime: output species lifetime 563 if (lroot.or.mp_d ev_mode) then569 if (lroot.or.mp_dbg_mode) then 564 570 ! write(*,*) 'Overview species lifetime in days', & 565 571 ! real((species_lifetime(:,1)/species_lifetime(:,2))/real(3600.0*24.0)) … … 571 577 ! end if 572 578 end if 579 580 ! Write number of particles for all processes 581 if (mp_dev_mode) write(*,*) "PID, itime, numpart", mp_pid,itime,numpart 582 573 583 574 584 45 format(i13,' SECONDS SIMULATED: ',i13, ' PARTICLES: Uncertainty: ',3f7.3) … … 860 870 endif 861 871 deallocate(gridunc) 862 deallocate(xpoint1,xpoint2,ypoint1,ypoint2,zpoint1,zpoint2,xmass, checklifetime) 872 deallocate(xpoint1,xpoint2,ypoint1,ypoint2,zpoint1,zpoint2,xmass) 873 if (allocated(checklifetime)) deallocate(checklifetime) 863 874 deallocate(ireleasestart,ireleaseend,npart,kindz) 864 875 deallocate(xmasssave) -
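The exact particle count reported here (numpart_tot_mpi) is the sum of the per-process numpart, obtained with an MPI reduction to the root rank. A minimal sketch (the per-rank counts are made up for illustration):

  program numpart_reduce_demo
    use mpi
    implicit none
    integer :: mp_ierr, mp_pid, numpart, numpart_tot_mpi

    call MPI_INIT(mp_ierr)
    call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)

    numpart = 100+mp_pid                         ! hypothetical per-rank particle count
    call MPI_REDUCE(numpart, numpart_tot_mpi, 1, MPI_INTEGER, MPI_SUM, 0, &
         & MPI_COMM_WORLD, mp_ierr)
    if (mp_pid.eq.0) write(*,*) 'total particles over all ranks:', numpart_tot_mpi

    call MPI_FINALIZE(mp_ierr)
  end program numpart_reduce_demo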
src/unc_mod.f90
r6a678e3 r4c64400 26 26 implicit none 27 27 28 real,allocatable ,dimension (:,:,:,:,:,:,:) :: gridunc28 real,allocatable, dimension (:,:,:,:,:,:,:) :: gridunc 29 29 real,allocatable, dimension (:,:,:,:,:,:,:) :: griduncn 30 30 real(dep_prec),allocatable, dimension (:,:,:,:,:,:) :: drygridunc -
src/wetdepo.f90
r0539b8f rc8fc724 66 66 integer :: itage,nage,il,interp_time, n 67 67 integer :: ks, kp 68 integer :: blc_count, inc_count68 integer(selected_int_kind(16)), dimension(nspec) :: blc_count, inc_count 69 69 real :: grfraction(3),wetscav 70 70 real :: wetdeposit(maxspec),restmass … 83 83 !************************ 84 84 85 blc_count =086 inc_count =085 blc_count(:)=0 86 inc_count(:)=0 87 87 88 88 do jpart=1,numpart … 104 104 105 105 do ks=1,nspec ! loop over species 106 107 if (WETDEPSPEC(ks).eqv..false.) cycle 106 108 107 109 !************************************************** … 164 166 165 167 ! count the total number of below-cloud and in-cloud occurrences: 166 168 tot_blc_count =tot_blc_count+blc_count167 tot_inc_count =tot_inc_count+inc_count168 tot_blc_count(1:nspec)=tot_blc_count(1:nspec)+blc_count(1:nspec) 169 tot_inc_count(1:nspec)=tot_inc_count(1:nspec)+inc_count(1:nspec) 170 171 end subroutine wetdepo -
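The scavenging counters become per-species arrays of an integer kind with at least 16 decimal digits, since totals can overflow default 32-bit integers in long runs. A minimal sketch of the kind request (nspec=2 and the count value are illustration assumptions):

  program counter_kind_demo
    implicit none
    integer, parameter :: ik = selected_int_kind(16)   ! same kind request as in wetdepo.f90
    integer, parameter :: nspec = 2                    ! hypothetical species count
    integer(ik) :: tot_blc_count(nspec)

    tot_blc_count(:) = 0_ik
    tot_blc_count(1) = 3000000000_ik                   ! exceeds the default-integer range (~2.1e9)
    write(*,*) 'species 1 below-cloud scavenging count:', tot_blc_count(1)
  end program counter_kind_demo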
src/wetdepokernel.f90
r1c0d5e6 r1c0d5e6 40 40 ! * 41 41 !***************************************************************************** 42 ! Changes: 43 ! eso 10/2016: Added option to disregard kernel 44 ! 45 !***************************************************************************** 42 46 43 47 use unc_mod … … 73 77 endif 74 78 79 ! If no kernel is used, direct attribution to grid cell 80 !****************************************************** 75 81 82 if (lnokernel) then 83 do ks=1,nspec 84 if ((ix.ge.0).and.(jy.ge.0).and.(ix.le.numxgrid-1).and. & 85 (jy.le.numygrid-1)) then 86 wetgridunc(ix,jy,ks,kp,nunc,nage)= & 87 wetgridunc(ix,jy,ks,kp,nunc,nage)+deposit(ks) 88 end if 89 end do 90 else ! use kernel 91 76 92 ! Determine mass fractions for four grid points 77 93 !********************************************** … … 112 128 endif 113 129 end do 130 end if 114 131 115 132 end subroutine wetdepokernel
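For contrast with the lnokernel branch above, here is a simplified sketch of four-point kernel weighting: the deposit is split over the surrounding cells with weights built from the particle's fractional position, and the weights sum to one so mass is conserved. This uses plain bilinear weights for illustration only; the routine's actual stencil and index handling differ in detail:

  program kernel_weight_demo
    implicit none
    real :: xl, yl, ddx, ddy, w(4), deposit
    integer :: ix, jy

    xl = 10.3; yl = 5.8          ! hypothetical particle position in grid units
    deposit = 1.0                ! hypothetical deposited mass
    ix = int(xl); jy = int(yl)
    ddx = xl-real(ix)            ! fractional offsets within cell (ix,jy)
    ddy = yl-real(jy)

    w(1) = (1.-ddx)*(1.-ddy)     ! share for cell (ix  ,jy  )
    w(2) = ddx*(1.-ddy)          ! share for cell (ix+1,jy  )
    w(3) = (1.-ddx)*ddy          ! share for cell (ix  ,jy+1)
    w(4) = ddx*ddy               ! share for cell (ix+1,jy+1)

    write(*,*) 'cell shares:', deposit*w
    write(*,*) 'sum of weights:', sum(w)    ! 1.0, so no mass is lost
  end program kernel_weight_demo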