Changeset 5f9d14a in flexpart.git for src/mpi_mod.f90
- Timestamp: Apr 8, 2015, 2:23:27 PM
- Branches: master, 10.4.1_pesei, GFS_025, bugfixes+enhancements, dev, release-10, release-10.4.1, scaling-bug, univie
- Children: 1585284
- Parents: cd85138
- File: src/mpi_mod.f90 (1 edited)
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
src/mpi_mod.f90
- Property mode changed from 100755 to 100644
--- src/mpi_mod.f90 (r8a65cb0)
+++ src/mpi_mod.f90 (r5f9d14a)

@@ -44 +44 @@
 ! mp_pid                  Process ID of each MPI process                    *
 ! mp_seed                 Parameter for random number seed                  *
+! read_grp_min            Minimum number of processes at which one will be  *
+!                         used as reader                                    *
 ! numpart_mpi,            Number of particles per node                      *
 ! maxpart_mpi                                                               *
@@ -50 +52 @@
 !                         loops over particles. Will be all processes       *
 !                         unless a dedicated process runs getfields/readwind*
-!                                                                           *
+! lmp_sync                If .false., use asynchronous MPI                  *
 !                                                                           *
 !                                                                           *
@@ -69 +71 @@
 ! Set aside a process for reading windfields if using at least these many processes
 !==================================================
-  integer, parameter, private :: read_grp_min=99
+  integer, parameter, private :: read_grp_min=4
 !==================================================

@@ -86 +88 @@

 ! MPI tags/requests for send/receive operation
-  integer :: tm1 !=100
-  integer, parameter :: nvar_async=27
+  integer :: tm1
+  integer, parameter :: nvar_async=27 !29 :DBG:
 !integer, dimension(:), allocatable :: tags
   integer, dimension(:), allocatable :: reqs
@@ -106 +108 @@
 !===============================================================================

-! mp_dbg_mode   MPI related output for debugging etc.
+! mp_dbg_mode   Used for debugging MPI.
 ! mp_dev_mode   various checks related to debugging the parallel code
 ! mp_dbg_out    write some arrays to data file for debugging
@@ -197 +199 @@
          & 'numwfmem should be set to 2 for syncronous reading'
     write(*,FMT='(80("#"))')
-  end if
+! Force "syncronized" version if all processes will call getfields
+  else if (.not.lmp_sync.and.mp_np.lt.read_grp_min) then
+    if (lroot) then
+      write(*,FMT='(80("#"))')
+      write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
+           & 'all procs call getfields. Setting lmp_sync=.true.'
+      write(*,FMT='(80("#"))')
+    end if
+    lmp_sync=.true. ! :DBG: eso fix this...
+  end if
+! TODO: Add warnings for unimplemented flexpart features

 ! Set ID of process that calls getfield/readwind.
@@ -211 +223 @@
   call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)

-! Create a MPI group/communiactor that will calculate trajectories.
+! Create a MPI group/communicator that will calculate trajectories.
 ! Skip this step if program is run with only a few processes
 !************************************************************************
@@ -228 +240 @@
     lmp_use_reader = .true.

-! Map the subgroup IDs= 0,1,2,...,mp_np-2, skipping 'readwind' process
+! Map the subgroup IDs= 0,1,2,...,mp_np-2, skipping reader process
     j=-1
     do i=0, mp_np-2 !loop over all (subgroup) IDs
@@ -289 +301 @@
   end if
   if (mp_dev_mode) write(*,*) 'PID, mp_seed: ',mp_pid, mp_seed
+  if (mp_dbg_mode) then
+! :DBG: For debugging, set all seed to 0 and maxrand to e.g. 20
+    mp_seed=0
+    if (lroot) write(*,*) 'MPI: setting seed=0'
+  end if

 ! Allocate request array for asynchronous MPI
   if (.not.lmp_sync) then
     allocate(reqs(0:nvar_async*mp_np-1))
-    reqs=MPI_REQUEST_NULL
+    reqs(:)=MPI_REQUEST_NULL
   else
     allocate(reqs(0:1))
-    reqs=MPI_REQUEST_NULL
+    reqs(:)=MPI_REQUEST_NULL
   end if

@@ -389 +406 @@
   integer :: add_part=0

-  call MPI_BCAST(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
+  call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)

 ! MPI subgroup does the particle-loop
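Review note on the hunks above: read_grp_min drops from 99 to 4, so any run with at least four MPI processes now sets the last rank aside to run getfields/readwind while the remaining ranks form a subgroup for the particle loop; below the threshold, the new else-if branch forces lmp_sync=.true. so that every process reads windfields itself. A standalone sketch of the group-splitting idiom involved (illustrative names only, not FLEXPART code):

  program reader_split
    use mpi
    implicit none
    integer, parameter :: read_grp_min = 4  ! threshold, as in the changeset
    integer :: ierr, np, pid, i
    integer :: world_group, part_group, part_comm
    integer, allocatable :: members(:)

    call MPI_Init(ierr)
    call MPI_Comm_size(MPI_COMM_WORLD, np, ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD, pid, ierr)

    if (np >= read_grp_min) then
      ! Ranks 0..np-2 do the particle loop; rank np-1 is the reader
      allocate(members(np-1))
      do i = 1, np-1
        members(i) = i-1
      end do
      call MPI_Comm_group(MPI_COMM_WORLD, world_group, ierr)
      call MPI_Group_incl(world_group, np-1, members, part_group, ierr)
      call MPI_Comm_create(MPI_COMM_WORLD, part_group, part_comm, ierr)
      ! part_comm is MPI_COMM_NULL on the reader rank and a valid
      ! sub-communicator on all others
    end if

    call MPI_Finalize(ierr)
  end program reader_split

MPI_Comm_create returns MPI_COMM_NULL on ranks excluded from the group, which is what lets the reader rank skip the particle loop.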
@@ -815 +832 @@
 !*******************************************************************************
 ! DESCRIPTION
-!   Distribute 'getfield' variables from reader process to all processes.
+!   Distribute 'getfield' variables from reader process
 !
 !   Called from timemanager
@@ -887 +904 @@
 !**********************************************************************

+! The non-reader processes need to know if clouds were read.
+! TODO: only at first step or always?
+  call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
+  if (mp_ierr /= 0) goto 600
+
 ! Static fields/variables sent only at startup
   if (first_call) then
@@ -904 +926 @@
     if (mp_ierr /= 0) goto 600
     call MPI_Bcast(height,nzmax,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
-    if (mp_ierr /= 0) goto 600
+    if (mp_ierr /= 0) goto 600

     first_call=.false.
@@ -934 +956 @@
   if (mp_ierr /= 0) goto 600
   call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
-  if (mp_ierr /= 0) goto 600
+  if (mp_ierr /= 0) goto 600
+
+! cloud water/ice:
+  if (readclouds) then
+    call MPI_Bcast(clwc(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    call MPI_Bcast(ciwc(:,:,:,li:ui),d3s1,mp_pp,id_read,MPI_COMM_WORLD,mp_ierr)
+    if (mp_ierr /= 0) goto 600
+  end if

 ! 2D fields
@@ -969 +999 @@
   if (mp_ierr /= 0) goto 600

-
   if (mp_measure_time) call mpif_mtime('commtime',1)

@@ -995 +1024 @@
 ! step
 !
+! TODO
+!   Transfer cloud water/ice if and when available for nested
 !
 !***********************************************************************
@@ -1140 +1171 @@
 ! DESCRIPTION
 !   Distribute 'getfield' variables from reader process to all processes.
-!
-!   Called from timemanager by root process only
+!   Called from timemanager by reader process only
 !
 ! NOTE
@@ -1154 +1184 @@
 !   memstat -- input, for resolving pointer to windfield index being read
 !   mind -- index where to place new fields
-!   !!! Under development, don't use yet !!!
 !
+! TODO
+!   Transfer cloud water/ice
 !
 !*******************************************************************************
@@ -1173 +1204 @@
   integer :: d1s1 = maxwf

-!integer :: d3s1,d3s2,d2s1,d2s2
-
 !*******************************************************************************
-
-! :TODO: don't need these
-! d3s1=d3_size1
-! d3s2=d3_size2
-! d2s1=d2_size1
-! d2s2=d2_size2

 ! At the time the send is posted, the reader process is one step further
@@ -1220 +1243 @@

   do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
-! :TODO: use mp_partgroup_np here
+! TODO: use mp_partgroup_np here
     if (dest.eq.id_read) cycle
     i=dest*nvar_async
@@ -1305 +1328 @@
     if (mp_ierr /= 0) goto 600

+! Send cloud water if it exists. Increment counter always (as on receiving end)
+    if (readclouds) then
+      i=i+1
+      call MPI_Isend(clwc(:,:,:,mind),d3s1,mp_pp,dest,tm1,&
+           &MPI_COMM_WORLD,reqs(i),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+      i=i+1
+
+      call MPI_Isend(ciwc(:,:,:,mind),d3s1,mp_pp,dest,tm1,&
+           &MPI_COMM_WORLD,reqs(i),mp_ierr)
+      if (mp_ierr /= 0) goto 600
+! else
+!   i=i+2
+    end if
+
   end do
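The cloud-water hunks above follow the flag-then-payload collective pattern: id_read broadcasts the logical readclouds first, and only when it is true do all ranks join the clwc/ciwc broadcasts, so every process executes the same sequence of collectives. A minimal self-contained sketch of the pattern (array shape and root rank are made up):

  program bcast_flagged
    use mpi
    implicit none
    integer :: ierr, pid
    logical :: readclouds
    real, allocatable :: clwc(:,:,:)

    call MPI_Init(ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD, pid, ierr)

    allocate(clwc(10,10,2))
    readclouds = .false.
    if (pid == 0) then          ! rank 0 plays the reader (id_read)
      readclouds = .true.       ! pretend cloud water was found in the input
      clwc = 1.0
    end if

    ! Every rank must learn the flag before the conditional broadcast,
    ! otherwise ranks would disagree on how many collectives follow.
    call MPI_Bcast(readclouds, 1, MPI_LOGICAL, 0, MPI_COMM_WORLD, ierr)
    if (readclouds) then
      call MPI_Bcast(clwc, size(clwc), MPI_REAL, 0, MPI_COMM_WORLD, ierr)
    end if

    call MPI_Finalize(ierr)
  end program bcast_flagged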
@@ -1311 +1349 @@
   goto 601

-600 write(*,*) " mpi_mod> mp_ierr \= 0", mp_ierr
+600 write(*,*) "#### mpi_mod::mpif_gf_send_vars_async> mp_ierr \= 0", mp_ierr
   stop

@@ -1320 +1358 @@
 !*******************************************************************************
 ! DESCRIPTION
-!   Receive 'getfield' variables from reader process to all processes.
-!
-!   Called from timemanager by all processes except root
+!   Receive 'getfield' variables from reader process.
+!   Called from timemanager by all processes except reader
 !
 ! NOTE
@@ -1330 +1367 @@
 !   memstat -- input, used to resolve windfield index being received
 !
+! TODO
+!   Transfer cloud water/ice
 !
 !*******************************************************************************
@@ -1388 +1427 @@
 ! Get MPI tags/requests for communications
   j=mp_pid*nvar_async
-  call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+  call MPI_Irecv(uu(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(vv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(uupol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(vvpol(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(ww(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
   if (mp_ierr /= 0) goto 600
   j=j+1
-  call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-
-  call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+  call MPI_Irecv(tt(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(rho(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(drhodz(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(tth(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(qvh(:,:,:,mind),d3s2,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+
+  call MPI_Irecv(qv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
   if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-
-  call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-
-  call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+  j=j+1
+  call MPI_Irecv(pv(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(clouds(:,:,:,mind),d3s1,MPI_INTEGER1,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+
+  call MPI_Irecv(cloudsh(:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(vdep(:,:,:,mind),d2s2,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(ps(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
   if (mp_ierr /= 0) goto 600
   j=j+1
-  call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+  call MPI_Irecv(sd(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
   if (mp_ierr /= 0) goto 600
   j=j+1
-  call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+  call MPI_Irecv(tcc(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
   if (mp_ierr /= 0) goto 600
   j=j+1
-  call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-
-  call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
-  j=j+1
-  call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
+  call MPI_Irecv(tt2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(td2(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(lsprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(convprec(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+
+  call MPI_Irecv(ustar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(wstar(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(hmix(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+  j=j+1
+  call MPI_Irecv(tropopause(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
   if (mp_ierr /= 0) goto 600
   j=j+1
-  call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,MPI_COMM_WORLD,reqs(j),mp_ierr)
-  if (mp_ierr /= 0) goto 600
+  call MPI_Irecv(oli(:,:,:,mind),d2s1,mp_pp,id_read,MPI_ANY_TAG,&
+       &MPI_COMM_WORLD,reqs(j),mp_ierr)
+  if (mp_ierr /= 0) goto 600
+
+
+! Post request for clwc. These data are possibly not sent, request must then be cancelled
+! For now assume that data at all steps either have or do not have water
+  if (readclouds) then
+    j=j+1
+    call MPI_Irecv(clwc(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    j=j+1
+    call MPI_Irecv(ciwc(:,:,:,mind),d3s1,mp_pp,id_read,MPI_ANY_TAG,&
+         &MPI_COMM_WORLD,reqs(j),mp_ierr)
+    if (mp_ierr /= 0) goto 600
+  end if


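In the asynchronous path above, each transferred field occupies a fixed slot in the shared request array: the sender advances i from dest*nvar_async, the receiver advances j from mp_pid*nvar_async, and nothing blocks until mpif_gf_request calls MPI_Waitall. A two-rank toy version of this handshake (illustrative field names and sizes, not the FLEXPART interface):

  program async_exchange
    use mpi
    implicit none
    integer, parameter :: n = 100
    integer :: ierr, np, pid, j
    integer :: reqs(0:1)
    real :: a(n), b(n)

    call MPI_Init(ierr)
    call MPI_Comm_size(MPI_COMM_WORLD, np, ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD, pid, ierr)
    reqs(:) = MPI_REQUEST_NULL        ! null slots complete trivially in Waitall

    if (np == 2) then
      if (pid == 0) then              ! "reader": post one send per field
        a = 1.0
        b = 2.0
        call MPI_Isend(a, n, MPI_REAL, 1, 1, MPI_COMM_WORLD, reqs(0), ierr)
        call MPI_Isend(b, n, MPI_REAL, 1, 1, MPI_COMM_WORLD, reqs(1), ierr)
      else                            ! worker: matching receives, slot by slot
        j = 0
        call MPI_Irecv(a, n, MPI_REAL, 0, MPI_ANY_TAG, MPI_COMM_WORLD, reqs(j), ierr)
        j = j+1
        call MPI_Irecv(b, n, MPI_REAL, 0, MPI_ANY_TAG, MPI_COMM_WORLD, reqs(j), ierr)
      end if
      ! Completion is deferred to here, as in mpif_gf_request
      call MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE, ierr)
    end if

    call MPI_Finalize(ierr)
  end program async_exchange

Because both receives name the same source, MPI's non-overtaking rule guarantees the first Irecv matches the first Isend even with MPI_ANY_TAG.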
@@ -1477 +1556 @@
   goto 601

-600 write(*,*) " mpi_mod> mp_ierr \= 0", mp_ierr
+600 write(*,*) "#### mpi_mod::mpif_gf_recv_vars_async> MPI ERROR ", mp_ierr
   stop

@@ -1494 +1573 @@
 !
 !*******************************************************************************
+  use com_mod, only: readclouds
+
   implicit none

-! if (mp_measure_time) call mpif_mtime('commtime',0)
+
+  integer :: n_req
+  integer :: i
+
+!***********************************************************************
+
+  n_req=nvar_async*mp_np
+
+  if (mp_measure_time) call mpif_mtime('commtime',0)

 ! call MPI_Wait(rm1,MPI_STATUS_IGNORE,mp_ierr)
-  call MPI_Waitall(nvar_async*mp_np,reqs,MPI_STATUSES_IGNORE,mp_ierr)
-  if (mp_ierr /= 0) goto 600
-
-! if (mp_measure_time) call mpif_mtime('commtime',1)
+
+! TODO: cancel recv request if readclouds=.false.
+! if (readclouds) then
+  call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
+! endif
+! else
+!   do i = 0, nvar_async*mp_np-1
+!     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
+!       call MPI_Cancel(reqs(i),mp_ierr)
+!       cycle
+!     end if
+!     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
+!   end do
+! end if
+
+  if (mp_ierr /= 0) goto 600
+
+  if (mp_measure_time) call mpif_mtime('commtime',1)

   goto 601

-600 write(*,*) "#### mpi_mod::mpif_gf_request> ERROR, mp_ierr \= 0", mp_ierr
+600 write(*,*) "#### mpi_mod::mpif_gf_request> MPI ERROR ", mp_ierr
   stop

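The TODO kept in mpif_gf_request above concerns the case where the cloud-water receives were posted but no matching sends will ever arrive (readclouds=.false. on the sending side): such requests must be cancelled and then completed, as the commented-out loop hints. A hedged, standalone illustration of that cancel-then-complete idiom (not FLEXPART code):

  program cancel_unneeded
    use mpi
    implicit none
    integer :: ierr, req
    real :: buf(10)

    call MPI_Init(ierr)

    ! A receive posted speculatively; nothing will ever send to it here.
    call MPI_Irecv(buf, 10, MPI_REAL, MPI_ANY_SOURCE, MPI_ANY_TAG, &
         MPI_COMM_WORLD, req, ierr)

    ! Cancel, then complete the request; the standard requires a wait
    ! (or test) on the request even after a successful cancel.
    call MPI_Cancel(req, ierr)
    call MPI_Wait(req, MPI_STATUS_IGNORE, ierr)

    call MPI_Finalize(ierr)
  end program cancel_unneeded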
@@ -1606 +1709 @@

 !**********************************************************************
+
   grid_size3d=numxgridn*numygridn*numzgrid*maxspec* &
        & maxpointspec_act*nclassunc*maxageclass
@@ -1666 +1770 @@
   character(LEN=*), intent(in) :: ident
   integer, intent(in) :: imode
+
+!***********************************************************************

   select case(ident)
@@ -1800 +1906 @@

   integer :: ip,j,r
+
+!***********************************************************************

   if (mp_measure_time) then
@@ -1857 +1965 @@
 ! In the implementation with 3 fields, the processes may have posted
 ! MPI_Irecv requests that should be cancelled here
-!! :TODO:
+!! TODO:
 ! if (.not.lmp_sync) then
 !   r=mp_pid*nvar_async
@@ -1868 +1976 @@
   call MPI_FINALIZE(mp_ierr)
   if (mp_ierr /= 0) then
-    write(*,*) '#### mpif_finalize::MPI_FINALIZE> ERROR ####'
+    write(*,*) '#### mpif_finalize::MPI_FINALIZE> MPI ERROR ', mp_ierr, ' ####'
     stop
   end if
@@ -1887 +1995 @@
   integer, save :: free_lun=100
   logical :: exists, iopen
+
+!***********************************************************************

   loop1: do
@@ -1912 +2022 @@
   character(LEN=40) :: fn_1, fn_2
+
+!***********************************************************************
+
   write(c_ts, FMT='(I8.8,BZ)') tstep
   fn_1='-'//trim(adjustl(c_ts))//'-'//trim(ident)
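The mpif_mtime('commtime',0/1) pairs appearing throughout these hunks bracket communication calls for the timing report. One common way to implement such a start/stop timer is to accumulate MPI_Wtime differences; a generic sketch of the idiom (variable names invented, not the FLEXPART routine):

  program time_comm
    use mpi
    implicit none
    integer :: ierr, pid
    double precision :: t0, commtime
    real :: buf(1000)

    call MPI_Init(ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD, pid, ierr)
    commtime = 0.d0
    buf = 0.0

    t0 = MPI_Wtime()                        ! imode 0: start the stopwatch
    call MPI_Bcast(buf, size(buf), MPI_REAL, 0, MPI_COMM_WORLD, ierr)
    commtime = commtime + (MPI_Wtime()-t0)  ! imode 1: stop and accumulate

    if (pid == 0) write(*,*) 'total time in communications: ', commtime
    call MPI_Finalize(ierr)
  end program time_comm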