source: branches/jerome/src_flexwrf_v3.1/sendint_mpi.f90 @ 16

Last change on this file since 16 was 16, checked in by jebri, 11 years ago

sources for flexwrf v3.1

!***********************************************************************
!* Copyright 2012,2013                                                 *
!* Jerome Brioude, Delia Arnold, Andreas Stohl, Wayne Angevine,        *
!* John Burkhart, Massimo Cassiani, Adam Dingwell, Richard C Easter,   *
!* Sabine Eckhardt, Stephanie Evan, Jerome D Fast, Don Morton,         *
!* Ignacio Pisso, Petra Seibert, Gerard Wotawa, Caroline Forster,      *
!* Harald Sodemann,                                                    *
!*                                                                     *
!* This file is part of FLEXPART WRF                                   *
!*                                                                     *
!* FLEXPART is free software: you can redistribute it and/or modify    *
!* it under the terms of the GNU General Public License as published by*
!* the Free Software Foundation, either version 3 of the License, or   *
!* (at your option) any later version.                                 *
!*                                                                     *
!* FLEXPART is distributed in the hope that it will be useful,         *
!* but WITHOUT ANY WARRANTY; without even the implied warranty of      *
!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the       *
!* GNU General Public License for more details.                        *
!*                                                                     *
!* You should have received a copy of the GNU General Public License   *
!* along with FLEXPART.  If not, see <http://www.gnu.org/licenses/>.   *
!***********************************************************************

! Routine used to send (scatter) or collect (gather) integer particle
! arrays between the master and the slave processes by MPI.
!    Author: J. Brioude
!    March 2012
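!
! Interface summary, inferred from the code below rather than from any
! original documentation:
!   tag       - selects the array to transfer (1=npoint, 2=idt,
!               3=itra1, 14=itramem, 15=nclass) and doubles as the
!               MPI message tag
!   numpart2  - total number of particles to distribute or collect
!   chunksize - number of elements handled by each slave process
!   direc     - 0: the master scatters to the slaves;
!               otherwise the master gathers from the slaves
!
! Hypothetical usage from a driver routine, assuming numpart and
! chunksize are set as elsewhere in FLEXPART-WRF:
!   call sendint_mpi(1,numpart,chunksize,0)   ! scatter npoint
!   call sendint_mpi(1,numpart,chunksize,1)   ! gather npoint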
      subroutine sendint_mpi(tag,numpart2,chunksize,direc)

      use mpi_mod
      use com_mod
      implicit none

      include 'mpif.h'

      integer :: chunksize,numpart2,jj1
      integer :: myid,ierr,ntasks,ii,jdeb,jfin,jj,direc,tag
      integer :: jj2,from,jj3
      integer, dimension(MPI_STATUS_SIZE) :: status

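! Note: the work array dummyi2, the master chunk size chunksize2, and
! the per-process arrays mpi_npoint, mpi_idt, mpi_itra1, mpi_itramem
! and mpi_nclass are not declared locally; they are presumably module
! variables made available by "use mpi_mod".
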
      call MPI_COMM_RANK ( MPI_COMM_WORLD, myid, ierr )
      call MPI_COMM_SIZE ( MPI_COMM_WORLD, ntasks, ierr )

      if (direc.eq.0) then ! direc=0: the slaves get data from the master
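
! The master packs every ntasks-th element, offset by the destination
! rank (a round-robin decomposition of the particle arrays), into the
! work array dummyi2 and sends one chunk to each slave.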
        if (myid.eq.0) then

          do ii=1,ntasks-1
            do jj2=1,chunksize
              jj=(jj2-1)*ntasks+ii+1
              if (tag.eq.1)  dummyi2(jj2)=npoint(jj)
              if (tag.eq.2)  dummyi2(jj2)=idt(jj)
              if (tag.eq.3)  dummyi2(jj2)=itra1(jj)
              if (tag.eq.14) dummyi2(jj2)=itramem(jj)
              if (tag.eq.15) dummyi2(jj2)=nclass(jj)
            enddo
            call MPI_SEND(dummyi2, chunksize, MPI_INTEGER, ii, tag, MPI_COMM_WORLD, ierr)
          enddo
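
! The master keeps its own share: elements 1, 1+ntasks, 1+2*ntasks, ...
! The counting loop below leaves the number of strided elements in ii
! and the last strided index in jj2, so chunksize2 also covers the
! numpart2-jj2 trailing elements that the fixed-size chunks miss.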
          ii=0
          do jj=1,numpart2,ntasks
            ii=ii+1
            jj2=jj
          enddo
          chunksize2=ii+numpart2-jj2
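
! Copy the master's strided elements into its own mpi_* array and
! append the trailing elements after them.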
          if (tag.eq.1) then
            do jj=1,numpart2,ntasks
              jj3=(jj-1)/ntasks+1
              mpi_npoint(jj3)=npoint(jj)
            enddo
            mpi_npoint(jj3:chunksize2)=npoint(jj2:numpart2)
          elseif (tag.eq.2) then
            do jj=1,numpart2,ntasks
              jj3=(jj-1)/ntasks+1
              mpi_idt(jj3)=idt(jj)
            enddo
            mpi_idt(jj3:chunksize2)=idt(jj2:numpart2)
          elseif (tag.eq.3) then
            do jj=1,numpart2,ntasks
              jj3=(jj-1)/ntasks+1
              mpi_itra1(jj3)=itra1(jj)
            enddo
            mpi_itra1(jj3:chunksize2)=itra1(jj2:numpart2)
          elseif (tag.eq.14) then
            do jj=1,numpart2,ntasks
              jj3=(jj-1)/ntasks+1
              mpi_itramem(jj3)=itramem(jj)
            enddo
            mpi_itramem(jj3:chunksize2)=itramem(jj2:numpart2)
          elseif (tag.eq.15) then
            do jj=1,numpart2,ntasks
              jj3=(jj-1)/ntasks+1
              mpi_nclass(jj3)=nclass(jj)
            enddo
            mpi_nclass(jj3:chunksize2)=nclass(jj2:numpart2)
          endif
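
! On the slave side, each process receives its chunk directly into its
! local mpi_* array.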

        else ! the slaves receive

          if (tag.eq.1)  call MPI_RECV(mpi_npoint, chunksize, MPI_INTEGER, 0, 1, MPI_COMM_WORLD, status, ierr)
          if (tag.eq.2)  call MPI_RECV(mpi_idt, chunksize, MPI_INTEGER, 0, 2, MPI_COMM_WORLD, status, ierr)
          if (tag.eq.3)  call MPI_RECV(mpi_itra1, chunksize, MPI_INTEGER, 0, 3, MPI_COMM_WORLD, status, ierr)
          if (tag.eq.14) call MPI_RECV(mpi_itramem, chunksize, MPI_INTEGER, 0, 14, MPI_COMM_WORLD, status, ierr)
          if (tag.eq.15) call MPI_RECV(mpi_nclass, chunksize, MPI_INTEGER, 0, 15, MPI_COMM_WORLD, status, ierr)

        endif

      else ! direc/=0: the master is going to get data from the slaves

        if (myid.gt.0) then ! the slaves send

          if (tag.eq.1)  call MPI_SEND(mpi_npoint, chunksize, MPI_INTEGER, 0, 1, MPI_COMM_WORLD, ierr)
          if (tag.eq.2)  call MPI_SEND(mpi_idt, chunksize, MPI_INTEGER, 0, 2, MPI_COMM_WORLD, ierr)
          if (tag.eq.3)  call MPI_SEND(mpi_itra1, chunksize, MPI_INTEGER, 0, 3, MPI_COMM_WORLD, ierr)
          if (tag.eq.14) call MPI_SEND(mpi_itramem, chunksize, MPI_INTEGER, 0, 14, MPI_COMM_WORLD, ierr)
          if (tag.eq.15) call MPI_SEND(mpi_nclass, chunksize, MPI_INTEGER, 0, 15, MPI_COMM_WORLD, ierr)
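
! The master stores the chunk from slave "from" in the contiguous block
! (from-1)*chunksize+1 : from*chunksize and finally appends its own
! chunksize2 elements. The round-robin order used when scattering is
! not restored here, so the particles come back permuted; this is
! presumably harmless because these arrays are not order-dependent.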
        else ! the master gets

          do from=1,ntasks-1
            call MPI_RECV(dummyi2, chunksize, MPI_INTEGER, from, tag, MPI_COMM_WORLD, status, ierr)
            jj1=(from-1)*chunksize+1
            jj2=from*chunksize
            if (tag.eq.1)  npoint(jj1:jj2)=dummyi2(1:chunksize)
            if (tag.eq.2)  idt(jj1:jj2)=dummyi2(1:chunksize)
            if (tag.eq.3)  itra1(jj1:jj2)=dummyi2(1:chunksize)
            if (tag.eq.14) itramem(jj1:jj2)=dummyi2(1:chunksize)
            if (tag.eq.15) nclass(jj1:jj2)=dummyi2(1:chunksize)
          enddo

          if (tag.eq.1)  npoint(jj2+1:numpart2)=mpi_npoint(1:chunksize2)
          if (tag.eq.2)  idt(jj2+1:numpart2)=mpi_idt(1:chunksize2)
          if (tag.eq.3)  itra1(jj2+1:numpart2)=mpi_itra1(1:chunksize2)
          if (tag.eq.14) itramem(jj2+1:numpart2)=mpi_itramem(1:chunksize2)
          if (tag.eq.15) nclass(jj2+1:numpart2)=mpi_nclass(1:chunksize2)

        endif

      endif

      end subroutine sendint_mpi