source: branches/jerome/src_flexwrf_v3.1/sendreal_mpi.f90 @ 16

Last change on this file: revision 16, checked in by jebri, 11 years ago

sources for flexwrf v3.1

File size: 6.9 KB
!***********************************************************************
!* Copyright 2012,2013                                                 *
!* Jerome Brioude, Delia Arnold, Andreas Stohl, Wayne Angevine,        *
!* John Burkhart, Massimo Cassiani, Adam Dingwell, Richard C Easter,   *
!* Sabine Eckhardt, Stephanie Evan, Jerome D Fast, Don Morton,         *
!* Ignacio Pisso, Petra Seibert, Gerard Wotawa, Caroline Forster,      *
!* Harald Sodemann,                                                    *
!*                                                                     *
!* This file is part of FLEXPART WRF                                   *
!*                                                                     *
!* FLEXPART is free software: you can redistribute it and/or modify    *
!* it under the terms of the GNU General Public License as published by*
!* the Free Software Foundation, either version 3 of the License, or   *
!* (at your option) any later version.                                 *
!*                                                                     *
!* FLEXPART is distributed in the hope that it will be useful,         *
!* but WITHOUT ANY WARRANTY; without even the implied warranty of      *
!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the       *
!* GNU General Public License for more details.                        *
!*                                                                     *
!* You should have received a copy of the GNU General Public License   *
!* along with FLEXPART.  If not, see <http://www.gnu.org/licenses/>.   *
!***********************************************************************

  !   Routine used to send real vectors by MPI
  !   Author: J. Brioude
  !   March 2012
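  !
  !   Interface (as read from the code below):
  !     tag       selects the particle array to transfer:
  !               4=uap, 5=ucp, 6=uzp, 7=us, 8=vs, 9=ws, 10=ztra1
  !     numpart2  total number of particles to distribute/collect
  !     chunksize number of particles handled by each slave task
  !     direc     0: the master distributes (scatters) the selected array
  !               to the slaves; otherwise the slaves send their chunks
  !               back and the master gathers them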
      subroutine sendreal_mpi(tag,numpart2,chunksize,direc)

      use mpi_mod
      use com_mod
      implicit none
      include 'mpif.h'

!      character :: varname*20
      integer :: chunksize,numpart2,jj1
!      real :: dummyr(numpart),
!      real :: dummyr2(chunksize)
      integer :: myid,ierr,ntasks,ii,jdeb,jfin,jj,direc,tag
!      integer :: MPI_COMM_WORLD
      integer :: jj2,from,jj3
      integer, dimension(MPI_STATUS_SIZE) :: status

! Note: the work buffer dummyr2, the counter chunksize2 and the mpi_*
! arrays are not declared here; they are expected to be provided by the
! modules above (chunksize2 in particular must keep its value between a
! scatter call and the following gather call).

      call MPI_COMM_RANK ( MPI_COMM_WORLD, myid, ierr )
      call MPI_COMM_SIZE ( MPI_COMM_WORLD, ntasks, ierr )

      if (direc.eq.0) then ! direc=0: the slaves get their share

       if (myid.eq.0) then ! master sends
! Round-robin distribution: slave ii is sent the global particles
! ii+1, ii+1+ntasks, ii+1+2*ntasks, ..., packed into dummyr2.
        do ii=1,ntasks-1
!      jdeb=(ii-1)*chunksize+1
!      jfin=(ii)*chunksize
!      do jj=jdeb,jfin
!      do jj=ii+1,numpart2+ii,ntasks
         do jj2=1,chunksize
!       jj2=jj-jdeb+1
!       jj2=(jj-ii-1)/ntasks+1
          jj=(jj2-1)*ntasks+ii+1
!       dummyr2(jj2)=dummyr(jj)
          if (tag.eq.4)  dummyr2(jj2)=uap(jj)
          if (tag.eq.5)  dummyr2(jj2)=ucp(jj)
          if (tag.eq.6)  dummyr2(jj2)=uzp(jj)
          if (tag.eq.7)  dummyr2(jj2)=us(jj)
          if (tag.eq.8)  dummyr2(jj2)=vs(jj)
          if (tag.eq.9)  dummyr2(jj2)=ws(jj)
          if (tag.eq.10) dummyr2(jj2)=ztra1(jj)
         enddo
         call MPI_SEND(dummyr2, chunksize, MPI_REAL, ii, tag, MPI_COMM_WORLD, ierr)
        enddo

! Work out the master's own share: the strided subset 1, 1+ntasks,
! 1+2*ntasks, ... (ii particles, the last of them at index jj2) plus the
! contiguous tail jj2+1..numpart2, i.e. chunksize2 particles in total.
!     jdeb=(ntasks-1)*chunksize+1
!     jfin=numpart
!     chunksize2=jfin-jdeb+1
!     chunksize2=int((numpart2-1)/ntasks)+1
!      chunksize2=chunksize
        ii=0
        do jj=1,numpart2,ntasks
         ii=ii+1
         jj2=jj
        enddo
        chunksize2=ii+numpart2-jj2

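! Purely illustrative numbers (an assumption, not taken from an actual
! run): with ntasks=3, numpart2=10 and chunksize=3, slave 1 is sent
! particles 2,5,8, slave 2 is sent 3,6,9, and the master keeps 1,4,7,10,
! so chunksize2=4.  The blocks below copy the master's own share into the
! mpi_* work arrays; the tag value selects which physical array is used.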
        if (tag.eq.4) then
         do jj=1,numpart2,ntasks
          jj3=(jj-1)/ntasks+1
          mpi_uap(jj3)=uap(jj)
         enddo
         mpi_uap(jj3:chunksize2)=uap(jj2:numpart2)
        elseif (tag.eq.5) then
         do jj=1,numpart2,ntasks
          jj3=(jj-1)/ntasks+1
          mpi_ucp(jj3)=ucp(jj)
         enddo
         mpi_ucp(jj3:chunksize2)=ucp(jj2:numpart2)
        elseif (tag.eq.6) then
         do jj=1,numpart2,ntasks
          jj3=(jj-1)/ntasks+1
          mpi_uzp(jj3)=uzp(jj)
         enddo
         mpi_uzp(jj3:chunksize2)=uzp(jj2:numpart2)
        elseif (tag.eq.7) then
         do jj=1,numpart2,ntasks
          jj3=(jj-1)/ntasks+1
          mpi_us(jj3)=us(jj)
         enddo
         mpi_us(jj3:chunksize2)=us(jj2:numpart2)
        elseif (tag.eq.8) then
         do jj=1,numpart2,ntasks
          jj3=(jj-1)/ntasks+1
          mpi_vs(jj3)=vs(jj)
         enddo
         mpi_vs(jj3:chunksize2)=vs(jj2:numpart2)
        elseif (tag.eq.9) then
         do jj=1,numpart2,ntasks
          jj3=(jj-1)/ntasks+1
          mpi_ws(jj3)=ws(jj)
         enddo
         mpi_ws(jj3:chunksize2)=ws(jj2:numpart2)
        elseif (tag.eq.10) then
         do jj=1,numpart2,ntasks
          jj3=(jj-1)/ntasks+1
          mpi_ztra1(jj3)=ztra1(jj)
         enddo
         mpi_ztra1(jj3:chunksize2)=ztra1(jj2:numpart2)
        endif

       else ! the slaves receive
        if (tag.eq.4)  call MPI_RECV(mpi_uap,   chunksize, MPI_REAL, 0, 4, MPI_COMM_WORLD, status, ierr)
        if (tag.eq.5)  call MPI_RECV(mpi_ucp,   chunksize, MPI_REAL, 0, 5, MPI_COMM_WORLD, status, ierr)
        if (tag.eq.6)  call MPI_RECV(mpi_uzp,   chunksize, MPI_REAL, 0, 6, MPI_COMM_WORLD, status, ierr)
        if (tag.eq.7)  call MPI_RECV(mpi_us,    chunksize, MPI_REAL, 0, 7, MPI_COMM_WORLD, status, ierr)
        if (tag.eq.8)  call MPI_RECV(mpi_vs,    chunksize, MPI_REAL, 0, 8, MPI_COMM_WORLD, status, ierr)
        if (tag.eq.9)  call MPI_RECV(mpi_ws,    chunksize, MPI_REAL, 0, 9, MPI_COMM_WORLD, status, ierr)
        if (tag.eq.10) call MPI_RECV(mpi_ztra1, chunksize, MPI_REAL, 0, 10, MPI_COMM_WORLD, status, ierr)
       endif

      else ! direc/=0: the master is going to get the arrays back

       if (myid.gt.0) then ! slaves send their chunks

        if (tag.eq.4)  call MPI_SEND(mpi_uap,   chunksize, MPI_REAL, 0, 4, MPI_COMM_WORLD, ierr)
        if (tag.eq.5)  call MPI_SEND(mpi_ucp,   chunksize, MPI_REAL, 0, 5, MPI_COMM_WORLD, ierr)
        if (tag.eq.6)  call MPI_SEND(mpi_uzp,   chunksize, MPI_REAL, 0, 6, MPI_COMM_WORLD, ierr)
        if (tag.eq.7)  call MPI_SEND(mpi_us,    chunksize, MPI_REAL, 0, 7, MPI_COMM_WORLD, ierr)
        if (tag.eq.8)  call MPI_SEND(mpi_vs,    chunksize, MPI_REAL, 0, 8, MPI_COMM_WORLD, ierr)
        if (tag.eq.9)  call MPI_SEND(mpi_ws,    chunksize, MPI_REAL, 0, 9, MPI_COMM_WORLD, ierr)
        if (tag.eq.10) call MPI_SEND(mpi_ztra1, chunksize, MPI_REAL, 0, 10, MPI_COMM_WORLD, ierr)

       else ! the master gets

! The chunk of slave 'from' is stored back contiguously at positions
! (from-1)*chunksize+1 .. from*chunksize, and the master's own share
! (held in the mpi_* arrays) is appended after the last slave chunk.
! The particle ordering after a gather therefore differs from the
! round-robin ordering used by the scatter above.
        do from=1,ntasks-1
         call MPI_RECV(dummyr2, chunksize, MPI_REAL, from, tag, MPI_COMM_WORLD, status, ierr)
         jj1=(from-1)*chunksize+1
         jj2=from*chunksize
         if (tag.eq.4)  uap(jj1:jj2)=dummyr2(1:chunksize)
         if (tag.eq.5)  ucp(jj1:jj2)=dummyr2(1:chunksize)
         if (tag.eq.6)  uzp(jj1:jj2)=dummyr2(1:chunksize)
         if (tag.eq.7)  us(jj1:jj2)=dummyr2(1:chunksize)
         if (tag.eq.8)  vs(jj1:jj2)=dummyr2(1:chunksize)
         if (tag.eq.9)  ws(jj1:jj2)=dummyr2(1:chunksize)
         if (tag.eq.10) ztra1(jj1:jj2)=dummyr2(1:chunksize)
        enddo
        if (tag.eq.4)  uap(jj2+1:numpart2)=mpi_uap(1:chunksize2)
        if (tag.eq.5)  ucp(jj2+1:numpart2)=mpi_ucp(1:chunksize2)
        if (tag.eq.6)  uzp(jj2+1:numpart2)=mpi_uzp(1:chunksize2)
        if (tag.eq.7)  us(jj2+1:numpart2)=mpi_us(1:chunksize2)
        if (tag.eq.8)  vs(jj2+1:numpart2)=mpi_vs(1:chunksize2)
        if (tag.eq.9)  ws(jj2+1:numpart2)=mpi_ws(1:chunksize2)
        if (tag.eq.10) ztra1(jj2+1:numpart2)=mpi_ztra1(1:chunksize2)

       endif

      endif

      end subroutine sendreal_mpi
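
! Illustrative calling pattern (a sketch only; the loop and the variable
! names below are assumptions and are not taken from the actual
! FLEXPART-WRF call sites):
!
!      ! direc = 0: the master scatters each particle array to the slaves,
!      ! one call per MPI tag (4=uap, 5=ucp, 6=uzp, 7=us, 8=vs, 9=ws, 10=ztra1)
!      do tag=4,10
!        call sendreal_mpi(tag,numpart,chunksize,0)
!      enddo
!
!      ! direc /= 0: the slaves send their chunks back and the master
!      ! reassembles the full arrays
!      do tag=4,10
!        call sendreal_mpi(tag,numpart,chunksize,1)
!      enddo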