HaloExchange.h
/* Copyright (C) 2010 Imperial College London and others.
 *
 * Please see the AUTHORS file in the main source directory for a
 * full list of copyright holders.
 *
 * Gerard Gorman
 * Applied Modelling and Computation Group
 * Department of Earth Science and Engineering
 * Imperial College London
 *
 * g.gorman@imperial.ac.uk
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef HALOEXCHANGE_H
#define HALOEXCHANGE_H

#include <vector>
#include <cassert>

#include <mpi.h>

#include "PragmaticTypes.h"
#include "mpi_tools.h"

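/* Usage sketch for the single-field halo_update below (illustrative, with
 * assumed caller-side names): send[p] holds the local indices of owned
 * entries destined for rank p, and recv[p] the local indices where values
 * arriving from rank p are written; "block" values are exchanged per index.
 * For example, to refresh 3D vertex coordinates ("nprocs", "NNodes" and
 * "coords" are the caller's, not part of this header):
 *
 *   std::vector< std::vector<index_t> > send(nprocs), recv(nprocs);
 *   // ... fill send/recv from the mesh's halo information ...
 *   std::vector<double> coords(NNodes*3);
 *   halo_update<double, 3>(MPI_COMM_WORLD, send, recv, coords);
 */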
template <typename DATATYPE, int block>
void halo_update(MPI_Comm comm,
                 const std::vector< std::vector<index_t> > &send,
                 const std::vector< std::vector<index_t> > &recv,
                 std::vector<DATATYPE> &vec)
{
    int num_processes;
    MPI_Comm_size(comm, &num_processes);
    if(num_processes<2)
        return;

    assert(num_processes==(int)send.size());
    assert(num_processes==(int)recv.size());

    int rank;
    MPI_Comm_rank(comm, &rank);

    // Resolve the MPI_Datatype corresponding to DATATYPE (see mpi_tools.h).
    mpi_type_wrapper<DATATYPE> wrap;

    // MPI_Requests for all non-blocking communications.
    std::vector<MPI_Request> request(num_processes*2);

    // Set up non-blocking receives.
    std::vector< std::vector<DATATYPE> > recv_buff(num_processes);
    for(int i=0;i<num_processes;i++){
        if((i==rank)||(recv[i].size()==0)){
            request[i] = MPI_REQUEST_NULL;
        }else{
            recv_buff[i].resize(recv[i].size()*block);
            MPI_Irecv(&(recv_buff[i][0]), recv_buff[i].size(), wrap.mpi_type, i, 0, comm, &(request[i]));
        }
    }

    // Non-blocking sends: pack the "block" consecutive entries for each index.
    std::vector< std::vector<DATATYPE> > send_buff(num_processes);
    for(int i=0;i<num_processes;i++){
        if((i==rank)||(send[i].size()==0)){
            request[num_processes+i] = MPI_REQUEST_NULL;
        }else{
            for(typename std::vector<index_t>::const_iterator it=send[i].begin();it!=send[i].end();++it){
                for(int j=0;j<block;j++){
                    send_buff[i].push_back(vec[(*it)*block+j]);
                }
            }
            MPI_Isend(&(send_buff[i][0]), send_buff[i].size(), wrap.mpi_type, i, 0, comm, &(request[num_processes+i]));
        }
    }

    // Wait for all receives to complete, then all sends.
    std::vector<MPI_Status> status(num_processes*2);
    MPI_Waitall(num_processes, &(request[0]), &(status[0]));
    MPI_Waitall(num_processes, &(request[num_processes]), &(status[num_processes]));

    // Unpack the received values into the halo entries of vec.
    for(int i=0;i<num_processes;i++){
        int k=0;
        for(typename std::vector<index_t>::const_iterator it=recv[i].begin();it!=recv[i].end();++it, ++k)
            for(int j=0;j<block;j++)
                vec[(*it)*block+j] = recv_buff[i][k*block+j];
    }
}

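/* Usage sketch for the two-field overload below (illustrative): update a
 * scalar field (block0=1) and, say, a four-component tensor field (block1=4)
 * with a single message per neighbour instead of two separate exchanges.
 * "scalar", "metric" and "NNodes" are assumed caller-side names:
 *
 *   std::vector<double> scalar(NNodes), metric(NNodes*4);
 *   halo_update<double, 1, 4>(MPI_COMM_WORLD, send, recv, scalar, metric);
 */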
template <typename DATATYPE, int block0, int block1>
void halo_update(MPI_Comm comm,
                 const std::vector< std::vector<index_t> > &send,
                 const std::vector< std::vector<index_t> > &recv,
                 std::vector<DATATYPE> &vec0, std::vector<DATATYPE> &vec1)
{
    int num_processes;
    MPI_Comm_size(comm, &num_processes);
    if(num_processes<2)
        return;

    assert(num_processes==(int)send.size());
    assert(num_processes==(int)recv.size());

    int rank;
    MPI_Comm_rank(comm, &rank);

    // Resolve the MPI_Datatype corresponding to DATATYPE (see mpi_tools.h).
    mpi_type_wrapper<DATATYPE> wrap;

    // MPI_Requests for all non-blocking communications.
    std::vector<MPI_Request> request(num_processes*2);

    // Set up non-blocking receives; each index carries block0+block1 values.
    std::vector< std::vector<DATATYPE> > recv_buff(num_processes);
    for(int i=0;i<num_processes;i++){
        int msg_size = recv[i].size()*(block0+block1);
        if((i==rank)||(msg_size==0)){
            request[i] = MPI_REQUEST_NULL;
        }else{
            recv_buff[i].resize(msg_size);
            MPI_Irecv(&(recv_buff[i][0]), msg_size, wrap.mpi_type, i, 0, comm, &(request[i]));
        }
    }

    // Non-blocking sends: for each index, pack block0 values from vec0
    // followed by block1 values from vec1, so both fields travel in one message.
    std::vector< std::vector<DATATYPE> > send_buff(num_processes);
    for(int i=0;i<num_processes;i++){
        if((i==rank)||(send[i].size()==0)){
            request[num_processes+i] = MPI_REQUEST_NULL;
        }else{
            for(typename std::vector<index_t>::const_iterator it=send[i].begin();it!=send[i].end();++it){
                for(int j=0;j<block0;j++){
                    send_buff[i].push_back(vec0[(*it)*block0+j]);
                }
                for(int j=0;j<block1;j++){
                    send_buff[i].push_back(vec1[(*it)*block1+j]);
                }
            }
            MPI_Isend(&(send_buff[i][0]), send_buff[i].size(), wrap.mpi_type, i, 0, comm, &(request[num_processes+i]));
        }
    }

    // Wait for all receives to complete, then all sends.
    std::vector<MPI_Status> status(num_processes*2);
    MPI_Waitall(num_processes, &(request[0]), &(status[0]));
    MPI_Waitall(num_processes, &(request[num_processes]), &(status[num_processes]));

    // Unpack: the first block0 values of each chunk go to vec0, the remaining
    // block1 values go to vec1.
    int block01 = block0+block1;
    for(int i=0;i<num_processes;i++){
        int k=0;
        for(typename std::vector<index_t>::const_iterator it=recv[i].begin();it!=recv[i].end();++it, ++k){
            for(int j=0;j<block0;j++)
                vec0[(*it)*block0+j] = recv_buff[i][k*block01+j];
            for(int j=0;j<block1;j++)
                vec1[(*it)*block1+j] = recv_buff[i][k*block01+block0+j];
        }
    }
}
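
/* End-to-end sketch (hypothetical two-rank run, each rank owning one entry
 * and ghosting the other's) to illustrate the send/recv index convention:
 *
 *   int rank;
 *   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 *   std::vector< std::vector<index_t> > send(2), recv(2);
 *   send[1-rank].push_back(0);  // my owned entry goes to the other rank
 *   recv[1-rank].push_back(1);  // its value lands in my ghost slot
 *   std::vector<double> vec(2);
 *   vec[0] = rank;              // local layout: [owned, ghost]
 *   halo_update<double, 1>(MPI_COMM_WORLD, send, recv, vec);
 *   // afterwards vec[1] == 1-rank on both ranks
 */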

#endif