globalindices.hh
Go to the documentation of this file.
1 // -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 // vi: set et ts=4 sw=4 sts=4:
3 /*
4  Copyright (C) 2011-2013 by Andreas Lauser
5 
6  This file is part of the Open Porous Media project (OPM).
7 
8  OPM is free software: you can redistribute it and/or modify
9  it under the terms of the GNU General Public License as published by
10  the Free Software Foundation, either version 2 of the License, or
11  (at your option) any later version.
12 
13  OPM is distributed in the hope that it will be useful,
14  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  GNU General Public License for more details.
17 
18  You should have received a copy of the GNU General Public License
19  along with OPM. If not, see <http://www.gnu.org/licenses/>.
20 */
25 #ifndef EWOMS_GLOBAL_INDICES_HH
26 #define EWOMS_GLOBAL_INDICES_HH
27 
28 #include <dune/grid/common/datahandleif.hh>
29 #include <dune/istl/bcrsmatrix.hh>
30 #include <dune/istl/scalarproducts.hh>
31 #include <dune/istl/operators.hh>
32 
33 #include <algorithm>
34 #include <set>
35 #include <map>
36 #include <iostream>
37 #include <tuple>
38 
39 #if HAVE_MPI
40 #include <mpi.h>
41 #endif
42 
43 #include "overlaptypes.hh"
44 
45 namespace Ewoms {
46 namespace Linear {
52 template <class ForeignOverlap>
54 {
56  {}
57 
58  typedef std::map<Index, Index> GlobalToDomesticMap;
59  typedef std::map<Index, Index> DomesticToGlobalMap;
60 
61 public:
62  GlobalIndices(const ForeignOverlap &foreignOverlap)
63  : foreignOverlap_(foreignOverlap)
64  {
65  myRank_ = 0;
66  mpiSize_ = 1;
67 
68 #if HAVE_MPI
69  {
70  int tmp;
71  MPI_Comm_rank(MPI_COMM_WORLD, &tmp);
72  myRank_ = tmp;
73  MPI_Comm_size(MPI_COMM_WORLD, &tmp);
74  mpiSize_ = tmp;
75  }
76 #endif
77 
78  // calculate the domestic overlap (i.e. all overlap indices in
79  // foreign processes which the current process overlaps.)
80  // This requires communication via MPI.
82  }
83 
87  int domesticToGlobal(int domesticIdx) const
88  {
89  assert(domesticToGlobal_.find(domesticIdx) != domesticToGlobal_.end());
90 
91  return domesticToGlobal_.find(domesticIdx)->second;
92  }
93 
97  int globalToDomestic(int globalIdx) const
98  {
99  const auto& tmp = globalToDomestic_.find(globalIdx);
100 
101  if (tmp == globalToDomestic_.end())
102  return -1;
103 
104  return tmp->second;
105  }
106 
111  int numLocal() const
112  { return foreignOverlap_.numLocal(); }
113 
120  int numDomestic() const
121  { return numDomestic_; }
122 
126  void addIndex(int domesticIdx, int globalIdx)
127  {
128  domesticToGlobal_[domesticIdx] = globalIdx;
129  globalToDomestic_[globalIdx] = domesticIdx;
131 
132  assert(domesticToGlobal_.size() == globalToDomestic_.size());
133  }
134 
138  void sendBorderIndex(int peerRank, int domesticIdx, int peerLocalIdx)
139  {
140 #if HAVE_MPI
141  PeerIndexGlobalIndex sendBuf;
142  sendBuf.peerIdx = peerLocalIdx;
143  sendBuf.globalIdx = domesticToGlobal(domesticIdx);
144  MPI_Send(&sendBuf, // buff
145  sizeof(PeerIndexGlobalIndex), // count
146  MPI_BYTE, // data type
147  peerRank, // peer process
148  0, // tag
149  MPI_COMM_WORLD); // communicator
150 #endif
151  }
152 
157  void receiveBorderIndex(int peerRank)
158  {
159 #if HAVE_MPI
160  PeerIndexGlobalIndex recvBuf;
161  MPI_Recv(&recvBuf, // buff
162  sizeof(PeerIndexGlobalIndex), // count
163  MPI_BYTE, // data type
164  peerRank, // peer process
165  0, // tag
166  MPI_COMM_WORLD, // communicator
167  MPI_STATUS_IGNORE); // status
168 
169  int domesticIdx = foreignOverlap_.nativeToLocal(recvBuf.peerIdx);
170  if (domesticIdx >= 0) {
171  int globalIdx = recvBuf.globalIdx;
172  addIndex(domesticIdx, globalIdx);
173  }
174 #endif // HAVE_MPI
175  }
176 
180  bool hasGlobalIndex(int globalIdx) const
181  { return globalToDomestic_.find(globalIdx) != globalToDomestic_.end(); }
182 
187  void print() const
188  {
189  std::cout << "(domestic index, global index, domestic->global->domestic)"
190  << " list for rank " << myRank_ << "\n";
191 
192  for (size_t domIdx = 0; domIdx < domesticToGlobal_.size(); ++domIdx)
193  std::cout << "(" << domIdx << ", " << domesticToGlobal(domIdx)
194  << ", " << globalToDomestic(domesticToGlobal(domIdx)) << ") ";
195  std::cout << "\n" << std::flush;
196  }
197 
198 protected:
199  // retrieve the offset for the indices where we are master in the
200  // global index list
202  {
203 #if HAVE_MPI
204  numDomestic_ = 0;
205 #else
206  numDomestic_ = foreignOverlap_.numLocal();
207 #endif
208 
209 #if HAVE_MPI
210  if (myRank_ == 0) {
211  // the first rank starts at index zero
212  domesticOffset_ = 0;
213  }
214  else {
215  // all other ranks retrieve their offset from the next
216  // lower rank
217  MPI_Recv(&domesticOffset_, // buffer
218  1, // count
219  MPI_INT, // data type
220  myRank_ - 1, // peer rank
221  0, // tag
222  MPI_COMM_WORLD, // communicator
223  MPI_STATUS_IGNORE);
224  }
225 
226  // create maps for all indices for which the current process
227  // is the master
228  int numMaster = 0;
229  for (int i = 0; i < foreignOverlap_.numLocal(); ++i) {
230  if (!foreignOverlap_.iAmMasterOf(i))
231  continue;
232 
233  addIndex(i, domesticOffset_ + numMaster);
234  ++numMaster;
235  }
236 
237  if (myRank_ < mpiSize_ - 1) {
238  // send the domestic offset plus the number of master
239  // indices to the process which is one rank higher
240  int tmp = domesticOffset_ + numMaster;
241  MPI_Send(&tmp, // buff
242  1, // count
243  MPI_INT, // data type
244  myRank_ + 1, // peer rank
245  0, // tag
246  MPI_COMM_WORLD); // communicator
247  }
248 
249  typename PeerSet::const_iterator peerIt;
250  typename PeerSet::const_iterator peerEndIt = peerSet_().end();
251  // receive the border indices from the lower ranks
252  peerIt = peerSet_().begin();
253  for (; peerIt != peerEndIt; ++peerIt) {
254  if (*peerIt < myRank_)
255  receiveBorderFrom_(*peerIt);
256  }
257 
258  // send the border indices to the higher ranks
259  peerIt = peerSet_().begin();
260  for (; peerIt != peerEndIt; ++peerIt) {
261  if (*peerIt > myRank_)
262  sendBorderTo_(*peerIt);
263  }
264 
265  // receive the border indices from the higher ranks
266  peerIt = peerSet_().begin();
267  for (; peerIt != peerEndIt; ++peerIt) {
268  if (*peerIt > myRank_)
269  receiveBorderFrom_(*peerIt);
270  }
271 
272  // send the border indices to the lower ranks
273  peerIt = peerSet_().begin();
274  for (; peerIt != peerEndIt; ++peerIt) {
275  if (*peerIt < myRank_)
276  sendBorderTo_(*peerIt);
277  }
278 #endif // HAVE_MPI
279  }
280 
281  void sendBorderTo_(ProcessRank peerRank)
282  {
283 #if HAVE_MPI
284  // send (local index on myRank, global index) pairs to the
285  // peers
286  BorderList::const_iterator borderIt = borderList_().begin();
287  BorderList::const_iterator borderEndIt = borderList_().end();
288  for (; borderIt != borderEndIt; ++borderIt) {
289  ProcessRank borderPeer = borderIt->peerRank;
290  int borderDistance = borderIt->borderDistance;
291  if (borderPeer != peerRank || borderDistance != 0)
292  continue;
293 
294  int localIdx = foreignOverlap_.nativeToLocal(borderIt->localIdx);
295  Index peerIdx = borderIt->peerIdx;
296  assert(localIdx >= 0);
297  if (foreignOverlap_.iAmMasterOf(localIdx)) {
298  sendBorderIndex(borderPeer, localIdx, peerIdx);
299  }
300  }
301 #endif // HAVE_MPI
302  }
303 
305  {
306 #if HAVE_MPI
307  // retrieve the global indices for which we are not master
308  // from the processes with lower rank
309  BorderList::const_iterator borderIt = borderList_().begin();
310  BorderList::const_iterator borderEndIt = borderList_().end();
311  for (; borderIt != borderEndIt; ++borderIt) {
312  ProcessRank borderPeer = borderIt->peerRank;
313  int borderDistance = borderIt->borderDistance;
314  if (borderPeer != peerRank || borderDistance != 0)
315  continue;
316 
317  int nativeIdx = borderIt->localIdx;
318  int localIdx = foreignOverlap_.nativeToLocal(nativeIdx);
319  if (localIdx >= 0 && foreignOverlap_.masterRank(localIdx) == borderPeer)
320  receiveBorderIndex(borderPeer);
321  }
322 #endif // HAVE_MPI
323  }
324 
325  const PeerSet &peerSet_() const
326  { return foreignOverlap_.peerSet(); }
327 
328  const BorderList &borderList_() const
329  { return foreignOverlap_.borderList(); }
330 
332  size_t mpiSize_;
333 
335  size_t numDomestic_;
336  const ForeignOverlap &foreignOverlap_;
337 
338  GlobalToDomesticMap globalToDomestic_;
339  DomesticToGlobalMap domesticToGlobal_;
340 };
341 
342 } // namespace Linear
343 } // namespace Ewoms
344 
345 #endif
GlobalToDomesticMap globalToDomestic_
Definition: globalindices.hh:338
A set of process ranks.
Definition: overlaptypes.hh:147
void receiveBorderFrom_(ProcessRank peerRank)
Definition: globalindices.hh:304
void addIndex(int domesticIdx, int globalIdx)
Add an index to the domestic<->global mapping.
Definition: globalindices.hh:126
const ForeignOverlap & foreignOverlap_
Definition: globalindices.hh:336
int domesticToGlobal(int domesticIdx) const
Converts a domestic index to a global one.
Definition: globalindices.hh:87
This structure stores a local index on a peer process and a global index.
Definition: overlaptypes.hh:68
This file provides several data structures for storing tuples of indices of remote and/or local proc...
Index peerIdx
Definition: overlaptypes.hh:70
const BorderList & borderList_() const
Definition: globalindices.hh:328
Index globalIdx
Definition: overlaptypes.hh:71
int domesticOffset_
Definition: globalindices.hh:334
DomesticToGlobalMap domesticToGlobal_
Definition: globalindices.hh:339
void sendBorderIndex(int peerRank, int domesticIdx, int peerLocalIdx)
Send a border index to a remote process.
Definition: globalindices.hh:138
void print() const
Prints the global indices of all domestic indices for debugging purposes.
Definition: globalindices.hh:187
void receiveBorderIndex(int peerRank)
Receive an index on the border from a remote process and add it the translation maps.
Definition: globalindices.hh:157
std::list< BorderIndex > BorderList
This class manages a list of indices which are on the border of a process' partition of the grid...
Definition: overlaptypes.hh:119
size_t numDomestic_
Definition: globalindices.hh:335
Definition: baseauxiliarymodule.hh:35
const PeerSet & peerSet_() const
Definition: globalindices.hh:325
int numLocal() const
Returns the number of indices which are in the interior or on the border of the current rank...
Definition: globalindices.hh:111
void sendBorderTo_(ProcessRank peerRank)
Definition: globalindices.hh:281
This class maps domestic row indices to and from "global" indices which is used to construct an algeb...
Definition: globalindices.hh:53
int numDomestic() const
Returns the number of domestic indices.
Definition: globalindices.hh:120
ProcessRank myRank_
Definition: globalindices.hh:331
void buildGlobalIndices_()
Definition: globalindices.hh:201
int Index
The type of an index of a degree of freedom.
Definition: overlaptypes.hh:43
size_t mpiSize_
Definition: globalindices.hh:332
bool hasGlobalIndex(int globalIdx) const
Return true iff a given global index already exists.
Definition: globalindices.hh:180
int globalToDomestic(int globalIdx) const
Converts a global index to a domestic one.
Definition: globalindices.hh:97
GlobalIndices(const ForeignOverlap &foreignOverlap)
Definition: globalindices.hh:62
unsigned ProcessRank
The type of the rank of a process.
Definition: overlaptypes.hh:48