overlappingblockvector.hh
Go to the documentation of this file.
1// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2// vi: set et ts=4 sw=4 sts=4:
3/*
4 This file is part of the Open Porous Media project (OPM).
5
6 OPM is free software: you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation, either version 2 of the License, or
9 (at your option) any later version.
10
11 OPM is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with OPM. If not, see <http://www.gnu.org/licenses/>.
18
19 Consult the COPYING file in the top-level source directory of this
20 module for the precise wording of the license and the list of
21 copyright holders.
22*/
27#ifndef EWOMS_OVERLAPPING_BLOCK_VECTOR_HH
28#define EWOMS_OVERLAPPING_BLOCK_VECTOR_HH
29
30#include "overlaptypes.hh"
31
33#include <opm/material/common/Valgrind.hpp>
34
35#include <dune/istl/bvector.hh>
36#include <dune/common/fvector.hh>
37
38#include <memory>
39#include <map>
40#include <iostream>
41
42namespace Opm {
43namespace Linear {
44
48template <class FieldVector, class Overlap>
49class OverlappingBlockVector : public Dune::BlockVector<FieldVector>
50{
51 using ParentType = Dune::BlockVector<FieldVector>;
52 using BlockVector = Dune::BlockVector<FieldVector>;
53
54public:
59 OverlappingBlockVector(const Overlap& overlap)
60 : ParentType(overlap.numDomestic()), overlap_(&overlap)
61 { createBuffers_(); }
62
67 : ParentType(obv)
68 , numIndicesSendBuff_(obv.numIndicesSendBuff_)
69 , indicesSendBuff_(obv.indicesSendBuff_)
70 , indicesRecvBuff_(obv.indicesRecvBuff_)
71 , valuesSendBuff_(obv.valuesSendBuff_)
72 , valuesRecvBuff_(obv.valuesRecvBuff_)
73 , overlap_(obv.overlap_)
74 {}
75
80 {}
81
83
86 using ParentType::operator=;
88
93 {
94 ParentType::operator=(obv);
95 numIndicesSendBuff_ = obv.numIndicesSendBuff_;
96 indicesSendBuff_ = obv.indicesSendBuff_;
97 indicesRecvBuff_ = obv.indicesRecvBuff_;
98 valuesSendBuff_ = obv.valuesSendBuff_;
99 valuesRecvBuff_ = obv.valuesRecvBuff_;
100 overlap_ = obv.overlap_;
101 return *this;
102 }
103
108 template <class BlockVector>
109 void assignAddBorder(const BlockVector& nativeBlockVector)
110 {
111 size_t numDomestic = overlap_->numDomestic();
112
113 // assign the local rows from the non-overlapping block vector
114 for (unsigned domRowIdx = 0; domRowIdx < numDomestic; ++domRowIdx) {
115 Index nativeRowIdx = overlap_->domesticToNative(static_cast<Index>(domRowIdx));
116 if (nativeRowIdx < 0)
117 (*this)[domRowIdx] = 0.0;
118 else
119 (*this)[domRowIdx] = nativeBlockVector[nativeRowIdx];
120 }
121
122 // add up the contents of the overlapping rows. Since non-native rows are set to
123 // zero above, the addition is only different from an assignment if the row is
124 // shared amongst multiple ranks.
125 syncAdd();
126 }
127
132 template <class NativeBlockVector>
133 void assign(const NativeBlockVector& nativeBlockVector)
134 {
135 Index numDomestic = overlap_->numDomestic();
136
137 // assign the local rows from the non-overlapping block vector
138 for (Index domRowIdx = 0; domRowIdx < numDomestic; ++domRowIdx) {
139 Index nativeRowIdx = overlap_->domesticToNative(domRowIdx);
140 if (nativeRowIdx < 0)
141 (*this)[static_cast<unsigned>(domRowIdx)] = 0.0;
142 else
143 (*this)[static_cast<unsigned>(domRowIdx)] = nativeBlockVector[static_cast<unsigned>(nativeRowIdx)];
144 }
145
146 // add up the contents of border rows, for the remaining rows,
147 // get the values from their respective master process.
148 sync();
149 }
150
155 template <class NativeBlockVector>
156 void assignTo(NativeBlockVector& nativeBlockVector) const
157 {
158 // assign the local rows
159 size_t numNative = overlap_->numNative();
160 nativeBlockVector.resize(numNative);
161 for (unsigned nativeRowIdx = 0; nativeRowIdx < numNative; ++nativeRowIdx) {
162 Index domRowIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeRowIdx));
163
164 if (domRowIdx < 0)
165 nativeBlockVector[nativeRowIdx] = 0.0;
166 else
167 nativeBlockVector[nativeRowIdx] = (*this)[static_cast<unsigned>(domRowIdx)];
168 }
169 }
170
175 void sync()
176 {
177 // send all entries to all peers
178 for (const auto peerRank: overlap_->peerSet())
179 sendEntries_(peerRank);
180
181 // recieve all entries to the peers
182 for (const auto peerRank: overlap_->peerSet())
183 receiveFromMaster_(peerRank);
184
185 // wait until we have send everything
186 waitSendFinished_();
187 }
188
193 void syncAdd()
194 {
195 // send all entries to all peers
196 for (const auto peerRank: overlap_->peerSet())
197 sendEntries_(peerRank);
198
199 // recieve all entries to the peers
200 for (const auto peerRank: overlap_->peerSet())
201 receiveAdd_(peerRank);
202
203 // wait until we have send everything
204 waitSendFinished_();
205 }
206
207 void print() const
208 {
209 for (unsigned i = 0; i < this->size(); ++i) {
210 std::cout << "row " << i << (overlap_->isLocal(i) ? " " : "*")
211 << ": " << (*this)[i] << "\n" << std::flush;
212 }
213 }
214
215private:
216 void createBuffers_()
217 {
218#if HAVE_MPI
219 // create array for the front indices
220 typename PeerSet::const_iterator peerIt;
221 typename PeerSet::const_iterator peerEndIt = overlap_->peerSet().end();
222
223 // send all indices to the peers
224 peerIt = overlap_->peerSet().begin();
225 for (; peerIt != peerEndIt; ++peerIt) {
226 ProcessRank peerRank = *peerIt;
227
228 size_t numEntries = overlap_->foreignOverlapSize(peerRank);
229 numIndicesSendBuff_[peerRank] = std::make_shared<MpiBuffer<unsigned> >(1);
230 indicesSendBuff_[peerRank] = std::make_shared<MpiBuffer<Index> >(numEntries);
231 valuesSendBuff_[peerRank] = std::make_shared<MpiBuffer<FieldVector> >(numEntries);
232
233 // fill the indices buffer with global indices
234 MpiBuffer<Index>& indicesSendBuff = *indicesSendBuff_[peerRank];
235 for (unsigned i = 0; i < numEntries; ++i) {
236 Index domRowIdx = overlap_->foreignOverlapOffsetToDomesticIdx(peerRank, i);
237 indicesSendBuff[i] = overlap_->domesticToGlobal(domRowIdx);
238 }
239
240 // first, send the number of indices
241 (*numIndicesSendBuff_[peerRank])[0] = static_cast<unsigned>(numEntries);
242 numIndicesSendBuff_[peerRank]->send(peerRank);
243
244 // then, send the indices themselfs
245 indicesSendBuff.send(peerRank);
246 }
247
248 // receive the indices from the peers
249 peerIt = overlap_->peerSet().begin();
250 for (; peerIt != peerEndIt; ++peerIt) {
251 ProcessRank peerRank = *peerIt;
252
253 // receive size of overlap to peer
254 MpiBuffer<unsigned> numRowsRecvBuff(1);
255 numRowsRecvBuff.receive(peerRank);
256 unsigned numRows = numRowsRecvBuff[0];
257
258 // then, create the MPI buffers
259 indicesRecvBuff_[peerRank] = std::shared_ptr<MpiBuffer<Index> >(
260 new MpiBuffer<Index>(numRows));
261 valuesRecvBuff_[peerRank] = std::shared_ptr<MpiBuffer<FieldVector> >(
262 new MpiBuffer<FieldVector>(numRows));
263 MpiBuffer<Index>& indicesRecvBuff = *indicesRecvBuff_[peerRank];
264
265 // next, receive the actual indices
266 indicesRecvBuff.receive(peerRank);
267
268 // finally, translate the global indices to domestic ones
269 for (unsigned i = 0; i != numRows; ++i) {
270 Index globalRowIdx = indicesRecvBuff[i];
271 Index domRowIdx = overlap_->globalToDomestic(globalRowIdx);
272
273 indicesRecvBuff[i] = domRowIdx;
274 }
275 }
276
277 // wait for all send operations to complete
278 peerIt = overlap_->peerSet().begin();
279 for (; peerIt != peerEndIt; ++peerIt) {
280 ProcessRank peerRank = *peerIt;
281 numIndicesSendBuff_[peerRank]->wait();
282 indicesSendBuff_[peerRank]->wait();
283
284 // convert the global indices of the send buffer to
285 // domestic ones
286 MpiBuffer<Index>& indicesSendBuff = *indicesSendBuff_[peerRank];
287 for (unsigned i = 0; i < indicesSendBuff.size(); ++i) {
288 indicesSendBuff[i] = overlap_->globalToDomestic(indicesSendBuff[i]);
289 }
290 }
291#endif // HAVE_MPI
292 }
293
294 void sendEntries_(ProcessRank peerRank)
295 {
296 // copy the values into the send buffer
297 const MpiBuffer<Index>& indices = *indicesSendBuff_[peerRank];
298 MpiBuffer<FieldVector>& values = *valuesSendBuff_[peerRank];
299 for (unsigned i = 0; i < indices.size(); ++i)
300 values[i] = (*this)[static_cast<unsigned>(indices[i])];
301
302 values.send(peerRank);
303 }
304
305 void waitSendFinished_()
306 {
307 typename PeerSet::const_iterator peerIt;
308 typename PeerSet::const_iterator peerEndIt = overlap_->peerSet().end();
309
310 // send all entries to all peers
311 peerIt = overlap_->peerSet().begin();
312 for (; peerIt != peerEndIt; ++peerIt) {
313 ProcessRank peerRank = *peerIt;
314 valuesSendBuff_[peerRank]->wait();
315 }
316 }
317
318 void receiveFromMaster_(ProcessRank peerRank)
319 {
320 const MpiBuffer<Index>& indices = *indicesRecvBuff_[peerRank];
321 MpiBuffer<FieldVector>& values = *valuesRecvBuff_[peerRank];
322
323 // receive the values from the peer
324 values.receive(peerRank);
325
326 // copy them into the block vector
327 for (unsigned j = 0; j < indices.size(); ++j) {
328 Index domRowIdx = indices[j];
329 if (overlap_->masterRank(domRowIdx) == peerRank) {
330 (*this)[static_cast<unsigned>(domRowIdx)] = values[j];
331 }
332 }
333 }
334
335 void receiveAdd_(ProcessRank peerRank)
336 {
337 const MpiBuffer<Index>& indices = *indicesRecvBuff_[peerRank];
338 MpiBuffer<FieldVector>& values = *valuesRecvBuff_[peerRank];
339
340 // receive the values from the peer
341 values.receive(peerRank);
342
343 // add up the values of rows on the shared boundary
344 for (unsigned j = 0; j < indices.size(); ++j) {
345 Index domRowIdx = indices[j];
346 (*this)[static_cast<unsigned>(domRowIdx)] += values[j];
347 }
348 }
349
350 std::map<ProcessRank, std::shared_ptr<MpiBuffer<unsigned> > > numIndicesSendBuff_;
351 std::map<ProcessRank, std::shared_ptr<MpiBuffer<Index> > > indicesSendBuff_;
352 std::map<ProcessRank, std::shared_ptr<MpiBuffer<Index> > > indicesRecvBuff_;
353 std::map<ProcessRank, std::shared_ptr<MpiBuffer<FieldVector> > > valuesSendBuff_;
354 std::map<ProcessRank, std::shared_ptr<MpiBuffer<FieldVector> > > valuesRecvBuff_;
355
356 const Overlap *overlap_;
357};
358
359} // namespace Linear
360} // namespace Opm
361
362#endif
An overlap aware block vector.
Definition: overlappingblockvector.hh:50
void syncAdd()
Synchronize all values of the block vector by adding up the values of all peer ranks.
Definition: overlappingblockvector.hh:193
OverlappingBlockVector & operator=(const OverlappingBlockVector &obv)
Assignment operator.
Definition: overlappingblockvector.hh:92
OverlappingBlockVector()
Default constructor.
Definition: overlappingblockvector.hh:79
OverlappingBlockVector(const Overlap &overlap)
Given a domestic overlap object, create an overlapping block vector coherent to it.
Definition: overlappingblockvector.hh:59
OverlappingBlockVector(const OverlappingBlockVector &obv)
Copy constructor.
Definition: overlappingblockvector.hh:66
void assign(const NativeBlockVector &nativeBlockVector)
Assign an overlapping block vector from a non-overlapping one, border entries are assigned using thei...
Definition: overlappingblockvector.hh:133
void print() const
Definition: overlappingblockvector.hh:207
void assignTo(NativeBlockVector &nativeBlockVector) const
Assign the local values to a non-overlapping block vector.
Definition: overlappingblockvector.hh:156
void assignAddBorder(const BlockVector &nativeBlockVector)
Assign an overlapping block vector from a non-overlapping one, border entries are added.
Definition: overlappingblockvector.hh:109
void sync()
Synchronize all values of the block vector from their master process.
Definition: overlappingblockvector.hh:175
unsigned ProcessRank
The type of the rank of a process.
Definition: overlaptypes.hh:49
int Index
The type of an index of a degree of freedom.
Definition: overlaptypes.hh:44
Definition: blackoilboundaryratevector.hh:37
This files provides several data structures for storing tuples of indices of remote and/or local proc...