overlappingbcrsmatrix.hh
// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  Copyright (C) 2011-2013 by Andreas Lauser

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef EWOMS_OVERLAPPING_BCRS_MATRIX_HH
#define EWOMS_OVERLAPPING_BCRS_MATRIX_HH

// local headers providing DomesticOverlapFromBCRSMatrix, GlobalIndices,
// BlackList and MpiBuffer; the original include block was lost in extraction,
// so the exact paths below are an editorial assumption
#include "domesticoverlapfrombcrsmatrix.hh"
#include "globalindices.hh"
#include "blacklist.hh"

#include <ewoms/parallel/mpibuffer.hh>

#include <opm/material/common/Valgrind.hpp>

#include <dune/istl/scalarproducts.hh>
#include <dune/istl/io.hh>

#include <algorithm>
#include <set>
#include <map>
#include <iostream>
#include <vector>
#include <memory>

namespace Ewoms {
namespace Linear {

/*!
 * \brief An overlap aware block-compressed row storage (BCRS) matrix.
 */
template <class BCRSMatrix>
class OverlappingBCRSMatrix : public BCRSMatrix
{
    typedef BCRSMatrix ParentType;

public:
    typedef Ewoms::Linear::DomesticOverlapFromBCRSMatrix<BCRSMatrix> Overlap;

private:
    typedef std::vector<std::set<Index> > Entries;

public:
    typedef typename ParentType::ColIterator ColIterator;
    typedef typename ParentType::ConstColIterator ConstColIterator;
    typedef typename ParentType::block_type block_type;

    // no real copying done at the moment
    OverlappingBCRSMatrix(const OverlappingBCRSMatrix& other)
        : ParentType(other)
    {}

    OverlappingBCRSMatrix(const BCRSMatrix& nativeMatrix,
                          const BorderList& borderList,
                          const BlackList& blackList,
                          int overlapSize)
    {
        overlap_ = std::make_shared<Overlap>(nativeMatrix, borderList,
                                             blackList, overlapSize);
        myRank_ = 0;
#if HAVE_MPI
        MPI_Comm_rank(MPI_COMM_WORLD, &myRank_);
#endif // HAVE_MPI

        // build the overlapping matrix from the non-overlapping
        // matrix and the overlap
        build_(nativeMatrix);
    }
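
    // Usage sketch (editor's addition, not part of the original file): how a
    // parallel linear solver typically sets up the overlapping matrix. The
    // names `nativeMatrix', `borderList' and `blackList' are assumed to come
    // from the simulator's grid and overlap setup code:
    //
    //     typedef Ewoms::Linear::OverlappingBCRSMatrix<Matrix> OverlapMatrix;
    //     OverlapMatrix overlapMatrix(nativeMatrix, borderList, blackList,
    //                                 /*overlapSize=*/1);
    //     overlapMatrix.assignAdd(nativeMatrix); // re-sync after each Jacobian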

    ~OverlappingBCRSMatrix()
    {
        if (overlap_.use_count() == 0)
            return;

        // delete all MPI buffers
        const PeerSet& peerSet = overlap_->peerSet();
        typename PeerSet::const_iterator peerIt = peerSet.begin();
        typename PeerSet::const_iterator peerEndIt = peerSet.end();
        for (; peerIt != peerEndIt; ++peerIt) {
            int peerRank = *peerIt;

            delete rowSizesRecvBuff_[peerRank];
            delete rowIndicesRecvBuff_[peerRank];
            delete entryColIndicesRecvBuff_[peerRank];
            delete entryValuesRecvBuff_[peerRank];

            delete numRowsSendBuff_[peerRank];
            delete rowSizesSendBuff_[peerRank];
            delete rowIndicesSendBuff_[peerRank];
            delete entryColIndicesSendBuff_[peerRank];
            delete entryValuesSendBuff_[peerRank];
        }
    }

    /*!
     * \brief Returns the domestic overlap for the process.
     */
    const Overlap& overlap() const
    { return *overlap_; }

    /*!
     * \brief Assign and synchronize the overlapping matrix from a
     *        non-overlapping one, adding up the contributions of
     *        overlapping rows.
     */
    void assignAdd(const BCRSMatrix& nativeMatrix)
    {
        // copy the native entries
        assignFromNative_(nativeMatrix);

        // communicate and add the contents of overlapping rows
        syncAdd_();
    }

    /*!
     * \brief Assign and synchronize the overlapping matrix from a
     *        non-overlapping one, copying the contents of overlapping
     *        rows from each row's master process.
     */
    void assignCopy(const BCRSMatrix& nativeMatrix)
    {
        // copy the native entries
        assignFromNative_(nativeMatrix);

        // communicate and copy the contents of overlapping rows
        syncCopy_();
    }
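
    // Note (editor's sketch): assignAdd() is for the case where each process'
    // native matrix only holds a partial contribution to shared rows, so the
    // contributions must be summed, while assignCopy() overwrites overlapping
    // rows with the values held by the row's master process. E.g., with two
    // ranks sharing a border row r:
    //
    //     rank 0: native[r][c] = 1.5      rank 1: native[r][c] = 2.5
    //     assignAdd()  -> (*this)[r][c] == 4.0 on both ranks
    //     assignCopy() -> (*this)[r][c] == the master rank's value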

    /*!
     * \brief Set the identity matrix on the main diagonal of front indices.
     */
    void resetFront()
    {
        // create an identity matrix block
        block_type idMatrix(0.0);
        for (unsigned i = 0; i < idMatrix.size(); ++i)
            idMatrix[i][i] = 1.0;

        int numLocal = overlap_->numLocal();
        int numDomestic = overlap_->numDomestic();
        for (int domRowIdx = numLocal; domRowIdx < numDomestic; ++domRowIdx) {
            if (overlap_->isFront(domRowIdx)) {
                // set the front rows to a diagonal 1
                (*this)[domRowIdx] = 0.0;
                (*this)[domRowIdx][domRowIdx] = idMatrix;
            }
        }
    }
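
    // Illustration (editor's addition): for 2x2 blocks a front row r ends up
    // with a single identity block on the diagonal:
    //
    //     before: (*this)[r] = [ B0 B1 B2 ... ]   (arbitrary blocks)
    //     after:  (*this)[r][c] = 0 for all c != r,
    //             (*this)[r][r] = [ 1 0 ]
    //                             [ 0 1 ]
    //
    // i.e. the equations of front DOFs degenerate to "x_r = b_r".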

    void print() const
    {
        overlap_->print();

        for (unsigned i = 0; i < this->N(); ++i) {
            if (overlap_->isLocal(i))
                std::cout << " ";
            else
                std::cout << "*";
            std::cout << "row " << i << " ";

            typedef typename BCRSMatrix::ConstColIterator ColIt;
            ColIt colIt = (*this)[i].begin();
            ColIt colEndIt = (*this)[i].end();
            for (unsigned j = 0; j < this->M(); ++j) {
                if (colIt != colEndIt && j == colIt.index()) {
                    ++colIt;
                    if (overlap_->isBorder(j))
                        std::cout << "|";
                    else if (overlap_->isLocal(j))
                        std::cout << "X";
                    else
                        std::cout << "*";
                }
                else
                    std::cout << " ";
            }
            std::cout << "\n" << std::flush;
        }
        Dune::printSparseMatrix(std::cout,
                                *static_cast<const BCRSMatrix*>(this),
                                "M",
                                "row");
    }

private:
    void assignFromNative_(const BCRSMatrix& nativeMatrix)
    {
        // first, set everything to 0
        BCRSMatrix::operator=(0.0);

        // then copy the domestic entries of the native matrix to the
        // overlapping matrix
        for (unsigned nativeRowIdx = 0; nativeRowIdx < nativeMatrix.N(); ++nativeRowIdx) {
            int domesticRowIdx = overlap_->nativeToDomestic(nativeRowIdx);
            if (domesticRowIdx < 0)
                continue; // row corresponds to a black-listed entry

            ConstColIterator nativeColIt = nativeMatrix[nativeRowIdx].begin();
            const ConstColIterator& nativeColEndIt = nativeMatrix[nativeRowIdx].end();
            for (; nativeColIt != nativeColEndIt; ++nativeColIt) {
                int domesticColIdx = overlap_->nativeToDomestic(nativeColIt.index());

                // make sure to include all off-diagonal entries, even those
                // which belong to DOFs which are managed by a peer process.
                // For this, we have to re-map the column index of the
                // black-listed index to a native one.
                if (domesticColIdx < 0)
                    domesticColIdx = overlap_->blackList().nativeToDomestic(nativeColIt.index());

                if (domesticColIdx < 0)
                    // there is no domestic index which corresponds to a
                    // black-listed one. This can happen if the grid overlap
                    // is larger than the algebraic one...
                    continue;

                (*this)[domesticRowIdx][domesticColIdx] = *nativeColIt;
            }
        }
    }

    void build_(const BCRSMatrix& nativeMatrix)
    {
        int numDomestic = overlap_->numDomestic();

        // allocate the rows
        this->setSize(numDomestic, numDomestic);
        this->setBuildMode(ParentType::random);

        // communicate the entries
        buildIndices_(nativeMatrix);
    }
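
    // For reference (editor's note): buildIndices_() below follows Dune's
    // two-pass protocol for BCRS matrices in `random' build mode; the
    // occupation pattern must be committed before any value is written:
    //
    //     this->setrowsize(rowIdx, numCols);  // pass 1: per-row sizes
    //     this->endrowsizes();
    //     this->addindex(rowIdx, colIdx);     // pass 2: column indices
    //     this->endindices();                 // pattern is now fixed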

    int numDomesticEntriesInRowFor_(const BCRSMatrix& nativeMatrix, int peerRank, int rowIdx)
    {
        int numEntries = 0;

        typedef typename BCRSMatrix::ConstColIterator ColIt;
        ColIt colIt = nativeMatrix[rowIdx].begin();
        ColIt colEndIt = nativeMatrix[rowIdx].end();
        for (; colIt != colEndIt; ++colIt) {
            if (overlap_->isDomesticIndexFor(peerRank, colIt.index()))
                ++numEntries;
        }

        return numEntries;
    }

    void buildIndices_(const BCRSMatrix& nativeMatrix)
    {
        /////////
        // first, add all local matrix entries
        /////////
        entries_.resize(overlap_->numDomestic());
        for (unsigned nativeRowIdx = 0; nativeRowIdx < nativeMatrix.N(); ++nativeRowIdx) {
            int domesticRowIdx = overlap_->nativeToDomestic(nativeRowIdx);
            if (domesticRowIdx < 0)
                continue;

            ConstColIterator nativeColIt = nativeMatrix[nativeRowIdx].begin();
            ConstColIterator nativeColEndIt = nativeMatrix[nativeRowIdx].end();
            for (; nativeColIt != nativeColEndIt; ++nativeColIt) {
                int domesticColIdx = overlap_->nativeToDomestic(nativeColIt.index());

                // make sure to include all off-diagonal entries, even those
                // which belong to DOFs which are managed by a peer process.
                // For this, we have to re-map the column index of the
                // black-listed index to a native one.
                if (domesticColIdx < 0)
                    domesticColIdx = overlap_->blackList().nativeToDomestic(nativeColIt.index());

                if (domesticColIdx < 0)
                    continue;

                entries_[domesticRowIdx].insert(domesticColIdx);
            }
        }

        /////////
        // add the indices for all additional entries
        /////////

        // first, send all our indices to all peers
        const PeerSet& peerSet = overlap_->peerSet();
        typename PeerSet::const_iterator peerIt = peerSet.begin();
        typename PeerSet::const_iterator peerEndIt = peerSet.end();
        for (; peerIt != peerEndIt; ++peerIt) {
            int peerRank = *peerIt;
            sendIndices_(nativeMatrix, peerRank);
        }

        // then receive all indices from the peers
        peerIt = peerSet.begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            int peerRank = *peerIt;
            receiveIndices_(peerRank);
        }

        // wait until all send operations are completed
        peerIt = peerSet.begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            int peerRank = *peerIt;

            numRowsSendBuff_[peerRank]->wait();
            rowSizesSendBuff_[peerRank]->wait();
            rowIndicesSendBuff_[peerRank]->wait();
            entryColIndicesSendBuff_[peerRank]->wait();

            // convert the global indices in the send buffers to domestic ones
            globalToDomesticBuff_(*rowIndicesSendBuff_[peerRank]);
            globalToDomesticBuff_(*entryColIndicesSendBuff_[peerRank]);
        }

        /////////
        // actually initialize the BCRS matrix structure
        /////////

        // set the row sizes
        int numDomestic = overlap_->numDomestic();
        for (int rowIdx = 0; rowIdx < numDomestic; ++rowIdx) {
            int numCols = 0;
            const auto& colIndices = entries_[rowIdx];
            auto colIdxIt = colIndices.begin();
            const auto& colIdxEndIt = colIndices.end();
            for (; colIdxIt != colIdxEndIt; ++colIdxIt) {
                if (*colIdxIt < 0)
                    // the matrix for the local process does not know about this DOF
                    continue;

                ++numCols;
            }

            this->setrowsize(rowIdx, numCols);
        }
        this->endrowsizes();

        // set the indices
        for (int rowIdx = 0; rowIdx < numDomestic; ++rowIdx) {
            const auto& colIndices = entries_[rowIdx];

            auto colIdxIt = colIndices.begin();
            const auto& colIdxEndIt = colIndices.end();
            for (; colIdxIt != colIdxEndIt; ++colIdxIt) {
                if (*colIdxIt < 0)
                    // the matrix for the local process does not know about this DOF
                    continue;

                this->addindex(rowIdx, *colIdxIt);
            }
        }
        this->endindices();

        // free the memory occupied by the array of the matrix entries
        entries_.resize(0);
    }

    // send the overlap indices to a peer
    void sendIndices_(const BCRSMatrix& nativeMatrix, int peerRank)
    {
#if HAVE_MPI
        // send the size of the foreign overlap to the peer
        int numOverlapRows = overlap_->foreignOverlapSize(peerRank);
        numRowsSendBuff_[peerRank] = new MpiBuffer<Index>(1);
        (*numRowsSendBuff_[peerRank])[0] = numOverlapRows;
        numRowsSendBuff_[peerRank]->send(peerRank);

        // allocate the buffers which hold the global indices of each row and
        // the number of entries which need to be communicated for the
        // respective row
        rowIndicesSendBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
        rowSizesSendBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);

        // compute the sets of the indices of the entries which need to be
        // sent to the peer
        typedef std::set<int> ColumnIndexSet;
        typedef std::map<int, ColumnIndexSet> EntryTuples;

        EntryTuples entryIndices;
        int numEntries = 0; // <- total number of matrix entries to be sent to the peer
        for (int overlapOffset = 0; overlapOffset < numOverlapRows; ++overlapOffset) {
            int domesticRowIdx = overlap_->foreignOverlapOffsetToDomesticIdx(peerRank, overlapOffset);
            int nativeRowIdx = overlap_->domesticToNative(domesticRowIdx);
            int globalRowIdx = overlap_->domesticToGlobal(domesticRowIdx);

            ColumnIndexSet& colIndices = entryIndices[globalRowIdx];

            typedef typename BCRSMatrix::ConstColIterator ColIt;
            ColIt nativeColIt = nativeMatrix[nativeRowIdx].begin();
            ColIt nativeColEndIt = nativeMatrix[nativeRowIdx].end();
            for (; nativeColIt != nativeColEndIt; ++nativeColIt) {
                int nativeColIdx = nativeColIt.index();
                int domesticColIdx = overlap_->nativeToDomestic(nativeColIdx);

                if (domesticColIdx < 0)
                    // the native column index may be blacklisted, use the
                    // corresponding index in the domestic overlap.
                    domesticColIdx = overlap_->blackList().nativeToDomestic(nativeColIdx);

                if (domesticColIdx < 0)
                    // the column may still not be known locally, i.e. the
                    // corresponding DOF of the row is at the process's front.
                    // we don't need this entry.
                    continue;

                int globalColIdx = overlap_->domesticToGlobal(domesticColIdx);
                colIndices.insert(globalColIdx);
                ++numEntries;
            }
        }

        // fill the send buffers
        entryColIndicesSendBuff_[peerRank] = new MpiBuffer<Index>(numEntries);
        int overlapEntryIdx = 0;
        for (int overlapOffset = 0; overlapOffset < numOverlapRows; ++overlapOffset) {
            int domesticRowIdx = overlap_->foreignOverlapOffsetToDomesticIdx(peerRank, overlapOffset);
            int globalRowIdx = overlap_->domesticToGlobal(domesticRowIdx);

            (*rowIndicesSendBuff_[peerRank])[overlapOffset] = globalRowIdx;

            const ColumnIndexSet& colIndexSet = entryIndices[globalRowIdx];
            auto* rssb = rowSizesSendBuff_[peerRank];
            (*rssb)[overlapOffset] = colIndexSet.size();
            for (auto it = colIndexSet.begin(); it != colIndexSet.end(); ++it) {
                int globalColIdx = *it;

                (*entryColIndicesSendBuff_[peerRank])[overlapEntryIdx] = globalColIdx;
                ++overlapEntryIdx;
            }
        }

        // actually communicate with the peer
        rowSizesSendBuff_[peerRank]->send(peerRank);
        rowIndicesSendBuff_[peerRank]->send(peerRank);
        entryColIndicesSendBuff_[peerRank]->send(peerRank);

        // create the send buffers for the values of the matrix entries
        entryValuesSendBuff_[peerRank] = new MpiBuffer<block_type>(numEntries);
#endif // HAVE_MPI
    }
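
    // Wire format of sendIndices_()/receiveIndices_(), summarized by the
    // editor from the code above (one message per buffer and peer):
    //
    //     numRowsSendBuff_:         [ numOverlapRows ]
    //     rowIndicesSendBuff_:      [ global row index     ] * numOverlapRows
    //     rowSizesSendBuff_:        [ #entries of that row ] * numOverlapRows
    //     entryColIndicesSendBuff_: [ global column index  ] * numEntries
    //     entryValuesSendBuff_:     allocated here, filled and sent later by
    //                               sendEntries_()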

    // receive the overlap indices from a peer
    void receiveIndices_(int peerRank)
    {
#if HAVE_MPI
        // receive the size of the foreign overlap from the peer
        Index numOverlapRows;
        auto& numRowsRecvBuff = numRowsRecvBuff_[peerRank];
        numRowsRecvBuff.resize(1);
        numRowsRecvBuff.receive(peerRank);
        numOverlapRows = numRowsRecvBuff[0];

        // create the receive buffers for the row sizes and the row indices
        // and receive them from the peer
        rowSizesRecvBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
        rowIndicesRecvBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
        rowSizesRecvBuff_[peerRank]->receive(peerRank);
        rowIndicesRecvBuff_[peerRank]->receive(peerRank);

        // calculate the total number of indices which are sent by the peer
        int totalIndices = 0;
        for (Index i = 0; i < numOverlapRows; ++i) {
            totalIndices += (*rowSizesRecvBuff_[peerRank])[i];
        }

        // create the buffers to store the column indices and values of the
        // matrix entries
        entryColIndicesRecvBuff_[peerRank] = new MpiBuffer<Index>(totalIndices);
        entryValuesRecvBuff_[peerRank] = new MpiBuffer<block_type>(totalIndices);

        // communicate with the peer
        entryColIndicesRecvBuff_[peerRank]->receive(peerRank);

        // convert the global indices in the receive buffers to domestic ones
        globalToDomesticBuff_(*rowIndicesRecvBuff_[peerRank]);
        globalToDomesticBuff_(*entryColIndicesRecvBuff_[peerRank]);

        // add the entries to the global entry map
        int k = 0;
        for (Index i = 0; i < numOverlapRows; ++i) {
            int domRowIdx = (*rowIndicesRecvBuff_[peerRank])[i];
            for (Index j = 0; j < (*rowSizesRecvBuff_[peerRank])[i]; ++j) {
                int domColIdx = (*entryColIndicesRecvBuff_[peerRank])[k];
                entries_[domRowIdx].insert(domColIdx);
                ++k;
            }
        }
#endif // HAVE_MPI
    }

    // communicates and adds up the contents of overlapping rows
    void syncAdd_()
    {
        // first, send all entries to the peers
        const PeerSet& peerSet = overlap_->peerSet();
        typename PeerSet::const_iterator peerIt = peerSet.begin();
        typename PeerSet::const_iterator peerEndIt = peerSet.end();
        for (; peerIt != peerEndIt; ++peerIt) {
            int peerRank = *peerIt;

            sendEntries_(peerRank);
        }

        // then, receive entries from the peers
        peerIt = peerSet.begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            int peerRank = *peerIt;

            receiveAddEntries_(peerRank);
        }

        // finally, make sure that everything which we sent was received by
        // the peers
        peerIt = peerSet.begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            int peerRank = *peerIt;
            entryValuesSendBuff_[peerRank]->wait();
        }
    }

    // communicates and copies the contents of overlapping rows from the
    // master process
    void syncCopy_()
    {
        // first, send all entries to the peers
        const PeerSet& peerSet = overlap_->peerSet();
        typename PeerSet::const_iterator peerIt = peerSet.begin();
        typename PeerSet::const_iterator peerEndIt = peerSet.end();
        for (; peerIt != peerEndIt; ++peerIt) {
            int peerRank = *peerIt;

            sendEntries_(peerRank);
        }

        // then, receive entries from the peers
        peerIt = peerSet.begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            int peerRank = *peerIt;

            receiveCopyEntries_(peerRank);
        }

        // finally, make sure that everything which we sent was received by
        // the peers
        peerIt = peerSet.begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            int peerRank = *peerIt;
            entryValuesSendBuff_[peerRank]->wait();
        }
    }
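
    // Communication pattern note (editor's addition): both syncAdd_() and
    // syncCopy_() first post the sends to *all* peers, then process the
    // receives, and only wait for send completion at the very end. Assuming
    // MpiBuffer uses non-blocking sends, this avoids the deadlock that a
    // naive "send to peer, then receive from peer" loop could cause when two
    // ranks address each other in a different order.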

    void sendEntries_(int peerRank)
    {
#if HAVE_MPI
        auto& mpiSendBuff = *entryValuesSendBuff_[peerRank];

        auto& mpiRowIndicesSendBuff = *rowIndicesSendBuff_[peerRank];
        auto& mpiRowSizesSendBuff = *rowSizesSendBuff_[peerRank];
        auto& mpiColIndicesSendBuff = *entryColIndicesSendBuff_[peerRank];

        // fill the send buffer
        unsigned k = 0;
        for (unsigned i = 0; i < mpiRowIndicesSendBuff.size(); ++i) {
            Index domRowIdx = mpiRowIndicesSendBuff[i];

            for (Index j = 0; j < static_cast<Index>(mpiRowSizesSendBuff[i]); ++j) {
                // move to the next column which is in the overlap
                Index domColIdx = mpiColIndicesSendBuff[k];

                // add the values of this column to the send buffer
                mpiSendBuff[k] = (*this)[domRowIdx][domColIdx];
                ++k;
            }
        }

        mpiSendBuff.send(peerRank);
#endif // HAVE_MPI
    }

    void receiveAddEntries_(int peerRank)
    {
#if HAVE_MPI
        auto& mpiRecvBuff = *entryValuesRecvBuff_[peerRank];

        auto& mpiRowIndicesRecvBuff = *rowIndicesRecvBuff_[peerRank];
        auto& mpiRowSizesRecvBuff = *rowSizesRecvBuff_[peerRank];
        auto& mpiColIndicesRecvBuff = *entryColIndicesRecvBuff_[peerRank];

        mpiRecvBuff.receive(peerRank);

        // retrieve the values from the receive buffer
        int k = 0;
        for (Index i = 0; i < static_cast<Index>(mpiRowIndicesRecvBuff.size()); ++i) {
            Index domRowIdx = mpiRowIndicesRecvBuff[i];
            for (Index j = 0; j < static_cast<Index>(mpiRowSizesRecvBuff[i]); ++j, ++k) {
                Index domColIdx = mpiColIndicesRecvBuff[k];

                if (domColIdx < 0)
                    // the matrix for the current process does not know about this DOF
                    continue;

                (*this)[domRowIdx][domColIdx] += mpiRecvBuff[k];
            }
        }
#endif // HAVE_MPI
    }

    void receiveCopyEntries_(int peerRank)
    {
#if HAVE_MPI
        auto& mpiRecvBuff = *entryValuesRecvBuff_[peerRank];

        auto& mpiRowIndicesRecvBuff = *rowIndicesRecvBuff_[peerRank];
        auto& mpiRowSizesRecvBuff = *rowSizesRecvBuff_[peerRank];
        auto& mpiColIndicesRecvBuff = *entryColIndicesRecvBuff_[peerRank];

        mpiRecvBuff.receive(peerRank);

        // retrieve the values from the receive buffer
        int k = 0;
        for (Index i = 0; i < static_cast<Index>(mpiRowIndicesRecvBuff.size()); ++i) {
            Index domRowIdx = mpiRowIndicesRecvBuff[i];
            for (Index j = 0; j < static_cast<Index>(mpiRowSizesRecvBuff[i]); ++j, ++k) {
                Index domColIdx = mpiColIndicesRecvBuff[k];

                if (domColIdx < 0)
                    // the matrix for the current process does not know about this DOF
                    continue;

                (*this)[domRowIdx][domColIdx] = mpiRecvBuff[k];
            }
        }
#endif // HAVE_MPI
    }

    void globalToDomesticBuff_(MpiBuffer<Index>& idxBuff)
    {
        for (unsigned i = 0; i < idxBuff.size(); ++i)
            idxBuff[i] = overlap_->globalToDomestic(idxBuff[i]);
    }

    int myRank_;
    Entries entries_;
    std::shared_ptr<Overlap> overlap_;

    std::map<ProcessRank, MpiBuffer<Index>*> numRowsSendBuff_;
    std::map<ProcessRank, MpiBuffer<Index>*> rowSizesSendBuff_;
    std::map<ProcessRank, MpiBuffer<Index>*> rowIndicesSendBuff_;
    std::map<ProcessRank, MpiBuffer<Index>*> entryColIndicesSendBuff_;
    std::map<ProcessRank, MpiBuffer<block_type>*> entryValuesSendBuff_;

    std::map<ProcessRank, MpiBuffer<Index> > numRowsRecvBuff_;
    std::map<ProcessRank, MpiBuffer<Index>*> rowSizesRecvBuff_;
    std::map<ProcessRank, MpiBuffer<Index>*> rowIndicesRecvBuff_;
    std::map<ProcessRank, MpiBuffer<Index>*> entryColIndicesRecvBuff_;
    std::map<ProcessRank, MpiBuffer<block_type>*> entryValuesRecvBuff_;
};

} // namespace Linear
} // namespace Ewoms

#endif // EWOMS_OVERLAPPING_BCRS_MATRIX_HH