ParallelOverlappingILU0_impl.hpp
Go to the documentation of this file.
1/*
2 Copyright 2015, 2022 Dr. Blatt - HPC-Simulation-Software & Services
3 Copyright 2015 Statoil AS
4
5 This file is part of the Open Porous Media project (OPM).
6
7 OPM is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version.
11
12 OPM is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with OPM. If not, see <http://www.gnu.org/licenses/>.
19*/
20
22
23#include <dune/common/version.hh>
24
25#include <dune/istl/ilu.hh>
26#include <dune/istl/owneroverlapcopy.hh>
27
28#include <opm/common/ErrorMacros.hpp>
29#include <opm/common/TimingMacros.hpp>
30
33
34#include <cassert>
35
36namespace Opm
37{
38namespace detail
39{
40
//! \brief Compute the blocked ILU0 decomposition in-place when ghost rows
//!        are ordered last.
//!
//! Only the first \p interiorSize rows are factorized; the trailing (ghost)
//! rows of A are left untouched, as their contents are irrelevant for the
//! ghost-last preconditioner application.
//!
//! \param A            Matrix to decompose. Overwritten with the factors:
//!                     strictly-lower part holds L, strictly-upper part holds
//!                     U, and each diagonal block stores the INVERTED pivot.
//! \param interiorSize Number of interior (owner) rows at the top of A.
//!
//! \throws Dune::ISTLError if a diagonal entry is missing or a pivot block
//!         cannot be inverted.
template<class M>
void ghost_last_bilu0_decomposition (M& A, std::size_t interiorSize)
{
    OPM_TIMEBLOCK(GhostLastBlockILU0Decomp);
    // iterator types
    assert(interiorSize <= A.N());
    using rowiterator = typename M::RowIterator;
    using coliterator = typename M::ColIterator;
    using block = typename M::block_type;

    // implement left looking variant with stored inverse
    // (only the interior rows are visited)
    for (rowiterator i = A.begin(); i.index() < interiorSize; ++i)
    {
        // coliterator is diagonal after the following loop
        coliterator endij=(*i).end();           // end of row i
        coliterator ij;

        // eliminate entries left of diagonal; store L factor
        for (ij=(*i).begin(); ij.index()<i.index(); ++ij)
        {
            // find A_jj which eliminates A_ij
            coliterator jj = A[ij.index()].find(ij.index());

            // compute L_ij = A_jj^-1 * A_ij
            // (jj already holds the inverted pivot of row j)
            (*ij).rightmultiply(*jj);

            // modify row: subtract L_ij * U_jk from the remainder of row i,
            // walking rows i and j simultaneously over their shared columns
            coliterator endjk=A[ij.index()].end();    // end of row j
            coliterator jk=jj; ++jk;
            coliterator ik=ij; ++ik;
            while (ik!=endij && jk!=endjk)
                if (ik.index()==jk.index())
                {
                    block B(*jk);
                    B.leftmultiply(*ij);
                    *ik -= B;
                    ++ik; ++jk;
                }
                else
                {
                    // advance whichever iterator lags behind
                    if (ik.index()<jk.index())
                        ++ik;
                    else
                        ++jk;
                }
        }

        // invert pivot and store it in A
        if (ij.index()!=i.index())
            DUNE_THROW(Dune::ISTLError,"diagonal entry missing");
        try {
            (*ij).invert();   // compute inverse of diagonal block
        }
        catch (Dune::FMatrixError & e) {
            DUNE_THROW(Dune::ISTLError,"ILU failed to invert matrix block");
        }
    }
}
100
//! \brief Convert an ILU-factorized block matrix into simple CRS storage.
//!
//! Splits A (which must already contain the ILU factors, with inverted
//! diagonal blocks) into three parts for fast triangular solves:
//!   - \p lower: strictly-lower entries (L), rows in natural order;
//!   - \p upper: strictly-upper entries (U), rows AND columns stored in
//!     reverse order (convenient for the backward substitution);
//!   - \p inv:   the (already inverted) diagonal blocks, indexed in the
//!     same reversed order as \p upper.
//!
//! \note CRS is expected to expose rows_/cols_/values_ arrays plus
//!       clear/resize/reserveAdditional/push_back, as used below.
template<class M, class CRS, class InvVector>
void convertToCRS(const M& A, CRS& lower, CRS& upper, InvVector& inv)
{
    OPM_TIMEBLOCK(convertToCRS);
    // No need to do anything for 0 rows. Return to prevent indexing a
    // a zero sized array.
    if ( A.N() == 0 )
    {
        return;
    }

    using size_type = typename M :: size_type;

    lower.clear();
    upper.clear();
    inv.clear();
    lower.resize( A.N() );
    upper.resize( A.N() );
    inv.resize( A.N() );

    // Count the lower and upper matrix entries.
    size_type numLower = 0;
    size_type numUpper = 0;
    const auto endi = A.end();
    for (auto i = A.begin(); i != endi; ++i) {
        const size_type iIndex = i.index();
        size_type numLowerRow = 0;
        for (auto j = (*i).begin(); j.index() < iIndex; ++j) {
            ++numLowerRow;
        }
        numLower += numLowerRow;
        // everything that is neither lower nor the diagonal is upper
        numUpper += (*i).size() - numLowerRow - 1;
    }
    // each row must have a diagonal entry for this bookkeeping to hold
    assert(numLower + numUpper + A.N() == A.nonzeroes());

    lower.reserveAdditional( numLower );

    // copy the strictly-lower triangle row by row
    size_type row = 0;
    size_type colcount = 0;
    lower.rows_[ 0 ] = colcount;
    for (auto i=A.begin(); i!=endi; ++i, ++row)
    {
        const size_type iIndex = i.index();

        // entries left of the diagonal belong to L
        for (auto j=(*i).begin(); j.index() < iIndex; ++j )
        {
            lower.push_back( (*j), j.index() );
            ++colcount;
        }
        lower.rows_[ iIndex+1 ] = colcount;
    }

    assert(colcount == numLower);

    const auto rendi = A.beforeBegin();
    row = 0;
    colcount = 0;
    upper.rows_[ 0 ] = colcount ;

    upper.reserveAdditional( numUpper );

    // NOTE: upper and inv store entries in reverse order, reverse here
    // relative to ILU
    for (auto i=A.beforeEnd(); i!=rendi; --i, ++ row )
    {
        const size_type iIndex = i.index();

        // walk row i backwards: collect strictly-upper entries, then stop
        // at the diagonal, whose (inverted) block goes into inv
        for (auto j=(*i).beforeEnd(); j.index()>=iIndex; --j )
        {
            const size_type jIndex = j.index();
            if( j.index() == iIndex )
            {
                inv[ row ] = (*j);
                break;
            }
            else if ( j.index() >= i.index() )
            {
                upper.push_back( (*j), jIndex );
                ++colcount ;
            }
        }
        upper.rows_[ row+1 ] = colcount;
    }
    assert(colcount == numUpper);
}
191
//! \brief Determine the interior size for a generic communication type.
//!
//! For communications other than the MPI owner/overlap/copy one there is
//! nothing to deduce, so the caller-supplied value is returned verbatim.
//!
//! \param N            Total number of rows (unused here).
//! \param interiorSize The interior size chosen by the caller.
//! \param comm         The communication object (unused here).
//! \return \p interiorSize, unchanged.
template <class PI>
size_t set_interiorSize( [[maybe_unused]] size_t N, size_t interiorSize, [[maybe_unused]] const PI& comm)
{
    // Generic fallback: trust whatever the caller provided.
    return interiorSize;
}
197
#if HAVE_MPI
//! \brief Determine the number of interior (owner) rows for an MPI run.
//!
//! If the caller already supplied an interior size smaller than N it is
//! trusted and returned unchanged. Otherwise the owner region is deduced
//! from the communication's index set: the result is one past the largest
//! local index carrying the owner attribute.
//!
//! \param N            Total number of local rows.
//! \param interiorSize Caller-supplied interior size (N means "unset").
//! \param comm         The parallel communication holding the index set.
template<>
size_t set_interiorSize(size_t N, size_t interiorSize, const Dune::OwnerOverlapCopyCommunication<int,int>& comm)
{
    if (interiorSize < N)
        return interiorSize;

    // Bind by reference: indexSet() returns the communication's index set,
    // and copying it here would be a needless (potentially large) copy.
    const auto& indexSet = comm.indexSet();

    size_t new_is = 0;
    for (const auto& idx : indexSet)
    {
        // NOTE(review): attribute value 1 is assumed to be the "owner"
        // attribute of Dune::OwnerOverlapCopyCommunication — confirm if
        // the attribute set ever changes.
        if (idx.local().attribute() == 1)
        {
            const auto loc = idx.local().local();
            if (loc > new_is) {
                new_is = loc;
            }
        }
    }
    return new_is + 1;
}
#endif
220
221} // end namespace detail
222
223
224template<class Matrix, class Domain, class Range, class ParallelInfoT>
225Dune::SolverCategory::Category
227{
228 return std::is_same_v<ParallelInfoT, Dune::Amg::SequentialInformation> ?
229 Dune::SolverCategory::sequential : Dune::SolverCategory::overlapping;
230}
231
232template<class Matrix, class Domain, class Range, class ParallelInfoT>
234ParallelOverlappingILU0(const Matrix& A,
235 const int n, const field_type w,
236 MILU_VARIANT milu, bool redblack,
237 bool reorder_sphere)
238 : lower_(),
239 upper_(),
240 inv_(),
241 comm_(nullptr), w_(w),
242 relaxation_( std::abs( w - 1.0 ) > 1e-15 ),
243 A_(&reinterpret_cast<const Matrix&>(A)), iluIteration_(n),
244 milu_(milu), redBlack_(redblack), reorderSphere_(reorder_sphere)
245{
246 interiorSize_ = A.N();
247 // BlockMatrix is a Subclass of FieldMatrix that just adds
248 // methods. Therefore this cast should be safe.
249 update();
250}
251
252template<class Matrix, class Domain, class Range, class ParallelInfoT>
254ParallelOverlappingILU0(const Matrix& A,
255 const ParallelInfo& comm, const int n, const field_type w,
256 MILU_VARIANT milu, bool redblack,
257 bool reorder_sphere)
258 : lower_(),
259 upper_(),
260 inv_(),
261 comm_(&comm), w_(w),
262 relaxation_( std::abs( w - 1.0 ) > 1e-15 ),
263 A_(&reinterpret_cast<const Matrix&>(A)), iluIteration_(n),
264 milu_(milu), redBlack_(redblack), reorderSphere_(reorder_sphere)
265{
266 interiorSize_ = A.N();
267 // BlockMatrix is a Subclass of FieldMatrix that just adds
268 // methods. Therefore this cast should be safe.
269 update();
270}
271
272template<class Matrix, class Domain, class Range, class ParallelInfoT>
274ParallelOverlappingILU0(const Matrix& A,
275 const field_type w, MILU_VARIANT milu, bool redblack,
276 bool reorder_sphere)
277 : ParallelOverlappingILU0( A, 0, w, milu, redblack, reorder_sphere )
278{}
279
280template<class Matrix, class Domain, class Range, class ParallelInfoT>
282ParallelOverlappingILU0(const Matrix& A,
283 const ParallelInfo& comm, const field_type w,
284 MILU_VARIANT milu, bool redblack,
285 bool reorder_sphere)
286 : lower_(),
287 upper_(),
288 inv_(),
289 comm_(&comm), w_(w),
290 relaxation_( std::abs( w - 1.0 ) > 1e-15 ),
291 A_(&reinterpret_cast<const Matrix&>(A)), iluIteration_(0),
292 milu_(milu), redBlack_(redblack), reorderSphere_(reorder_sphere)
293{
294 interiorSize_ = A.N();
295 // BlockMatrix is a Subclass of FieldMatrix that just adds
296 // methods. Therefore this cast should be safe.
297 update();
298}
299
300template<class Matrix, class Domain, class Range, class ParallelInfoT>
302ParallelOverlappingILU0(const Matrix& A,
303 const ParallelInfo& comm,
304 const field_type w, MILU_VARIANT milu,
305 size_type interiorSize, bool redblack,
306 bool reorder_sphere)
307 : lower_(),
308 upper_(),
309 inv_(),
310 comm_(&comm), w_(w),
311 relaxation_( std::abs( w - 1.0 ) > 1e-15 ),
312 interiorSize_(interiorSize),
313 A_(&reinterpret_cast<const Matrix&>(A)), iluIteration_(0),
314 milu_(milu), redBlack_(redblack), reorderSphere_(reorder_sphere)
315{
316 // BlockMatrix is a Subclass of FieldMatrix that just adds
317 // methods. Therefore this cast should be safe.
318 assert(interiorSize <= A_->N());
319 update( );
320}
321
322template<class Matrix, class Domain, class Range, class ParallelInfoT>
324apply (Domain& v, const Range& d)
325{
326 OPM_TIMEBLOCK(apply);
327 Range& md = reorderD(d);
328 Domain& mv = reorderV(v);
329
330 // iterator types
331 using dblock = typename Range ::block_type;
332 using vblock = typename Domain::block_type;
333
334 const size_type iEnd = lower_.rows();
335 const size_type lastRow = iEnd - 1;
336 size_type upperLoopStart = iEnd - interiorSize_;
337 size_type lowerLoopEnd = interiorSize_;
338 if (iEnd != upper_.rows())
339 {
340 OPM_THROW(std::logic_error,"ILU: number of lower and upper rows must be the same");
341 }
342
343 // lower triangular solve
344 for (size_type i = 0; i < lowerLoopEnd; ++i)
345 {
346 dblock rhs( md[ i ] );
347 const size_type rowI = lower_.rows_[ i ];
348 const size_type rowINext = lower_.rows_[ i+1 ];
349
350 for (size_type col = rowI; col < rowINext; ++col)
351 {
352 lower_.values_[ col ].mmv( mv[ lower_.cols_[ col ] ], rhs );
353 }
354
355 mv[ i ] = rhs; // Lii = I
356 }
357
358 for (size_type i = upperLoopStart; i < iEnd; ++i)
359 {
360 vblock& vBlock = mv[ lastRow - i ];
361 vblock rhs ( vBlock );
362 const size_type rowI = upper_.rows_[ i ];
363 const size_type rowINext = upper_.rows_[ i+1 ];
364
365 for (size_type col = rowI; col < rowINext; ++col)
366 {
367 upper_.values_[ col ].mmv( mv[ upper_.cols_[ col ] ], rhs );
368 }
369
370 // apply inverse and store result
371 inv_[ i ].mv( rhs, vBlock);
372 }
373
374 copyOwnerToAll( mv );
375
376 if( relaxation_ ) {
377 mv *= w_;
378 }
379 reorderBack(mv, v);
380}
381
382template<class Matrix, class Domain, class Range, class ParallelInfoT>
383template<class V>
385copyOwnerToAll(V& v) const
386{
387 if( comm_ ) {
388 comm_->copyOwnerToAll(v, v);
389 }
390}
391
392template<class Matrix, class Domain, class Range, class ParallelInfoT>
394update()
395{
396 OPM_TIMEBLOCK(update);
397 // (For older DUNE versions the communicator might be
398 // invalid if redistribution in AMG happened on the coarset level.
399 // Therefore we check for nonzero size
400 if (comm_ && comm_->communicator().size() <= 0)
401 {
402 if (A_->N() > 0)
403 {
404 OPM_THROW(std::logic_error, "Expected a matrix with zero rows for an invalid communicator.");
405 }
406 else
407 {
408 // simply set the communicator to null
409 comm_ = nullptr;
410 }
411 }
412
413 int ilu_setup_successful = 1;
414 std::string message;
415 const int rank = comm_ ? comm_->communicator().rank() : 0;
416
417 if (redBlack_)
418 {
419 using Graph = Dune::Amg::MatrixGraph<const Matrix>;
420 Graph graph(*A_);
421 auto colorsTuple = colorVerticesWelshPowell(graph);
422 const auto& colors = std::get<0>(colorsTuple);
423 const auto& verticesPerColor = std::get<2>(colorsTuple);
424 auto noColors = std::get<1>(colorsTuple);
425 if ( reorderSphere_ )
426 {
427 ordering_ = reorderVerticesSpheres(colors, noColors, verticesPerColor,
428 graph, 0);
429 }
430 else
431 {
432 ordering_ = reorderVerticesPreserving(colors, noColors, verticesPerColor,
433 graph);
434 }
435 }
436
437 std::vector<std::size_t> inverseOrdering(ordering_.size());
438 {
439 OPM_TIMEBLOCK(createInverseOrdering);
440 std::size_t index = 0;
441 for (const auto newIndex : ordering_)
442 {
443 inverseOrdering[newIndex] = index++;
444 }
445 }
446
447 try
448 {
449 OPM_TIMEBLOCK(iluDecomposition);
450 if (iluIteration_ == 0) {
451
452 if (comm_) {
453 interiorSize_ = detail::set_interiorSize(A_->N(), interiorSize_, *comm_);
454 assert(interiorSize_ <= A_->N());
455 }
456
457 // create ILU-0 decomposition
458 if (ordering_.empty())
459 {
460 OPM_TIMEBLOCK(iluDecompositionUpdateMatrix);
461 if (ILU_) {
462 OPM_TIMEBLOCK(iluDecompositionCopyEntries);
463 // The ILU_ matrix is already a copy with the same
464 // sparse structure as A_, but the values of A_ may
465 // have changed, so we must copy all elements.
466 for (std::size_t row = 0; row < A_->N(); ++row) {
467 const auto& Arow = (*A_)[row];
468 auto& ILUrow = (*ILU_)[row];
469 auto Ait = Arow.begin();
470 auto Iit = ILUrow.begin();
471 for (; Ait != Arow.end(); ++Ait, ++Iit) {
472 *Iit = *Ait;
473 }
474 }
475 } else {
476 OPM_TIMEBLOCK(iluDecompositionDuplicateMatrix);
477 // First call, must duplicate matrix.
478 ILU_ = std::make_unique<Matrix>(*A_);
479 }
480 }
481 else
482 {
483 ILU_ = std::make_unique<Matrix>(A_->N(), A_->M(),
484 A_->nonzeroes(), Matrix::row_wise);
485 auto& newA = *ILU_;
486 // Create sparsity pattern
487 for (auto iter = newA.createbegin(), iend = newA.createend(); iter != iend; ++iter)
488 {
489 const auto& row = (*A_)[inverseOrdering[iter.index()]];
490 for (auto col = row.begin(), cend = row.end(); col != cend; ++col)
491 {
492 iter.insert(ordering_[col.index()]);
493 }
494 }
495 // Copy values
496 for (auto iter = A_->begin(), iend = A_->end(); iter != iend; ++iter)
497 {
498 auto& newRow = newA[ordering_[iter.index()]];
499 for (auto col = iter->begin(), cend = iter->end(); col != cend; ++col)
500 {
501 newRow[ordering_[col.index()]] = *col;
502 }
503 }
504 }
505
506 switch (milu_)
507 {
510 break;
512 detail::milu0_decomposition ( *ILU_, detail::identityFunctor<typename Matrix::field_type>,
513 detail::signFunctor<typename Matrix::field_type> );
514 break;
516 detail::milu0_decomposition ( *ILU_, detail::absFunctor<typename Matrix::field_type>,
517 detail::signFunctor<typename Matrix::field_type> );
518 break;
520 detail::milu0_decomposition ( *ILU_, detail::identityFunctor<typename Matrix::field_type>,
521 detail::isPositiveFunctor<typename Matrix::field_type> );
522 break;
523 default:
524 if (interiorSize_ == A_->N())
525 Dune::ILU::blockILU0Decomposition( *ILU_ );
526 else
527 detail::ghost_last_bilu0_decomposition(*ILU_, interiorSize_);
528 break;
529 }
530 }
531 else {
532 // create ILU-n decomposition
533 ILU_ = std::make_unique<Matrix>(A_->N(), A_->M(), Matrix::row_wise);
534 std::unique_ptr<detail::Reorderer> reorderer, inverseReorderer;
535 if (ordering_.empty())
536 {
537 reorderer.reset(new detail::NoReorderer());
538 inverseReorderer.reset(new detail::NoReorderer());
539 }
540 else
541 {
542 reorderer.reset(new detail::RealReorderer(ordering_));
543 inverseReorderer.reset(new detail::RealReorderer(inverseOrdering));
544 }
545
546 milun_decomposition( *A_, iluIteration_, milu_, *ILU_, *reorderer, *inverseReorderer );
547 }
548 }
549 catch (const Dune::MatrixBlockError& error)
550 {
551 message = error.what();
552 std::cerr << "Exception occurred on process " << rank << " during " <<
553 "setup of ILU0 preconditioner with message: "
554 << message<<std::endl;
555 ilu_setup_successful = 0;
556 }
557
558 // Check whether there was a problem on some process
559 {
560 OPM_TIMEBLOCK(checkParallelFailure);
561 const bool parallel_failure = comm_ && comm_->communicator().min(ilu_setup_successful) == 0;
562 const bool local_failure = ilu_setup_successful == 0;
563 if (local_failure || parallel_failure)
564 {
565 throw Dune::MatrixBlockError();
566 }
567 }
568
569 // store ILU in simple CRS format
570 detail::convertToCRS(*ILU_, lower_, upper_, inv_);
571}
572
573template<class Matrix, class Domain, class Range, class ParallelInfoT>
575reorderD(const Range& d)
576{
577 if (ordering_.empty())
578 {
579 // As d is non-const in the apply method of the
580 // solver casting away constness in this particular
581 // setting is not undefined. It is ugly though but due
582 // to the preconditioner interface of dune-istl.
583 return const_cast<Range&>(d);
584 }
585 else
586 {
587 reorderedD_.resize(d.size());
588 std::size_t i = 0;
589 for (const auto index : ordering_)
590 {
591 reorderedD_[index] = d[i++];
592 }
593 return reorderedD_;
594 }
595}
596
597template<class Matrix, class Domain, class Range, class ParallelInfoT>
599reorderV(Domain& v)
600{
601 if (ordering_.empty())
602 {
603 return v;
604 }
605 else
606 {
607 reorderedV_.resize(v.size());
608 std::size_t i = 0;
609 for (const auto index : ordering_)
610 {
611 reorderedV_[index] = v[i++];
612 }
613 return reorderedV_;
614 }
615}
616
617template<class Matrix, class Domain, class Range, class ParallelInfoT>
619reorderBack(const Range& reorderedV, Range& v)
620{
621 if (!ordering_.empty())
622 {
623 std::size_t i = 0;
624 for (const auto index : ordering_)
625 {
626 v[i++] = reorderedV[index];
627 }
628 }
629}
630
631} // end namespace Opm
A two-step version of an overlapping Schwarz preconditioner using one step of ILU0 as its approximate solver.
Definition: ParallelOverlappingILU0.hpp:131
ParallelOverlappingILU0(const Matrix &A, const int n, const field_type w, MILU_VARIANT milu, bool redblack=false, bool reorder_sphere=true)
Constructor.
Definition: ParallelOverlappingILU0_impl.hpp:234
Domain & reorderV(Domain &v)
Reorder V if needed and return a reference to it.
Definition: ParallelOverlappingILU0_impl.hpp:599
size_type interiorSize_
Definition: ParallelOverlappingILU0.hpp:350
void reorderBack(const Range &reorderedV, Range &v)
Definition: ParallelOverlappingILU0_impl.hpp:619
Range & reorderD(const Range &d)
Reorder D if needed and return a reference to it.
Definition: ParallelOverlappingILU0_impl.hpp:575
void copyOwnerToAll(V &v) const
Definition: ParallelOverlappingILU0_impl.hpp:385
Dune::SolverCategory::Category category() const override
Definition: ParallelOverlappingILU0_impl.hpp:226
void update() override
Definition: ParallelOverlappingILU0_impl.hpp:394
void apply(Domain &v, const Range &d) override
Apply the preconditoner.
Definition: ParallelOverlappingILU0_impl.hpp:324
typename matrix_type::size_type size_type
Definition: ParallelOverlappingILU0.hpp:145
typename Domain::field_type field_type
The field type of the preconditioner.
Definition: ParallelOverlappingILU0.hpp:142
void milu0_decomposition(M &A, FieldFunct< M > absFunctor=signFunctor< typename M::field_type >, FieldFunct< M > signFunctor=oneFunctor< typename M::field_type >, std::vector< typename M::block_type > *diagonal=nullptr)
void convertToCRS(const M &A, CRS &lower, CRS &upper, InvVector &inv)
compute ILU decomposition of A. A is overwritten by its decomposition
Definition: ParallelOverlappingILU0_impl.hpp:103
void milun_decomposition(const M &A, int n, MILU_VARIANT milu, M &ILU, Reorderer &ordering, Reorderer &inverseOrdering)
size_t set_interiorSize(size_t N, size_t interiorSize, const PI &comm)
Definition: ParallelOverlappingILU0_impl.hpp:193
void ghost_last_bilu0_decomposition(M &A, std::size_t interiorSize)
Compute Blocked ILU0 decomposition, when we know junk ghost rows are located at the end of A.
Definition: ParallelOverlappingILU0_impl.hpp:43
Definition: blackoilbioeffectsmodules.hh:45
MILU_VARIANT
Definition: MILU.hpp:34
@ MILU_1
sum(dropped entries)
@ MILU_2
sum(dropped entries)
@ MILU_3
sum(|dropped entries|)
@ MILU_4
sum(dropped entries)
std::vector< std::size_t > reorderVerticesPreserving(const std::vector< int > &colors, int noColors, const std::vector< std::size_t > &verticesPerColor, const Graph &graph)
! Reorder colored graph preserving order of vertices with the same color.
Definition: GraphColoring.hpp:169
std::vector< std::size_t > reorderVerticesSpheres(const std::vector< int > &colors, int noColors, const std::vector< std::size_t > &verticesPerColor, const Graph &graph, typename Graph::VertexDescriptor root)
! Reorder vertices in spheres
Definition: GraphColoring.hpp:189
std::tuple< std::vector< int >, int, std::vector< std::size_t > > colorVerticesWelshPowell(const Graph &graph)
Color the vertices of graph.
Definition: GraphColoring.hpp:113
Definition: MILU.hpp:76
Definition: MILU.hpp:84