3#ifndef DUNE_AMG_AMG_CPR_HH
4#define DUNE_AMG_AMG_CPR_HH
10#include <opm/common/TimingMacros.hpp>
11#include <dune/common/exceptions.hh>
12#include <dune/common/version.hh>
13#include <dune/istl/paamg/amg.hh>
14#include <dune/istl/paamg/smoother.hh>
15#include <dune/istl/paamg/transfer.hh>
16#include <dune/istl/paamg/hierarchy.hh>
17#include <dune/istl/solvers.hh>
18#include <dune/istl/scalarproducts.hh>
19#include <dune/istl/superlu.hh>
20#include <dune/istl/umfpack.hh>
21#include <dune/istl/solvertype.hh>
22#include <dune/common/typetraits.hh>
23#include <dune/common/exceptions.hh>
34 template<
class M,
class T>
40 template<
class M,
class PI>
41 typename std::enable_if<!std::is_same<PI,SequentialInformation>::value,
void>::type
43 Dune::RedistributeInformation<PI>& redistInfo)
46 info.buildGlobalLookup(mat.N());
47 redistributeMatrixEntries(mat, matRedist, info, infoRedist, redistInfo);
48 info.freeGlobalLookup();
69 template<
class M,
class X,
class S,
class P,
class K,
class A>
85 template<
class M,
class X,
class S,
class PI=SequentialInformation,
86 class A=std::allocator<X> >
89 template<
class M1,
class X1,
class S1,
class P1,
class K1,
class A1>
135 const SmootherArgs& smootherArgs,
const Parameters& parms);
201 OPM_TIMEBLOCK(recalculateHierarch);
202 auto copyFlags = NegateSet<typename PI::OwnerSet>();
203 const auto& matrices = matrices_->matrices();
204 const auto& aggregatesMapHierarchy = matrices_->aggregatesMaps();
205 const auto& infoHierarchy = matrices_->parallelInformation();
206 const auto& redistInfoHierarchy = matrices_->redistributeInformation();
207 BaseGalerkinProduct productBuilder;
208 auto aggregatesMap = aggregatesMapHierarchy.begin();
209 auto info = infoHierarchy.finest();
210 auto redistInfo = redistInfoHierarchy.begin();
211 auto matrix = matrices.finest();
212 auto coarsestMatrix = matrices.coarsest();
214 using Matrix =
typename M::matrix_type;
217 if(matrix.isRedistributed()) {
219 const_cast<Matrix&
>(matrix.getRedistributed().getmat()),
220 const_cast<PI&
>(*info),
const_cast<PI&
>(info.getRedistributed()),
221 const_cast<Dune::RedistributeInformation<PI>&
>(*redistInfo));
225 for(; matrix!=coarsestMatrix; ++aggregatesMap) {
226 const Matrix& fine = (matrix.isRedistributed() ? matrix.getRedistributed() : *matrix).getmat();
230 productBuilder.calculate(fine, *(*aggregatesMap),
const_cast<Matrix&
>(matrix->getmat()), *info, copyFlags);
232 if(matrix.isRedistributed()) {
234 const_cast<Matrix&
>(matrix.getRedistributed().getmat()),
235 const_cast<PI&
>(*info),
const_cast<PI&
>(info.getRedistributed()),
236 const_cast<Dune::RedistributeInformation<PI>&
>(*redistInfo));
267 void createHierarchies(C& criterion,
Operator& matrix,
272 std::shared_ptr< Operator > op( &matrix, [](
Operator* ) {});
273 std::shared_ptr< PI > pifo(
const_cast< PI*
> (&pinfo), []( PI * ) {});
274 createHierarchies( criterion, op, pifo);
278 void createHierarchies(C& criterion, std::shared_ptr< Operator > matrix,
279 std::shared_ptr< PI > pinfo );
281 void setupCoarseSolver();
299 typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator
matrix;
303 typename ParallelInformationHierarchy::Iterator
pinfo;
307 typename OperatorHierarchy::RedistributeInfoList::const_iterator
redist;
311 typename OperatorHierarchy::AggregatesMapList::const_iterator
aggregates;
315 typename Hierarchy<Domain,A>::Iterator
lhs;
319 typename Hierarchy<Domain,A>::Iterator
update;
323 typename Hierarchy<Range,A>::Iterator
rhs;
335 void mgc(LevelContext& levelContext);
345 void moveToFineLevel(LevelContext& levelContext,
bool processedFineLevel);
351 bool moveToCoarseLevel(LevelContext& levelContext);
357 void initIteratorsWithFineLevel(LevelContext& levelContext);
360 std::shared_ptr<OperatorHierarchy> matrices_;
364 std::shared_ptr<Hierarchy<Smoother,A> > smoothers_;
366 std::shared_ptr<CoarseSolver> solver_;
368 std::shared_ptr< Hierarchy<Range,A> > rhs_;
370 std::shared_ptr< Hierarchy<Domain,A> > lhs_;
372 std::shared_ptr< Hierarchy<Domain,A> > update_;
374 using ScalarProduct = Dune::ScalarProduct<X>;
376 std::shared_ptr<ScalarProduct> scalarProduct_;
380 std::size_t preSteps_;
382 std::size_t postSteps_;
383 bool buildHierarchy_;
385 bool coarsesolverconverged;
386 std::shared_ptr<Smoother> coarseSmoother_;
388 SolverCategory::Category category_;
390 std::size_t verbosity_;
393 template<
class M,
class X,
class S,
class PI,
class A>
395 : matrices_(amg.matrices_), smootherArgs_(amg.smootherArgs_),
396 smoothers_(amg.smoothers_), solver_(amg.solver_),
397 rhs_(), lhs_(), update_(),
398 scalarProduct_(amg.scalarProduct_), gamma_(amg.gamma_),
399 preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
400 buildHierarchy_(amg.buildHierarchy_),
401 additive(amg.additive), coarsesolverconverged(amg.coarsesolverconverged),
402 coarseSmoother_(amg.coarseSmoother_),
403 category_(amg.category_),
404 verbosity_(amg.verbosity_)
407 rhs_.reset(
new Hierarchy<Range,A>(*amg.rhs_) );
409 lhs_.reset(
new Hierarchy<Domain,A>(*amg.lhs_) );
411 update_.reset(
new Hierarchy<Domain,A>(*amg.update_) );
414 template<
class M,
class X,
class S,
class PI,
class A>
417 const Parameters& parms)
418 : matrices_(stackobject_to_shared_ptr(matrices)), smootherArgs_(smootherArgs),
419 smoothers_(new Hierarchy<
Smoother,A>), solver_(&coarseSolver),
420 rhs_(), lhs_(), update_(), scalarProduct_(0),
421 gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
422 postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
423 additive(parms.getAdditive()), coarsesolverconverged(true),
426 category_(SolverCategory::category(*smoothers_->coarsest())),
427 verbosity_(parms.debugLevel())
429 assert(matrices_->isBuilt());
432 matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
435 template<
class M,
class X,
class S,
class PI,
class A>
441 : smootherArgs_(smootherArgs),
442 smoothers_(new Hierarchy<
Smoother,A>), solver_(),
443 rhs_(), lhs_(), update_(), scalarProduct_(),
444 gamma_(criterion.getGamma()), preSteps_(criterion.getNoPreSmoothSteps()),
445 postSteps_(criterion.getNoPostSmoothSteps()), buildHierarchy_(true),
446 additive(criterion.getAdditive()), coarsesolverconverged(true),
448 category_(SolverCategory::category(pinfo)),
449 verbosity_(criterion.debugLevel())
451 if(SolverCategory::category(matrix) != SolverCategory::category(pinfo))
452 DUNE_THROW(InvalidSolverCategory,
"Matrix and Communication must have the same SolverCategory!");
453 createHierarchies(criterion,
const_cast<Operator&
>(matrix), pinfo);
456 template<
class M,
class X,
class S,
class PI,
class A>
459 if(buildHierarchy_) {
463 coarseSmoother_.reset();
467 template<
class M,
class X,
class S,
class PI,
class A>
474 template<
class M,
class X,
class S,
class PI,
class A>
477 OPM_TIMEBLOCK(update);
479 smoothers_.reset(
new Hierarchy<Smoother,A>);
481 coarseSmoother_.reset();
482 scalarProduct_.reset();
483 buildHierarchy_=
true;
484 coarsesolverconverged =
true;
485 smoothers_.reset(
new Hierarchy<Smoother,A>);
486 recalculateHierarchy();
487 matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
489 if (verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0) {
490 std::cout <<
"Recalculating galerkin and coarse smoothers "<< matrices_->maxlevels() <<
" levels "
491 << watch.elapsed() <<
" seconds." << std::endl;
495 template<
class M,
class X,
class S,
class PI,
class A>
498 std::shared_ptr< PI > pinfo )
500 OPM_TIMEBLOCK(createHierarchies);
502 matrices_.reset(
new OperatorHierarchy(matrix, pinfo));
504 matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);
507 matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
509 if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
510 std::cout<<
"Building hierarchy of "<<matrices_->maxlevels()<<
" levels "
511 <<
"(inclusive coarse solver) took "<<watch.elapsed()<<
" seconds."<<std::endl;
514 template<
class M,
class X,
class S,
class PI,
class A>
515 void AMGCPR<M,X,S,PI,A>::setupCoarseSolver()
517 OPM_TIMEBLOCK(setupCoarseSolver);
522 if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()
523 && ( ! matrices_->redistributeInformation().back().isSetup() ||
524 matrices_->parallelInformation().coarsest().getRedistributed().communicator().size() ) )
527 SmootherArgs sargs(smootherArgs_);
528 sargs.iterations = 1;
530 typename ConstructionTraits<Smoother>::Arguments cargs;
531 cargs.setArgs(sargs);
532 if(matrices_->redistributeInformation().back().isSetup()) {
534 cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
535 cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
537 cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
538 cargs.setComm(*matrices_->parallelInformation().coarsest());
541 coarseSmoother_ = ConstructionTraits<Smoother>::construct(cargs);
542 scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());
544 typedef DirectSolverSelector< typename M::matrix_type, X > SolverSelector;
547 if( SolverSelector::isDirectSolver &&
548 (std::is_same<ParallelInformation,SequentialInformation>::value
549 || matrices_->parallelInformation().coarsest()->communicator().size()==1
550 || (matrices_->parallelInformation().coarsest().isRedistributed()
551 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
552 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0) )
555 if(matrices_->parallelInformation().coarsest().isRedistributed())
557 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
560 solver_.reset(SolverSelector::create(matrices_->matrices().coarsest().getRedistributed().getmat(),
false,
false));
567 solver_.reset(SolverSelector::create(matrices_->matrices().coarsest()->getmat(),
false,
false));
569 if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
570 std::cout<<
"Using a direct coarse solver (" << SolverSelector::name() <<
")" << std::endl;
574 if(matrices_->parallelInformation().coarsest().isRedistributed())
576 if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
581 solver_.reset(
new BiCGSTABSolver<X>(
const_cast<M&
>(matrices_->matrices().coarsest().getRedistributed()),
583 reinterpret_cast<typename
584 std::conditional<std::is_same<PI,SequentialInformation>::value,
585 Dune::SeqScalarProduct<X>,
586 Dune::OverlappingSchwarzScalarProduct<X,PI>
>::type&>(*scalarProduct_),
587 *coarseSmoother_, 1E-2, 1000, 0));
592 solver_.reset(
new BiCGSTABSolver<X>(
const_cast<M&
>(*matrices_->matrices().coarsest()),
594 reinterpret_cast<typename
595 std::conditional<std::is_same<PI,SequentialInformation>::value,
596 Dune::SeqScalarProduct<X>,
597 Dune::OverlappingSchwarzScalarProduct<X,PI>
>::type&>(*scalarProduct_),
598 *coarseSmoother_, 1E-2, 1000, 0));
618 template<
class M,
class X,
class S,
class PI,
class A>
626 typedef typename M::matrix_type Matrix;
627 typedef typename Matrix::ConstRowIterator RowIter;
628 typedef typename Matrix::ConstColIterator ColIter;
629 typedef typename Matrix::block_type Block;
631 zero=
typename Matrix::field_type();
633 const Matrix& mat=matrices_->matrices().finest()->getmat();
634 for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
635 bool isDirichlet =
true;
636 bool hasDiagonal =
false;
638 for(ColIter col=row->begin(); col!=row->end(); ++col) {
639 if(row.index()==col.index()) {
647 if(isDirichlet && hasDiagonal)
648 diagonal.solve(x[row.index()], b[row.index()]);
651 if(smoothers_->levels()>0)
652 smoothers_->finest()->pre(x,b);
655 matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
658 typedef std::shared_ptr< Range > RangePtr ;
659 typedef std::shared_ptr< Domain > DomainPtr;
662 RangePtr copy(
new Range(b) );
663 rhs_.reset(
new Hierarchy<Range,A>(copy) );
664 DomainPtr dcopy(
new Domain(x) );
665 lhs_.reset(
new Hierarchy<Domain,A>(dcopy) );
666 DomainPtr dcopy2(
new Domain(x) );
667 update_.reset(
new Hierarchy<Domain,A>(dcopy2) );
669 matrices_->coarsenVector(*rhs_);
670 matrices_->coarsenVector(*lhs_);
671 matrices_->coarsenVector(*update_);
674 typedef typename Hierarchy<Smoother,A>::Iterator Iterator;
675 typedef typename Hierarchy<Range,A>::Iterator RIterator;
676 typedef typename Hierarchy<Domain,A>::Iterator DIterator;
677 Iterator coarsest = smoothers_->coarsest();
678 Iterator smoother = smoothers_->finest();
679 RIterator rhs = rhs_->finest();
680 DIterator lhs = lhs_->finest();
681 if(smoothers_->levels()>0) {
683 assert(lhs_->levels()==rhs_->levels());
684 assert(smoothers_->levels()==lhs_->levels() || matrices_->levels()==matrices_->maxlevels());
685 assert(smoothers_->levels()+1==lhs_->levels() || matrices_->levels()<matrices_->maxlevels());
687 if(smoother!=coarsest)
688 for(++smoother, ++lhs, ++rhs; smoother != coarsest; ++smoother, ++lhs, ++rhs)
689 smoother->pre(*lhs,*rhs);
690 smoother->pre(*lhs,*rhs);
700 template<
class M,
class X,
class S,
class PI,
class A>
703 return matrices_->levels();
705 template<
class M,
class X,
class S,
class PI,
class A>
708 return matrices_->maxlevels();
712 template<
class M,
class X,
class S,
class PI,
class A>
715 OPM_TIMEBLOCK(apply);
716 LevelContext levelContext;
724 initIteratorsWithFineLevel(levelContext);
727 *levelContext.lhs = v;
728 *levelContext.rhs = d;
729 *levelContext.update=0;
730 levelContext.level=0;
734 if(postSteps_==0||matrices_->maxlevels()==1)
735 levelContext.pinfo->copyOwnerToAll(*levelContext.update, *levelContext.update);
737 v=*levelContext.update;
742 template<
class M,
class X,
class S,
class PI,
class A>
745 OPM_TIMEBLOCK(initIteratorsWithFineLevel);
746 levelContext.smoother = smoothers_->finest();
747 levelContext.matrix = matrices_->matrices().finest();
748 levelContext.pinfo = matrices_->parallelInformation().finest();
749 levelContext.redist =
750 matrices_->redistributeInformation().begin();
751 levelContext.aggregates = matrices_->aggregatesMaps().begin();
752 levelContext.lhs = lhs_->finest();
753 levelContext.update = update_->finest();
754 levelContext.rhs = rhs_->finest();
757 template<
class M,
class X,
class S,
class PI,
class A>
758 bool AMGCPR<M,X,S,PI,A>
759 ::moveToCoarseLevel(LevelContext& levelContext)
761 OPM_TIMEBLOCK(moveToCoarseLevel);
762 bool processNextLevel=
true;
764 if(levelContext.redist->isSetup()) {
765 levelContext.redist->redistribute(
static_cast<const Range&
>(*levelContext.rhs),
766 levelContext.rhs.getRedistributed());
767 processNextLevel = levelContext.rhs.getRedistributed().size()>0;
768 if(processNextLevel) {
770 typename Hierarchy<Range,A>::Iterator fineRhs = levelContext.rhs++;
771 ++levelContext.pinfo;
772 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
773 ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
774 static_cast<const Range&
>(fineRhs.getRedistributed()),
775 *levelContext.pinfo);
779 typename Hierarchy<Range,A>::Iterator fineRhs = levelContext.rhs++;
780 ++levelContext.pinfo;
781 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
782 ::restrictVector(*(*levelContext.aggregates),
783 *levelContext.rhs,
static_cast<const Range&
>(*fineRhs),
784 *levelContext.pinfo);
787 if(processNextLevel) {
790 ++levelContext.update;
791 ++levelContext.matrix;
792 ++levelContext.level;
793 ++levelContext.redist;
795 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
797 ++levelContext.smoother;
798 ++levelContext.aggregates;
801 *levelContext.update=0;
803 return processNextLevel;
806 template<
class M,
class X,
class S,
class PI,
class A>
807 void AMGCPR<M,X,S,PI,A>
808 ::moveToFineLevel(LevelContext& levelContext,
bool processNextLevel)
810 OPM_TIMEBLOCK(moveToFineLevel);
811 if(processNextLevel) {
812 if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
814 --levelContext.smoother;
815 --levelContext.aggregates;
817 --levelContext.redist;
818 --levelContext.level;
820 --levelContext.matrix;
824 --levelContext.pinfo;
826 if(levelContext.redist->isSetup()) {
828 levelContext.lhs.getRedistributed()=0;
829 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
830 ::prolongateVector(*(*levelContext.aggregates), *levelContext.update, *levelContext.lhs,
831 levelContext.lhs.getRedistributed(),
832 matrices_->getProlongationDampingFactor(),
833 *levelContext.pinfo, *levelContext.redist);
836 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
837 ::prolongateVector(*(*levelContext.aggregates), *levelContext.update, *levelContext.lhs,
838 matrices_->getProlongationDampingFactor(),
839 *levelContext.pinfo);
843 if(processNextLevel) {
844 --levelContext.update;
848 *levelContext.update += *levelContext.lhs;
851 template<
class M,
class X,
class S,
class PI,
class A>
854 return IsDirectSolver< CoarseSolver>::value;
857 template<
class M,
class X,
class S,
class PI,
class A>
860 if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
864 if(levelContext.redist->isSetup()) {
865 levelContext.redist->redistribute(*levelContext.rhs, levelContext.rhs.getRedistributed());
866 if(levelContext.rhs.getRedistributed().size()>0) {
868 levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
869 levelContext.rhs.getRedistributed());
870 solver_->apply(levelContext.update.getRedistributed(),
871 levelContext.rhs.getRedistributed(), res);
873 levelContext.redist->redistributeBackward(*levelContext.update, levelContext.update.getRedistributed());
874 levelContext.pinfo->copyOwnerToAll(*levelContext.update, *levelContext.update);
876 levelContext.pinfo->copyOwnerToAll(*levelContext.rhs, *levelContext.rhs);
877 solver_->apply(*levelContext.update, *levelContext.rhs, res);
881 coarsesolverconverged =
false;
884 presmooth(levelContext, preSteps_);
886#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
887 bool processNextLevel = moveToCoarseLevel(levelContext);
889 if(processNextLevel) {
891 for(std::size_t i=0; i<gamma_; i++)
895 moveToFineLevel(levelContext, processNextLevel);
900 if(levelContext.matrix == matrices_->matrices().finest()) {
901 coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
902 if(!coarsesolverconverged){
907 postsmooth(levelContext, postSteps_);
912 template<
class M,
class X,
class S,
class PI,
class A>
913 void AMGCPR<M,X,S,PI,A>::additiveMgc(){
914 OPM_TIMEBLOCK(additiveMgc);
916 typename ParallelInformationHierarchy::Iterator pinfo=matrices_->parallelInformation().finest();
917 typename Hierarchy<Range,A>::Iterator rhs=rhs_->finest();
918 typename Hierarchy<Domain,A>::Iterator lhs = lhs_->finest();
919 typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates=matrices_->aggregatesMaps().begin();
921 for(
typename Hierarchy<Range,A>::Iterator fineRhs=rhs++; fineRhs != rhs_->coarsest(); fineRhs=rhs++, ++aggregates) {
923 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
924 ::restrictVector(*(*aggregates), *rhs,
static_cast<const Range&
>(*fineRhs), *pinfo);
930 lhs = lhs_->finest();
931 typename Hierarchy<Smoother,A>::Iterator smoother = smoothers_->finest();
933 for(rhs=rhs_->finest(); rhs != rhs_->coarsest(); ++lhs, ++rhs, ++smoother) {
936 smoother->apply(*lhs, *rhs);
940#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
942 pinfo->copyOwnerToAll(*rhs, *rhs);
943 solver_->apply(*lhs, *rhs, res);
946 DUNE_THROW(MathError,
"Coarse solver did not converge");
954 for(
typename Hierarchy<Domain,A>::Iterator coarseLhs = lhs--; coarseLhs != lhs_->finest(); coarseLhs = lhs--, --aggregates, --pinfo) {
955 Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
956 ::prolongateVector(*(*aggregates), *coarseLhs, *lhs, 1.0, *pinfo);
962 template<
class M,
class X,
class S,
class PI,
class A>
967 typedef typename Hierarchy<Smoother,A>::Iterator Iterator;
968 typedef typename Hierarchy<Domain,A>::Iterator DIterator;
969 Iterator coarsest = smoothers_->coarsest();
970 Iterator smoother = smoothers_->finest();
971 DIterator lhs = lhs_->finest();
972 if(smoothers_->levels()>0) {
973 if(smoother != coarsest || matrices_->levels()<matrices_->maxlevels())
974 smoother->post(*lhs);
975 if(smoother!=coarsest)
976 for(++smoother, ++lhs; smoother != coarsest; ++smoother, ++lhs)
977 smoother->post(*lhs);
978 smoother->post(*lhs);
988 template<
class M,
class X,
class S,
class PI,
class A>
992 matrices_->getCoarsestAggregatesOnFinest(cont);
Parallel algebraic multigrid based on agglomeration.
Definition: amgcpr.hh:88
Interface class adding the update() method to the preconditioner interface.
Definition: PreconditionerWithUpdate.hpp:32
M Operator
The matrix operator type.
Definition: amgcpr.hh:96
Hierarchy< Smoother, A >::Iterator smoother
The iterator over the smoothers.
Definition: amgcpr.hh:295
void post(Domain &x) override
Definition: amgcpr.hh:963
AMGCPR(const OperatorHierarchy &matrices, CoarseSolver &coarseSolver, const SmootherArgs &smootherArgs, const Parameters &parms)
Construct a new amg with a specific coarse solver.
Definition: amgcpr.hh:415
Hierarchy< Domain, A >::Iterator update
The iterator over the updates.
Definition: amgcpr.hh:319
Hierarchy< Range, A >::Iterator rhs
The iterator over the right-hand sides.
Definition: amgcpr.hh:323
MatrixHierarchy< M, ParallelInformation, A > OperatorHierarchy
The operator hierarchy type.
Definition: amgcpr.hh:105
ParallelInformationHierarchy::Iterator pinfo
The iterator over the parallel information.
Definition: amgcpr.hh:303
OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix
The iterator over the matrices.
Definition: amgcpr.hh:299
void getCoarsestAggregateNumbers(std::vector< std::size_t, A1 > &cont)
Get the aggregate number of each unknown on the coarsest level.
Definition: amgcpr.hh:990
OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy
The parallel data distribution hierarchy type.
Definition: amgcpr.hh:107
~AMGCPR()
Definition: amgcpr.hh:457
void recalculateHierarchy()
Recalculate the matrix hierarchy.
Definition: amgcpr.hh:199
SmootherTraits< Smoother >::Arguments SmootherArgs
The argument type for the construction of the smoother.
Definition: amgcpr.hh:123
void apply(Domain &v, const Range &d) override
Definition: amgcpr.hh:713
X Range
The range type.
Definition: amgcpr.hh:112
void pre(Domain &x, Range &b) override
Definition: amgcpr.hh:619
X Domain
The domain type.
Definition: amgcpr.hh:110
void update() override
Update the coarse solver and the hierarchies.
Definition: amgcpr.hh:475
std::size_t maxlevels()
Definition: amgcpr.hh:706
S Smoother
The type of the smoother.
Definition: amgcpr.hh:120
void updateSolver(C &criterion, const Operator &, const PI &pinfo)
Update the coarse solver and the hierarchies.
Definition: amgcpr.hh:469
bool hasPerfectUpdate() const override
Definition: amgcpr.hh:172
OperatorHierarchy::AggregatesMapList::const_iterator aggregates
The iterator over the aggregates maps.
Definition: amgcpr.hh:311
PI ParallelInformation
The type of the parallel information. Either OwnerOverlapCommunication or another type describing the parallel data distribution and providing communication methods.
Definition: amgcpr.hh:103
InverseOperator< X, X > CoarseSolver
the type of the coarse solver.
Definition: amgcpr.hh:114
std::size_t level
The level index.
Definition: amgcpr.hh:327
Hierarchy< Domain, A >::Iterator lhs
The iterator over the left hand side.
Definition: amgcpr.hh:315
bool usesDirectCoarseLevelSolver() const
Check whether the coarse solver used is a direct solver.
Definition: amgcpr.hh:852
Smoother SmootherType
Definition: amgcpr.hh:291
OperatorHierarchy::RedistributeInfoList::const_iterator redist
The iterator over the redistribution information.
Definition: amgcpr.hh:307
SolverCategory::Category category() const override
Category of the preconditioner (see SolverCategory::Category)
Definition: amgcpr.hh:167
std::size_t levels()
Definition: amgcpr.hh:701
void redistributeMatrixAmg(M &, M &, SequentialInformation &, SequentialInformation &, T &)
Definition: amgcpr.hh:35
Definition: fvbaseprimaryvariables.hh:141
Dune::InverseOperatorResult InverseOperatorResult
Definition: BdaBridge.hpp:32