StandardPreconditioners_mpi.hpp
Go to the documentation of this file.
1/*
2 Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
3 Copyright 2019 SINTEF Digital, Mathematics and Cybernetics.
4
5 This file is part of the Open Porous Media project (OPM).
6
7 OPM is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version.
11
12 OPM is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with OPM. If not, see <http://www.gnu.org/licenses/>.
19*/
20
21#ifndef OPM_STANDARDPRECONDITIONERS_MPI_HEADER
22#define OPM_STANDARDPRECONDITIONERS_MPI_HEADER
23
24#if HAVE_CUDA
25#if USE_HIP
26#include <opm/simulators/linalg/gpuistl_hip/PreconditionerCPUMatrixToGPUMatrix.hpp>
27#else
29#endif
30#endif
31
32
33namespace Opm {
34
35
// Generic helper that translates PropertyTree settings into the
// Dune::Amg::SmootherTraits<Smoother>::Arguments object used to configure
// an AMG smoother.
// NOTE(review): the struct declaration line (doxygen line 37) was dropped
// during extraction of this listing; per the page's cross-reference index it
// is the struct defined at StandardPreconditioners_mpi.hpp:38 — presumably
// the AMG smoother-argument helper referenced by makeAmgPreconditioner.
// TODO confirm against the original header.
36template <class Smoother>
38{
// Builds smoother arguments from the property tree:
//   "iterations" (default 1)   -> number of smoother sweeps,
//   "relaxation" (default 1.0) -> relaxation factor.
 39 static auto args(const PropertyTree& prm)
 40 {
 41 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 42 SmootherArgs smootherArgs;
 43 smootherArgs.iterations = prm.get<int>("iterations", 1);
// Alternative overlap modes kept for reference by the original authors.
 44 // smootherArgs.overlap=SmootherArgs::vertex;
 45 // smootherArgs.overlap=SmootherArgs::none;
 46 // smootherArgs.overlap=SmootherArgs::aggregate;
 47 smootherArgs.relaxationFactor = prm.get<double>("relaxation", 1.0);
 48 return smootherArgs;
 49 }
 50};
51
// Specialization of the smoother-argument helper for a parallel ILU smoother.
// NOTE(review): the specialization head (doxygen line 53) and the
// `using Smoother = ...` alias (doxygen line 57) were dropped during
// extraction; the cross-reference index places this definition at
// StandardPreconditioners_mpi.hpp:55. The M/V/C template parameters suggest a
// matrix/vector/communication-parameterized smoother (likely
// ParallelOverlappingILU0) — TODO confirm against the original header.
52template <class M, class V, class C>
54{
// Builds ILU-specific smoother arguments from the property tree:
//   "iterations" (default 1)     -> number of smoother sweeps,
//   "iluwidth"   (default 0)     -> fill-in level passed to setN(),
//   "milutype"   (default "ilu") -> MILU variant via convertString2Milu(),
//   "relaxation" (default 1.0)   -> relaxation factor.
 55 static auto args(const PropertyTree& prm)
 56 {
 58 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 59 SmootherArgs smootherArgs;
 60 smootherArgs.iterations = prm.get<int>("iterations", 1);
// NOTE(review): 'iluwitdh' is a typo for 'iluwidth' — harmless, it is only a
// local variable name; the property key read is the correctly spelled
// "iluwidth".
 61 const int iluwitdh = prm.get<int>("iluwidth", 0);
 62 smootherArgs.setN(iluwitdh);
 63 const MILU_VARIANT milu = convertString2Milu(prm.get<std::string>("milutype", std::string("ilu")));
 64 smootherArgs.setMilu(milu);
// Alternative overlap modes kept for reference by the original authors.
 65 // smootherArgs.overlap=SmootherArgs::vertex;
 66 // smootherArgs.overlap=SmootherArgs::none;
 67 // smootherArgs.overlap=SmootherArgs::aggregate;
 68 smootherArgs.relaxationFactor = prm.get<double>("relaxation", 1.0);
 69 return smootherArgs;
 70 }
 71};
72
// Detection idiom: the trailing return type's decltype makes this overload
// participate in overload resolution only when C actually provides a
// setUseFixedOrder(bool) member; otherwise substitution fails silently
// (SFINAE) and the variadic fallback overload is chosen instead.
//
// \tparam C            criterion type (e.g. an AMG coarsening criterion).
// \param criterion     object whose setUseFixedOrder member is forwarded to.
// \param booleanValue  flag passed through to criterion.setUseFixedOrder().
// \return whatever criterion.setUseFixedOrder() returns.
template <typename C>
auto setUseFixedOrder(C& criterion, bool booleanValue) -> decltype(criterion.setUseFixedOrder(booleanValue))
{
    // Set flag to ensure that the matrices in the AMG hierarchy are
    // constructed with deterministic indices.
    return criterion.setUseFixedOrder(booleanValue);
}
// Fallback overload of setUseFixedOrder: the ellipsis parameter ranks last
// in overload resolution, so this is selected only when C has no
// setUseFixedOrder(bool) member (older DUNE criterion types). Intentionally
// a no-op.
template <typename C>
void setUseFixedOrder(C&, ...)
{
    // do nothing, since the function setUseFixedOrder does not exist yet
}
84
// Builds a Dune::Amg coarsening criterion from the property tree.
// NOTE(review): the function signature lines (doxygen lines 86-87) were
// dropped during extraction; per the cross-reference index this is
// `static Criterion AMGHelper<...>::criterion(const PropertyTree& prm)`
// defined at StandardPreconditioners_mpi.hpp:87 — TODO confirm against the
// original header.
// Every tunable below is read from the tree with an explicit default, so an
// empty tree yields a fully configured criterion.
85template <class Operator, class Comm, class Matrix, class Vector>
88{
// 15 = max hierarchy levels (first ctor arg); "coarsenTarget" caps the
// coarsest-level size.
 89 Criterion criterion(15, prm.get<int>("coarsenTarget", 1200));
 90 criterion.setDefaultValuesIsotropic(2);
 91 criterion.setAlpha(prm.get<double>("alpha", 0.33));
 92 criterion.setBeta(prm.get<double>("beta", 1e-5));
 93 criterion.setMaxLevel(prm.get<int>("maxlevel", 15));
 94 criterion.setSkipIsolated(prm.get<bool>("skip_isolated", false));
 95 criterion.setNoPreSmoothSteps(prm.get<int>("pre_smooth", 1));
 96 criterion.setNoPostSmoothSteps(prm.get<int>("post_smooth", 1));
 97 criterion.setDebugLevel(prm.get<int>("verbosity", 0));
 98 // As the default we request to accumulate data to 1 process always as our matrix
 99 // graph might be unsymmetric and hence not supported by the PTScotch/ParMetis
 100 // calls in DUNE. Accumulating to 1 skips PTScotch/ParMetis
 101 criterion.setAccumulate(static_cast<Dune::Amg::AccumulationMode>(prm.get<int>("accumulate", 1)));
 102 criterion.setProlongationDampingFactor(prm.get<double>("prolongationdamping", 1.6));
 103 criterion.setMaxDistance(prm.get<int>("maxdistance", 2));
 104 criterion.setMaxConnectivity(prm.get<int>("maxconnectivity", 15));
 105 criterion.setMaxAggregateSize(prm.get<int>("maxaggsize", 6));
 106 criterion.setMinAggregateSize(prm.get<int>("minaggsize", 4));
 107 setUseFixedOrder(criterion, true); // If possible, set flag to ensure that the matrices in the AMG hierarchy are constructed with deterministic indices.
 108 return criterion;
 109}
110
// Constructs an AMG (or Krylov-accelerated KAMG) preconditioner for the
// given operator, configured from the property tree.
// NOTE(review): several lines were dropped during extraction — the function
// name/first parameter (doxygen lines 113-114), the smoother-argument setup
// (line 119), and the `using Type = ...` aliases inside each branch (lines
// 121 and 125). Per the cross-reference index the full signature is
// `static PrecPtr makeAmgPreconditioner(const Operator& op,
//  const PropertyTree& prm, bool useKamg = false)` defined at
// StandardPreconditioners_mpi.hpp:114 — TODO confirm against the original
// header.
111template <class Operator, class Comm, class Matrix, class Vector>
112template <class Smoother>
 115 const PropertyTree& prm,
 116 bool useKamg)
 117{
// Coarsening criterion built from the same property tree (see criterion()).
 118 auto crit = criterion(prm);
// KAMG branch takes two extra knobs: "max_krylov" (Krylov steps per level,
// default 1) and "min_reduction" (defect-reduction target, default 1e-1).
 120 if (useKamg) {
 122 return std::make_shared<Type>(
 123 op, crit, sargs, prm.get<std::size_t>("max_krylov", 1), prm.get<double>("min_reduction", 1e-1));
 124 } else {
 126 return std::make_shared<Type>(op, crit, sargs);
 127 }
 128}
129
// Registration helper for the MPI-parallel standard preconditioners.
// NOTE(review): the struct declaration line (doxygen line 131) was dropped
// during extraction; per the cross-reference index this struct is defined at
// StandardPreconditioners_mpi.hpp:132 and exposes the three static members
// add(), createParILU() and interiorIfGhostLast() documented below.
130template <class Operator, class Comm, typename = void> // Note: Last argument is to allow partial specialization for GPU
132{
// add(): registers creator lambdas for every standard MPI-parallel
// preconditioner with the factory F. Each creator receives
// (operator, property tree, weights calculator, pressure index, comm) and
// returns a shared preconditioner pointer.
// NOTE(review): the alias for F (doxygen line 138) was dropped in
// extraction; from its use (`F::addCreator`, `typename F::Matrix`) and the
// index entry "Definition: PreconditionerFactory.hpp:64" it is presumably
// `using F = PreconditionerFactory<Operator, Comm>` — TODO confirm.
 133 static void add()
 134 {
 135 using namespace Dune;
 136 using O = Operator;
 137 using C = Comm;
 139 using M = typename F::Matrix;
 140 using V = typename F::Vector;
 141 using P = PropertyTree;
// --- Parallel ILU family: all delegate to createParILU() below. ---
// "ilu0" forces level 0; "paroverilu0" and "ilun" read "ilulevel"
// (default 0).
 142 F::addCreator("ilu0", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 143 return createParILU(op, prm, comm, 0);
 144 });
 145 F::addCreator("paroverilu0",
 146 [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 147 return createParILU(op, prm, comm, prm.get<int>("ilulevel", 0));
 148 });
 149 F::addCreator("ilun", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 150 return createParILU(op, prm, comm, prm.get<int>("ilulevel", 0));
 151 });
// --- Sequential smoothers wrapped as block preconditioners over comm. ---
// "duneilu" rebuilds Dune::SeqILU on update; reads "ilulevel", "relaxation",
// "resort".
 152 F::addCreator("duneilu", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 153 const int n = prm.get<int>("ilulevel", 0);
 154 const double w = prm.get<double>("relaxation", 1.0);
 155 const bool resort = prm.get<bool>("resort", false);
 156 return wrapBlockPreconditioner<RebuildOnUpdatePreconditioner<Dune::SeqILU<M, V, V>>>(
 157 comm, op.getmat(), n, w, resort);
 158 });
 159 F::addCreator("dilu", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 160 DUNE_UNUSED_PARAMETER(prm);
 161 return wrapBlockPreconditioner<MultithreadDILU<M, V, V>>(comm, op.getmat());
 162 });
// "jac"/"gs"/"sor"/"ssor": classic relaxation smoothers, all reading
// "repeats" (default 1) and "relaxation" (default 1.0); wrapped with a
// no-op update policy.
 163 F::addCreator("jac", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 164 const int n = prm.get<int>("repeats", 1);
 165 const double w = prm.get<double>("relaxation", 1.0);
 166 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqJac<M, V, V>>>(comm, op.getmat(), n, w);
 167 });
 168 F::addCreator("gs", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 169 const int n = prm.get<int>("repeats", 1);
 170 const double w = prm.get<double>("relaxation", 1.0);
 171 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqGS<M, V, V>>>(comm, op.getmat(), n, w);
 172 });
 173 F::addCreator("sor", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 174 const int n = prm.get<int>("repeats", 1);
 175 const double w = prm.get<double>("relaxation", 1.0);
 176 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqSOR<M, V, V>>>(comm, op.getmat(), n, w);
 177 });
 178 F::addCreator("ssor", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 179 const int n = prm.get<int>("repeats", 1);
 180 const double w = prm.get<double>("relaxation", 1.0);
 181 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqSSOR<M, V, V>>>(comm, op.getmat(), n, w);
 182 });
 183
 184 // Only add AMG preconditioners to the factory if the operator
 185 // is the overlapping schwarz operator or GhostLastMatrixAdapter. This could be extended
 186 // later, but at this point no other operators are compatible
 187 // with the AMG hierarchy construction.
 188 if constexpr (std::is_same_v<O, Dune::OverlappingSchwarzOperator<M, V, V, C>> ||
 189 std::is_same_v<O, Opm::GhostLastMatrixAdapter<M, V, V, C>>) {
// "amg": dispatches on the (lower-cased) "smoother" property to pick the
// smoother type for Dune::Amg::AMGCPR; the criterion always comes from
// AMGHelper::criterion(prm).
// NOTE(review): the ilu0/paroverilu0 branch below lost two lines in
// extraction (doxygen lines 197 and 199) — presumably the
// `using Smoother = ...` alias and the smoother-argument setup; TODO confirm.
 190 F::addCreator("amg", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 191 using PrecPtr = std::shared_ptr<Dune::PreconditionerWithUpdate<V, V>>;
 192 std::string smoother = prm.get<std::string>("smoother", "paroverilu0");
 193 // Make the smoother type lowercase for internal canonical representation
 194 std::transform(smoother.begin(), smoother.end(), smoother.begin(), ::tolower);
 195 // TODO: merge this with ILUn, and possibly simplify the factory to only work with ILU?
 196 if (smoother == "ilu0" || smoother == "paroverilu0") {
 198 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 200 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 201 return prec;
// Each remaining branch wraps a sequential smoother in a
// Dune::BlockPreconditioner for parallel use with default-constructed
// smoother arguments.
 202 } else if (smoother == "dilu") {
 203 using SeqSmoother = Dune::MultithreadDILU<M, V, V>;
 204 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 205 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 206 SmootherArgs sargs;
 207 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 208 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 209 return prec;
 210 } else if (smoother == "jac") {
 211 using SeqSmoother = SeqJac<M, V, V>;
 212 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 213 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 214 SmootherArgs sargs;
 215 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 216 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 217 return prec;
 218 } else if (smoother == "gs") {
 219 using SeqSmoother = SeqGS<M, V, V>;
 220 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 221 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 222 SmootherArgs sargs;
 223 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 224 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 225 return prec;
 226 } else if (smoother == "sor") {
 227 using SeqSmoother = SeqSOR<M, V, V>;
 228 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 229 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 230 SmootherArgs sargs;
 231 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 232 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 233 return prec;
 234 } else if (smoother == "ssor") {
 235 using SeqSmoother = SeqSSOR<M, V, V>;
 236 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 237 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 238 SmootherArgs sargs;
 239 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 240 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 241 return prec;
 242 } else if (smoother == "ilun") {
 243 using SeqSmoother = SeqILU<M, V, V>;
 244 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 245 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 246 SmootherArgs sargs;
 247 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 248 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 249 return prec;
 250 } else {
 251 OPM_THROW(std::invalid_argument, "Properties: No smoother with name " + smoother + ".");
 252 }
 253 });
// "hypre": only registered for scalar (1x1 block) matrices whose field type
// matches HYPRE_Real.
 254#if HAVE_HYPRE
 255 if constexpr (M::block_type::rows == 1 && M::block_type::cols == 1
 256 && std::is_same_v<HYPRE_Real, typename V::field_type>) {
 257 F::addCreator(
 258 "hypre", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 259 return std::make_shared<Hypre::HyprePreconditioner<M, V, V, C>>(op.getmat(), prm, comm);
 260 });
 261 }
 262#endif
 263 }
 264
 265
// --- Two-level CPR preconditioners. Both require a valid weights
// calculator and an explicit pressure index; "cprt" differs from "cpr" only
// in the transfer policy's boolean flag (true = transpose variant,
// presumably — TODO confirm against PressureTransferPolicy).
// NOTE(review): the thrown message "It needs to specified" is missing the
// word "be" — cannot be fixed here without a code change.
 266 F::addCreator("cpr",
 267 [](const O& op,
 268 const P& prm,
 269 const std::function<V()> weightsCalculator,
 270 std::size_t pressureIndex,
 271 const C& comm) {
 272 assert(weightsCalculator);
 273 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 274 OPM_THROW(std::logic_error,
 275 "Pressure index out of bounds. It needs to specified for CPR");
 276 }
 277 using Scalar = typename V::field_type;
 278 using LevelTransferPolicy = PressureTransferPolicy<O, Comm, Scalar, false>;
 279 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
 280 op, prm, weightsCalculator, pressureIndex, comm);
 281 });
 282 F::addCreator("cprt",
 283 [](const O& op,
 284 const P& prm,
 285 const std::function<V()> weightsCalculator,
 286 std::size_t pressureIndex,
 287 const C& comm) {
 288 assert(weightsCalculator);
 289 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 290 OPM_THROW(std::logic_error,
 291 "Pressure index out of bounds. It needs to specified for CPR");
 292 }
 293 using Scalar = typename V::field_type;
 294 using LevelTransferPolicy = PressureTransferPolicy<O, Comm, Scalar, true>;
 295 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
 296 op, prm, weightsCalculator, pressureIndex, comm);
 297 });
 298
 299 // Add CPRW only for the WellModelGhostLastMatrixAdapter, as the method requires that the
 300 // operator has the addWellPressureEquations() method (and a few more) it can not be combined
 301 // with a well-less operator such as GhostLastMatrixAdapter or OverlappingSchwarzOperator.
 302 // For OPM Flow this corresponds to requiring --matrix-add-well-contributions=false
 303 // (which is the default).
 304 if constexpr (std::is_same_v<O, WellModelGhostLastMatrixAdapter<M, V, V, true>>) {
 305 F::addCreator("cprw",
 306 [](const O& op,
 307 const P& prm,
 308 const std::function<V()> weightsCalculator,
 309 std::size_t pressureIndex,
 310 const C& comm) {
 311 assert(weightsCalculator);
 312 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 313 OPM_THROW(std::logic_error,
 314 "Pressure index out of bounds. It needs to specified for CPR");
 315 }
 316 using Scalar = typename V::field_type;
 317 using LevelTransferPolicy = PressureBhpTransferPolicy<O, Comm, Scalar, false>;
 318 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
 319 op, prm, weightsCalculator, pressureIndex, comm);
 320 });
 321 }
 322
 323#if HAVE_CUDA
 324 // Here we create the *wrapped* GPU preconditioners
 325 // meaning they will act as CPU preconditioners on the outside,
 326 // but copy data back and forth to the GPU as needed.
 327
 328 // TODO: Make this use the GPU preconditioner factory once that is up and running.
 329 F::addCreator("gpuilu0", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 330 const double w = prm.get<double>("relaxation", 1.0);
 331 using field_type = typename V::field_type;
 332 using GpuILU0 = typename gpuistl::
 333 GpuSeqILU0<M, gpuistl::GpuVector<field_type>, gpuistl::GpuVector<field_type>>;
 334 auto gpuILU0 = std::make_shared<GpuILU0>(op.getmat(), w);
 335
 336 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, GpuILU0>>(gpuILU0);
 337 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 338 return wrapped;
 339 });
 340
// NOTE(review): the "gpujac" creator lost lines in extraction (doxygen
// lines 345 and 347-348) — presumably the GpuJac alias body and a
// `using MatrixOwner = ...` alias analogous to the "gpudilu"/"opmgpuilu0"
// creators below; TODO confirm.
 341 F::addCreator("gpujac", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 342 const double w = prm.get<double>("relaxation", 1.0);
 343 using field_type = typename V::field_type;
 344 using GpuJac =
 346
 349
 350 auto gpuJac = std::make_shared<MatrixOwner>(op.getmat(), w);
 351
 352 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuJac);
 353 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 354 return wrapped;
 355 });
 356
// NOTE(review): the "gpudilu" creator lost its GpuDILU/MatrixOwner alias
// lines in extraction (doxygen lines 363-365); TODO confirm.
 357 F::addCreator("gpudilu", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 358 const bool split_matrix = prm.get<bool>("split_matrix", true);
 359 const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
 360 const int mixed_precision_scheme = prm.get<int>("mixed_precision_scheme", 0);
 361 const bool reorder = prm.get<bool>("reorder", true);
 362 using field_type = typename V::field_type;
 366
 367 // Note: op.getmat() is passed twice, because the GpuDILU needs both the CPU and GPU matrix.
 368 // The first argument will be converted to a GPU matrix, and the second one is used as a CPU matrix.
 369 auto gpuDILU = std::make_shared<MatrixOwner>(op.getmat(), op.getmat(), split_matrix, tune_gpu_kernels, mixed_precision_scheme, reorder);
 370
 371 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuDILU);
 372 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 373 return wrapped;
 374 });
 375
// NOTE(review): the "opmgpuilu0" creator lost the first lines of its
// OpmGpuILU0/MatrixOwner aliases in extraction (doxygen lines 381 and 383);
// the surviving line 384 shows the MatrixOwner alias tail; TODO confirm.
 376 F::addCreator("opmgpuilu0", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 377 const bool split_matrix = prm.get<bool>("split_matrix", true);
 378 const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
 379 const int mixed_precision_scheme = prm.get<int>("mixed_precision_scheme", 0);
 380 using field_type = typename V::field_type;
 382
 384 gpuistl::GpuVector<field_type>, OpmGpuILU0, M>;
 385
 386 // Note: op.getmat() is passed twice, because the OPMGPUILU0 needs both the CPU and GPU matrix.
 387 // The first argument will be converted to a GPU matrix, and the second one is used as a CPU matrix.
 388 auto gpuilu0 = std::make_shared<MatrixOwner>(op.getmat(), op.getmat(), split_matrix, tune_gpu_kernels, mixed_precision_scheme);
 389
 390 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuilu0);
 391 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 392 return wrapped;
 393 });
 394#endif // HAVE_CUDA
 395 }
 396
 397
// createParILU(): builds a ParallelOverlappingILU0 preconditioner.
// Per the cross-reference index the full signature (return-type line 398
// dropped in extraction) is:
//   static PreconditionerFactory<Operator, Comm>::PrecPtr
//   createParILU(const Operator& op, const PropertyTree& prm,
//                const Comm& comm, const int ilulevel)
// Reads "relaxation" (default 1.0), "redblack" (false) and
// "reorder_spheres" (false). For level 0 the interior-row count from
// interiorIfGhostLast() is passed so the factorization can be restricted to
// interior rows; for higher fill levels a different ctor overload taking the
// level is used.
 399 createParILU(const Operator& op, const PropertyTree& prm, const Comm& comm, const int ilulevel)
 400 {
 402 using M = typename F::Matrix;
 403 using V = typename F::Vector;
 404
 405 const double w = prm.get<double>("relaxation", 1.0);
 406 const bool redblack = prm.get<bool>("redblack", false);
 407 const bool reorder_spheres = prm.get<bool>("reorder_spheres", false);
 408 // Already a parallel preconditioner. Need to pass comm, but no need to wrap it in a BlockPreconditioner.
 409 if (ilulevel == 0) {
 410 const std::size_t num_interior = interiorIfGhostLast(comm);
 411 assert(num_interior <= op.getmat().N());
 412 return std::make_shared<ParallelOverlappingILU0<M, V, V, Comm>>(
 413 op.getmat(), comm, w, MILU_VARIANT::ILU, num_interior, redblack, reorder_spheres);
 414 } else {
 415 return std::make_shared<ParallelOverlappingILU0<M, V, V, Comm>>(
 416 op.getmat(), comm, ilulevel, w, MILU_VARIANT::ILU, redblack, reorder_spheres);
 417 }
 418 }
 419
// interiorIfGhostLast(): counts owner-attributed (interior) indices in the
// communication index set. If the owned local indices form a contiguous
// prefix [0, interior_count) — i.e. ghost rows are ordered last — the
// interior count is returned so ILU can skip ghost rows; otherwise the full
// index-set size is returned as a safe fallback.
 424 static std::size_t interiorIfGhostLast(const Comm& comm)
 425 {
 426 std::size_t interior_count = 0;
 427 std::size_t highest_interior_index = 0;
 428 const auto& is = comm.indexSet();
 429 for (const auto& ind : is) {
 430 if (Comm::OwnerSet::contains(ind.local().attribute())) {
 431 ++interior_count;
 432 highest_interior_index = std::max(highest_interior_index, ind.local().local());
 433 }
 434 }
 435 if (highest_interior_index + 1 == interior_count) {
 436 return interior_count;
 437 } else {
 438 return is.size();
 439 }
 440 }
 441};
442
443
444} // namespace Opm
445
446#endif // OPM_STANDARDPRECONDITIONERS_MPI_HEADER
Dune::OwnerOverlapCopyCommunication< int, int > Comm
Definition: FlexibleSolver_impl.hpp:304
Parallel algebraic multigrid based on agglomeration.
Definition: amgcpr.hh:88
Definition: PreconditionerWithUpdate.hpp:43
The OpenMP thread parallelized DILU preconditioner.
Definition: DILU.hpp:53
A two-step version of an overlapping Schwarz preconditioner using one step ILU0 as its approximate inverse (brief appears truncated in this generated index).
Definition: ParallelOverlappingILU0.hpp:131
Definition: PreconditionerFactory.hpp:64
std::shared_ptr< Dune::PreconditionerWithUpdate< Vector, Vector > > PrecPtr
The type of pointer returned by create().
Definition: PreconditionerFactory.hpp:71
Definition: PressureBhpTransferPolicy.hpp:99
Definition: PressureTransferPolicy.hpp:55
Hierarchical collection of key/value pairs.
Definition: PropertyTree.hpp:39
T get(const std::string &key) const
DILU preconditioner on the GPU.
Definition: GpuDILU.hpp:53
Jacobi preconditioner on the GPU.
Definition: GpuJac.hpp:47
ILU0 preconditioner on the GPU.
Definition: OpmGpuILU0.hpp:51
Convert a CPU matrix to a GPU matrix and use a CUDA preconditioner on the GPU.
Definition: PreconditionerCPUMatrixToGPUMatrix.hpp:42
Definition: fvbaseprimaryvariables.hh:141
Definition: blackoilbioeffectsmodules.hh:43
MILU_VARIANT
Definition: MILU.hpp:34
@ ILU
Do not perform modified ILU.
auto setUseFixedOrder(C &criterion, bool booleanValue) -> decltype(criterion.setUseFixedOrder(booleanValue))
Definition: StandardPreconditioners_mpi.hpp:75
MILU_VARIANT convertString2Milu(const std::string &milu)
Dune::Amg::CoarsenCriterion< CriterionBase > Criterion
Definition: PreconditionerFactory.hpp:47
static Criterion criterion(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:87
std::shared_ptr< Dune::PreconditionerWithUpdate< Vector, Vector > > PrecPtr
Definition: PreconditionerFactory.hpp:44
static PrecPtr makeAmgPreconditioner(const Operator &op, const PropertyTree &prm, bool useKamg=false)
Definition: StandardPreconditioners_mpi.hpp:114
static auto args(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:55
Definition: StandardPreconditioners_mpi.hpp:38
static auto args(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:39
Definition: StandardPreconditioners_mpi.hpp:132
static PreconditionerFactory< Operator, Comm >::PrecPtr createParILU(const Operator &op, const PropertyTree &prm, const Comm &comm, const int ilulevel)
Definition: StandardPreconditioners_mpi.hpp:399
static std::size_t interiorIfGhostLast(const Comm &comm)
Definition: StandardPreconditioners_mpi.hpp:424
static void add()
Definition: StandardPreconditioners_mpi.hpp:133