StandardPreconditioners_mpi.hpp
Go to the documentation of this file.
1/*
2 Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
3 Copyright 2019 SINTEF Digital, Mathematics and Cybernetics.
4
5 This file is part of the Open Porous Media project (OPM).
6
7 OPM is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version.
11
12 OPM is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with OPM. If not, see <http://www.gnu.org/licenses/>.
19*/
20
21#ifndef OPM_STANDARDPRECONDITIONERS_MPI_HEADER
22#define OPM_STANDARDPRECONDITIONERS_MPI_HEADER
23
24#if HAVE_CUDA
25#if USE_HIP
26#include <opm/simulators/linalg/gpuistl_hip/PreconditionerCPUMatrixToGPUMatrix.hpp>
27#else
29#endif
30#endif
31
32#include <functional>
33#include <memory>
34#include <type_traits>
35
36namespace Opm {
37
38
// Translates a PropertyTree configuration into the smoother-argument struct
// that Dune::Amg expects for the given Smoother type (primary template;
// the ILU specialization below reads additional keys).
// NOTE(review): the declaration line between the template header and the
// opening brace (file line 40) was dropped by the documentation extraction;
// the cross-reference index identifies this struct as AMGSmootherArgsHelper —
// confirm against the original header.
template <class Smoother>
41{
 // Builds SmootherTraits<Smoother>::Arguments from the property tree.
 // Keys read: "iterations" (default 1) and "relaxation" (default 1.0).
 42 static auto args(const PropertyTree& prm)
 43 {
 44 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 45 SmootherArgs smootherArgs;
 46 smootherArgs.iterations = prm.get<int>("iterations", 1);
 47 // smootherArgs.overlap=SmootherArgs::vertex;
 48 // smootherArgs.overlap=SmootherArgs::none;
 49 // smootherArgs.overlap=SmootherArgs::aggregate;
 50 smootherArgs.relaxationFactor = prm.get<double>("relaxation", 1.0);
 51 return smootherArgs;
 52 }
 53};
54
// Specialization of the smoother-argument helper for the parallel ILU
// smoother: in addition to "iterations"/"relaxation" it configures the ILU
// fill-in level and the modified-ILU variant.
// NOTE(review): the specialization's declaration line (file line 56) and the
// `using Smoother = ...` alias (file line 60) were dropped by the extraction;
// per the index this specializes on ParallelOverlappingILU0 — confirm against
// the original header.
55template <class M, class V, class C>
57{
 // Keys read: "iterations" (default 1), "iluwidth" (fill-in level, default 0),
 // "milutype" (default "ilu", i.e. no modification), "relaxation" (default 1.0).
 58 static auto args(const PropertyTree& prm)
 59 {
 61 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 62 SmootherArgs smootherArgs;
 63 smootherArgs.iterations = prm.get<int>("iterations", 1);
 // NOTE(review): local variable name "iluwitdh" is a typo for "iluwidth"
 // (cosmetic only — the property key itself is spelled correctly).
 64 const int iluwitdh = prm.get<int>("iluwidth", 0);
 65 smootherArgs.setN(iluwitdh);
 66 const MILU_VARIANT milu = convertString2Milu(prm.get<std::string>("milutype", std::string("ilu")));
 67 smootherArgs.setMilu(milu);
 68 // smootherArgs.overlap=SmootherArgs::vertex;
 69 // smootherArgs.overlap=SmootherArgs::none;
 70 // smootherArgs.overlap=SmootherArgs::aggregate;
 71 smootherArgs.relaxationFactor = prm.get<double>("relaxation", 1.0);
 72 return smootherArgs;
 73 }
 74};
75
// Detection idiom for an optional member function: the first overload only
// participates in overload resolution when C provides setUseFixedOrder(bool)
// (SFINAE through the trailing decltype return type); for every other C the
// variadic fallback below is chosen and the call is a no-op. This lets the
// code request deterministic AMG-hierarchy matrix indices on DUNE versions
// that support the flag, while still compiling against older versions.
template <typename C>
auto setUseFixedOrder(C& criterion, bool flag) -> decltype(criterion.setUseFixedOrder(flag))
{
    // Forward the flag so the AMG hierarchy is built with deterministic
    // (reproducible) matrix indices.
    return criterion.setUseFixedOrder(flag);
}

// Fallback selected when C has no setUseFixedOrder member: intentionally empty.
template <typename C>
void setUseFixedOrder(C&, ...)
{
}
87
// Builds the Dune::Amg coarsening criterion from a PropertyTree, with OPM's
// defaults for every key. Requests accumulation to one process by default
// (see comment below) and, where the DUNE version allows it, deterministic
// matrix ordering in the hierarchy via setUseFixedOrder.
// NOTE(review): the function's return type and qualified name (file lines
// 89-90) were dropped by the extraction; the cross-reference index identifies
// this as AMGHelper<Operator, Comm, Matrix, Vector>::criterion(const PropertyTree&).
88template <class Operator, class Comm, class Matrix, class Vector>
91{
 // First constructor argument is presumably the default max level, also
 // overridable below via "maxlevel" — TODO confirm against Dune::Amg::CoarsenCriterion.
 92 Criterion criterion(15, prm.get<int>("coarsenTarget", 1200));
 93 criterion.setDefaultValuesIsotropic(2);
 94 criterion.setAlpha(prm.get<double>("alpha", 0.33));
 95 criterion.setBeta(prm.get<double>("beta", 1e-5));
 96 criterion.setMaxLevel(prm.get<int>("maxlevel", 15));
 97 criterion.setSkipIsolated(prm.get<bool>("skip_isolated", false));
 98 criterion.setNoPreSmoothSteps(prm.get<int>("pre_smooth", 1));
 99 criterion.setNoPostSmoothSteps(prm.get<int>("post_smooth", 1));
 100 criterion.setDebugLevel(prm.get<int>("verbosity", 0));
 101 // As the default we request to accumulate data to 1 process always as our matrix
 102 // graph might be unsymmetric and hence not supported by the PTScotch/ParMetis
 103 // calls in DUNE. Accumulating to 1 skips PTScotch/ParMetis
 104 criterion.setAccumulate(static_cast<Dune::Amg::AccumulationMode>(prm.get<int>("accumulate", 1)));
 105 criterion.setProlongationDampingFactor(prm.get<double>("prolongationdamping", 1.6));
 106 criterion.setMaxDistance(prm.get<int>("maxdistance", 2));
 107 criterion.setMaxConnectivity(prm.get<int>("maxconnectivity", 15));
 108 criterion.setMaxAggregateSize(prm.get<int>("maxaggsize", 6));
 109 criterion.setMinAggregateSize(prm.get<int>("minaggsize", 4));
 110 setUseFixedOrder(criterion, true); // If possible, set flag to ensure that the matrices in the AMG hierarchy are constructed with deterministic indices.
 111 return criterion;
 112}
113
// Constructs either a Krylov-accelerated AMG (KAMG, when useKamg is true,
// reading "max_krylov" and "min_reduction") or a plain AMG preconditioner
// from the operator and the PropertyTree configuration.
// NOTE(review): several hyperlinked lines were dropped by the extraction —
// the signature's return type/name and first parameter (file lines 116-117),
// the smoother-argument setup (file line 122, presumably via
// AMGSmootherArgsHelper<Smoother>::args(prm) producing `sargs`), and the two
// `using Type = ...` aliases selecting the KAMG/AMG implementation (file
// lines 124 and 128). Confirm against the original header.
114template <class Operator, class Comm, class Matrix, class Vector>
115template <class Smoother>
 118 const PropertyTree& prm,
 119 bool useKamg)
 120{
 121 auto crit = criterion(prm);
 123 if (useKamg) {
 125 return std::make_shared<Type>(
 126 op, crit, sargs, prm.get<std::size_t>("max_krylov", 1), prm.get<double>("min_reduction", 1e-1));
 127 } else {
 129 return std::make_shared<Type>(op, crit, sargs);
 130 }
 131}
132
133template <class Operator, class Comm, typename = void> // Note: Last argument is to allow partial specialization for GPU
135{
// Registers the standard MPI-parallel preconditioners with the
// PreconditionerFactory for this Operator/Comm pair: parallel ILU variants,
// wrapped sequential smoothers (Jacobi, GS, SOR, SSOR, DILU), AMG (only for
// operators compatible with hierarchy construction), CPR/CPRT/CPRW two-level
// methods, Hypre (when available), and GPU-backed preconditioners wrapped to
// act as CPU preconditioners.
// NOTE(review): this listing was extracted from generated documentation and
// several hyperlinked alias lines are missing (e.g. the factory alias `F` at
// file line 141, smoother aliases at 200/202, and GPU type aliases around
// 348-352, 366-368 and 384-386); confirm against the original header.
136 static void add()
 137 {
 138 using namespace Dune;
 139 using O = Operator;
 140 using C = Comm;
 142 using M = typename F::Matrix;
 143 using V = typename F::Vector;
 144 using P = PropertyTree;
 // Parallel ILU0 and level-based parallel ILU creators (see createParILU).
 145 F::addCreator("ilu0", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 146 return createParILU(op, prm, comm, 0);
 147 });
 148 F::addCreator("paroverilu0",
 149 [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 150 return createParILU(op, prm, comm, prm.get<int>("ilulevel", 0));
 151 });
 152 F::addCreator("ilun", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 153 return createParILU(op, prm, comm, prm.get<int>("ilulevel", 0));
 154 });
 // Dune's sequential ILU, rebuilt on every update and wrapped per-block.
 155 F::addCreator("duneilu", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 156 const int n = prm.get<int>("ilulevel", 0);
 157 const double w = prm.get<double>("relaxation", 1.0);
 158 const bool resort = prm.get<bool>("resort", false);
 159 return wrapBlockPreconditioner<RebuildOnUpdatePreconditioner<Dune::SeqILU<M, V, V>>>(
 160 comm, std::cref(op.getmat()), n, w, resort);
 161 });
 162 F::addCreator("dilu", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 163 DUNE_UNUSED_PARAMETER(prm);
 164 return wrapBlockPreconditioner<MultithreadDILU<M, V, V>>(comm, op.getmat());
 165 });
 // Classic sequential relaxation smoothers; "repeats"/"relaxation" configure
 // iteration count and damping. No rebuild on update (DummyUpdate...).
 166 F::addCreator("jac", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 167 const int n = prm.get<int>("repeats", 1);
 168 const double w = prm.get<double>("relaxation", 1.0);
 169 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqJac<M, V, V>>>(comm, op.getmat(), n, w);
 170 });
 171 F::addCreator("gs", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 172 const int n = prm.get<int>("repeats", 1);
 173 const double w = prm.get<double>("relaxation", 1.0);
 174 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqGS<M, V, V>>>(comm, op.getmat(), n, w);
 175 });
 176 F::addCreator("sor", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 177 const int n = prm.get<int>("repeats", 1);
 178 const double w = prm.get<double>("relaxation", 1.0);
 179 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqSOR<M, V, V>>>(comm, op.getmat(), n, w);
 180 });
 181 F::addCreator("ssor", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 182 const int n = prm.get<int>("repeats", 1);
 183 const double w = prm.get<double>("relaxation", 1.0);
 184 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqSSOR<M, V, V>>>(comm, op.getmat(), n, w);
 185 });
 186
 187 // Only add AMG preconditioners to the factory if the operator
 188 // is the overlapping schwarz operator or GhostLastMatrixAdapter. This could be extended
 189 // later, but at this point no other operators are compatible
 190 // with the AMG hierarchy construction.
 191 if constexpr (std::is_same_v<O, Dune::OverlappingSchwarzOperator<M, V, V, C>> ||
 192 std::is_same_v<O, Opm::GhostLastMatrixAdapter<M, V, V, C>>) {
 193 F::addCreator("amg", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 194 using PrecPtr = std::shared_ptr<Dune::PreconditionerWithUpdate<V, V>>;
 195 std::string smoother = prm.get<std::string>("smoother", "paroverilu0");
 196 // Make the smoother type lowercase for internal canonical representation
 197 std::ranges::transform(smoother, smoother.begin(), ::tolower);
 198 // TODO: merge this with ILUn, and possibly simplify the factory to only work with ILU?
 199 if (smoother == "ilu0" || smoother == "paroverilu0") {
 // NOTE(review): file lines 200/202 (the ILU smoother alias and its
 // smoother-argument setup producing `sargs`) are missing from this listing.
 201 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 203 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 204 return prec;
 205 } else if (smoother == "dilu") {
 206 using SeqSmoother = Dune::MultithreadDILU<M, V, V>;
 207 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 208 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 209 SmootherArgs sargs;
 210 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 211 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 212 return prec;
 213 } else if (smoother == "jac") {
 214 using SeqSmoother = SeqJac<M, V, V>;
 215 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 216 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 217 SmootherArgs sargs;
 218 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 219 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 220 return prec;
 221 } else if (smoother == "gs") {
 222 using SeqSmoother = SeqGS<M, V, V>;
 223 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 224 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 225 SmootherArgs sargs;
 226 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 227 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 228 return prec;
 229 } else if (smoother == "sor") {
 230 using SeqSmoother = SeqSOR<M, V, V>;
 231 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 232 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 233 SmootherArgs sargs;
 234 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 235 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 236 return prec;
 237 } else if (smoother == "ssor") {
 238 using SeqSmoother = SeqSSOR<M, V, V>;
 239 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 240 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 241 SmootherArgs sargs;
 242 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 243 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 244 return prec;
 245 } else if (smoother == "ilun") {
 246 using SeqSmoother = SeqILU<M, V, V>;
 247 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 248 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 249 SmootherArgs sargs;
 250 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 251 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 252 return prec;
 253 } else {
 254 OPM_THROW(std::invalid_argument, "Properties: No smoother with name " + smoother + ".");
 255 }
 256 });
 // Hypre backend is only valid for scalar (1x1 block) matrices whose field
 // type matches HYPRE_Real.
257#if HAVE_HYPRE
 258 if constexpr (M::block_type::rows == 1 && M::block_type::cols == 1
 259 && std::is_same_v<HYPRE_Real, typename V::field_type>) {
 260 F::addCreator(
 261 "hypre", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 262 return std::make_shared<Hypre::HyprePreconditioner<M, V, V, C>>(op.getmat(), prm, comm);
 263 });
 264 }
265#endif
 266 }
 267
 268
 // Two-level CPR preconditioners; both require a weights calculator and a
 // valid pressure index ("cprt" uses the transposed transfer policy).
 269 F::addCreator("cpr",
 270 [](const O& op,
 271 const P& prm,
 272 const std::function<V()> weightsCalculator,
 273 std::size_t pressureIndex,
 274 const C& comm) {
 275 assert(weightsCalculator);
 276 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 277 OPM_THROW(std::logic_error,
 278 "Pressure index out of bounds. It needs to specified for CPR");
 279 }
 280 using Scalar = typename V::field_type;
 281 using LevelTransferPolicy = PressureTransferPolicy<O, Comm, Scalar, false>;
 282 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
 283 op, prm, weightsCalculator, pressureIndex, comm);
 284 });
 285 F::addCreator("cprt",
 286 [](const O& op,
 287 const P& prm,
 288 const std::function<V()> weightsCalculator,
 289 std::size_t pressureIndex,
 290 const C& comm) {
 291 assert(weightsCalculator);
 292 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 293 OPM_THROW(std::logic_error,
 294 "Pressure index out of bounds. It needs to specified for CPR");
 295 }
 296 using Scalar = typename V::field_type;
 297 using LevelTransferPolicy = PressureTransferPolicy<O, Comm, Scalar, true>;
 298 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
 299 op, prm, weightsCalculator, pressureIndex, comm);
 300 });
 301
 302 // Add CPRW only for the WellModelGhostLastMatrixAdapter, as the method requires that the
 303 // operator has the addWellPressureEquations() method (and a few more) it can not be combined
 304 // with a well-less operator such as GhostLastMatrixAdapter or OverlappingSchwarzOperator.
 305 // For OPM Flow this corresponds to requiring --matrix-add-well-contributions=false
 306 // (which is the default).
 307 if constexpr (std::is_same_v<O, WellModelGhostLastMatrixAdapter<M, V, V, true>>) {
 308 F::addCreator("cprw",
 309 [](const O& op,
 310 const P& prm,
 311 const std::function<V()> weightsCalculator,
 312 std::size_t pressureIndex,
 313 const C& comm) {
 314 assert(weightsCalculator);
 315 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 316 OPM_THROW(std::logic_error,
 317 "Pressure index out of bounds. It needs to specified for CPR");
 318 }
 319 using Scalar = typename V::field_type;
 320 using LevelTransferPolicy = PressureBhpTransferPolicy<O, Comm, Scalar, false>;
 321 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
 322 op, prm, weightsCalculator, pressureIndex, comm);
 323 });
 324 }
 325
326#if HAVE_CUDA
 327 // Here we create the *wrapped* GPU preconditioners
 328 // meaning they will act as CPU preconditioners on the outside,
 329 // but copy data back and forth to the GPU as needed.
 330
 331 // TODO: Make this use the GPU preconditioner factory once that is up and running.
 332 F::addCreator("gpuilu0", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 333 const double w = prm.get<double>("relaxation", 1.0);
 334 using field_type = typename V::field_type;
 335 using GpuILU0 = typename gpuistl::
 336 GpuSeqILU0<M, gpuistl::GpuVector<field_type>, gpuistl::GpuVector<field_type>>;
 337 auto gpuILU0 = std::make_shared<GpuILU0>(op.getmat(), w);
 338
 339 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, GpuILU0>>(gpuILU0);
 340 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 341 return wrapped;
 342 });
 343
 344 F::addCreator("gpujac", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 345 const double w = prm.get<double>("relaxation", 1.0);
 346 using field_type = typename V::field_type;
 // NOTE(review): file lines 348-352 (the GpuJac alias body and the
 // `MatrixOwner` alias) are missing from this extracted listing.
 347 using GpuJac =
 349
 352
 353 auto gpuJac = std::make_shared<MatrixOwner>(op.getmat(), w);
 354
 355 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuJac);
 356 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 357 return wrapped;
 358 });
 359
 360 F::addCreator("gpudilu", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 361 const bool split_matrix = prm.get<bool>("split_matrix", true);
 362 const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
 363 const int mixed_precision_scheme = prm.get<int>("mixed_precision_scheme", 0);
 364 const bool reorder = prm.get<bool>("reorder", true);
 365 using field_type = typename V::field_type;
 // NOTE(review): file lines 366-368 (the GpuDILU / `MatrixOwner` aliases)
 // are missing from this extracted listing.
 369
 370 // Note: op.getmat() is passed twice, because the GpuDILU needs both the CPU and GPU matrix.
 371 // The first argument will be converted to a GPU matrix, and the second one is used as a CPU matrix.
 372 auto gpuDILU = std::make_shared<MatrixOwner>(op.getmat(), op.getmat(), split_matrix, tune_gpu_kernels, mixed_precision_scheme, reorder);
 373
 374 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuDILU);
 375 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 376 return wrapped;
 377 });
 378
 379 F::addCreator("opmgpuilu0", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 380 const bool split_matrix = prm.get<bool>("split_matrix", true);
 381 const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
 382 const int mixed_precision_scheme = prm.get<int>("mixed_precision_scheme", 0);
 383 using field_type = typename V::field_type;
 // NOTE(review): file lines 384-386 (the OpmGpuILU0 alias and the start of
 // the `MatrixOwner` alias) are missing from this extracted listing.
 385
 387 gpuistl::GpuVector<field_type>, OpmGpuILU0, M>;
 388
 389 // Note: op.getmat() is passed twice, because the OPMGPUILU0 needs both the CPU and GPU matrix.
 390 // The first argument will be converted to a GPU matrix, and the second one is used as a CPU matrix.
 391 auto gpuilu0 = std::make_shared<MatrixOwner>(op.getmat(), op.getmat(), split_matrix, tune_gpu_kernels, mixed_precision_scheme);
 392
 393 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuilu0);
 394 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 395 return wrapped;
 396 });
397#endif // HAVE_CUDA
 398 }
399
400
// Creates a ParallelOverlappingILU0 preconditioner for the given fill-in
// level. For level 0 the number of interior rows is passed so factorization
// can be restricted to interior rows when the "ghost last" ordering holds
// (see interiorIfGhostLast); for higher levels the level itself is passed.
// Keys read: "relaxation" (default 1.0), "redblack" (default false),
// "reorder_spheres" (default false).
// NOTE(review): the return-type line (file line 401, per the index
// `static PreconditionerFactory<Operator, Comm>::PrecPtr`) and the alias at
// file line 404 (presumably `using F = ...`) were dropped by the extraction.
402 createParILU(const Operator& op, const PropertyTree& prm, const Comm& comm, const int ilulevel)
 403 {
 405 using M = typename F::Matrix;
 406 using V = typename F::Vector;
 407
 408 const double w = prm.get<double>("relaxation", 1.0);
 409 const bool redblack = prm.get<bool>("redblack", false);
 410 const bool reorder_spheres = prm.get<bool>("reorder_spheres", false);
 411 // Already a parallel preconditioner. Need to pass comm, but no need to wrap it in a BlockPreconditioner.
 412 if (ilulevel == 0) {
 413 const std::size_t num_interior = interiorIfGhostLast(comm);
 414 assert(num_interior <= op.getmat().N());
 415 return std::make_shared<ParallelOverlappingILU0<M, V, V, Comm>>(
 416 op.getmat(), comm, w, MILU_VARIANT::ILU, num_interior, redblack, reorder_spheres);
 417 } else {
 418 return std::make_shared<ParallelOverlappingILU0<M, V, V, Comm>>(
 419 op.getmat(), comm, ilulevel, w, MILU_VARIANT::ILU, redblack, reorder_spheres);
 420 }
 421 }
422
427 static std::size_t interiorIfGhostLast(const Comm& comm)
428 {
429 std::size_t interior_count = 0;
430 std::size_t highest_interior_index = 0;
431 const auto& is = comm.indexSet();
432 for (const auto& ind : is) {
433 if (Comm::OwnerSet::contains(ind.local().attribute())) {
434 ++interior_count;
435 highest_interior_index = std::max(highest_interior_index, ind.local().local());
436 }
437 }
438 if (highest_interior_index + 1 == interior_count) {
439 return interior_count;
440 } else {
441 return is.size();
442 }
443 }
444};
445
446
447} // namespace Opm
448
449#endif // OPM_STANDARDPRECONDITIONERS_MPI_HEADER
Dune::OwnerOverlapCopyCommunication< int, int > Comm
Definition: FlexibleSolver_impl.hpp:325
Parallel algebraic multigrid based on agglomeration.
Definition: amgcpr.hh:88
Definition: PreconditionerWithUpdate.hpp:45
The OpenMP thread parallelized DILU preconditioner.
Definition: DILU.hpp:53
A two-step version of an overlapping Schwarz preconditioner using one step ILU0 as.
Definition: ParallelOverlappingILU0.hpp:131
Definition: PreconditionerFactory.hpp:64
std::shared_ptr< Dune::PreconditionerWithUpdate< Vector, Vector > > PrecPtr
The type of pointer returned by create().
Definition: PreconditionerFactory.hpp:71
Definition: PressureBhpTransferPolicy.hpp:99
Definition: PressureTransferPolicy.hpp:55
Hierarchical collection of key/value pairs.
Definition: PropertyTree.hpp:39
T get(const std::string &key) const
DILU preconditioner on the GPU.
Definition: GpuDILU.hpp:53
Jacobi preconditioner on the GPU.
Definition: GpuJac.hpp:47
ILU0 preconditioner on the GPU.
Definition: OpmGpuILU0.hpp:51
Convert a CPU matrix to a GPU matrix and use a CUDA preconditioner on the GPU.
Definition: PreconditionerCPUMatrixToGPUMatrix.hpp:42
Definition: fvbaseprimaryvariables.hh:161
Definition: blackoilbioeffectsmodules.hh:45
MILU_VARIANT
Definition: MILU.hpp:34
@ ILU
Do not perform modified ILU.
auto setUseFixedOrder(C &criterion, bool booleanValue) -> decltype(criterion.setUseFixedOrder(booleanValue))
Definition: StandardPreconditioners_mpi.hpp:78
MILU_VARIANT convertString2Milu(const std::string &milu)
Dune::Amg::CoarsenCriterion< CriterionBase > Criterion
Definition: PreconditionerFactory.hpp:47
static Criterion criterion(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:90
std::shared_ptr< Dune::PreconditionerWithUpdate< Vector, Vector > > PrecPtr
Definition: PreconditionerFactory.hpp:44
static PrecPtr makeAmgPreconditioner(const Operator &op, const PropertyTree &prm, bool useKamg=false)
Definition: StandardPreconditioners_mpi.hpp:117
static auto args(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:58
Definition: StandardPreconditioners_mpi.hpp:41
static auto args(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:42
Definition: StandardPreconditioners_mpi.hpp:135
static PreconditionerFactory< Operator, Comm >::PrecPtr createParILU(const Operator &op, const PropertyTree &prm, const Comm &comm, const int ilulevel)
Definition: StandardPreconditioners_mpi.hpp:402
static std::size_t interiorIfGhostLast(const Comm &comm)
Definition: StandardPreconditioners_mpi.hpp:427
static void add()
Definition: StandardPreconditioners_mpi.hpp:136