StandardPreconditioners_mpi.hpp
Go to the documentation of this file.
1/*
2 Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
3 Copyright 2019 SINTEF Digital, Mathematics and Cybernetics.
4
5 This file is part of the Open Porous Media project (OPM).
6
7 OPM is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version.
11
12 OPM is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with OPM. If not, see <http://www.gnu.org/licenses/>.
19*/
20
21#ifndef OPM_STANDARDPRECONDITIONERS_MPI_HEADER
22#define OPM_STANDARDPRECONDITIONERS_MPI_HEADER
23
24#if HAVE_CUDA
26#endif
27
28
29namespace Opm {
30
31
// Generic helper that builds the AMG smoother argument set from a
// PropertyTree: smoother iteration count ("iterations", default 1) and
// relaxation factor ("relaxation", default 1.0).
// NOTE(review): the struct declaration line (original line 33) is missing
// from this extracted listing; the body below is otherwise verbatim.
32template <class Smoother>
34{
 35 static auto args(const PropertyTree& prm)
 36 {
 37 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 38 SmootherArgs smootherArgs;
 39 smootherArgs.iterations = prm.get<int>("iterations", 1);
 40 // smootherArgs.overlap=SmootherArgs::vertex;
 41 // smootherArgs.overlap=SmootherArgs::none;
 42 // smootherArgs.overlap=SmootherArgs::aggregate;
 43 smootherArgs.relaxationFactor = prm.get<double>("relaxation", 1.0);
 44 return smootherArgs;
 45 }
 46};
47
// Specialization of the smoother-args helper — presumably for the
// ParallelOverlappingILU0<M, V, V, C> smoother (the declaration line,
// original line 49, and the `using Smoother = ...` alias, original line 53,
// are missing from this extracted listing — TODO confirm against the repo).
// In addition to iterations/relaxation it configures the ILU fill-in level
// ("iluwidth", default 0) and the MILU variant ("milutype", default "ilu").
48template <class M, class V, class C>
50{
 51 static auto args(const PropertyTree& prm)
 52 {
 54 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 55 SmootherArgs smootherArgs;
 56 smootherArgs.iterations = prm.get<int>("iterations", 1);
 // NOTE(review): local variable name "iluwitdh" is a typo for "iluwidth";
 // harmless (purely local) but worth fixing in the real source.
 57 const int iluwitdh = prm.get<int>("iluwidth", 0);
 58 smootherArgs.setN(iluwitdh);
 59 const MILU_VARIANT milu = convertString2Milu(prm.get<std::string>("milutype", std::string("ilu")));
 60 smootherArgs.setMilu(milu);
 61 // smootherArgs.overlap=SmootherArgs::vertex;
 62 // smootherArgs.overlap=SmootherArgs::none;
 63 // smootherArgs.overlap=SmootherArgs::aggregate;
 64 smootherArgs.relaxationFactor = prm.get<double>("relaxation", 1.0);
 65 return smootherArgs;
 66 }
 67};
68
// Expression-SFINAE dispatch pair: the first overload participates in
// overload resolution only when C has a setUseFixedOrder(bool) member
// (the trailing decltype return type is ill-formed otherwise); the second,
// variadic overload is a strictly worse match and acts as the no-op
// fallback for criterion types that predate the setUseFixedOrder API.
69// trailing return type with decltype used for detecting existence of setUseFixedOrder member function by overloading the setUseFixedOrder function
70template <typename C>
71auto setUseFixedOrder(C& criterion, bool booleanValue) -> decltype(criterion.setUseFixedOrder(booleanValue))
72{
 73 return criterion.setUseFixedOrder(booleanValue); // Set flag to ensure that the matrices in the AMG hierarchy are constructed with deterministic indices.
74}
75template <typename C>
76void setUseFixedOrder(C&, ...)
77{
 78 // do nothing, since the function setUseFixedOrder does not exist yet
 79}
80
// AMGHelper<Operator, Comm, Matrix, Vector>::criterion:
// builds a Dune::Amg::CoarsenCriterion from the PropertyTree, with
// OPM-tuned defaults for coarsening/aggregation parameters.
// NOTE(review): the return type/signature lines (original lines 82-83) are
// missing from this extracted listing; per the cross-reference index this is
// `static Criterion criterion(const PropertyTree& prm)`.
81template <class Operator, class Comm, class Matrix, class Vector>
84{
 // First ctor argument 15 is the maximum coarsening level parameter of
 // CoarsenCriterion; "coarsenTarget" (default 1200) is the target number
 // of unknowns on the coarsest level.
 85 Criterion criterion(15, prm.get<int>("coarsenTarget", 1200));
 86 criterion.setDefaultValuesIsotropic(2);
 87 criterion.setAlpha(prm.get<double>("alpha", 0.33));
 88 criterion.setBeta(prm.get<double>("beta", 1e-5));
 89 criterion.setMaxLevel(prm.get<int>("maxlevel", 15));
 90 criterion.setSkipIsolated(prm.get<bool>("skip_isolated", false));
 91 criterion.setNoPreSmoothSteps(prm.get<int>("pre_smooth", 1));
 92 criterion.setNoPostSmoothSteps(prm.get<int>("post_smooth", 1));
 93 criterion.setDebugLevel(prm.get<int>("verbosity", 0));
 94 // As the default we request to accumulate data to 1 process always as our matrix
 95 // graph might be unsymmetric and hence not supported by the PTScotch/ParMetis
 96 // calls in DUNE. Accumulating to 1 skips PTScotch/ParMetis
 97 criterion.setAccumulate(static_cast<Dune::Amg::AccumulationMode>(prm.get<int>("accumulate", 1)));
 98 criterion.setProlongationDampingFactor(prm.get<double>("prolongationdamping", 1.6));
 99 criterion.setMaxDistance(prm.get<int>("maxdistance", 2));
 100 criterion.setMaxConnectivity(prm.get<int>("maxconnectivity", 15));
 101 criterion.setMaxAggregateSize(prm.get<int>("maxaggsize", 6));
 102 criterion.setMinAggregateSize(prm.get<int>("minaggsize", 4));
 103 setUseFixedOrder(criterion, true); // If possible, set flag to ensure that the matrices in the AMG hierarchy are constructed with deterministic indices.
 104 return criterion;
 105}
106
// AMGHelper<...>::makeAmgPreconditioner<Smoother>(op, prm, useKamg):
// constructs either a Krylov-accelerated AMG (useKamg == true, with
// "max_krylov" / "min_reduction" parameters) or a plain AMG preconditioner
// from the coarsening criterion and smoother arguments.
// NOTE(review): several lines are missing from this extracted listing —
// the signature lines (originals 109-110), the smoother-args line
// (original 115, presumably `auto sargs = AMGSmootherArgsHelper<...>::args(prm);`
// — TODO confirm), and the two `using Type = ...` alias lines
// (originals 117 and 121) that select the KAMG vs. AMG implementation type.
107template <class Operator, class Comm, class Matrix, class Vector>
108template <class Smoother>
111 const PropertyTree& prm,
112 bool useKamg)
113{
 114 auto crit = criterion(prm);
 116 if (useKamg) {
 118 return std::make_shared<Type>(
 119 op, crit, sargs, prm.get<std::size_t>("max_krylov", 1), prm.get<double>("min_reduction", 1e-1));
 120 } else {
 122 return std::make_shared<Type>(op, crit, sargs);
 123 }
 124}
125
// StandardPreconditioners (MPI/parallel specialization): registers all
// CPU-side parallel preconditioner creators with the PreconditionerFactory —
// ILU variants, Jacobi/GS/SOR/SSOR, AMG with a selectable smoother, an
// optional Hypre backend, the CPR/CPRT/CPRW two-level preconditioners, and
// (under HAVE_CUDA) GPU preconditioners wrapped to act as CPU ones.
// NOTE(review): this is an extracted listing with interior lines missing
// (struct declaration line 127; the `using F = PreconditionerFactory<O, C>`
// alias, line 134; the AMG Smoother alias/sargs lines 193 and 195; the GPU
// type-alias lines 341-345, 359-361, 377 and 379; the createParILU return
// type line 394 and its `using F` line 397). Names such as F, Smoother,
// sargs, GpuJac, OpmGpuILU0 and MatrixOwner below refer to those omitted
// definitions.
126template <class Operator, class Comm, typename = void> // Note: Last argument is to allow partial specialization for GPU
128{
 // Registers every creator lambda with the factory. Each lambda receives
 // (operator, property tree, weights calculator, pressure index, comm) and
 // returns a shared_ptr to a PreconditionerWithUpdate.
 129 static void add()
 130 {
 131 using namespace Dune;
 132 using O = Operator;
 133 using C = Comm;
 135 using M = typename F::Matrix;
 136 using V = typename F::Vector;
 137 using P = PropertyTree;
 138 F::addCreator("ilu0", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 139 return createParILU(op, prm, comm, 0);
 140 });
 141 F::addCreator("paroverilu0",
 142 [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 143 return createParILU(op, prm, comm, prm.get<int>("ilulevel", 0));
 144 });
 // NOTE(review): "ilun" is registered with the exact same body as
 // "paroverilu0" — both read "ilulevel" (default 0).
 145 F::addCreator("ilun", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 146 return createParILU(op, prm, comm, prm.get<int>("ilulevel", 0));
 147 });
 148 F::addCreator("duneilu", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 149 const int n = prm.get<int>("ilulevel", 0);
 150 const double w = prm.get<double>("relaxation", 1.0);
 151 const bool resort = prm.get<bool>("resort", false);
 152 return wrapBlockPreconditioner<RebuildOnUpdatePreconditioner<Dune::SeqILU<M, V, V>>>(
 153 comm, op.getmat(), n, w, resort);
 154 });
 155 F::addCreator("dilu", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 156 DUNE_UNUSED_PARAMETER(prm);
 157 return wrapBlockPreconditioner<MultithreadDILU<M, V, V>>(comm, op.getmat());
 158 });
 159 F::addCreator("jac", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 160 const int n = prm.get<int>("repeats", 1);
 161 const double w = prm.get<double>("relaxation", 1.0);
 162 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqJac<M, V, V>>>(comm, op.getmat(), n, w);
 163 });
 164 F::addCreator("gs", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 165 const int n = prm.get<int>("repeats", 1);
 166 const double w = prm.get<double>("relaxation", 1.0);
 167 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqGS<M, V, V>>>(comm, op.getmat(), n, w);
 168 });
 169 F::addCreator("sor", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 170 const int n = prm.get<int>("repeats", 1);
 171 const double w = prm.get<double>("relaxation", 1.0);
 172 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqSOR<M, V, V>>>(comm, op.getmat(), n, w);
 173 });
 174 F::addCreator("ssor", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 175 const int n = prm.get<int>("repeats", 1);
 176 const double w = prm.get<double>("relaxation", 1.0);
 177 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqSSOR<M, V, V>>>(comm, op.getmat(), n, w);
 178 });
 179
 180 // Only add AMG preconditioners to the factory if the operator
 181 // is the overlapping schwarz operator or GhostLastMatrixAdapter. This could be extended
 182 // later, but at this point no other operators are compatible
 183 // with the AMG hierarchy construction.
 184 if constexpr (std::is_same_v<O, Dune::OverlappingSchwarzOperator<M, V, V, C>> ||
 185 std::is_same_v<O, Opm::GhostLastMatrixAdapter<M, V, V, C>>) {
 // The "amg" creator dispatches on the "smoother" property; every branch
 // builds the same Dune::Amg::AMGCPR, differing only in the smoother type.
 186 F::addCreator("amg", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 187 using PrecPtr = std::shared_ptr<Dune::PreconditionerWithUpdate<V, V>>;
 188 std::string smoother = prm.get<std::string>("smoother", "paroverilu0");
 189 // Make the smoother type lowercase for internal canonical representation
 190 std::transform(smoother.begin(), smoother.end(), smoother.begin(), ::tolower);
 191 // TODO: merge this with ILUn, and possibly simplify the factory to only work with ILU?
 192 if (smoother == "ilu0" || smoother == "paroverilu0") {
 194 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 196 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 197 return prec;
 198 } else if (smoother == "dilu") {
 199 using SeqSmoother = Dune::MultithreadDILU<M, V, V>;
 200 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 201 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 202 SmootherArgs sargs;
 203 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 204 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 205 return prec;
 206 } else if (smoother == "jac") {
 207 using SeqSmoother = SeqJac<M, V, V>;
 208 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 209 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 210 SmootherArgs sargs;
 211 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 212 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 213 return prec;
 214 } else if (smoother == "gs") {
 215 using SeqSmoother = SeqGS<M, V, V>;
 216 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 217 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 218 SmootherArgs sargs;
 219 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 220 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 221 return prec;
 222 } else if (smoother == "sor") {
 223 using SeqSmoother = SeqSOR<M, V, V>;
 224 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 225 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 226 SmootherArgs sargs;
 227 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 228 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 229 return prec;
 230 } else if (smoother == "ssor") {
 231 using SeqSmoother = SeqSSOR<M, V, V>;
 232 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 233 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 234 SmootherArgs sargs;
 235 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 236 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 237 return prec;
 238 } else if (smoother == "ilun") {
 239 using SeqSmoother = SeqILU<M, V, V>;
 240 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
 241 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
 242 SmootherArgs sargs;
 243 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 244 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
 245 return prec;
 246 } else {
 247 OPM_THROW(std::invalid_argument, "Properties: No smoother with name " + smoother + ".");
 248 }
 249 });
 250#if HAVE_HYPRE
 // Hypre is only registered for scalar (1x1-block) matrices whose field
 // type matches HYPRE_Real.
 251 if constexpr (M::block_type::rows == 1 && M::block_type::cols == 1
 252 && std::is_same_v<HYPRE_Real, typename V::field_type>) {
 253 F::addCreator(
 254 "hypre", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 255 return std::make_shared<Hypre::HyprePreconditioner<M, V, V, C>>(op.getmat(), prm, comm);
 256 });
 257 }
 258#endif
 259 }
 260
 261
 // CPR/CPRT: two-level pressure-based preconditioners. They differ only in
 // the last template flag of PressureTransferPolicy (false for "cpr",
 // true for "cprt" — presumably the transposed variant; confirm against
 // PressureTransferPolicy.hpp).
 // NOTE(review): the thrown message "It needs to specified" should read
 // "It needs to be specified" (appears three times below).
 262 F::addCreator("cpr",
 263 [](const O& op,
 264 const P& prm,
 265 const std::function<V()> weightsCalculator,
 266 std::size_t pressureIndex,
 267 const C& comm) {
 268 assert(weightsCalculator);
 269 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 270 OPM_THROW(std::logic_error,
 271 "Pressure index out of bounds. It needs to specified for CPR");
 272 }
 273 using Scalar = typename V::field_type;
 274 using LevelTransferPolicy = PressureTransferPolicy<O, Comm, Scalar, false>;
 275 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
 276 op, prm, weightsCalculator, pressureIndex, comm);
 277 });
 278 F::addCreator("cprt",
 279 [](const O& op,
 280 const P& prm,
 281 const std::function<V()> weightsCalculator,
 282 std::size_t pressureIndex,
 283 const C& comm) {
 284 assert(weightsCalculator);
 285 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 286 OPM_THROW(std::logic_error,
 287 "Pressure index out of bounds. It needs to specified for CPR");
 288 }
 289 using Scalar = typename V::field_type;
 290 using LevelTransferPolicy = PressureTransferPolicy<O, Comm, Scalar, true>;
 291 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
 292 op, prm, weightsCalculator, pressureIndex, comm);
 293 });
 294
 295 // Add CPRW only for the WellModelGhostLastMatrixAdapter, as the method requires that the
 296 // operator has the addWellPressureEquations() method (and a few more) it can not be combined
 297 // with a well-less operator such as GhostLastMatrixAdapter or OverlappingSchwarzOperator.
 298 // For OPM Flow this corresponds to requiring --matrix-add-well-contributions=false
 299 // (which is the default).
 300 if constexpr (std::is_same_v<O, WellModelGhostLastMatrixAdapter<M, V, V, true>>) {
 301 F::addCreator("cprw",
 302 [](const O& op,
 303 const P& prm,
 304 const std::function<V()> weightsCalculator,
 305 std::size_t pressureIndex,
 306 const C& comm) {
 307 assert(weightsCalculator);
 308 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 309 OPM_THROW(std::logic_error,
 310 "Pressure index out of bounds. It needs to specified for CPR");
 311 }
 312 using Scalar = typename V::field_type;
 313 using LevelTransferPolicy = PressureBhpTransferPolicy<O, Comm, Scalar, false>;
 314 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
 315 op, prm, weightsCalculator, pressureIndex, comm);
 316 });
 317 }
 318
 319#if HAVE_CUDA
 320 // Here we create the *wrapped* GPU preconditioners
 321 // meaning they will act as CPU preconditioners on the outside,
 322 // but copy data back and forth to the GPU as needed.
 323
 324 // TODO: Make this use the GPU preconditioner factory once that is up and running.
 325 F::addCreator("gpuilu0", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 326 const double w = prm.get<double>("relaxation", 1.0);
 327 using field_type = typename V::field_type;
 328 using GpuILU0 = typename gpuistl::
 329 GpuSeqILU0<M, gpuistl::GpuVector<field_type>, gpuistl::GpuVector<field_type>>;
 330 auto gpuILU0 = std::make_shared<GpuILU0>(op.getmat(), w);
 331
 332 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, GpuILU0>>(gpuILU0);
 333 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 334 return wrapped;
 335 });
 336
 337 F::addCreator("gpujac", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 338 const double w = prm.get<double>("relaxation", 1.0);
 339 using field_type = typename V::field_type;
 // NOTE(review): the completion of the GpuJac alias and the MatrixOwner
 // alias (original lines 341-345) are missing from this listing.
 340 using GpuJac =
 342
 345
 346 auto gpuJac = std::make_shared<MatrixOwner>(op.getmat(), w);
 347
 348 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuJac);
 349 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 350 return wrapped;
 351 });
 352
 353 F::addCreator("gpudilu", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 354 const bool split_matrix = prm.get<bool>("split_matrix", true);
 355 const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
 356 const int mixed_precision_scheme = prm.get<int>("mixed_precision_scheme", 0);
 357 const bool reorder = prm.get<bool>("reorder", true);
 358 using field_type = typename V::field_type;
 // NOTE(review): the MatrixOwner alias (original lines 359-361) is
 // missing from this listing.
 362
 363 // Note: op.getmat() is passed twice, because the GpuDILU needs both the CPU and GPU matrix.
 364 // The first argument will be converted to a GPU matrix, and the second one is used as a CPU matrix.
 365 auto gpuDILU = std::make_shared<MatrixOwner>(op.getmat(), op.getmat(), split_matrix, tune_gpu_kernels, mixed_precision_scheme, reorder);
 366
 367 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuDILU);
 368 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 369 return wrapped;
 370 });
 371
 372 F::addCreator("opmgpuilu0", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
 373 const bool split_matrix = prm.get<bool>("split_matrix", true);
 374 const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
 375 const int mixed_precision_scheme = prm.get<int>("mixed_precision_scheme", 0);
 376 using field_type = typename V::field_type;
 378
 380 gpuistl::GpuVector<field_type>, OpmGpuILU0, M>;
 381
 382 // Note: op.getmat() is passed twice, because the OPMGPUILU0 needs both the CPU and GPU matrix.
 383 // The first argument will be converted to a GPU matrix, and the second one is used as a CPU matrix.
 384 auto gpuilu0 = std::make_shared<MatrixOwner>(op.getmat(), op.getmat(), split_matrix, tune_gpu_kernels, mixed_precision_scheme);
 385
 386 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuilu0);
 387 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
 388 return wrapped;
 389 });
 390#endif // HAVE_CUDA
 391 }
 392
 393
 // Builds a ParallelOverlappingILU0 preconditioner. For ilulevel == 0 the
 // ghost-last optimization is attempted (only the interior rows are
 // factorized when the index set is ordered interior-first); for higher
 // levels the generic constructor taking the fill-in level is used.
 // NOTE(review): the static return-type line (original line 394,
 // `static PreconditionerFactory<Operator, Comm>::PrecPtr` per the
 // cross-reference index) and the `using F` line (original 397) are
 // missing from this extracted listing.
 395 createParILU(const Operator& op, const PropertyTree& prm, const Comm& comm, const int ilulevel)
 396 {
 398 using M = typename F::Matrix;
 399 using V = typename F::Vector;
 400
 401 const double w = prm.get<double>("relaxation", 1.0);
 402 const bool redblack = prm.get<bool>("redblack", false);
 403 const bool reorder_spheres = prm.get<bool>("reorder_spheres", false);
 404 // Already a parallel preconditioner. Need to pass comm, but no need to wrap it in a BlockPreconditioner.
 405 if (ilulevel == 0) {
 406 const std::size_t num_interior = interiorIfGhostLast(comm);
 407 assert(num_interior <= op.getmat().N());
 408 return std::make_shared<ParallelOverlappingILU0<M, V, V, Comm>>(
 409 op.getmat(), comm, w, MILU_VARIANT::ILU, num_interior, redblack, reorder_spheres);
 410 } else {
 411 return std::make_shared<ParallelOverlappingILU0<M, V, V, Comm>>(
 412 op.getmat(), comm, ilulevel, w, MILU_VARIANT::ILU, redblack, reorder_spheres);
 413 }
 414 }
 415
 // Returns the number of interior (owner) indices if they are numbered
 // consecutively before all ghost indices ("ghost last" ordering), i.e. if
 // highest_interior_index + 1 == interior_count; otherwise falls back to
 // the full index-set size, which disables the interior-only optimization.
 420 static std::size_t interiorIfGhostLast(const Comm& comm)
 421 {
 422 std::size_t interior_count = 0;
 423 std::size_t highest_interior_index = 0;
 424 const auto& is = comm.indexSet();
 425 for (const auto& ind : is) {
 426 if (Comm::OwnerSet::contains(ind.local().attribute())) {
 427 ++interior_count;
 428 highest_interior_index = std::max(highest_interior_index, ind.local().local());
 429 }
 430 }
 431 if (highest_interior_index + 1 == interior_count) {
 432 return interior_count;
 433 } else {
 434 return is.size();
 435 }
 436 }
 437};
438
439
440} // namespace Opm
441
442#endif // OPM_STANDARDPRECONDITIONERS_MPI_HEADER
Dune::OwnerOverlapCopyCommunication< int, int > Comm
Definition: FlexibleSolver_impl.hpp:304
Parallel algebraic multigrid based on agglomeration.
Definition: amgcpr.hh:88
Definition: PreconditionerWithUpdate.hpp:43
The OpenMP thread parallelized DILU preconditioner.
Definition: DILU.hpp:53
A two-step version of an overlapping Schwarz preconditioner using one step of ILU0 as the approximate subdomain solve. (Brief description truncated in the extracted documentation — see ParallelOverlappingILU0.hpp for the full text.)
Definition: ParallelOverlappingILU0.hpp:131
Definition: PreconditionerFactory.hpp:64
std::shared_ptr< Dune::PreconditionerWithUpdate< Vector, Vector > > PrecPtr
The type of pointer returned by create().
Definition: PreconditionerFactory.hpp:71
Definition: PressureBhpTransferPolicy.hpp:99
Definition: PressureTransferPolicy.hpp:55
Hierarchical collection of key/value pairs.
Definition: PropertyTree.hpp:39
T get(const std::string &key) const
DILU preconditioner on the GPU.
Definition: GpuDILU.hpp:53
Jacobi preconditioner on the GPU.
Definition: GpuJac.hpp:47
ILU0 preconditioner on the GPU.
Definition: OpmGpuILU0.hpp:51
Convert a CPU matrix to a GPU matrix and use a CUDA preconditioner on the GPU.
Definition: PreconditionerCPUMatrixToGPUMatrix.hpp:42
Definition: fvbaseprimaryvariables.hh:141
Definition: blackoilboundaryratevector.hh:39
MILU_VARIANT
Definition: MILU.hpp:34
@ ILU
Do not perform modified ILU.
auto setUseFixedOrder(C &criterion, bool booleanValue) -> decltype(criterion.setUseFixedOrder(booleanValue))
Definition: StandardPreconditioners_mpi.hpp:71
MILU_VARIANT convertString2Milu(const std::string &milu)
Dune::Amg::CoarsenCriterion< CriterionBase > Criterion
Definition: PreconditionerFactory.hpp:47
static Criterion criterion(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:83
std::shared_ptr< Dune::PreconditionerWithUpdate< Vector, Vector > > PrecPtr
Definition: PreconditionerFactory.hpp:44
static PrecPtr makeAmgPreconditioner(const Operator &op, const PropertyTree &prm, bool useKamg=false)
Definition: StandardPreconditioners_mpi.hpp:110
static auto args(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:51
Definition: StandardPreconditioners_mpi.hpp:34
static auto args(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:35
Definition: StandardPreconditioners_mpi.hpp:128
static PreconditionerFactory< Operator, Comm >::PrecPtr createParILU(const Operator &op, const PropertyTree &prm, const Comm &comm, const int ilulevel)
Definition: StandardPreconditioners_mpi.hpp:395
static std::size_t interiorIfGhostLast(const Comm &comm)
Definition: StandardPreconditioners_mpi.hpp:420
static void add()
Definition: StandardPreconditioners_mpi.hpp:129