StandardPreconditioners_mpi.hpp
Go to the documentation of this file.
/*
  Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
  Copyright 2019 SINTEF Digital, Mathematics and Cybernetics.

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM.  If not, see <http://www.gnu.org/licenses/>.
*/
20
21#ifndef OPM_STANDARDPRECONDITIONERS_MPI_HEADER
22#define OPM_STANDARDPRECONDITIONERS_MPI_HEADER
23
24#if HAVE_CUDA
26#endif
27
28
29namespace Opm {
30
31
32template <class Smoother>
34{
35 static auto args(const PropertyTree& prm)
36 {
37 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
38 SmootherArgs smootherArgs;
39 smootherArgs.iterations = prm.get<int>("iterations", 1);
40 // smootherArgs.overlap=SmootherArgs::vertex;
41 // smootherArgs.overlap=SmootherArgs::none;
42 // smootherArgs.overlap=SmootherArgs::aggregate;
43 smootherArgs.relaxationFactor = prm.get<double>("relaxation", 1.0);
44 return smootherArgs;
45 }
46};
47
48template <class M, class V, class C>
50{
51 static auto args(const PropertyTree& prm)
52 {
54 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
55 SmootherArgs smootherArgs;
56 smootherArgs.iterations = prm.get<int>("iterations", 1);
57 const int iluwitdh = prm.get<int>("iluwidth", 0);
58 smootherArgs.setN(iluwitdh);
59 const MILU_VARIANT milu = convertString2Milu(prm.get<std::string>("milutype", std::string("ilu")));
60 smootherArgs.setMilu(milu);
61 // smootherArgs.overlap=SmootherArgs::vertex;
62 // smootherArgs.overlap=SmootherArgs::none;
63 // smootherArgs.overlap=SmootherArgs::aggregate;
64 smootherArgs.relaxationFactor = prm.get<double>("relaxation", 1.0);
65 return smootherArgs;
66 }
67};
68
// Trailing return type with decltype is used to detect the existence of a
// setUseFixedOrder member function: this overload participates in overload
// resolution only when criterion.setUseFixedOrder(bool) is well-formed, and
// it is preferred over the ellipsis fallback below.
template <typename C>
auto setUseFixedOrder(C& criterion, bool booleanValue) -> decltype(criterion.setUseFixedOrder(booleanValue))
{
    // Set flag to ensure that the matrices in the AMG hierarchy are
    // constructed with deterministic indices.
    return criterion.setUseFixedOrder(booleanValue);
}
// Fallback overload (ellipsis ranks lowest in overload resolution):
// do nothing for criterion types that do not provide setUseFixedOrder.
template <typename C>
void setUseFixedOrder(C&, ...)
{
    // do nothing, since the function setUseFixedOrder does not exist yet
}
80
81template <class Operator, class Comm, class Matrix, class Vector>
84{
85 Criterion criterion(15, prm.get<int>("coarsenTarget", 1200));
86 criterion.setDefaultValuesIsotropic(2);
87 criterion.setAlpha(prm.get<double>("alpha", 0.33));
88 criterion.setBeta(prm.get<double>("beta", 1e-5));
89 criterion.setMaxLevel(prm.get<int>("maxlevel", 15));
90 criterion.setSkipIsolated(prm.get<bool>("skip_isolated", false));
91 criterion.setNoPreSmoothSteps(prm.get<int>("pre_smooth", 1));
92 criterion.setNoPostSmoothSteps(prm.get<int>("post_smooth", 1));
93 criterion.setDebugLevel(prm.get<int>("verbosity", 0));
94 // As the default we request to accumulate data to 1 process always as our matrix
95 // graph might be unsymmetric and hence not supported by the PTScotch/ParMetis
96 // calls in DUNE. Accumulating to 1 skips PTScotch/ParMetis
97 criterion.setAccumulate(static_cast<Dune::Amg::AccumulationMode>(prm.get<int>("accumulate", 1)));
98 criterion.setProlongationDampingFactor(prm.get<double>("prolongationdamping", 1.6));
99 criterion.setMaxDistance(prm.get<int>("maxdistance", 2));
100 criterion.setMaxConnectivity(prm.get<int>("maxconnectivity", 15));
101 criterion.setMaxAggregateSize(prm.get<int>("maxaggsize", 6));
102 criterion.setMinAggregateSize(prm.get<int>("minaggsize", 4));
103 setUseFixedOrder(criterion, true); // If possible, set flag to ensure that the matrices in the AMG hierarchy are constructed with deterministic indices.
104 return criterion;
105}
106
107template <class Operator, class Comm, class Matrix, class Vector>
108template <class Smoother>
111 const PropertyTree& prm,
112 bool useKamg)
113{
114 auto crit = criterion(prm);
116 if (useKamg) {
118 return std::make_shared<Type>(
119 op, crit, sargs, prm.get<std::size_t>("max_krylov", 1), prm.get<double>("min_reduction", 1e-1));
120 } else {
122 return std::make_shared<Type>(op, crit, sargs);
123 }
124}
125
126template <class Operator, class Comm, typename = void> // Note: Last argument is to allow partial specialization for GPU
128{
129 static void add()
130 {
131 using namespace Dune;
132 using O = Operator;
133 using C = Comm;
135 using M = typename F::Matrix;
136 using V = typename F::Vector;
137 using P = PropertyTree;
138 F::addCreator("ILU0", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
139 return createParILU(op, prm, comm, 0);
140 });
141 F::addCreator("ParOverILU0",
142 [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
143 return createParILU(op, prm, comm, prm.get<int>("ilulevel", 0));
144 });
145 F::addCreator("ILUn", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
146 return createParILU(op, prm, comm, prm.get<int>("ilulevel", 0));
147 });
148 F::addCreator("DuneILU", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
149 const int n = prm.get<int>("ilulevel", 0);
150 const double w = prm.get<double>("relaxation", 1.0);
151 const bool resort = prm.get<bool>("resort", false);
152 return wrapBlockPreconditioner<RebuildOnUpdatePreconditioner<Dune::SeqILU<M, V, V>>>(
153 comm, op.getmat(), n, w, resort);
154 });
155 F::addCreator("DILU", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
156 DUNE_UNUSED_PARAMETER(prm);
157 return wrapBlockPreconditioner<MultithreadDILU<M, V, V>>(comm, op.getmat());
158 });
159 F::addCreator("Jac", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
160 const int n = prm.get<int>("repeats", 1);
161 const double w = prm.get<double>("relaxation", 1.0);
162 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqJac<M, V, V>>>(comm, op.getmat(), n, w);
163 });
164 F::addCreator("GS", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
165 const int n = prm.get<int>("repeats", 1);
166 const double w = prm.get<double>("relaxation", 1.0);
167 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqGS<M, V, V>>>(comm, op.getmat(), n, w);
168 });
169 F::addCreator("SOR", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
170 const int n = prm.get<int>("repeats", 1);
171 const double w = prm.get<double>("relaxation", 1.0);
172 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqSOR<M, V, V>>>(comm, op.getmat(), n, w);
173 });
174 F::addCreator("SSOR", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
175 const int n = prm.get<int>("repeats", 1);
176 const double w = prm.get<double>("relaxation", 1.0);
177 return wrapBlockPreconditioner<DummyUpdatePreconditioner<SeqSSOR<M, V, V>>>(comm, op.getmat(), n, w);
178 });
179
180 // Only add AMG preconditioners to the factory if the operator
181 // is the overlapping schwarz operator or GhostLastMatrixAdapter. This could be extended
182 // later, but at this point no other operators are compatible
183 // with the AMG hierarchy construction.
184 if constexpr (std::is_same_v<O, Dune::OverlappingSchwarzOperator<M, V, V, C>> ||
185 std::is_same_v<O, Opm::GhostLastMatrixAdapter<M, V, V, C>>) {
186 F::addCreator("amg", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
187 using PrecPtr = std::shared_ptr<Dune::PreconditionerWithUpdate<V, V>>;
188 const std::string smoother = prm.get<std::string>("smoother", "ParOverILU0");
189 // TODO: merge this with ILUn, and possibly simplify the factory to only work with ILU?
190 if (smoother == "ILU0" || smoother == "ParOverILU0") {
192 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
194 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
195 return prec;
196 } else if (smoother == "DILU") {
197 using SeqSmoother = Dune::MultithreadDILU<M, V, V>;
198 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
199 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
200 SmootherArgs sargs;
201 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
202 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
203 return prec;
204 } else if (smoother == "Jac") {
205 using SeqSmoother = SeqJac<M, V, V>;
206 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
207 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
208 SmootherArgs sargs;
209 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
210 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
211 return prec;
212 } else if (smoother == "GS") {
213 using SeqSmoother = SeqGS<M, V, V>;
214 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
215 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
216 SmootherArgs sargs;
217 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
218 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
219 return prec;
220 } else if (smoother == "SOR") {
221 using SeqSmoother = SeqSOR<M, V, V>;
222 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
223 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
224 SmootherArgs sargs;
225 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
226 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
227 return prec;
228 } else if (smoother == "SSOR") {
229 using SeqSmoother = SeqSSOR<M, V, V>;
230 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
231 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
232 SmootherArgs sargs;
233 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
234 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
235 return prec;
236 } else if (smoother == "ILUn") {
237 using SeqSmoother = SeqILU<M, V, V>;
238 using Smoother = Dune::BlockPreconditioner<V, V, C, SeqSmoother>;
239 using SmootherArgs = typename Dune::Amg::SmootherTraits<Smoother>::Arguments;
240 SmootherArgs sargs;
241 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
242 PrecPtr prec = std::make_shared<Dune::Amg::AMGCPR<O, V, Smoother, C>>(op, crit, sargs, comm);
243 return prec;
244 } else {
245 OPM_THROW(std::invalid_argument, "Properties: No smoother with name " + smoother + ".");
246 }
247 });
248 }
249
250 F::addCreator("cpr",
251 [](const O& op,
252 const P& prm,
253 const std::function<V()> weightsCalculator,
254 std::size_t pressureIndex,
255 const C& comm) {
256 assert(weightsCalculator);
257 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
258 OPM_THROW(std::logic_error,
259 "Pressure index out of bounds. It needs to specified for CPR");
260 }
261 using Scalar = typename V::field_type;
262 using LevelTransferPolicy = PressureTransferPolicy<O, Comm, Scalar, false>;
263 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
264 op, prm, weightsCalculator, pressureIndex, comm);
265 });
266 F::addCreator("cprt",
267 [](const O& op,
268 const P& prm,
269 const std::function<V()> weightsCalculator,
270 std::size_t pressureIndex,
271 const C& comm) {
272 assert(weightsCalculator);
273 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
274 OPM_THROW(std::logic_error,
275 "Pressure index out of bounds. It needs to specified for CPR");
276 }
277 using Scalar = typename V::field_type;
278 using LevelTransferPolicy = PressureTransferPolicy<O, Comm, Scalar, true>;
279 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
280 op, prm, weightsCalculator, pressureIndex, comm);
281 });
282
283 // Add CPRW only for the WellModelGhostLastMatrixAdapter, as the method requires that the
284 // operator has the addWellPressureEquations() method (and a few more) it can not be combined
285 // with a well-less operator such as GhostLastMatrixAdapter or OverlappingSchwarzOperator.
286 // For OPM Flow this corresponds to requiring --matrix-add-well-contributions=false
287 // (which is the default).
288 if constexpr (std::is_same_v<O, WellModelGhostLastMatrixAdapter<M, V, V, true>>) {
289 F::addCreator("cprw",
290 [](const O& op,
291 const P& prm,
292 const std::function<V()> weightsCalculator,
293 std::size_t pressureIndex,
294 const C& comm) {
295 assert(weightsCalculator);
296 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
297 OPM_THROW(std::logic_error,
298 "Pressure index out of bounds. It needs to specified for CPR");
299 }
300 using Scalar = typename V::field_type;
301 using LevelTransferPolicy = PressureBhpTransferPolicy<O, Comm, Scalar, false>;
302 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy, Comm>>(
303 op, prm, weightsCalculator, pressureIndex, comm);
304 });
305 }
306
307#if HAVE_CUDA
308 // Here we create the *wrapped* GPU preconditioners
309 // meaning they will act as CPU preconditioners on the outside,
310 // but copy data back and forth to the GPU as needed.
311
312 // TODO: Make this use the GPU preconditioner factory once that is up and running.
313 F::addCreator("GPUILU0", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
314 const double w = prm.get<double>("relaxation", 1.0);
315 using field_type = typename V::field_type;
316 using GpuILU0 = typename gpuistl::
317 GpuSeqILU0<M, gpuistl::GpuVector<field_type>, gpuistl::GpuVector<field_type>>;
318 auto gpuILU0 = std::make_shared<GpuILU0>(op.getmat(), w);
319
320 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, GpuILU0>>(gpuILU0);
321 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
322 return wrapped;
323 });
324
325 F::addCreator("GPUJAC", [](const O& op, const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
326 const double w = prm.get<double>("relaxation", 1.0);
327 using field_type = typename V::field_type;
328 using GpuJac =
329 typename gpuistl::GpuJac<gpuistl::GpuSparseMatrix<field_type>, gpuistl::GpuVector<field_type>, gpuistl::GpuVector<field_type>>;
330
332 gpuistl::GpuVector<field_type>, GpuJac, M>;
333
334 auto gpuJac = std::make_shared<MatrixOwner>(op.getmat(), w);
335
336 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuJac);
337 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
338 return wrapped;
339 });
340
341 F::addCreator("GPUDILU", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
342 const bool split_matrix = prm.get<bool>("split_matrix", true);
343 const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
344 const int mixed_precision_scheme = prm.get<int>("mixed_precision_scheme", 0);
345 using field_type = typename V::field_type;
346 using GpuDILU = typename gpuistl::GpuDILU<M, gpuistl::GpuVector<field_type>, gpuistl::GpuVector<field_type>>;
348 gpuistl::GpuVector<field_type>, GpuDILU, M>;
349
350 // Note: op.getmat() is passed twice, because the GpuDILU needs both the CPU and GPU matrix.
351 // The first argument will be converted to a GPU matrix, and the second one is used as a CPU matrix.
352 auto gpuDILU = std::make_shared<MatrixOwner>(op.getmat(), op.getmat(), split_matrix, tune_gpu_kernels, mixed_precision_scheme);
353
354 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuDILU);
355 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
356 return wrapped;
357 });
358
359 F::addCreator("OPMGPUILU0", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t, const C& comm) {
360 const bool split_matrix = prm.get<bool>("split_matrix", true);
361 const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
362 const int mixed_precision_scheme = prm.get<int>("mixed_precision_scheme", 0);
363 using field_type = typename V::field_type;
364 using OpmGpuILU0 = typename gpuistl::OpmGpuILU0<M, gpuistl::GpuVector<field_type>, gpuistl::GpuVector<field_type>>;
365
367 gpuistl::GpuVector<field_type>, OpmGpuILU0, M>;
368
369 // Note: op.getmat() is passed twice, because the OPMGPUILU0 needs both the CPU and GPU matrix.
370 // The first argument will be converted to a GPU matrix, and the second one is used as a CPU matrix.
371 auto gpuilu0 = std::make_shared<MatrixOwner>(op.getmat(), op.getmat(), split_matrix, tune_gpu_kernels, mixed_precision_scheme);
372
373 auto adapted = std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(gpuilu0);
374 auto wrapped = std::make_shared<gpuistl::GpuBlockPreconditioner<V, V, Comm>>(adapted, comm);
375 return wrapped;
376 });
377#endif // HAVE_CUDA
378 }
379
380
382 createParILU(const Operator& op, const PropertyTree& prm, const Comm& comm, const int ilulevel)
383 {
385 using M = typename F::Matrix;
386 using V = typename F::Vector;
387
388 const double w = prm.get<double>("relaxation", 1.0);
389 const bool redblack = prm.get<bool>("redblack", false);
390 const bool reorder_spheres = prm.get<bool>("reorder_spheres", false);
391 // Already a parallel preconditioner. Need to pass comm, but no need to wrap it in a BlockPreconditioner.
392 if (ilulevel == 0) {
393 const std::size_t num_interior = interiorIfGhostLast(comm);
394 assert(num_interior <= op.getmat().N());
395 return std::make_shared<ParallelOverlappingILU0<M, V, V, Comm>>(
396 op.getmat(), comm, w, MILU_VARIANT::ILU, num_interior, redblack, reorder_spheres);
397 } else {
398 return std::make_shared<ParallelOverlappingILU0<M, V, V, Comm>>(
399 op.getmat(), comm, ilulevel, w, MILU_VARIANT::ILU, redblack, reorder_spheres);
400 }
401 }
402
407 static std::size_t interiorIfGhostLast(const Comm& comm)
408 {
409 std::size_t interior_count = 0;
410 std::size_t highest_interior_index = 0;
411 const auto& is = comm.indexSet();
412 for (const auto& ind : is) {
413 if (Comm::OwnerSet::contains(ind.local().attribute())) {
414 ++interior_count;
415 highest_interior_index = std::max(highest_interior_index, ind.local().local());
416 }
417 }
418 if (highest_interior_index + 1 == interior_count) {
419 return interior_count;
420 } else {
421 return is.size();
422 }
423 }
424};
425
426
427} // namespace Opm
428
429#endif // OPM_STANDARDPRECONDITIONERS_MPI_HEADER
Dune::OwnerOverlapCopyCommunication< int, int > Comm
Definition: FlexibleSolver_impl.hpp:304
Parallel algebraic multigrid based on agglomeration.
Definition: amgcpr.hh:88
Definition: PreconditionerWithUpdate.hpp:43
The OpenMP thread parallelized DILU preconditioner.
Definition: DILU.hpp:53
A two-step version of an overlapping Schwarz preconditioner using one step ILU0 as.
Definition: ParallelOverlappingILU0.hpp:131
Definition: PreconditionerFactory.hpp:64
std::shared_ptr< Dune::PreconditionerWithUpdate< Vector, Vector > > PrecPtr
The type of pointer returned by create().
Definition: PreconditionerFactory.hpp:71
Definition: PressureBhpTransferPolicy.hpp:99
Definition: PressureTransferPolicy.hpp:55
Hierarchical collection of key/value pairs.
Definition: PropertyTree.hpp:39
T get(const std::string &key) const
DILU preconditioner on the GPU.
Definition: GpuDILU.hpp:50
Jacobi preconditioner on the GPU.
Definition: GpuJac.hpp:47
ILU0 preconditioner on the GPU.
Definition: OpmGpuILU0.hpp:51
Convert a CPU matrix to a GPU matrix and use a CUDA preconditioner on the GPU.
Definition: PreconditionerCPUMatrixToGPUMatrix.hpp:42
Definition: fvbaseprimaryvariables.hh:141
Definition: blackoilboundaryratevector.hh:39
MILU_VARIANT
Definition: MILU.hpp:34
@ ILU
Do not perform modified ILU.
auto setUseFixedOrder(C &criterion, bool booleanValue) -> decltype(criterion.setUseFixedOrder(booleanValue))
Definition: StandardPreconditioners_mpi.hpp:71
MILU_VARIANT convertString2Milu(const std::string &milu)
Dune::Amg::CoarsenCriterion< CriterionBase > Criterion
Definition: PreconditionerFactory.hpp:47
static Criterion criterion(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:83
std::shared_ptr< Dune::PreconditionerWithUpdate< Vector, Vector > > PrecPtr
Definition: PreconditionerFactory.hpp:44
static PrecPtr makeAmgPreconditioner(const Operator &op, const PropertyTree &prm, bool useKamg=false)
Definition: StandardPreconditioners_mpi.hpp:110
static auto args(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:51
Definition: StandardPreconditioners_mpi.hpp:34
static auto args(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:35
Definition: StandardPreconditioners_mpi.hpp:128
static PreconditionerFactory< Operator, Comm >::PrecPtr createParILU(const Operator &op, const PropertyTree &prm, const Comm &comm, const int ilulevel)
Definition: StandardPreconditioners_mpi.hpp:382
static std::size_t interiorIfGhostLast(const Comm &comm)
Definition: StandardPreconditioners_mpi.hpp:407
static void add()
Definition: StandardPreconditioners_mpi.hpp:129