StandardPreconditioners_serial.hpp
Go to the documentation of this file.
1/*
2 Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
3 Copyright 2019 SINTEF Digital, Mathematics and Cybernetics.
4
5 This file is part of the Open Porous Media project (OPM).
6
7 OPM is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version.
11
12 OPM is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with OPM. If not, see <http://www.gnu.org/licenses/>.
19*/
20
21#ifndef OPM_STANDARDPRECONDITIONERS_SERIAL_HPP
22#define OPM_STANDARDPRECONDITIONERS_SERIAL_HPP
23
24#if HAVE_CUDA
25#if USE_HIP
26#include <opm/simulators/linalg/gpuistl_hip/PreconditionerCPUMatrixToGPUMatrix.hpp>
27#else
29#endif
30#endif
31
32namespace Opm {
33
34
// NOTE(review): This block is a Doxygen-rendered source listing -- the number
// fused to the start of each line is the original header's own line number.
// Several original lines are missing from the listing (e.g. 43, 201, 214, 225,
// 253-254, 265-269, 280-282, 295-298), so a few aliases used below
// (F, LevelTransferPolicy, Converter, Adapter, GPUJac, MatrixOwner, GpuDILU)
// have no visible definition here; consult the real header before relying on
// them. Code is left untouched; only comments are added.
//
// Serial specialization of StandardPreconditioners: registers the CPU-side
// preconditioner creators with the factory for sequential (non-MPI,
// Dune::Amg::SequentialInformation) runs. Selected only for non-GPU operators
// via the enable_if / is_gpu_operator_v SFINAE condition.
35template <class Operator>
36struct StandardPreconditioners<Operator, Dune::Amg::SequentialInformation, typename std::enable_if_t<!Opm::is_gpu_operator_v<Operator>>>
37{
// Registers all creator lambdas with the factory F. Each lambda receives the
// operator, a PropertyTree of user parameters, a weights-calculator functor,
// and the pressure index, and returns a shared_ptr to a preconditioner.
 38 static void add()
 39 {
 40 using namespace Dune;
 41 using O = Operator;
 42 using C = Dune::Amg::SequentialInformation;
// NOTE(review): original line 43 (presumably the alias declaring the factory
// type F) is missing from this listing; F is used throughout below.
 44 using M = typename F::Matrix;
 45 using V = typename F::Vector;
 46 using P = PropertyTree;
// "ilu0": ParallelOverlappingILU0 with fill level fixed at 0.
// "relaxation" (default 1.0) is the relaxation factor w.
 47 F::addCreator("ilu0", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 48 const double w = prm.get<double>("relaxation", 1.0);
 49 return std::make_shared<ParallelOverlappingILU0<M, V, V, C>>(
 50 op.getmat(), 0, w, MILU_VARIANT::ILU);
 51 });
// "duneilu": Dune's SeqILU with configurable fill level ("ilulevel") and
// optional resorting ("resort"), wrapped via getRebuildOnUpdateWrapper.
 52 F::addCreator("duneilu", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 53 const double w = prm.get<double>("relaxation", 1.0);
 54 const int n = prm.get<int>("ilulevel", 0);
 55 const bool resort = prm.get<bool>("resort", false);
 56 return getRebuildOnUpdateWrapper<Dune::SeqILU<M, V, V>>(op.getmat(), n, w, resort);
 57 });
// "paroverilu0": same construction as "ilu0" but with a configurable fill
// level read from "ilulevel".
 58 F::addCreator("paroverilu0", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 59 const double w = prm.get<double>("relaxation", 1.0);
 60 const int n = prm.get<int>("ilulevel", 0);
 61 return std::make_shared<ParallelOverlappingILU0<M, V, V, C>>(
 62 op.getmat(), n, w, MILU_VARIANT::ILU);
 63 });
// "ilun": identical construction to "paroverilu0" (ILU with level "ilulevel"),
// registered under a separate name.
 64 F::addCreator("ilun", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 65 const int n = prm.get<int>("ilulevel", 0);
 66 const double w = prm.get<double>("relaxation", 1.0);
 67 return std::make_shared<ParallelOverlappingILU0<M, V, V, C>>(
 68 op.getmat(), n, w, MILU_VARIANT::ILU);
 69 });
// "dilu": multithreaded DILU; reads no parameters from the property tree.
 70 F::addCreator("dilu", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 71 DUNE_UNUSED_PARAMETER(prm);
 72 return std::make_shared<MultithreadDILU<M, V, V>>(op.getmat());
 73 });
// Classic Dune smoothers ("jac", "gs", "sor", "ssor"): "repeats" is the
// iteration count and "relaxation" the damping factor; each is wrapped via
// getDummyUpdateWrapper (presumably a no-op update() -- confirm in factory).
 74 F::addCreator("jac", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 75 const int n = prm.get<int>("repeats", 1);
 76 const double w = prm.get<double>("relaxation", 1.0);
 77 return getDummyUpdateWrapper<SeqJac<M, V, V>>(op.getmat(), n, w);
 78 });
 79 F::addCreator("gs", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 80 const int n = prm.get<int>("repeats", 1);
 81 const double w = prm.get<double>("relaxation", 1.0);
 82 return getDummyUpdateWrapper<SeqGS<M, V, V>>(op.getmat(), n, w);
 83 });
 84 F::addCreator("sor", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 85 const int n = prm.get<int>("repeats", 1);
 86 const double w = prm.get<double>("relaxation", 1.0);
 87 return getDummyUpdateWrapper<SeqSOR<M, V, V>>(op.getmat(), n, w);
 88 });
 89 F::addCreator("ssor", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 90 const int n = prm.get<int>("repeats", 1);
 91 const double w = prm.get<double>("relaxation", 1.0);
 92 return getDummyUpdateWrapper<SeqSSOR<M, V, V>>(op.getmat(), n, w);
 93 });
 94
 95 // Only add AMG preconditioners to the factory if the operator
 96 // is an actual matrix operator.
 97 if constexpr (std::is_same_v<O, Dune::MatrixAdapter<M, V, V>>) {
// "amg": AMG with the smoother chosen by the "smoother" parameter
// (default "paroverilu0"). Note that "ilu0"/"paroverilu0" and "ilun" all
// map to SeqILU here; unknown names throw std::invalid_argument.
 98 F::addCreator("amg", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 99 std::string smoother = prm.get<std::string>("smoother", "paroverilu0");
 100 // Make the smoother type lowercase for internal canonical representation
 101 std::transform(smoother.begin(), smoother.end(), smoother.begin(), ::tolower);
 102 if (smoother == "ilu0" || smoother == "paroverilu0") {
 103 using Smoother = SeqILU<M, V, V>;
 104 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm);
 105 } else if (smoother == "jac") {
 106 using Smoother = SeqJac<M, V, V>;
 107 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm);
 108 } else if (smoother == "gs") {
 109 using Smoother = SeqGS<M, V, V>;
 110 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm);
 111 } else if (smoother == "dilu") {
 112 using Smoother = MultithreadDILU<M, V, V>;
 113 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm);
 114 } else if (smoother == "sor") {
 115 using Smoother = SeqSOR<M, V, V>;
 116 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm);
 117 } else if (smoother == "ssor") {
 118 using Smoother = SeqSSOR<M, V, V>;
 119 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm);
 120 } else if (smoother == "ilun") {
 121 using Smoother = SeqILU<M, V, V>;
 122 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm);
 123 } else {
 124 OPM_THROW(std::invalid_argument, "Properties: No smoother with name " + smoother + ".");
 125 }
 126 });
// "kamg": same smoother dispatch as "amg" but passes useKamg=true to the
// helper; "dilu" is NOT accepted here (unlike "amg").
 127 F::addCreator("kamg", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 128 std::string smoother = prm.get<std::string>("smoother", "paroverilu0");
 129 // Make the smoother type lowercase for internal canonical representation
 130 std::transform(smoother.begin(), smoother.end(), smoother.begin(), ::tolower);
 131 if (smoother == "ilu0" || smoother == "paroverilu0") {
 132 using Smoother = SeqILU<M, V, V>;
 133 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm, true);
 134 } else if (smoother == "jac") {
 135 using Smoother = SeqJac<M, V, V>;
 136 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm, true);
 137 } else if (smoother == "sor") {
 138 using Smoother = SeqSOR<M, V, V>;
 139 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm, true);
 140 } else if (smoother == "gs") {
 141 using Smoother = SeqGS<M, V, V>;
 142 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm, true);
 143 } else if (smoother == "ssor") {
 144 using Smoother = SeqSSOR<M, V, V>;
 145 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm, true);
 146 } else if (smoother == "ilun") {
 147 using Smoother = SeqILU<M, V, V>;
 148 return AMGHelper<O, C, M, V>::template makeAmgPreconditioner<Smoother>(op, prm, true);
 149 } else {
 150 OPM_THROW(std::invalid_argument, "Properties: No smoother with name " + smoother + ".");
 151 }
 152 });
// "famg": Dune's FastAMG with one pre- and one post-smoothing step.
// Rejected at compile time for float vectors (guard throws std::logic_error).
 153 F::addCreator("famg", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 154 if constexpr (std::is_same_v<typename V::field_type, float>) {
 155 OPM_THROW(std::logic_error, "famg requires UMFPack which is not available for floats");
 156 return nullptr;
 157 } else {
 158 auto crit = AMGHelper<O, C, M, V>::criterion(prm);
 159 Dune::Amg::Parameters parms;
 160 parms.setNoPreSmoothSteps(1);
 161 parms.setNoPostSmoothSteps(1);
 162 return getRebuildOnUpdateWrapper<Dune::Amg::FastAMG<O, V>>(op, crit, parms);
 163 }
 164 });
 165
 166#if HAVE_AMGX
 167 // Only add AMGX for scalar matrices
 168 if constexpr (M::block_type::rows == 1 && M::block_type::cols == 1) {
// "amgx": copies the property tree so "setup_frequency" can be injected
// from the CprReuseInterval runtime parameter.
 169 F::addCreator("amgx", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 170 auto prm_copy = prm;
 171 prm_copy.put("setup_frequency", Opm::Parameters::Get<Opm::Parameters::CprReuseInterval>());
 172 return std::make_shared<Amgx::AmgxPreconditioner<M, V, V>>(op.getmat(), prm_copy);
 173 });
 174 }
 175#endif
 176
 177#if HAVE_HYPRE
 178 // Only add Hypre for scalar matrices
// Additionally requires the vector field type to match HYPRE_Real.
 179 if constexpr (M::block_type::rows == 1 && M::block_type::cols == 1 &&
 180 std::is_same_v<HYPRE_Real, typename V::field_type>) {
 181 F::addCreator("hypre", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 182 return std::make_shared<Hypre::HyprePreconditioner<M, V, V, Dune::Amg::SequentialInformation>>(op.getmat(), prm, Dune::Amg::SequentialInformation());
 183 });
 184 }
 185#endif
 186 }
 187
 188 // Add CPRW only for the WellModelMatrixAdapter, as the method requires that the operator
 189 // has the addWellPressureEquations() method (and a few more) it can not be combined with
 190 // a well-less operator such as Dune::MatrixAdapter. For OPM Flow this corresponds to
 191 // requiring --matrix-add-well-contributions=false (which is the default).
 192 if constexpr (std::is_same_v<O, WellModelMatrixAdapter<M, V, V>>) {
// "cprw": two-level CPR with well contributions. A valid pressure index is
// mandatory. NOTE(review): the error message below is missing a word --
// should read "needs to be specified" (string left unchanged here).
 193 F::addCreator(
 194 "cprw",
 195 [](const O& op, const P& prm, const std::function<V()>& weightsCalculator, std::size_t pressureIndex) {
 196 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 197 OPM_THROW(std::logic_error, "Pressure index out of bounds. It needs to specified for CPR");
 198 }
 199 using Scalar = typename V::field_type;
// NOTE(review): original line 201 (the rest of this LevelTransferPolicy
// alias, presumably a PressureBhpTransferPolicy instantiation) is missing
// from this listing.
 200 using LevelTransferPolicy
 202 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy>>(
 203 op, prm, weightsCalculator, pressureIndex);
 204 });
 205 }
 206
// "cpr": two-level CPR (quasi-IMPES style). NOTE(review): original line 214
// (the LevelTransferPolicy alias for this lambda) is missing from this
// listing.
 207 F::addCreator(
 208 "cpr",
 209 [](const O& op, const P& prm, const std::function<V()>& weightsCalculator, std::size_t pressureIndex) {
 210 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 211 OPM_THROW(std::logic_error, "Pressure index out of bounds. It needs to specified for CPR");
 212 }
 213 using Scalar = typename V::field_type;
 215 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy>>(
 216 op, prm, weightsCalculator, pressureIndex);
 217 });
// "cprt": CPR variant (presumably the transposed transfer policy -- confirm
// against the real header). NOTE(review): original line 225 (its
// LevelTransferPolicy alias) is missing from this listing.
 218 F::addCreator(
 219 "cprt",
 220 [](const O& op, const P& prm, const std::function<V()>& weightsCalculator, std::size_t pressureIndex) {
 221 if (pressureIndex == std::numeric_limits<std::size_t>::max()) {
 222 OPM_THROW(std::logic_error, "Pressure index out of bounds. It needs to specified for CPR");
 223 }
 224 using Scalar = typename V::field_type;
 226 return std::make_shared<OwningTwoLevelPreconditioner<O, V, LevelTransferPolicy>>(
 227 op, prm, weightsCalculator, pressureIndex);
 228 });
 229
 230#if HAVE_CUDA
 231 // Here we create the *wrapped* GPU preconditioners
 232 // meaning they will act as CPU preconditioners on the outside,
 233 // but copy data back and forth to the GPU as needed.
 234
 235 // TODO: Make this use the GPU preconditioner factory once that is up and running.
// "gpuilu0": GpuSeqILU0 behind a PreconditionerAdapter so it can be used
// as a CPU preconditioner.
 236 F::addCreator("gpuilu0", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 237 const double w = prm.get<double>("relaxation", 1.0);
 238 using field_type = typename V::field_type;
 239 using GpuILU0 = typename gpuistl::
 240 GpuSeqILU0<M, gpuistl::GpuVector<field_type>, gpuistl::GpuVector<field_type>>;
 241 return std::make_shared<gpuistl::PreconditionerAdapter<V, V, GpuILU0>>(
 242 std::make_shared<GpuILU0>(op.getmat(), w));
 243 });
 244
// "gpuilu0float": same as "gpuilu0" but converts the matrix/vector field
// type to float first. NOTE(review): original lines 253-254 (the Converter
// and Adapter aliases, presumably based on
// PreconditionerConvertFieldTypeAdapter) are missing from this listing.
 245 F::addCreator("gpuilu0float", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 246 const double w = prm.get<double>("relaxation", 1.0);
 247 using block_type = typename V::block_type;
 248 using VTo = Dune::BlockVector<Dune::FieldVector<float, block_type::dimension>>;
 249 using matrix_type_to =
 250 typename Dune::BCRSMatrix<Dune::FieldMatrix<float, block_type::dimension, block_type::dimension>>;
 251 using GpuILU0 = typename gpuistl::
 252 GpuSeqILU0<matrix_type_to, gpuistl::GpuVector<float>, gpuistl::GpuVector<float>>;
 255 auto converted = std::make_shared<Converter>(op.getmat());
 256 auto adapted = std::make_shared<Adapter>(std::make_shared<GpuILU0>(converted->getConvertedMatrix(), w));
 257 converted->setUnderlyingPreconditioner(adapted);
 258 return converted;
 259 });
 260
// "gpujac": Jacobi on the GPU. NOTE(review): original lines 265-269 (the
// rest of the GPUJac alias and the MatrixOwner alias) are missing from
// this listing.
 261 F::addCreator("gpujac", [](const O& op, const P& prm, const std::function<V()>&, std::size_t) {
 262 const double w = prm.get<double>("relaxation", 1.0);
 263 using field_type = typename V::field_type;
 264 using GPUJac =
 267
 270 return std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(
 271 std::make_shared<MatrixOwner>(op.getmat(), w));
 272 });
 273
// "opmgpuilu0": OPM's GPU ILU0 with optional matrix splitting, kernel
// tuning, and mixed-precision scheme. NOTE(review): original lines 280-282
// (the MatrixOwner alias, presumably PreconditionerCPUMatrixToGPUMatrix
// around OpmGpuILU0) are missing from this listing.
 274 F::addCreator("opmgpuilu0", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t) {
 275 const bool split_matrix = prm.get<bool>("split_matrix", true);
 276 const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
 277 const int mixed_precision_scheme = prm.get<int>("mixed_precision_scheme", 0);
 278
 279 using field_type = typename V::field_type;
 283 return std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(
 284 // Note: op.getmat() is passed twice, because the ILU0 needs both the CPU and GPU matrix.
 285 // The first argument will be converted to a GPU matrix, and the second one is used as a CPU matrix.
 286 std::make_shared<MatrixOwner>(op.getmat(), op.getmat(), split_matrix, tune_gpu_kernels, mixed_precision_scheme));
 287 });
 288
 289
// "gpudilu": DILU on the GPU, with the same tuning knobs plus "reorder".
// NOTE(review): original lines 296-298 (the MatrixOwner alias for GpuDILU)
// are missing from this listing.
 290 F::addCreator("gpudilu", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t) {
 291 const bool split_matrix = prm.get<bool>("split_matrix", true);
 292 const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
 293 const int mixed_precision_scheme = prm.get<int>("mixed_precision_scheme", 0);
 294 const bool reorder = prm.get<bool>("reorder", true);
 295 using field_type = typename V::field_type;
 299 return std::make_shared<gpuistl::PreconditionerAdapter<V, V, MatrixOwner>>(
 300 // Note: op.getmat() is passed twice, because the DILU needs both the CPU and GPU matrix.
 301 // The first argument will be converted to a GPU matrix, and the second one is used as a CPU matrix.
 302 std::make_shared<MatrixOwner>(op.getmat(), op.getmat(), split_matrix, tune_gpu_kernels, mixed_precision_scheme, reorder));
 303 });
 304
// "gpudilufloat": GPU DILU on a float-converted matrix. NOTE(review):
// original lines 314-318 (the GpuDILU/Converter/Adapter aliases; line 316
// below is a fragment of one of them) are missing from this listing.
 305 F::addCreator("gpudilufloat", [](const O& op, [[maybe_unused]] const P& prm, const std::function<V()>&, std::size_t) {
 306 const bool split_matrix = prm.get<bool>("split_matrix", true);
 307 const bool tune_gpu_kernels = prm.get<bool>("tune_gpu_kernels", true);
 308 const int mixed_precision_scheme = prm.get<int>("mixed_precision_scheme", 0);
 309 const bool reorder = prm.get<bool>("reorder", true);
 310
 311 using block_type = typename V::block_type;
 312 using VTo = Dune::BlockVector<Dune::FieldVector<float, block_type::dimension>>;
 313 using matrix_type_to = typename Dune::BCRSMatrix<Dune::FieldMatrix<float, block_type::dimension, block_type::dimension>>;
 316 gpuistl::GpuVector<float>, GpuDILU, matrix_type_to>;
 319
 320
 321 auto converted = std::make_shared<Converter>(op.getmat());
 322 // Note: converted->getConvertedMatrix() is passed twice, because the DILU needs both the CPU and GPU matrix.
 323 // The first argument will be converted to a GPU matrix, and the second one is used as a CPU matrix.
 324 auto adapted = std::make_shared<Adapter>(std::make_shared<MatrixOwner>(
 325 converted->getConvertedMatrix(), converted->getConvertedMatrix(),
 326 split_matrix, tune_gpu_kernels, mixed_precision_scheme, reorder));
 327 converted->setUnderlyingPreconditioner(adapted);
 328 return converted;
 329 });
 330#endif // HAVE_CUDA
 331 }
 332};
333
334
335} // namespace Opm
336
337
338#endif // OPM_STANDARDPRECONDITIONERS_SERIAL_HPP
The OpenMP thread-parallelized DILU preconditioner.
Definition: DILU.hpp:53
Definition: PreconditionerFactory.hpp:64
Definition: PressureBhpTransferPolicy.hpp:99
Definition: PressureTransferPolicy.hpp:55
Hierarchical collection of key/value pairs.
Definition: PropertyTree.hpp:39
DILU preconditioner on the GPU.
Definition: GpuDILU.hpp:53
Jacobi preconditioner on the GPU.
Definition: GpuJac.hpp:47
ILU0 preconditioner on the GPU.
Definition: OpmGpuILU0.hpp:51
Makes a CUDA preconditioner available to a CPU simulator.
Definition: PreconditionerAdapter.hpp:43
Convert a CPU matrix to a GPU matrix and use a CUDA preconditioner on the GPU.
Definition: PreconditionerCPUMatrixToGPUMatrix.hpp:42
Converts the field type (e.g., double to float) to benchmark single-precision preconditioners.
Definition: PreconditionerConvertFieldTypeAdapter.hpp:86
Definition: fvbaseprimaryvariables.hh:141
Definition: blackoilbioeffectsmodules.hh:43
@ ILU
Do not perform modified ILU.
Definition: PreconditionerFactory.hpp:43
static Criterion criterion(const PropertyTree &prm)
Definition: StandardPreconditioners_mpi.hpp:87
Definition: StandardPreconditioners_mpi.hpp:132