17#ifndef OPM_DILU_HEADER_INCLUDED
18#define OPM_DILU_HEADER_INCLUDED
20#include <opm/common/ErrorMacros.hpp>
21#include <opm/common/TimingMacros.hpp>
24#include <dune/common/fmatrix.hh>
25#include <dune/common/unused.hh>
26#include <dune/common/version.hh>
27#include <dune/istl/bcrsmatrix.hh>
51template <
class M,
class X,
class Y>
71 OPM_TIMEBLOCK(prec_construct);
74 use_multithreading = omp_get_max_threads() > 1;
77 if (use_multithreading) {
78 A_reordered_.emplace(A_.N(), A_.N(), A_.nonzeroes(), M::row_wise);
83 reordered_to_natural_ = std::vector<std::size_t>(A_.N());
84 natural_to_reorder_ = std::vector<std::size_t>(A_.N());
86 for (
const auto& level_set : level_sets_) {
87 for (
const auto j : level_set) {
88 reordered_to_natural_[globCnt] = j;
89 natural_to_reorder_[j] = globCnt++;
93 for (
auto dst_row_it = A_reordered_->createbegin(); dst_row_it != A_reordered_->createend(); ++dst_row_it) {
94 auto src_row = A_.begin() + reordered_to_natural_[dst_row_it.index()];
96 for (
auto elem = src_row->begin(); elem != src_row->end(); elem++) {
97 dst_row_it.insert(elem.index());
102 Dinv_.resize(A_.N());
112 OPM_TIMEBLOCK(prec_update);
113 if (use_multithreading) {
124 void pre(X& v, Y& d)
override
126 DUNE_UNUSED_PARAMETER(v);
127 DUNE_UNUSED_PARAMETER(d);
135 void apply(X& v,
const Y& d)
override
137 OPM_TIMEBLOCK(prec_apply);
138 if (use_multithreading) {
151 DUNE_UNUSED_PARAMETER(x);
160 virtual SolverCategory::Category
category()
const override
162 return SolverCategory::sequential;
170 std::optional<M> A_reordered_;
172 std::vector<typename M::block_type> Dinv_;
174 Opm::SparseTable<std::size_t> level_sets_;
176 std::vector<std::size_t> reordered_to_natural_;
178 std::vector<std::size_t> natural_to_reorder_;
180 bool use_multithreading{
false};
184 for (std::size_t row = 0; row < A_.N(); ++row) {
185 Dinv_[row] = A_[row][row];
187 for (
auto row = A_.begin(); row != A_.end(); ++row) {
188 const auto row_i = row.index();
189 auto Dinv_temp = Dinv_[row_i];
190 for (
auto a_ij = row->begin(); a_ij.index() < row_i; ++a_ij) {
191 const auto col_j = a_ij.index();
192 const auto a_ji = A_[col_j].find(row_i);
194 if (a_ji != A_[col_j].end()) {
196 Dinv_temp -= (*a_ij) * Dune::FieldMatrix(Dinv_[col_j]) * (*a_ji);
200 Dinv_[row_i] = Dinv_temp;
204 void parallelUpdate()
207#pragma omp parallel for
209 for (std::size_t row = 0; row != A_.N(); ++row) {
210 Dinv_[natural_to_reorder_[row]] = A_[row][row];
214 for (
auto dst_row_it = A_reordered_->begin(); dst_row_it != A_reordered_->end(); ++dst_row_it) {
215 auto src_row = A_.begin() + reordered_to_natural_[dst_row_it.index()];
216 for (
auto elem = src_row->begin(); elem != src_row->end(); elem++) {
217 (*A_reordered_)[dst_row_it.index()][elem.index()] = *elem;
221 int level_start_idx = 0;
222 for (
int level = 0; level < level_sets_.size(); ++level) {
223 const int num_of_rows_in_level = level_sets_[level].size();
226#pragma omp parallel for
228 for (
int row_idx_in_level = 0; row_idx_in_level < num_of_rows_in_level; ++row_idx_in_level) {
229 auto row = A_reordered_->begin() + level_start_idx + row_idx_in_level;
230 const auto row_i = reordered_to_natural_[row.index()];
232 auto Dinv_temp = Dinv_[level_start_idx + row_idx_in_level];
233 for (
auto a_ij = row->begin(); a_ij.index() < row_i; ++a_ij) {
234 const auto col_j = natural_to_reorder_[a_ij.index()];
235 const auto a_ji = (*A_reordered_)[col_j].find(row_i);
236 if (a_ji != (*A_reordered_)[col_j].end()) {
238 Dinv_temp -= (*a_ij) * Dune::FieldMatrix(Dinv_[col_j]) * (*a_ji);
242 Dinv_[level_start_idx + row_idx_in_level] = Dinv_temp;
245 level_start_idx += num_of_rows_in_level;
249 void serialApply(X& v,
const Y& d)
258 using Xblock =
typename X::block_type;
259 using Yblock =
typename Y::block_type;
261 OPM_TIMEBLOCK(lower_solve);
262 auto endi = A_.end();
263 for (
auto row = A_.begin(); row != endi; ++row) {
264 const auto row_i = row.index();
265 Yblock rhs = d[row_i];
266 for (
auto a_ij = (*row).begin(); a_ij.index() < row_i; ++a_ij) {
269 const auto col_j = a_ij.index();
270 a_ij->mmv(v[col_j], rhs);
274 Dinv_[row_i].mv(rhs, v[row_i]);
279 OPM_TIMEBLOCK(upper_solve);
282 auto rendi = A_.beforeBegin();
283 for (
auto row = A_.beforeEnd(); row != rendi; --row) {
284 const auto row_i = row.index();
287 for (
auto a_ij = (*row).beforeEnd(); a_ij.index() > row_i; --a_ij) {
290 const auto col_j = a_ij.index();
291 a_ij->umv(v[col_j], rhs);
296 Dinv_[row_i].mmv(rhs, v[row_i]);
301 void parallelApply(X& v,
const Y& d)
303 using Xblock =
typename X::block_type;
304 using Yblock =
typename Y::block_type;
306 OPM_TIMEBLOCK(lower_solve);
307 int level_start_idx = 0;
308 for (
int level = 0; level < level_sets_.size(); ++level) {
309 const int num_of_rows_in_level = level_sets_[level].size();
312#pragma omp parallel for
314 for (
int row_idx_in_level = 0; row_idx_in_level < num_of_rows_in_level; ++row_idx_in_level) {
315 auto row = A_reordered_->begin() + level_start_idx + row_idx_in_level;
316 const auto row_i = reordered_to_natural_[row.index()];
317 Yblock rhs = d[row_i];
318 for (
auto a_ij = (*row).begin(); a_ij.index() < row_i; ++a_ij) {
321 const auto col_j = a_ij.index();
322 a_ij->mmv(v[col_j], rhs);
326 Dinv_[level_start_idx + row_idx_in_level].mv(rhs, v[row_i]);
328 level_start_idx += num_of_rows_in_level;
333 int level_start_idx = A_.N();
335 for (
int level = level_sets_.size() - 1; level >= 0; --level) {
336 const int num_of_rows_in_level = level_sets_[level].size();
337 level_start_idx -= num_of_rows_in_level;
339#pragma omp parallel for
341 for (
int row_idx_in_level = num_of_rows_in_level - 1; row_idx_in_level >= 0; --row_idx_in_level) {
342 auto row = A_reordered_->begin() + level_start_idx + row_idx_in_level;
343 const auto row_i = reordered_to_natural_[row.index()];
345 for (
auto a_ij = (*row).beforeEnd(); a_ij.index() > row_i; --a_ij) {
347 const auto col_j = a_ij.index();
348 a_ij->umv(v[col_j], rhs);
353 Dinv_[level_start_idx + row_idx_in_level].mmv(rhs, v[row_i]);
The OpenMP thread-parallelized DILU preconditioner.
Definition: DILU.hpp:53
void post(X &x) override
Clean up.
Definition: DILU.hpp:149
M matrix_type
The matrix type the preconditioner is for.
Definition: DILU.hpp:56
void apply(X &v, const Y &d) override
Apply the preconditioner.
Definition: DILU.hpp:135
X domain_type
The domain type of the preconditioner.
Definition: DILU.hpp:58
void pre(X &v, Y &d) override
Prepare the preconditioner.
Definition: DILU.hpp:124
MultithreadDILU(const M &A)
scalar type underlying the field_type
Definition: DILU.hpp:68
virtual SolverCategory::Category category() const override
Category of the preconditioner (see SolverCategory::Category)
Definition: DILU.hpp:160
Y range_type
The range type of the preconditioner.
Definition: DILU.hpp:60
void update() override
Update the preconditioner.
Definition: DILU.hpp:110
std::vector< typename M::block_type > getDiagonal()
Definition: DILU.hpp:154
typename X::field_type field_type
The field type of the preconditioner.
Definition: DILU.hpp:62
Interface class adding the update() method to the preconditioner interface.
Definition: PreconditionerWithUpdate.hpp:32
Definition: SupportsFaceTag.hpp:27
Opm::SparseTable< std::size_t > getMatrixRowColoring(const M &matrix, ColoringType coloringType)
Given a matrix and dependency type, returns a SparseTable grouping the rows into sets that can be executed in parallel.
Definition: GraphColoring.hpp:248