#ifndef OPM_DILU_HEADER_INCLUDED
#define OPM_DILU_HEADER_INCLUDED

#include <opm/common/ErrorMacros.hpp>
#include <opm/common/TimingMacros.hpp>

#include <dune/common/fmatrix.hh>
#include <dune/common/unused.hh>
#include <dune/common/version.hh>
#include <dune/istl/bcrsmatrix.hh>

template <class M, class X, class Y>
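// The template parameters are the matrix type M and the domain/range vector types X and Y
// of the MultithreadDILU preconditioner (summarized at the end of this listing).
//
// Constructor body, MultithreadDILU(const M& A): decide whether OpenMP multithreading is
// used and, if so, build a level-set reordering of the rows of A.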
        OPM_TIMEBLOCK(prec_construct);
        use_multithreading = omp_get_max_threads() > 1;
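
        // Rows are grouped into level sets (level_sets_, an Opm::SparseTable produced by the
        // row coloring in GraphColoring.hpp); rows within one level set have no mutual
        // dependencies and can therefore be processed in parallel.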
        if (use_multithreading) {
            A_reordered_.emplace(A_.N(), A_.N(), A_.nonzeroes(), M::row_wise);

            reordered_to_natural_ = std::vector<std::size_t>(A_.N());
            natural_to_reorder_ = std::vector<std::size_t>(A_.N());
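            // Record the permutation between the natural row order and the level-set order;
            // globCnt is the running row index in the reordered system.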
            std::size_t globCnt = 0;
            for (const auto& level_set : level_sets_) {
                for (const auto j : level_set) {
                    reordered_to_natural_[globCnt] = j;
                    natural_to_reorder_[j] = globCnt++;
                }
            }
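            // Build the sparsity pattern of A_reordered_: each destination row receives the
            // column indices of the corresponding row of A_ in the natural ordering.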
            for (auto dst_row_it = A_reordered_->createbegin(); dst_row_it != A_reordered_->createend(); ++dst_row_it) {
                auto src_row = A_.begin() + reordered_to_natural_[dst_row_it.index()];
                for (auto elem = src_row->begin(); elem != src_row->end(); elem++) {
                    dst_row_it.insert(elem.index());
                }
            }
        }

        Dinv_.resize(A_.N());
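
    // update(): refresh the inverted DILU diagonal Dinv_ after the entries of A_ change.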
        OPM_TIMEBLOCK(prec_update);
        if (use_multithreading) {
            parallelUpdate();
        } else {
            serialUpdate();
        }
    }

    void pre(X& v, Y& d) override
    {
        DUNE_UNUSED_PARAMETER(v);
        DUNE_UNUSED_PARAMETER(d);
    }

    void apply(X& v, const Y& d) override
    {
        OPM_TIMEBLOCK(prec_apply);
        if (use_multithreading) {
            parallelApply(v, d);
        } else {
            serialApply(v, d);
        }
    }
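
    // post(): clean-up hook of the preconditioner interface; nothing to do here.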
    void post(X& x) override
    {
        DUNE_UNUSED_PARAMETER(x);
    }

    virtual SolverCategory::Category category() const override
    {
        return SolverCategory::sequential;
    }
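
    // Private data: the matrix being preconditioned, its optional level-set-reordered copy,
    // the inverted DILU diagonal blocks, the level sets from the row coloring, the two
    // permutations between natural and reordered row indices, and the multithreading flag.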
    const M& A_;
    std::optional<M> A_reordered_;
    std::vector<typename M::block_type> Dinv_;
    Opm::SparseTable<std::size_t> level_sets_;
    std::vector<std::size_t> reordered_to_natural_;
    std::vector<std::size_t> natural_to_reorder_;
    bool use_multithreading{false};
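
    // serialUpdate(): recompute the DILU diagonal in natural row order,
    //   D_i = A_ii - sum_{j < i, A_ij != 0, A_ji != 0} A_ij * D_j^{-1} * A_ji,
    // and store its inverse in Dinv_[i].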
    void serialUpdate()
    {
        for (std::size_t row = 0; row < A_.N(); ++row) {
            Dinv_[row] = A_[row][row];
        }
        for (auto row = A_.begin(); row != A_.end(); ++row) {
            const auto row_i = row.index();
            auto Dinv_temp = Dinv_[row_i];
            for (auto a_ij = row->begin(); a_ij.index() < row_i; ++a_ij) {
                const auto col_j = a_ij.index();
                const auto a_ji = A_[col_j].find(row_i);
                if (a_ji != A_[col_j].end()) {
                    Dinv_temp -= (*a_ij) * Dune::FieldMatrix(Dinv_[col_j]) * (*a_ji);
                }
            }
            Dinv_temp.invert();
            Dinv_[row_i] = Dinv_temp;
        }
    }
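
    // parallelUpdate(): gather the diagonal blocks and refresh the values of A_reordered_,
    // then sweep the level sets in order; rows of one level are updated concurrently since
    // their lower-triangular couplings only reach rows of earlier levels.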
    void parallelUpdate()
    {
        #pragma omp parallel for
        for (std::size_t row = 0; row != A_.N(); ++row) {
            Dinv_[natural_to_reorder_[row]] = A_[row][row];
        }
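
        // Copy the current values of A_ into A_reordered_ (same pattern as built in the
        // constructor, now assigning the blocks instead of inserting column indices).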
        for (auto dst_row_it = A_reordered_->begin(); dst_row_it != A_reordered_->end(); ++dst_row_it) {
            auto src_row = A_.begin() + reordered_to_natural_[dst_row_it.index()];
            for (auto elem = src_row->begin(); elem != src_row->end(); elem++) {
                (*A_reordered_)[dst_row_it.index()][elem.index()] = *elem;
            }
        }
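
        // Factorization sweep over the level sets; level_start_idx is the reordered index of
        // the first row of the current level.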
        int level_start_idx = 0;
        for (int level = 0; level < level_sets_.size(); ++level) {
            const int num_of_rows_in_level = level_sets_[level].size();
            #pragma omp parallel for
            for (int row_idx_in_level = 0; row_idx_in_level < num_of_rows_in_level; ++row_idx_in_level) {
                auto row = A_reordered_->begin() + level_start_idx + row_idx_in_level;
                const auto row_i = reordered_to_natural_[row.index()];
                auto Dinv_temp = Dinv_[level_start_idx + row_idx_in_level];
                for (auto a_ij = row->begin(); a_ij.index() < row_i; ++a_ij) {
                    const auto col_j = natural_to_reorder_[a_ij.index()];
                    const auto a_ji = (*A_reordered_)[col_j].find(row_i);
                    if (a_ji != (*A_reordered_)[col_j].end()) {
                        Dinv_temp -= (*a_ij) * Dune::FieldMatrix(Dinv_[col_j]) * (*a_ji);
                    }
                }
                Dinv_temp.invert();
                Dinv_[level_start_idx + row_idx_in_level] = Dinv_temp;
            }
            level_start_idx += num_of_rows_in_level;
        }
    }
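
    // serialApply(): apply the DILU preconditioner M = (D + L_A) D^{-1} (D + U_A) to the
    // defect d, i.e. compute v = M^{-1} d by a forward solve (D + L_A) z = d followed by a
    // backward solve (D + U_A) v = D z.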
    void serialApply(X& v, const Y& d)
    {
        using Xblock = typename X::block_type;
        using Yblock = typename Y::block_type;
        OPM_TIMEBLOCK(lower_solve);
        auto endi = A_.end();
        for (auto row = A_.begin(); row != endi; ++row) {
            const auto row_i = row.index();
            Yblock rhs = d[row_i];
            for (auto a_ij = (*row).begin(); a_ij.index() < row_i; ++a_ij) {
                const auto col_j = a_ij.index();
                a_ij->mmv(v[col_j], rhs);
            }
            Dinv_[row_i].mv(rhs, v[row_i]);
        }
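
        // Backward solve (D + U_A) v = D z, i.e. v_i = z_i - D_i^{-1} * sum_{j > i} A_ij * v_j.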
        OPM_TIMEBLOCK(upper_solve);
        auto rendi = A_.beforeBegin();
        for (auto row = A_.beforeEnd(); row != rendi; --row) {
            const auto row_i = row.index();
            Xblock rhs(0.0);
            for (auto a_ij = (*row).beforeEnd(); a_ij.index() > row_i; --a_ij) {
                const auto col_j = a_ij.index();
                a_ij->umv(v[col_j], rhs);
            }
            Dinv_[row_i].mmv(rhs, v[row_i]);
        }
    }
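
    // parallelApply(): the same forward/backward substitution as serialApply(), but rows of
    // each level set are processed concurrently with OpenMP, traversing the reordered matrix.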
    void parallelApply(X& v, const Y& d)
    {
        using Xblock = typename X::block_type;
        using Yblock = typename Y::block_type;
        OPM_TIMEBLOCK(lower_solve);
        int level_start_idx = 0;
        for (int level = 0; level < level_sets_.size(); ++level) {
            const int num_of_rows_in_level = level_sets_[level].size();
            #pragma omp parallel for
            for (int row_idx_in_level = 0; row_idx_in_level < num_of_rows_in_level; ++row_idx_in_level) {
                auto row = A_reordered_->begin() + level_start_idx + row_idx_in_level;
                const auto row_i = reordered_to_natural_[row.index()];
                Yblock rhs = d[row_i];
                for (auto a_ij = (*row).begin(); a_ij.index() < row_i; ++a_ij) {
                    const auto col_j = a_ij.index();
                    a_ij->mmv(v[col_j], rhs);
                }
                Dinv_[level_start_idx + row_idx_in_level].mv(rhs, v[row_i]);
            }
            level_start_idx += num_of_rows_in_level;
        }
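
        // Backward solve, sweeping the level sets in reverse order.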
        OPM_TIMEBLOCK(upper_solve);
        int level_start_idx = A_.N();
        for (int level = level_sets_.size() - 1; level >= 0; --level) {
            const int num_of_rows_in_level = level_sets_[level].size();
            level_start_idx -= num_of_rows_in_level;
            #pragma omp parallel for
            for (int row_idx_in_level = num_of_rows_in_level - 1; row_idx_in_level >= 0; --row_idx_in_level) {
                auto row = A_reordered_->begin() + level_start_idx + row_idx_in_level;
                const auto row_i = reordered_to_natural_[row.index()];
                Xblock rhs(0.0);
                for (auto a_ij = (*row).beforeEnd(); a_ij.index() > row_i; --a_ij) {
                    const auto col_j = a_ij.index();
                    a_ij->umv(v[col_j], rhs);
                }
                Dinv_[level_start_idx + row_idx_in_level].mmv(rhs, v[row_i]);
            }
        }
    }

// Member summary (from the generated documentation):
//  - MultithreadDILU: the OpenMP thread parallelized DILU preconditioner, implementing the
//    PreconditionerWithUpdate interface (PreconditionerWithUpdate.hpp), which adds the
//    update() method to the preconditioner interface.
//  - matrix_type = M: the matrix type the preconditioner is for.
//  - domain_type = X: the domain type of the preconditioner.
//  - range_type = Y: the range type of the preconditioner.
//  - field_type = typename X::field_type: the field type of the preconditioner, together
//    with the scalar type underlying the field_type.
//  - MultithreadDILU(const M& A): constructor.
//  - void pre(X& v, Y& d) override: prepare the preconditioner.
//  - void apply(X& v, const Y& d) override: apply the preconditioner.
//  - void post(X& x) override: clean up.
//  - void update() override: update the preconditioner.
//  - virtual SolverCategory::Category category() const override: category of the
//    preconditioner (see SolverCategory::Category).
//  - virtual bool hasPerfectUpdate() const override.
//  - std::vector<typename M::block_type> getDiagonal().
//  - Opm::SparseTable<std::size_t> getMatrixRowColoring(const M& matrix, ColoringType coloringType)
//    (GraphColoring.hpp): given a matrix and a dependency type, returns a SparseTable that
//    groups the rows into sets which can be executed in parallel.
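
// A minimal usage sketch (not part of this header; the vector/matrix typedefs, the assembly
// helper and the enclosing namespace of MultithreadDILU are assumptions for illustration):
//
//   #include <dune/istl/bcrsmatrix.hh>
//   #include <dune/istl/bvector.hh>
//
//   using Mat = Dune::BCRSMatrix<Dune::FieldMatrix<double, 3, 3>>;
//   using Vec = Dune::BlockVector<Dune::FieldVector<double, 3>>;
//
//   Mat A = assembleSystemMatrix();          // hypothetical assembly helper
//   Vec v(A.N()), d(A.N());                  // update v and defect d
//
//   MultithreadDILU<Mat, Vec, Vec> dilu(A);  // builds the level-set reordering if OpenMP threads are available
//   dilu.apply(v, d);                        // v = M^{-1} d with M = (D + L_A) D^{-1} (D + U_A)
//   dilu.update();                           // refresh Dinv_ after the values of A change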