HypreSetup.hpp
/*
  Copyright 2025 Equinor ASA

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef OPM_HYPRE_SETUP_HPP
#define OPM_HYPRE_SETUP_HPP

#include <opm/simulators/linalg/PropertyTree.hpp>
#include <opm/simulators/linalg/gpuistl/HypreDataStructures.hpp>
#include <opm/simulators/linalg/gpuistl/HypreErrorHandling.hpp>
#include <opm/simulators/linalg/gpuistl/gpu_type_detection.hpp>

#include <dune/istl/owneroverlapcopy.hh>
#include <dune/istl/paamg/graph.hh>
#include <dune/istl/paamg/pinfo.hh>
#include <dune/istl/repartition.hh>
#if HAVE_CUDA
#if USE_HIP
#include <opm/simulators/linalg/gpuistl_hip/GpuSparseMatrix.hpp>
#else
#include <opm/simulators/linalg/gpuistl/GpuSparseMatrix.hpp>
#endif
#endif // HAVE_CUDA

#include <HYPRE.h>
#include <HYPRE_parcsr_ls.h>
#include <_hypre_utilities.h>

#include <algorithm>
#include <cstddef>
#include <numeric>
#include <type_traits>
#include <vector>

namespace Opm::gpuistl::HypreInterface
{

// GPU-specific helper functions
template <typename T>
SparsityPattern setupSparsityPatternFromGpuMatrix(const GpuSparseMatrix<T>& gpu_matrix,
                                                  const ParallelInfo& par_info,
                                                  bool owner_first);

template <typename T>
std::vector<HYPRE_Int> computeRowIndexesWithMappingGpu(const GpuSparseMatrix<T>& gpu_matrix,
                                                       const std::vector<int>& local_dune_to_local_hypre);

// Serial helper functions
ParallelInfo setupHypreParallelInfoSerial(HYPRE_Int N);

// Parallel helper functions
template <typename CommType, typename MatrixType>
ParallelInfo setupHypreParallelInfoParallel(const CommType& comm, const MatrixType& matrix);

template <typename MatrixType>
SparsityPattern
setupSparsityPatternFromCpuMatrix(const MatrixType& matrix, const ParallelInfo& par_info, bool owner_first);

template <typename MatrixType>
std::vector<HYPRE_Int> computeRowIndexesWithMappingCpu(const MatrixType& matrix,
                                                       const std::vector<HYPRE_Int>& ncols,
                                                       const std::vector<int>& local_dune_to_local_hypre,
                                                       bool owner_first);

template <typename MatrixType>
std::vector<HYPRE_Int> computeRowIndexesWithMappingCpu(const MatrixType& matrix,
                                                       const std::vector<int>& local_dune_to_local_hypre);

/**
 * \brief Initialize the Hypre library and set memory/execution policy
 *
 * \param use_gpu_backend Whether Hypre should use device (GPU) memory and
 *        execution, or run on the host
 */
inline void
initialize(bool use_gpu_backend)
{
    // Set memory location and execution policy
#if HYPRE_USING_CUDA || HYPRE_USING_HIP
    if (use_gpu_backend) {
        OPM_HYPRE_SAFE_CALL(HYPRE_SetMemoryLocation(HYPRE_MEMORY_DEVICE));
        OPM_HYPRE_SAFE_CALL(HYPRE_SetExecutionPolicy(HYPRE_EXEC_DEVICE));
        // Use Hypre's own SpGEMM instead of the vendor implementation
        OPM_HYPRE_SAFE_CALL(HYPRE_SetSpGemmUseVendor(false));
        // Use cuRand for PMIS coarsening
        OPM_HYPRE_SAFE_CALL(HYPRE_SetUseGpuRand(1));
        OPM_HYPRE_SAFE_CALL(HYPRE_DeviceInitialize());
        OPM_HYPRE_SAFE_CALL(HYPRE_PrintDeviceInfo());
    } else
#endif
    {
        OPM_HYPRE_SAFE_CALL(HYPRE_SetMemoryLocation(HYPRE_MEMORY_HOST));
        OPM_HYPRE_SAFE_CALL(HYPRE_SetExecutionPolicy(HYPRE_EXEC_HOST));
    }
}
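
// Typical call order (illustrative sketch): initialize() is called once,
// before any other function in this interface, e.g.
//
//   Opm::gpuistl::HypreInterface::initialize(/*use_gpu_backend=*/false);
//   HYPRE_Solver solver = Opm::gpuistl::HypreInterface::createAMGSolver();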

/**
 * \brief Create Hypre solver (BoomerAMG)
 *
 * \return Handle to the newly created BoomerAMG solver
 */
inline HYPRE_Solver
createAMGSolver()
{
    HYPRE_Solver solver;
    OPM_HYPRE_SAFE_CALL(HYPRE_BoomerAMGCreate(&solver));
    return solver;
}

/**
 * \brief Set solver parameters from property tree
 *
 * \param solver BoomerAMG solver handle
 * \param prm Property tree with solver parameters; defaults are used for
 *        missing keys
 * \param use_gpu_backend Whether GPU-suited defaults should be applied
 */
inline void
setSolverParameters(HYPRE_Solver solver, const PropertyTree& prm, bool use_gpu_backend)
{
    // Set parameters from property tree with defaults
    HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetPrintLevel(solver, prm.get<int>("print_level", 0)));
    HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetMaxIter(solver, prm.get<int>("max_iter", 1)));
    HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetStrongThreshold(solver, prm.get<double>("strong_threshold", 0.5)));
    HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetAggTruncFactor(solver, prm.get<double>("agg_trunc_factor", 0.3)));
    HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetInterpType(solver, prm.get<int>("interp_type", 6)));
    HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetMaxLevels(solver, prm.get<int>("max_levels", 15)));
    HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetTol(solver, prm.get<double>("tolerance", 0.0)));

    if (use_gpu_backend) {
        HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetRelaxType(solver, prm.get<int>("relax_type", 16)));
        HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetCoarsenType(solver, prm.get<int>("coarsen_type", 8)));
        HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetAggNumLevels(solver, prm.get<int>("agg_num_levels", 0)));
        HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetAggInterpType(solver, prm.get<int>("agg_interp_type", 6)));
        // Keep the transpose to avoid SpMTV
        HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetKeepTranspose(solver, true));
    } else {
        HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetRelaxType(solver, prm.get<int>("relax_type", 13)));
        HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetCoarsenType(solver, prm.get<int>("coarsen_type", 10)));
        HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetAggNumLevels(solver, prm.get<int>("agg_num_levels", 1)));
        HYPRE_SAFE_CALL(HYPRE_BoomerAMGSetAggInterpType(solver, prm.get<int>("agg_interp_type", 4)));
    }
}
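
// Example (illustrative): a minimal JSON property tree overriding some of the
// defaults above; any key that is absent falls back to the default used in
// setSolverParameters().
//
//   {
//     "print_level": 1,
//     "strong_threshold": 0.5,
//     "coarsen_type": 10,
//     "relax_type": 13
//   }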

/**
 * \brief Create Hypre matrix
 *
 * \param N Number of rows owned by this process
 * \param dof_offset Global index of this process's first owned row
 * \param comm Communication object (sequential or parallel)
 * \return Initialized HYPRE_IJMatrix covering rows [dof_offset, dof_offset + N - 1]
 */
template <typename CommType>
HYPRE_IJMatrix
createMatrix(HYPRE_Int N, HYPRE_Int dof_offset, const CommType& comm)
{
    HYPRE_IJMatrix matrix;
    MPI_Comm mpi_comm;
    if constexpr (std::is_same_v<CommType, Dune::Amg::SequentialInformation>) {
        mpi_comm = MPI_COMM_SELF;
    } else {
        mpi_comm = comm.communicator();
    }
    OPM_HYPRE_SAFE_CALL(
        HYPRE_IJMatrixCreate(mpi_comm, dof_offset, dof_offset + (N - 1), dof_offset, dof_offset + (N - 1), &matrix));
    OPM_HYPRE_SAFE_CALL(HYPRE_IJMatrixSetObjectType(matrix, HYPRE_PARCSR));
    OPM_HYPRE_SAFE_CALL(HYPRE_IJMatrixInitialize(matrix));
    return matrix;
}

/**
 * \brief Create Hypre vector
 *
 * \param N Number of entries owned by this process
 * \param dof_offset Global index of this process's first owned entry
 * \param comm Communication object (sequential or parallel)
 * \return Initialized HYPRE_IJVector covering entries [dof_offset, dof_offset + N - 1]
 */
template <typename CommType>
HYPRE_IJVector
createVector(HYPRE_Int N, HYPRE_Int dof_offset, const CommType& comm)
{
    HYPRE_IJVector vector;
    MPI_Comm mpi_comm;
    if constexpr (std::is_same_v<CommType, Dune::Amg::SequentialInformation>) {
        mpi_comm = MPI_COMM_SELF;
    } else {
        mpi_comm = comm.communicator();
    }
    OPM_HYPRE_SAFE_CALL(HYPRE_IJVectorCreate(mpi_comm, dof_offset, dof_offset + (N - 1), &vector));
    OPM_HYPRE_SAFE_CALL(HYPRE_IJVectorSetObjectType(vector, HYPRE_PARCSR));
    OPM_HYPRE_SAFE_CALL(HYPRE_IJVectorInitialize(vector));
    return vector;
}
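
// Minimal usage sketch (illustrative) for a rank owning N rows starting at
// global index dof_offset:
//
//   Dune::Amg::SequentialInformation comm; // serial run
//   HYPRE_IJMatrix A = createMatrix(N, dof_offset, comm);
//   HYPRE_IJVector x = createVector(N, dof_offset, comm);
//   // ... assemble and solve ...
//   destroyMatrix(A);
//   destroyVector(x);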

/**
 * \brief Destroy Hypre solver
 */
inline void
destroySolver(HYPRE_Solver solver)
{
    if (solver) {
        OPM_HYPRE_SAFE_CALL(HYPRE_BoomerAMGDestroy(solver));
    }
}

/**
 * \brief Destroy Hypre matrix
 */
inline void
destroyMatrix(HYPRE_IJMatrix matrix)
{
    if (matrix) {
        OPM_HYPRE_SAFE_CALL(HYPRE_IJMatrixDestroy(matrix));
    }
}

/**
 * \brief Destroy Hypre vector
 */
inline void
destroyVector(HYPRE_IJVector vector)
{
    if (vector) {
        OPM_HYPRE_SAFE_CALL(HYPRE_IJVectorDestroy(vector));
    }
}

/**
 * \brief Setup parallel information for Hypre (automatically detects serial/parallel)
 */
template <typename CommType, typename MatrixType>
ParallelInfo
setupHypreParallelInfo(const CommType& comm, const MatrixType& matrix)
{
    if constexpr (std::is_same_v<CommType, Dune::Amg::SequentialInformation>) {
        return setupHypreParallelInfoSerial(static_cast<HYPRE_Int>(matrix.N()));
    } else {
        return setupHypreParallelInfoParallel(comm, matrix);
    }
}

/**
 * \brief Setup parallel information for Hypre in serial case
 *
 * In serial runs every DOF is owned, so all mappings are the identity.
 */
inline ParallelInfo
setupHypreParallelInfoSerial(HYPRE_Int N)
{
    ParallelInfo info;
    info.N_owned = N;

    info.local_dune_to_local_hypre.resize(N);
    info.local_dune_to_global_hypre.resize(N);
    info.local_hypre_to_local_dune.resize(N);

    std::iota(info.local_dune_to_local_hypre.begin(), info.local_dune_to_local_hypre.end(), 0);
    std::iota(info.local_hypre_to_local_dune.begin(), info.local_hypre_to_local_dune.end(), 0);
    std::iota(info.local_dune_to_global_hypre.begin(), info.local_dune_to_global_hypre.end(), 0);

    info.dof_offset = 0;
    info.owner_first = true;

    return info;
}
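
// Example: setupHypreParallelInfoSerial(3) returns the identity mapping
// [0, 1, 2] for all three index maps, with N_owned = 3, dof_offset = 0 and
// owner_first = true.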

/**
 * \brief Create mappings between Dune and HYPRE indexing for parallel decomposition
 *
 * Builds the index maps in five steps: (1) ownership detection from Dune's
 * index set, (2) local reordering with owner-first detection, (3) global
 * offset calculation across ranks, (4) global index assignment for owned
 * DOFs, and (5) exchange of global indices for ghost DOFs.
 *
 * \param comm Dune communication object holding the parallel index set
 * \param matrix Local matrix (used to size and, if needed, complete the index set)
 * \return ParallelInfo describing this process's part of the global system
 */
template <typename CommType, typename MatrixType>
inline ParallelInfo
setupHypreParallelInfoParallel(const CommType& comm, const MatrixType& matrix)
{
    ParallelInfo info;
    const auto& collective_comm = comm.communicator();

    // Initialize mapping arrays to the not-owned (-1) state
    info.local_dune_to_local_hypre.resize(comm.indexSet().size(), -1);
    info.local_dune_to_global_hypre.resize(comm.indexSet().size(), -1);

    // Handle edge case: ensure the index set covers all matrix rows
    if (matrix.N() != comm.indexSet().size()) {
        // This is unlikely to be triggered in OPM; fill any holes in the
        // index set so that every matrix row has an index. MatrixGraph and
        // fillIndexSetHoles require non-const references, hence the casts.
        const_cast<CommType&>(comm).buildGlobalLookup(matrix.N());
        Dune::Amg::MatrixGraph<MatrixType> graph(const_cast<MatrixType&>(matrix));
        Dune::fillIndexSetHoles(graph, const_cast<CommType&>(comm));
        assert(matrix.N() == comm.indexSet().size());
    }

    // STEP 1: Ownership Detection
    // Scan Dune's index set to identify which DOFs this process owns.
    // Note: the iteration order of the index set is NOT sequential by local index.
    for (const auto& ind : comm.indexSet()) {
        const int local_ind = ind.local().local();
        if (ind.local().attribute() == Dune::OwnerOverlapCopyAttributeSet::owner) {
            // Mark as owned (temporarily 1, replaced with the proper local index below)
            info.local_dune_to_local_hypre[local_ind] = 1;
        } else {
            // Mark as ghost/non-owned
            info.local_dune_to_local_hypre[local_ind] = -1;
        }
    }

    // STEP 2: Local Reordering & Owner-First Detection
    // Create compact [0..N_owned-1] local HYPRE indexing for owned DOFs and
    // simultaneously detect whether all owned DOFs appear before all ghost DOFs.
    bool owner_first = true;
    bool visited_ghost = false; // Have we seen any ghost DOF yet?
    std::size_t count = 0;      // Counter for owned DOFs

    for (std::size_t i = 0; i < info.local_dune_to_local_hypre.size(); ++i) {
        if (info.local_dune_to_local_hypre[i] < 0) {
            visited_ghost = true;
        } else {
            // This is an owned DOF - assign its local HYPRE index
            info.local_dune_to_local_hypre[i] = count;
            // Store the inverse mapping
            info.local_hypre_to_local_dune.push_back(i);

            // Check if we've seen ghost DOFs before this owner
            owner_first = owner_first && !visited_ghost;
            count += 1;
        }
    }

    // owner_first determines how data can be copied later:
    // contiguously when true, via index mapping when false.
    info.owner_first = owner_first;
    info.N_owned = count;

    // STEP 3: Global Offset Calculation
    // Coordinate with the other processes to determine global index ranges;
    // each process owns a contiguous range of global indices.
    std::vector<int> dof_counts_per_process(collective_comm.size());
    collective_comm.allgather(&info.N_owned, 1, dof_counts_per_process.data());

    // Calculate this process's global offset (sum of DOFs on processes with lower rank)
    info.dof_offset = std::accumulate(dof_counts_per_process.begin(),
                                      dof_counts_per_process.begin() + collective_comm.rank(),
                                      0);

    // STEP 4: Create Global Indices for Owned DOFs
    // Convert local HYPRE indices to global HYPRE indices by adding the offset
    for (std::size_t i = 0; i < info.local_dune_to_local_hypre.size(); ++i) {
        if (info.local_dune_to_local_hypre[i] >= 0) {
            // Owned DOF: global index = local HYPRE index + this process's offset
            info.local_dune_to_global_hypre[i] = info.local_dune_to_local_hypre[i] + info.dof_offset;
        } else {
            info.local_dune_to_global_hypre[i] = -1;
        }
    }

    if (collective_comm.rank() > 0) {
        assert(info.dof_offset > 0);
    }

    // STEP 5: Exchange global indices for ghost DOFs
    // After this call, ghost DOFs will have their correct global indices
    comm.copyOwnerToAll(info.local_dune_to_global_hypre, info.local_dune_to_global_hypre);

    return info;
}
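
// Worked example (illustrative): two ranks, rank 0 owning global rows 0..2,
// rank 1 owning global rows 3..4. On rank 1, a local Dune ordering
// [ghost, owned, owned] yields
//   local_dune_to_local_hypre  = [-1, 0, 1]
//   local_hypre_to_local_dune  = [1, 2]
//   N_owned = 2, dof_offset = 3, owner_first = false
//   local_dune_to_global_hypre = [g, 3, 4], where g is the ghost row's global
//   index received in STEP 5.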

/**
 * \brief Setup sparsity pattern from matrix (automatically detects CPU/GPU type)
 */
template <typename MatrixType>
SparsityPattern
setupSparsityPattern(const MatrixType& matrix, const ParallelInfo& par_info, bool owner_first)
{
#if HYPRE_USING_CUDA || HYPRE_USING_HIP
    if constexpr (is_gpu_type<MatrixType>::value) {
        return setupSparsityPatternFromGpuMatrix(matrix, par_info, owner_first);
    } else
#endif
    {
        return setupSparsityPatternFromCpuMatrix(matrix, par_info, owner_first);
    }
}

/**
 * \brief Setup sparsity pattern from CPU matrix (BCRSMatrix)
 *
 * \param matrix Local Dune matrix
 * \param par_info Parallel index information from setupHypreParallelInfo()
 * \param owner_first Whether owned rows precede all ghost rows locally
 * \return CSR-style sparsity pattern in global HYPRE indices
 */
template <typename MatrixType>
SparsityPattern
setupSparsityPatternFromCpuMatrix(const MatrixType& matrix, const ParallelInfo& par_info, bool owner_first)
{
    SparsityPattern pattern;

    // Determine the size of the cols array based on owner_first
    if (owner_first) {
        // For the owner_first=true case, count how many owned entries there are
        std::size_t cols_size = 0;
        for (auto row = matrix.begin(); row != matrix.end(); ++row) {
            const int rowIdx = row.index();
            if (par_info.local_dune_to_local_hypre[rowIdx] >= 0) {
                cols_size += row->size();
            }
        }
        pattern.nnz = cols_size;
    } else {
        // Full matrix space case: all entries (including ghost rows)
        pattern.nnz = matrix.nonzeroes();
    }

    // Setup host arrays
    pattern.ncols.resize(par_info.N_owned);
    pattern.rows.resize(par_info.N_owned);
    pattern.cols.resize(pattern.nnz);

    int pos = 0;
    for (auto row = matrix.begin(); row != matrix.end(); ++row) {
        const int rind = row.index();
        const int local_rowIdx = par_info.local_dune_to_local_hypre[rind];

        // For owner_first=true: skip ghost rows entirely
        // For owner_first=false: process all rows (owned + ghost)
        if (owner_first && local_rowIdx < 0) {
            continue;
        }

        if (local_rowIdx >= 0) {
            // This is an owned row - record its metadata
            const int global_rowIdx = par_info.local_dune_to_global_hypre[rind];
            pattern.rows[local_rowIdx] = global_rowIdx;
            pattern.ncols[local_rowIdx] = row->size();
        }

        // Add column indices for this row
        for (auto col = row->begin(); col != row->end(); ++col) {
            const int global_colIdx = par_info.local_dune_to_global_hypre[col.index()];
            assert(global_colIdx >= 0);
            pattern.cols[pos++] = global_colIdx;
        }
    }

    return pattern;
}
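
// Worked example (illustrative): a serial 3x3 tridiagonal matrix gives
//   ncols = [2, 3, 2]           (non-zeros per owned row)
//   rows  = [0, 1, 2]           (global row indices)
//   cols  = [0,1, 0,1,2, 1,2]   (global column indices, CSR order)
//   nnz   = 7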

#if HYPRE_USING_CUDA || HYPRE_USING_HIP
/**
 * \brief Setup sparsity pattern from GPU matrix (GpuSparseMatrix)
 *
 * \param gpu_matrix Local GPU matrix
 * \param par_info Parallel index information from setupHypreParallelInfo()
 * \param owner_first Whether owned rows precede all ghost rows locally
 * \return CSR-style sparsity pattern in global HYPRE indices
 */
template <typename T>
SparsityPattern
setupSparsityPatternFromGpuMatrix(const GpuSparseMatrix<T>& gpu_matrix,
                                  const ParallelInfo& par_info,
                                  bool owner_first)
{
    SparsityPattern pattern;

    // Get row pointers and column indices from the GPU matrix
    // (one-time host copy during setup)
    auto host_row_ptrs = gpu_matrix.getRowIndices().asStdVector();
    auto host_col_indices = gpu_matrix.getColumnIndices().asStdVector();

    // Determine the size of the cols array based on owner_first
    if (owner_first) {
        // For the owner_first=true case, count how many owned entries there are
        std::size_t cols_size = 0;
        for (int rind = 0; rind < static_cast<int>(gpu_matrix.N()); ++rind) {
            if (par_info.local_dune_to_local_hypre[rind] >= 0) {
                cols_size += host_row_ptrs[rind + 1] - host_row_ptrs[rind];
            }
        }
        pattern.nnz = cols_size;
    } else {
        // Full matrix space case: all entries (including ghost rows)
        pattern.nnz = gpu_matrix.nonzeroes();
    }

    // Setup host arrays
    pattern.ncols.resize(par_info.N_owned);
    pattern.rows.resize(par_info.N_owned);
    pattern.cols.resize(pattern.nnz);

    int pos = 0;
    for (int rind = 0; rind < static_cast<int>(gpu_matrix.N()); ++rind) {
        const int local_rowIdx = par_info.local_dune_to_local_hypre[rind];

        // For owner_first=true: skip ghost rows entirely
        // For owner_first=false: process all rows (owned + ghost)
        if (owner_first && local_rowIdx < 0) {
            continue;
        }

        const int row_start = host_row_ptrs[rind];
        const int row_end = host_row_ptrs[rind + 1];
        const int num_cols = row_end - row_start;

        if (local_rowIdx >= 0) {
            // This is an owned row - record its metadata
            const int global_rowIdx = par_info.local_dune_to_global_hypre[rind];
            pattern.rows[local_rowIdx] = global_rowIdx;
            pattern.ncols[local_rowIdx] = num_cols;
        }

        // Add column indices for this row
        for (int col_idx = row_start; col_idx < row_end; ++col_idx) {
            const int colIdx = host_col_indices[col_idx];
            const int global_colIdx = par_info.local_dune_to_global_hypre[colIdx];
            assert(global_colIdx >= 0);
            pattern.cols[pos++] = global_colIdx;
        }
    }

    return pattern;
}
#endif // HYPRE_USING_CUDA || HYPRE_USING_HIP

/**
 * \brief Compute row indexes for HYPRE_IJMatrixSetValues2
 *
 * \param matrix Local matrix (only consulted when ghost rows leave gaps)
 * \param ncols Non-zero entries per owned row
 * \param local_dune_to_local_hypre Mapping from local Dune to local HYPRE indices
 * \param owner_first Whether owned rows form a contiguous block in the data
 * \return Starting position of each owned row's values in the data array
 */
template <typename MatrixType>
std::vector<HYPRE_Int>
computeRowIndexes(const MatrixType& matrix,
                  const std::vector<HYPRE_Int>& ncols,
                  const std::vector<int>& local_dune_to_local_hypre,
                  bool owner_first)
{
    if (owner_first) {
        // Simple contiguous case: prefix sum of ncols
        std::vector<HYPRE_Int> row_indexes(ncols.size());
        if (!row_indexes.empty()) {
            row_indexes[0] = 0;
            for (std::size_t i = 1; i < ncols.size(); ++i) {
                row_indexes[i] = row_indexes[i - 1] + ncols[i - 1];
            }
        }
        return row_indexes;
    } else {
        // We need to compute the row indexes with mapping, since ghost rows leave gaps in the data
#if HYPRE_USING_CUDA || HYPRE_USING_HIP
        if constexpr (is_gpu_type<MatrixType>::value) {
            return computeRowIndexesWithMappingGpu(matrix, local_dune_to_local_hypre);
        } else
#endif // HYPRE_USING_CUDA || HYPRE_USING_HIP
        {
            return computeRowIndexesWithMappingCpu(matrix, local_dune_to_local_hypre);
        }
    }
}
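
// Worked example (illustrative): with owner_first = true and ncols = [2, 3, 2],
// the prefix sum yields row_indexes = [0, 2, 5]; row i's values start at
// offset row_indexes[i] in the values array handed to HYPRE_IJMatrixSetValues2.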

/**
 * \brief Compute row indexes with Dune-to-HYPRE mapping (CPU matrix)
 *
 * Used when owned and ghost rows are interleaved: each owned row's starting
 * position is recorded relative to the FULL matrix data, so ghost rows leave
 * gaps that the owned-row indexing skips over.
 */
template <typename MatrixType>
std::vector<HYPRE_Int>
computeRowIndexesWithMappingCpu(const MatrixType& matrix, const std::vector<int>& local_dune_to_local_hypre)
{
    const int N = std::count_if(
        local_dune_to_local_hypre.begin(), local_dune_to_local_hypre.end(), [](int val) { return val >= 0; });
    std::vector<HYPRE_Int> row_indexes(N);
    int data_position = 0; // Current position in the FULL (including ghost) matrix data
    // Manually compute row starting positions by iterating through all matrix rows
    for (auto row = matrix.begin(); row != matrix.end(); ++row) {
        const int dune_row_idx = row.index();
        const int hypre_row_idx = local_dune_to_local_hypre[dune_row_idx];

        if (hypre_row_idx >= 0) {
            // This is an owned row - record where its data starts in the FULL matrix
            // Use hypre_row_idx as index (maps to Hypre ordering up to N_owned)
            row_indexes[hypre_row_idx] = data_position;
        }
        // Always advance the position (also over ghost rows - this creates the
        // gaps that owned-only access skips)
        data_position += row->size();
    }
    return row_indexes;
}
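
// Worked example (illustrative): local rows [owned, ghost, owned] with row
// sizes [2, 3, 2] yield row_indexes = [0, 5]; the ghost row's 3 entries form
// the gap between the two owned rows' starting positions in the full data array.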

#if HYPRE_USING_CUDA || HYPRE_USING_HIP
/**
 * \brief Compute row indexes with Dune-to-HYPRE mapping (GPU matrix)
 *
 * GPU analogue of computeRowIndexesWithMappingCpu(); uses the pre-computed
 * BSR row pointers instead of iterating over matrix rows.
 */
template <typename T>
std::vector<HYPRE_Int>
computeRowIndexesWithMappingGpu(const GpuSparseMatrix<T>& gpu_matrix, const std::vector<int>& local_dune_to_local_hypre)
{
    const int N = std::count_if(
        local_dune_to_local_hypre.begin(), local_dune_to_local_hypre.end(), [](int val) { return val >= 0; });
    std::vector<HYPRE_Int> row_indexes(N);

    // Use the pre-computed BSR row pointers (they already contain row starting positions)
    auto host_row_ptrs = gpu_matrix.getRowIndices().asStdVector();

    // Map each owned Hypre row to its starting position in the FULL (including ghost) GPU matrix
    for (int dune_row_idx = 0; dune_row_idx < static_cast<int>(gpu_matrix.N()); ++dune_row_idx) {
        const int hypre_row_idx = local_dune_to_local_hypre[dune_row_idx];

        if (hypre_row_idx >= 0) {
            // This is an owned row - record where its data starts in the FULL GPU matrix
            // Use hypre_row_idx as index (maps to Hypre ordering up to N_owned)
            row_indexes[hypre_row_idx] = host_row_ptrs[dune_row_idx];
        }
        // Non-owned rows create natural gaps in the indexing
    }
    return row_indexes;
}
#endif // HYPRE_USING_CUDA || HYPRE_USING_HIP

} // namespace Opm::gpuistl::HypreInterface

#endif // OPM_HYPRE_SETUP_HPP