Opm Namespace Reference

Holds the implementation of the CpGrid as a pimpl (pointer to implementation). More...

Namespaces

namespace  Impl
 
namespace  time
 
namespace  UgGridHelpers
 

Classes

class  CellQuadrature
 
class  ElementChunks
 
class  FaceQuadrature
 
class  GraphOfGrid
 A class storing a graph representation of the grid. More...
 
class  GridManager
 
struct  iterator_range
 
struct  iterator_range_pod
 
class  LevelCartesianIndexMapper
 
class  LevelCartesianIndexMapper< Dune::CpGrid >
 
class  LevelCartesianIndexMapper< Dune::PolyhedralGrid< dim, dimworld, coord_t > >
 
class  LookUpCartesianData
 
struct  LookUpCellCentroid
 
class  LookUpData
 
class  MinpvProcessor
 Transform a corner-point grid ZCORN field to account for MINPV processing. More...
 
struct  mutable_iterator_range
 
class  RegionMapping
 
class  SparseTable
 
class  VariableSizeCommunicator
 A buffered communicator where the amount of data sent does not have to be known a priori. More...
 
class  VelocityInterpolationConstant
 
class  VelocityInterpolationECVI
 
class  VelocityInterpolationInterface
 Abstract interface for velocity interpolation method classes. More...
 
class  WachspressCoord
 

Functions

void extractColumn (const UnstructuredGrid &grid, std::vector< std::vector< int > > &columns)
 
template<class T , class A , class C >
std::pair< std::vector< T, A >, std::vector< int > > allGatherv (const std::vector< T, A > &input, const C &comm)
 Gathers vectors from all processes on all processes. More...
 
template<class T , class A , class C >
std::pair< std::vector< T, A >, std::vector< int > > gatherv (const std::vector< T, A > &input, const C &comm, int root)
 Gathers vectors from all processes on a root process. More...
 
std::pair< std::unordered_map< int, int >, std::vector< std::array< int, 3 > > > lgrIJK (const Dune::CpGrid &grid, const std::string &lgr_name)
 Retrieves Cartesian indices for a specified Local Grid Refinement (LGR) level in a Dune::CpGrid. More...
 
std::pair< std::vector< double >, std::vector< double > > lgrCOORDandZCORN (const Dune::CpGrid &grid, int level, const std::unordered_map< int, int > &lgrCartesianIdxToCellIdx, const std::vector< std::array< int, 3 > > &lgrIJK)
 Extracts the COORD and ZCORN values for the LGR (Local Grid Refinement) block. More...
 
void setPillarCoordinates (int i, int j, int nx, int topCorner, int bottomCorner, int positionIdx, const Dune::cpgrid::Entity< 0 > &topElem, const Dune::cpgrid::Entity< 0 > &bottomElem, std::vector< double > &lgrCOORD)
 Sets the coordinates for a pillar. More...
 
void processPillars (int i, int j, int nx, const Dune::cpgrid::Entity< 0 > &topElem, const Dune::cpgrid::Entity< 0 > &bottomElem, std::vector< double > &lgrCOORD)
 Processes and sets the coordinates for all four pillars of a given "(i,j) column of cells". More...
 
int getGraphOfGridNumVertices (void *pGraph, int *err)
 callback function for ZOLTAN_NUM_OBJ_FN More...
 
void getGraphOfGridVerticesList (void *pGraph, int dimGlobalID, int dimLocalID, ZOLTAN_ID_PTR gIDs, ZOLTAN_ID_PTR lIDs, int weightDim, float *objWeights, int *err)
 callback function for ZOLTAN_OBJ_LIST_FN More...
 
void getGraphOfGridNumEdges (void *pGraph, int dimGlobalID, int dimLocalID, int numCells, ZOLTAN_ID_PTR gIDs, ZOLTAN_ID_PTR lIDs, int *numEdges, int *err)
 callback function for ZOLTAN_NUM_EDGES_MULTI_FN More...
 
void getGraphOfGridEdgeList (void *pGraph, int dimGlobalID, int dimLocalID, int numCells, ZOLTAN_ID_PTR gIDs, ZOLTAN_ID_PTR lIDs, int *numEdges, ZOLTAN_ID_PTR nborGIDs, int *nborProc, int weightDim, float *edgeWeights, int *err)
 callback function for ZOLTAN_EDGE_LIST_MULTI_FN More...
 
template<typename Zoltan_Struct >
void setGraphOfGridZoltanGraphFunctions (Zoltan_Struct *zz, GraphOfGrid< Dune::CpGrid > &gog, bool pretendNull)
 Register callback functions to Zoltan. More...
 
void addFutureConnectionWells (GraphOfGrid< Dune::CpGrid > &gog, const std::unordered_map< std::string, std::set< int > > &wells, bool checkWellIntersections=true)
 Adds wells to the GraphOfGrid. More...
 
void addWellConnections (GraphOfGrid< Dune::CpGrid > &gog, const Dune::cpgrid::WellConnections &wells, bool checkWellIntersections=true)
 Add WellConnections to the GraphOfGrid. More...
 
void extendGIDtoRank (const GraphOfGrid< Dune::CpGrid > &gog, std::vector< int > &gIDtoRank, const int &root=-1)
 Correct gIDtoRank's data about well cells. More...
 
void extendAndSortExportAndImportLists (const GraphOfGrid< Dune::CpGrid > &gog, const Dune::cpgrid::CpGridDataTraits::Communication &cc, int root, std::vector< std::tuple< int, int, char > > &exportList, std::vector< std::tuple< int, int, char, int > > &importList, const std::vector< int > &gIDtoRank={})
 Add well cells' global IDs to the root's export and others' import list. More...
 
std::vector< int > getWellRanks (const std::vector< int > &gIDtoRank, const Dune::cpgrid::WellConnections &wellConnections)
 Find to which ranks wells were assigned. More...
 
std::vector< std::pair< std::string, bool > > wellsOnThisRank (const std::vector< Dune::cpgrid::OpmWellType > &wells, const std::vector< int > &wellRanks, const Dune::cpgrid::CpGridDataTraits::Communication &cc, int root)
 Get rank-specific information about which wells are present. More...
 
template<class Id >
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > > > makeImportAndExportLists (const GraphOfGrid< Dune::CpGrid > &gog, const Dune::Communication< MPI_Comm > &cc, const std::vector< Dune::cpgrid::OpmWellType > *wells, const Dune::cpgrid::WellConnections &wellConnections, int root, int numExport, int numImport, const Id *exportLocalGids, const Id *exportGlobalGids, const int *exportToPart, const Id *importGlobalGids, int level)
 Transform Zoltan output into tuples. More...
 
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, Dune::cpgrid::WellConnections > zoltanPartitioningWithGraphOfGrid (const Dune::CpGrid &grid, const std::vector< Dune::cpgrid::OpmWellType > *wells, const std::unordered_map< std::string, std::set< int > > &possibleFutureConnections, const double *transmissibilities, const Dune::cpgrid::CpGridDataTraits::Communication &cc, Dune::EdgeWeightMethod edgeWeightMethod, int root, const double zoltanImbalanceTol, bool allowDistributedWells, const std::map< std::string, std::string > &params, int level)
 Call Zoltan partitioner on GraphOfGrid. More...
 
std::vector< std::vector< int > > makeExportListsFromGIDtoRank (const std::vector< int > &gIDtoRank, int ccsize)
 Make complete export lists from a vector holding destination rank for each global ID. More...
 
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, Dune::cpgrid::WellConnections > zoltanSerialPartitioningWithGraphOfGrid (const Dune::CpGrid &grid, const std::vector< Dune::cpgrid::OpmWellType > *wells, const std::unordered_map< std::string, std::set< int > > &possibleFutureConnections, const double *transmissibilities, const Dune::cpgrid::CpGridDataTraits::Communication &cc, Dune::EdgeWeightMethod edgeWeightMethod, int root, const double zoltanImbalanceTol, bool allowDistributedWells, const std::map< std::string, std::string > &params)
 Call serial Zoltan partitioner on GraphOfGrid. More...
 
SparseTable< int > cellNeighboursAcrossVertices (const UnstructuredGrid &grid)
 
void orderCounterClockwise (const UnstructuredGrid &grid, SparseTable< int > &nb)
 
std::unordered_map< int, int > cartesianToCompressed (const int num_cells, const int *global_cell)
 
std::vector< int > compressedToCartesian (const int num_cells, const int *global_cell)
 
template<class Range >
auto createChunkIterators (const Range &r, const std::size_t num_elem, const std::size_t num_chunks)
 
template<class Range >
auto createThreadIterators (const Range &r, const std::size_t num_elem, const std::size_t num_threads, const std::size_t max_chunk_size, int &chunk_size)
 
template<class GridView >
auto createThreadIterators (const GridView &gv, const std::size_t num_threads, const std::size_t max_chunk_size, int &chunk_size)
 

Detailed Description

Holds the implementation of the CpGrid as a pimpl (pointer to implementation).

Author
Markus Blatt (markus@dr-blatt.de), Atgeirr F Rasmussen (atgeirr@sintef.no), Bård Skaflestad (bard.skaflestad@sintef.no)

Function Documentation

◆ addFutureConnectionWells()

void Opm::addFutureConnectionWells ( GraphOfGrid< Dune::CpGrid > &  gog,
const std::unordered_map< std::string, std::set< int > > &  wells,
bool  checkWellIntersections = true 
)

Adds wells to the GraphOfGrid.

Translates the wells' Cartesian IDs to the global IDs used in the graph. Adding a well contracts its vertices into one vertex.

checkWellIntersections==true makes the algorithm check whether wells intersect and whether their cell IDs are present in the graph. Setting it to false makes the algorithm faster but leaves the user responsible for keeping the wells disjoint.
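
For illustration, a minimal sketch of a call, assuming hypothetical well names and Cartesian cell IDs and that the opm-grid header declaring GraphOfGrid and addFutureConnectionWells is included:

#include <set>
#include <string>
#include <unordered_map>
// opm-grid header declaring Opm::GraphOfGrid and Opm::addFutureConnectionWells assumed included

void sketchAddFutureWells(Opm::GraphOfGrid<Dune::CpGrid>& gog)
{
    // Hypothetical wells: name -> set of the well's Cartesian cell IDs.
    const std::unordered_map<std::string, std::set<int>> wells{
        {"INJ-1",  {12, 45, 78}},
        {"PROD-2", {101, 134}}
    };
    // The default checkWellIntersections == true verifies that the wells are
    // disjoint and that their cell IDs are present in the graph.
    Opm::addFutureConnectionWells(gog, wells);
}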

◆ addWellConnections()

void Opm::addWellConnections ( GraphOfGrid< Dune::CpGrid > &  gog,
const Dune::cpgrid::WellConnections &  wells,
bool  checkWellIntersections = true 
)

Add WellConnections to the GraphOfGrid.

checkWellIntersections==true makes the algorithm check whether wells intersect and whether their cell IDs are present in the graph. Setting it to false makes the algorithm faster but leaves the user responsible for keeping the wells disjoint.

◆ allGatherv()

template<class T , class A , class C >
std::pair< std::vector< T, A >, std::vector< int > > Opm::allGatherv ( const std::vector< T, A > &  input,
const C &  comm 
)

Gathers vectors from all processes on all processes.

In parallel this will call MPI_Allgatherv. Has to be called on all ranks.

Parameters
input: The input vector to gather from this rank.
comm: The Dune::Communication object.
Returns
A pair of a vector with all the values gathered (first the ones from rank 0, then the ones from rank 1, ...) and a vector with the offsets of the first value from each rank (values[offset[rank]] will be the first value from that rank). The offsets vector is one larger than the number of processes, and its last entry is the size of the gathered values vector.
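
For illustration, a minimal sketch of gathering a per-rank vector and addressing one rank's contribution, assuming the opm-grid header declaring allGatherv is included:

#include <vector>
// opm-grid header declaring Opm::allGatherv assumed included

template <class Communication>
std::vector<int> gatherAllIds(const std::vector<int>& localIds, const Communication& comm)
{
    // values concatenates every rank's vector (rank 0 first, then rank 1, ...);
    // offsets[r] is the index of rank r's first entry in values.
    const auto [values, offsets] = Opm::allGatherv(localIds, comm);
    const int r = comm.rank();
    // Rank r's contribution spans values[offsets[r]] .. values[offsets[r + 1] - 1].
    return std::vector<int>(values.begin() + offsets[r], values.begin() + offsets[r + 1]);
}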

◆ cartesianToCompressed()

std::unordered_map< int, int > Opm::cartesianToCompressed ( const int  num_cells,
const int *  global_cell 
)

◆ cellNeighboursAcrossVertices()

SparseTable< int > Opm::cellNeighboursAcrossVertices ( const UnstructuredGrid &  grid)

For each cell, find indices of all cells sharing a vertex with it.

Parameters
[in] grid: A grid object.
Returns
A table of neighbour cell-indices by cell.

◆ compressedToCartesian()

std::vector< int > Opm::compressedToCartesian ( const int  num_cells,
const int *  global_cell 
)

◆ createChunkIterators()

template<class Range >
auto Opm::createChunkIterators ( const Range &  r,
const std::size_t  num_elem,
const std::size_t  num_chunks 
)

Create a vector containing a spread of iterators into the elements of the range, to facilitate for example OpenMP parallelization of iterations over the elements. While the original range may have only forward iterators to the individual elements, the returned vector will provide random access to the chunks.

Template Parameters
Range: Range of elements that supports (multipass) forward iteration.
Parameters
[in] r: Range to be iterated over.
[in] num_elem: The number of elements in r.
[in] num_chunks: The number of chunks to create.
Returns
A vector of iterators, with the range's begin and end iterators being the first and last ones, and filling in iterators in between such that the distance from one to the next is num_elem/num_chunks, except for possibly the last interval(s). The size of the vector will be num_chunks + 1.

Referenced by Opm::ElementChunks< GridView, PartitionSet >::ElementChunks().
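
For illustration, a minimal sketch of pairing the returned iterators with an OpenMP loop, assuming the opm-grid header declaring createChunkIterators is included:

#include <cstddef>
// opm-grid header declaring Opm::createChunkIterators assumed included

template <class Range>
void applyInChunks(const Range& r, std::size_t num_elem, std::size_t num_chunks)
{
    // chunks holds num_chunks + 1 iterators; consecutive pairs delimit one chunk.
    const auto chunks = Opm::createChunkIterators(r, num_elem, num_chunks);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int c = 0; c < static_cast<int>(chunks.size()) - 1; ++c) {
        for (auto it = chunks[c]; it != chunks[c + 1]; ++it) {
            // ... per-element work on *it ...
        }
    }
}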

◆ createThreadIterators() [1/2]

template<class GridView >
auto Opm::createThreadIterators ( const GridView &  gv,
const std::size_t  num_threads,
const std::size_t  max_chunk_size,
int &  chunk_size 
)

Convenience function for using createThreadIterators() with the cells in a grid view.

References createThreadIterators().
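
For illustration, a minimal sketch of using this convenience overload in an OpenMP cell loop, assuming a hypothetical max_chunk_size of 1000 and that the opm-grid header declaring createThreadIterators is included:

#include <cstddef>
// opm-grid header declaring Opm::createThreadIterators assumed included

template <class GridView>
void forEachCell(const GridView& gv, std::size_t num_threads)
{
    int chunk_size = 0;
    const auto its = Opm::createThreadIterators(gv, num_threads,
                                                /*max_chunk_size=*/1000, chunk_size);
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
    for (int c = 0; c < static_cast<int>(its.size()) - 1; ++c) {
        for (auto it = its[c]; it != its[c + 1]; ++it) {
            // const auto& element = *it;
            // ... per-cell work ...
        }
    }
}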

◆ createThreadIterators() [2/2]

template<class Range >
auto Opm::createThreadIterators ( const Range &  r,
const std::size_t  num_elem,
const std::size_t  num_threads,
const std::size_t  max_chunk_size,
int &  chunk_size 
)

Create a vector containing a spread of iterators into the elements of the range, to facilitate for example OpenMP parallelization of iterations over the elements. While the original range may have only forward iterators to the individual elements, the returned vector will provide random access to the chunks.

Template Parameters
Range: Range of elements that supports (multipass) forward iteration.
Parameters
[in] r: Range to be iterated over.
[in] num_elem: The number of elements in r.
[in] num_threads: The number of threads to target.
[in] max_chunk_size: The maximum allowed chunk size.
[out] chunk_size: The chunk size found by the algorithm.
Returns
A vector of iterators, with the range's begin and end iterators being the first and last ones, and filling in iterators in between such that the distance from one to the next is chunk_size, except for possibly the last interval. If num_threads is 1, there will only be the begin and end iterators and no chunks in between.

Referenced by createThreadIterators().

◆ extendAndSortExportAndImportLists()

void Opm::extendAndSortExportAndImportLists ( const GraphOfGrid< Dune::CpGrid > &  gog,
const Dune::cpgrid::CpGridDataTraits::Communication &  cc,
int  root,
std::vector< std::tuple< int, int, char > > &  exportList,
std::vector< std::tuple< int, int, char, int > > &  importList,
const std::vector< int > &  gIDtoRank = {} 
)

Add well cells' global IDs to the root's export and others' import list.

The output of the partitioning is missing the vertices that were contracted. This function fills in the omitted gIDs and gives them the properties (such as process number and ownership) of their representative cell (the well ID). Root is the only rank with information about wells and communicates the necessary information to the other ranks. On the root, the import list has already been extended with all cells on the current rank.

◆ extendGIDtoRank()

void Opm::extendGIDtoRank ( const GraphOfGrid< Dune::CpGrid > &  gog,
std::vector< int > &  gIDtoRank,
const int &  root = -1 
)

Correct gIDtoRank's data about well cells.

gIDtoRank's entries come from the Zoltan partitioner's export list, which does not contain all well cells. The default value is the root's rank. The parameter root allows skipping wells that are already correct.

◆ extractColumn()

void Opm::extractColumn ( const UnstructuredGrid &  grid,
std::vector< std::vector< int > > &  columns 
)

Extract each column of the grid.

Note
Assumes the pillars of the grid are all vertically aligned.
Parameters
grid: The grid from which to extract the columns.
columns: Will, for each (i, j) where (i, j) represents a non-empty column, contain the cell indices contained in the column centered at (i, j) in the second variable, and i+jN in the first variable.
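
For illustration, a minimal sketch of a call, assuming the opm-grid header declaring extractColumn is included:

#include <vector>
// opm-grid header declaring Opm::extractColumn assumed included

void sketchExtractColumns(const UnstructuredGrid& grid)
{
    std::vector<std::vector<int>> columns;
    Opm::extractColumn(grid, columns);
    for (const auto& column : columns) {
        // Each entry holds the cell indices of one non-empty (i, j) column.
        (void)column;
    }
}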

◆ gatherv()

template<class T , class A , class C >
std::pair< std::vector< T, A >, std::vector< int > > Opm::gatherv ( const std::vector< T, A > &  input,
const C &  comm,
int  root 
)

Gathers vectors from all processes on a root process.

In parallel this will call MPI_Gatherv. Has to be called on all ranks.

Parameters
input: The input vector to gather from this rank.
comm: The Dune::Communication object.
root: The rank of the process on which to gather the values.
Returns
On non-root ranks, a pair of empty vectors. On the root rank, a pair of a vector with all the values gathered (first the ones from rank 0, then the ones from rank 1, ...) and a vector with the offsets of the first value from each rank (values[offset[rank]] will be the first value from that rank). The offsets vector is one larger than the number of processes, and its last entry is the size of the gathered values vector.

◆ getGraphOfGridEdgeList()

void Opm::getGraphOfGridEdgeList ( void *  pGraph,
int  dimGlobalID,
int  dimLocalID,
int  numCells,
ZOLTAN_ID_PTR  gIDs,
ZOLTAN_ID_PTR  lIDs,
int *  numEdges,
ZOLTAN_ID_PTR  nborGIDs,
int *  nborProc,
int  weightDim,
float *  edgeWeights,
int *  err 
)

callback function for ZOLTAN_EDGE_LIST_MULTI_FN

Takes the list of global IDs (gIDs) and consecutively fills: the vector nborGIDs with the lists of neighbors (all in one vector), the vector nborProc with the neighbors' process numbers, and the vector edgeWeights with the edge weights. The vector numEdges provides the number of edges for each gID.

◆ getGraphOfGridNumEdges()

void Opm::getGraphOfGridNumEdges ( void *  pGraph,
int  dimGlobalID,
int  dimLocalID,
int  numCells,
ZOLTAN_ID_PTR  gIDs,
ZOLTAN_ID_PTR  lIDs,
int *  numEdges,
int *  err 
)

callback function for ZOLTAN_NUM_EDGES_MULTI_FN

Takes the list of global IDs (gIDs) and consecutively fills the vector numEdges with the number of edges of each gID.

◆ getGraphOfGridNumVertices()

int Opm::getGraphOfGridNumVertices ( void *  pGraph,
int *  err 
)

callback function for ZOLTAN_NUM_OBJ_FN

returns the number of vertices in the graph

◆ getGraphOfGridVerticesList()

void Opm::getGraphOfGridVerticesList ( void *  pGraph,
int  dimGlobalID,
int  dimLocalID,
ZOLTAN_ID_PTR  gIDs,
ZOLTAN_ID_PTR  lIDs,
int  weightDim,
float *  objWeights,
int *  err 
)

callback function for ZOLTAN_OBJ_LIST_FN

fills the vector gIDs with vertex global IDs and the vector objWeights with their weights

◆ getWellRanks()

std::vector< int > Opm::getWellRanks ( const std::vector< int > &  gIDtoRank,
const Dune::cpgrid::WellConnections &  wellConnections
)

Find to which ranks wells were assigned.

Returns the vector of ranks; the ordering is given by wellConnections.

Parameters
gIDtoRank: Takes a global ID and returns its rank.
wellConnections: Holds the global IDs of the well cells.

◆ lgrCOORDandZCORN()

std::pair< std::vector< double >, std::vector< double > > Opm::lgrCOORDandZCORN ( const Dune::CpGrid &  grid,
int  level,
const std::unordered_map< int, int > &  lgrCartesianIdxToCellIdx,
const std::vector< std::array< int, 3 > > &  lgrIJK 
)

Extracts the COORD and ZCORN values for the LGR (Local Grid Refinement) block.

  • COORD: This retrieves a vector of std::array<double, 6>, where each element represents the coordinate values of a pillar in the LGR block. The values correspond to the COORD keyword, which defines the corner-point geometry of the grid.
  • ZCORN: It is initialized to inactive values, i.e. set to std::numeric_limits<double>::max(); the bottom and top ZCORN values for each cell in the LGR are then calculated based on its geometry. The active cell layers are determined and ZCORN values are assigned accordingly.

Special Cases:

  • If a pillar within the LGR block is "inactive", its COORD values are set to std::numeric_limits<double>::max() to indicate the inactive status.
  • If all pillars are "inactive", an exception is thrown.
Parameters
[in] grid: The Dune::CpGrid.
[in] level: The refinement level of the LGR block.
[in] lgrCartesianIdxToCellIdx: Mapping from LGR Cartesian indices to level cell indices.
[in] lgrIJK: The IJK indices corresponding to the LGR block's active cells.
Returns
A pair containing:
  • A std::vector<double> storing the coordinate values for each pillar.
  • A std::vector<double> storing the depth of each corner point of the LGR pillars.
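
For illustration, a minimal sketch that chains lgrIJK() and lgrCOORDandZCORN() for one LGR block, assuming a hypothetical LGR name and level and that the relevant opm-grid headers are included:

#include <string>
// opm-grid headers declaring Dune::CpGrid, Opm::lgrIJK and Opm::lgrCOORDandZCORN assumed included

void sketchLgrGeometry(const Dune::CpGrid& grid)
{
    const std::string lgr_name = "LGR1";  // hypothetical LGR name
    const int level = 1;                  // hypothetical refinement level of that LGR

    const auto [lgrCartesianIdxToCellIdx, lgrIJK] = Opm::lgrIJK(grid, lgr_name);
    const auto [lgrCOORD, lgrZCORN] =
        Opm::lgrCOORDandZCORN(grid, level, lgrCartesianIdxToCellIdx, lgrIJK);

    // lgrCOORD holds the pillar coordinates (COORD layout); lgrZCORN holds the
    // corner depths (ZCORN layout), with inactive entries left at
    // std::numeric_limits<double>::max().
}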

◆ lgrIJK()

std::pair< std::unordered_map< int, int >, std::vector< std::array< int, 3 > > > Opm::lgrIJK ( const Dune::CpGrid &  grid,
const std::string &  lgr_name 
)

Retrieves Cartesian indices for a specified Local Grid Refinement (LGR) level in a Dune::CpGrid.

This function extracts the mapping between active cell indices and Cartesian indices for a given LGR name. If the specified name is not found, an exception is thrown.

Parameters
[in] grid: The Dune::CpGrid.
[in] lgr_name: The name of the LGR whose indices are to be retrieved.
Returns
A pair containing:
  • A std::unordered_map<int, int> mapping Cartesian indices back to cell indices (handles inactive parent cells).
  • A std::vector<std::array<int, 3>> storing the (i, j, k) Cartesian coordinates for active cells.

◆ makeExportListsFromGIDtoRank()

std::vector< std::vector< int > > Opm::makeExportListsFromGIDtoRank ( const std::vector< int > &  gIDtoRank,
int  ccsize 
)

Make complete export lists from a vector holding destination rank for each global ID.

Intended for use on the root, as other ranks cannot construct gIDtoRank. The export lists include root's cells that stay on root.

Parameters
gIDtoRank: A vector indexed by global ID holding the rank to which that ID is exported.
ccsize: The number of ranks, i.e. cc.size().
Returns
A vector of vectors; element i holds the global IDs exported to rank i.
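
For illustration, a minimal sketch with hypothetical data, assuming the opm-grid header declaring makeExportListsFromGIDtoRank is included:

#include <vector>
// opm-grid header declaring Opm::makeExportListsFromGIDtoRank assumed included

void sketchExportLists()
{
    // Hypothetical destination ranks for six global IDs on three ranks.
    const std::vector<int> gIDtoRank{0, 1, 1, 2, 0, 2};
    const int ccsize = 3;  // number of ranks, e.g. cc.size()

    const auto exportLists = Opm::makeExportListsFromGIDtoRank(gIDtoRank, ccsize);
    // exportLists[1] now holds {1, 2}: the global IDs destined for rank 1;
    // exportLists[0] holds {0, 4}, i.e. root's cells that stay on root.
}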

◆ makeImportAndExportLists()

template<class Id >
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > > > Opm::makeImportAndExportLists ( const GraphOfGrid< Dune::CpGrid > &  gog,
const Dune::Communication< MPI_Comm > &  cc,
const std::vector< Dune::cpgrid::OpmWellType > *  wells,
const Dune::cpgrid::WellConnections &  wellConnections,
int  root,
int  numExport,
int  numImport,
const Id *  exportLocalGids,
const Id *  exportGlobalGids,
const int *  exportToPart,
const Id *  importGlobalGids,
int  level 
)

Transform Zoltan output into tuples.

Parameters
gog: GraphOfGrid; holds a reference to the CpGrid and knows how the well cells were contracted.
cc: Communication object.
wells: Used to extract well names.
wellConnections: Contains the wells' global IDs, ordered as wells.
root: Rank of the process executing the partitioning (usually 0).
numExport: Number of cells in the export list.
numImport: Number of cells in the import list.
exportLocalGids: Unused. Partitioning is performed on the root process, which has access to all cells.
exportGlobalGids: Zoltan output: global IDs of the exported cells.
exportToPart: Zoltan output: ranks to which the cells are exported.
importGlobalGids: Zoltan output: global IDs of the cells imported to this rank.
level: Integer representing the level grid to be partitioned. For the leaf grid view it is set to -1.
Returns
  • gIDtoRank: A vector indexed by global ID storing the rank of the cell.
  • parallel_wells: A vector of pairs of wells.name and a bool answering "Is wells.name on this rank?".
  • myExportList: Vector of cells to be moved from this rank.
  • myImportList: Vector of cells to be moved to this rank.

◆ orderCounterClockwise()

void Opm::orderCounterClockwise ( const UnstructuredGrid &  grid,
SparseTable< int > &  nb 
)

For each cell, order the (cell) neighbours counterclockwise.

Parameters
[in] grid: A 2d grid object.
[in,out] nb: A cell-cell neighbourhood table, such as from vertexNeighbours().
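
For illustration, a minimal sketch that builds the neighbourhood table with cellNeighboursAcrossVertices() and then orders it, assuming the relevant opm-grid headers are included:

// opm-grid headers declaring Opm::cellNeighboursAcrossVertices and Opm::orderCounterClockwise assumed included

void sketchOrderNeighbours(const UnstructuredGrid& grid)
{
    // For each cell, all cells sharing at least one vertex with it.
    auto nb = Opm::cellNeighboursAcrossVertices(grid);
    // Reorder each cell's neighbour list counterclockwise (2d grids).
    Opm::orderCounterClockwise(grid, nb);
}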

◆ processPillars()

void Opm::processPillars ( int  i,
int  j,
int  nx,
const Dune::cpgrid::Entity< 0 > &  topElem,
const Dune::cpgrid::Entity< 0 > &  bottomElem,
std::vector< double > &  lgrCOORD 
)

Processes and sets the coordinates for all four pillars of a given "(i,j) column of cells".

This function calls setPillarCoordinates for each of the four pillars (i,j), (i+1,j), (i,j+1), and (i+1,j+1), assigning the correct top and bottom coordinates from the given elements.

Parameters
[in] i: Column index of the current element.
[in] j: Row index of the current element.
[in] nx: Number of elements in the x-direction.
[in] topElem: Reference to the top element.
[in] bottomElem: Reference to the bottom element.
[out] lgrCOORD: Reference to the array storing the pillar coordinates.

◆ setGraphOfGridZoltanGraphFunctions()

template<typename Zoltan_Struct >
void Opm::setGraphOfGridZoltanGraphFunctions ( Zoltan_Struct *  zz,
GraphOfGrid< Dune::CpGrid > &  gog,
bool  pretendNull 
)

Register callback functions to Zoltan.
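
For illustration, a sketch of what registering the four callbacks above with the Zoltan C API typically looks like; setGraphOfGridZoltanGraphFunctions() bundles such a registration, so this is only an approximation that, for example, ignores the pretendNull argument. The opm-grid header declaring the callbacks is assumed to be included.

#include <zoltan.h>
// opm-grid header declaring Opm::GraphOfGrid and the callback functions assumed included

void sketchRegisterCallbacks(struct Zoltan_Struct* zz, Opm::GraphOfGrid<Dune::CpGrid>& gog)
{
    void* data = static_cast<void*>(&gog);  // handed back to every callback as pGraph
    Zoltan_Set_Num_Obj_Fn(zz, Opm::getGraphOfGridNumVertices, data);
    Zoltan_Set_Obj_List_Fn(zz, Opm::getGraphOfGridVerticesList, data);
    Zoltan_Set_Num_Edges_Multi_Fn(zz, Opm::getGraphOfGridNumEdges, data);
    Zoltan_Set_Edge_List_Multi_Fn(zz, Opm::getGraphOfGridEdgeList, data);
}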

◆ setPillarCoordinates()

void Opm::setPillarCoordinates ( int  i,
int  j,
int  nx,
int  topCorner,
int  bottomCorner,
int  positionIdx,
const Dune::cpgrid::Entity< 0 > &  topElem,
const Dune::cpgrid::Entity< 0 > &  bottomElem,
std::vector< double > &  lgrCOORD 
)

Sets the coordinates for a pillar.

This function calculates the pillar index based on the given (i, j) position and assigns the top and bottom coordinates from the corresponding element corners.

Parameters
[in] i: Column index of the pillar.
[in] j: Row index of the pillar.
[in] nx: Number of elements in the x-direction.
[in] topCorner: Index of the top element corner associated with the pillar.
[in] bottomCorner: Index of the bottom element corner associated with the pillar.
[in] positionIdx: Determines the correct pillar index (0-3).
[in] topElem: Reference to the top element.
[in] bottomElem: Reference to the bottom element.
[out] lgrCOORD: Reference to the array storing the pillar coordinates.

◆ wellsOnThisRank()

std::vector< std::pair< std::string, bool > > Opm::wellsOnThisRank ( const std::vector< Dune::cpgrid::OpmWellType > &  wells,
const std::vector< int > &  wellRanks,
const Dune::cpgrid::CpGridDataTraits::Communication &  cc,
int  root 
)

Get rank-specific information about which wells are present.

Parameters
wells: Vector of wells containing names (and other...).
wellRanks: Tells on which (single) rank each well is placed; the ordering in the vector is given by wells.
cc: The communication object.
root: Rank holding the information about the grid.
Returns
A vector of string-bool pairs holding the name of each well and whether it is situated on this process' rank.

This function only brings the information from wellRanks into the proper format for calling computeParallelWells.

◆ zoltanPartitioningWithGraphOfGrid()

std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, Dune::cpgrid::WellConnections > Opm::zoltanPartitioningWithGraphOfGrid ( const Dune::CpGrid &  grid,
const std::vector< Dune::cpgrid::OpmWellType > *  wells,
const std::unordered_map< std::string, std::set< int > > &  possibleFutureConnections,
const double *  transmissibilities,
const Dune::cpgrid::CpGridDataTraits::Communication &  cc,
Dune::EdgeWeightMethod  edgeWeightMethod,
int  root,
const double  zoltanImbalanceTol,
bool  allowDistributedWells,
const std::map< std::string, std::string > &  params,
int  level 
)

Call Zoltan partitioner on GraphOfGrid.

GraphOfGrid represents a well by one vertex, so wells cannot be spread over several processes.
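
For illustration, a minimal sketch of a call and of unpacking the result tuple, with hypothetical placeholder arguments (no wells, no transmissibilities, default Zoltan parameters) and the relevant opm-grid headers assumed to be included:

#include <map>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
// opm-grid headers declaring Dune::CpGrid, Opm::zoltanPartitioningWithGraphOfGrid, etc. assumed included

void sketchPartition(const Dune::CpGrid& grid,
                     const Dune::cpgrid::CpGridDataTraits::Communication& cc,
                     Dune::EdgeWeightMethod edgeWeightMethod)
{
    const std::vector<Dune::cpgrid::OpmWellType>* wells = nullptr;          // no wells in this sketch
    const std::unordered_map<std::string, std::set<int>> futureConnections; // none
    const double* transmissibilities = nullptr;                             // hypothetical: no edge-weight data
    const std::map<std::string, std::string> params;                        // default Zoltan parameters

    auto [gIDtoRank, parallelWells, exportList, importList, wellConnections] =
        Opm::zoltanPartitioningWithGraphOfGrid(grid, wells, futureConnections,
                                               transmissibilities, cc, edgeWeightMethod,
                                               /*root=*/0,
                                               /*zoltanImbalanceTol=*/1.1,  // hypothetical tolerance
                                               /*allowDistributedWells=*/false,
                                               params,
                                               /*level=*/-1);               // -1: partition the leaf grid view
}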

◆ zoltanSerialPartitioningWithGraphOfGrid()

std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, Dune::cpgrid::WellConnections > Opm::zoltanSerialPartitioningWithGraphOfGrid ( const Dune::CpGrid &  grid,
const std::vector< Dune::cpgrid::OpmWellType > *  wells,
const std::unordered_map< std::string, std::set< int > > &  possibleFutureConnections,
const double *  transmissibilities,
const Dune::cpgrid::CpGridDataTraits::Communication &  cc,
Dune::EdgeWeightMethod  edgeWeightMethod,
int  root,
const double  zoltanImbalanceTol,
bool  allowDistributedWells,
const std::map< std::string, std::string > &  params 
)

Call serial Zoltan partitioner on GraphOfGrid.

GraphOfGrid represents a well by one vertex, so wells cannot be spread over several processes.