std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > createListsFromParts(const CpGrid &grid, const std::vector< cpgrid::OpmWellType > *wells, const std::unordered_map< std::string, std::set< int > > &possibleFutureConnections, const double *transmissibilities, const std::vector< int > &parts, bool allowDistributedWells, std::shared_ptr< cpgrid::CombinedGridWellGraph > gridAndWells=nullptr, int level=-1)
    Creates lists as Zoltan would return for vanilla / user-specified partitioning.

std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > vanillaPartitionGridOnRoot(const CpGrid &grid, const std::vector< cpgrid::OpmWellType > *wells, const std::unordered_map< std::string, std::set< int > > &possibleFutureConnections, const double *transmissibilities, bool allowDistributedWells)
    Creates a vanilla partitioning without a real loadbalancer.

std::vector< std::vector< int > > perforatingWellIndicesOnProc(const std::vector< int > &parts, const std::vector< Dune::cpgrid::OpmWellType > &wells, const std::unordered_map< std::string, std::set< int > > &possibleFutureConnections, const CpGrid &cpgrid)
    Determines the wells that perforate cells on each process.

std::vector< std::vector< int > > postProcessPartitioningForWells(std::vector< int > &parts, std::function< int(int)> gid, const std::vector< OpmWellType > &wells, const WellConnections &well_connections, const std::vector< std::set< int > > &wellGraph, std::vector< std::tuple< int, int, char > > &exportList, std::vector< std::tuple< int, int, char, int > > &importList, const Communication< MPI_Comm > &cc)
    Computes wells assigned to processes.

std::vector< std::pair< std::string, bool > > computeParallelWells(const std::vector< std::vector< int > > &wells_on_proc, const std::vector< OpmWellType > &wells, const Communication< MPI_Comm > &cc, int root)
    Computes whether wells are perforating cells on this process.

int getNumberOfEdgesForSpecificCellForGridWithWells(const CombinedGridWellGraph &graph, int localCellId)
    Get the number of edges of the graph of the grid and the wells for one cell.

template<typename ID, typename weightType>
void fillNBORGIDAndWeightsForSpecificCellAndIncrementNeighborCounterForGridWithWells(const CombinedGridWellGraph &graph, const int localCellId, ID globalID, int &neighborCounter, ID &nborGID, weightType *ewgts)
    Get the list of edges and weights for one cell of a grid with wells.

template<class Id>
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > makeImportAndExportLists(const Dune::CpGrid &cpgrid, const Dune::Communication< MPI_Comm > &cc, const std::vector< Dune::cpgrid::OpmWellType > *wells, const std::unordered_map< std::string, std::set< int > > &possibleFutureConnections, const Dune::cpgrid::CombinedGridWellGraph *gridAndWells, int root, int numExport, int numImport, const Id *exportLocalGids, const Id *exportGlobalGids, const int *exportToPart, const Id *importGlobalGids, bool allowDistributedWells=false)
    Creates all the information needed from the partitioning.

template<class Id>
std::tuple< int, std::vector< Id > > scatterExportInformation(int numExport, const Id *exportGlobalGids, const int *exportToPart, int root, const Dune::Communication< MPI_Comm > &cc)

template<int mydim, int cdim>
auto referenceElement(const cpgrid::Geometry< mydim, cdim > &geo) -> decltype(referenceElement< double, mydim >(geo.type()))
Copyright 2019 Equinor AS.
This file is part of The Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
OPM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with OPM. If not, see http://www.gnu.org/licenses/.
◆ computeParallelWells()
std::vector< std::pair< std::string, bool > > Dune::cpgrid::computeParallelWells(
        const std::vector< std::vector< int > > & wells_on_proc,
        const std::vector< OpmWellType > & wells,
        const Communication< MPI_Comm > & cc,
        int root)
Computes whether wells are perforating cells on this process.
- Parameters
    wells_on_proc | well indices assigned to each process
    wells | The eclipse information about the wells.
    cc | The communicator
    root | The rank of the process that has the complete partitioning information.
- Returns
- Vector of pairs of well name and a boolean indicating whether the well with this name perforates cells here. Sorted by well name!
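A minimal usage sketch, not part of the original documentation; the reportLocalWells wrapper is hypothetical, and all arguments are assumed to be set up as described in the parameter list above:

    #include <iostream>

    void reportLocalWells(const std::vector< std::vector< int > >& wells_on_proc,
                          const std::vector< Dune::cpgrid::OpmWellType >& wells,
                          const Dune::Communication< MPI_Comm >& cc,
                          int root)
    {
        // The result is sorted by well name; the bool says whether the named
        // well perforates cells owned by this process.
        const auto parallel_wells =
            Dune::cpgrid::computeParallelWells(wells_on_proc, wells, cc, root);

        for (const auto& [name, perforates_here] : parallel_wells) {
            if (perforates_here)
                std::cout << "Well " << name << " is active on rank "
                          << cc.rank() << '\n';
        }
    }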
◆ createListsFromParts()
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > Dune::cpgrid::createListsFromParts(
        const CpGrid & grid,
        const std::vector< cpgrid::OpmWellType > * wells,
        const std::unordered_map< std::string, std::set< int > > & possibleFutureConnections,
        const double * transmissibilities,
        const std::vector< int > & parts,
        bool allowDistributedWells,
        std::shared_ptr< cpgrid::CombinedGridWellGraph > gridAndWells = nullptr,
        int level = -1)
Creates lists as Zoltan would return for vanilla / user-specified partitioning.
- Parameters
    grid | The grid
    wells | Pointer to vector with all possible wells (all perforations) of the problem. nullptr is possible.
    possibleFutureConnections | Possible future connections of wells that might get added through an ACTIONX. The grid will then be partitioned such that these connections are on the same partition. If NULL, they will be neglected.
    transmissibilities | C-array with transmissibilities or nullptr.
    parts | The partitioning information. Contains for each cell the partition number (zero-based, consecutive).
    allowDistributedWells | Whether wells might be distributed to the interior of multiple processes.
    gridAndWells | Graph representing grid and wells (optional; defaults to nullptr).
    level | Indicates the level grid that is partitioned. Default -1 for the leaf grid view.
- Returns
    A tuple consisting of
    - a vector that contains for each local cell of the original grid the number of the process that owns it after repartitioning,
    - a vector of pairs of a well name and a boolean indicating whether the well perforates cells local to this process,
    - a vector containing information for each exported cell (global id of the cell, process id to send to, attribute there),
    - a vector containing information for each imported cell (global index, process id that sends, attribute here, local index here), and
    - a WellConnections object containing information about the well connections (if the wells argument was not null and this is the root rank, this will contain the connections in the form of global indices).
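A sketch of applying a user-specified partitioning and unpacking the five tuple entries; the applyUserPartition wrapper and variable names are illustrative, not from the library:

    void applyUserPartition(const Dune::CpGrid& grid,
                            const std::vector< Dune::cpgrid::OpmWellType >* wells,
                            const std::unordered_map< std::string, std::set< int > >& futureConns,
                            const double* trans,
                            const std::vector< int >& parts)
    {
        auto [cellPart, parallelWells, exportList, importList, wellConns] =
            Dune::cpgrid::createListsFromParts(grid, wells, futureConns, trans,
                                               parts, /*allowDistributedWells=*/false);

        // cellPart[c] now holds the owner rank of local cell c after
        // repartitioning; the export/import lists are as documented above.
    }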
◆ fillNBORGIDAndWeightsForSpecificCellAndIncrementNeighborCounterForGridWithWells()
template<typename ID , typename weightType >
void Dune::cpgrid::fillNBORGIDAndWeightsForSpecificCellAndIncrementNeighborCounterForGridWithWells(
        const CombinedGridWellGraph & graph,
        const int localCellId,
        ID globalID,
        int & neighborCounter,
        ID & nborGID,
        weightType * ewgts)
Get the list of edges and weights for one cell of a grid with wells.
◆ getNumberOfEdgesForSpecificCellForGridWithWells()
int Dune::cpgrid::getNumberOfEdgesForSpecificCellForGridWithWells(
        const CombinedGridWellGraph & graph,
        int localCellId)
Get the number of edges of the graph of the grid and the wells for one cell.
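A small sketch using this function to size the neighbor and weight buffers that fillNBORGIDAndWeightsForSpecificCellAndIncrementNeighborCounterForGridWithWells subsequently fills; the countAllEdges helper is hypothetical:

    // Hypothetical helper: total number of graph edges over all cells, e.g.
    // to size the nborGID/ewgts buffers of a Zoltan edge-list callback.
    int countAllEdges(const Dune::cpgrid::CombinedGridWellGraph& graph, int numCells)
    {
        int total = 0;
        for (int cell = 0; cell < numCells; ++cell)
            total += Dune::cpgrid::getNumberOfEdgesForSpecificCellForGridWithWells(graph, cell);
        return total;
    }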
◆ makeImportAndExportLists()
template<class Id >
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > Dune::cpgrid::makeImportAndExportLists(
        const Dune::CpGrid & cpgrid,
        const Dune::Communication< MPI_Comm > & cc,
        const std::vector< Dune::cpgrid::OpmWellType > * wells,
        const std::unordered_map< std::string, std::set< int > > & possibleFutureConnections,
        const Dune::cpgrid::CombinedGridWellGraph * gridAndWells,
        int root,
        int numExport,
        int numImport,
        const Id * exportLocalGids,
        const Id * exportGlobalGids,
        const int * exportToPart,
        const Id * importGlobalGids,
        bool allowDistributedWells = false)
Creates all the information needed from the partitioning.
Note that if wells is not null and allowDistributedWells is false, then the partitioning is postprocessed such that all cells that are perforated by a well are part of the interior of exactly one process. Otherwise they might be spread over multiple processes.
- Parameters
    cpgrid | The grid
    cc | The MPI communicator used for the partitioning.
    wells | Pointer to vector with all possible wells (all perforations) of the problem. nullptr is possible.
    possibleFutureConnections | Possible future connections of wells that might get added through an ACTIONX. The grid will then be partitioned such that these connections are on the same partition. If NULL, they will be neglected.
    gridAndWells | Graph representing grid and wells (must match the other parameters!)
    root | The rank that has the whole grid before loadbalancing.
    numExport | The number of exported cells (created e.g. by Zoltan).
    numImport | The number of imported cells (created e.g. by Zoltan).
    exportLocalGids | C-array of the local ids of exported cells (created e.g. by Zoltan).
    exportGlobalGids | C-array of the global ids of exported cells (created e.g. by Zoltan).
    exportToPart | C-array with the target partition of each exported cell (created e.g. by Zoltan).
    importGlobalGids | C-array of the global ids of imported cells (created e.g. by Zoltan).
    allowDistributedWells | Whether wells might be distributed to the interior of multiple processes.
- Returns
    A tuple consisting of
    - a vector that contains for each local cell of the original grid the number of the process that owns it after repartitioning,
    - a vector of pairs of a well name and a boolean indicating whether the well perforates cells local to this process,
    - a vector containing information for each exported cell (global id of the cell, process id to send to, attribute there),
    - a vector containing information for each imported cell (global index, process id that sends, attribute here, local index here), and
    - a WellConnections object containing information about the well connections (if the wells argument was not null and this is the root rank, this will contain the connections in the form of global indices).
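A sketch of the typical call; the Zoltan outputs (numExport, numImport, the Gid arrays and exportToPart) are assumed to come from a preceding partitioning call in the surrounding scope, and the unpacked names are illustrative:

    auto [cellPart, parallelWells, exportList, importList, wellConns] =
        Dune::cpgrid::makeImportAndExportLists(cpgrid, cc, wells,
                                               possibleFutureConnections,
                                               gridAndWells, root,
                                               numExport, numImport,
                                               exportLocalGids, exportGlobalGids,
                                               exportToPart, importGlobalGids,
                                               /*allowDistributedWells=*/false);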
◆ perforatingWellIndicesOnProc()
std::vector< std::vector< int > > Dune::cpgrid::perforatingWellIndicesOnProc(
        const std::vector< int > & parts,
        const std::vector< Dune::cpgrid::OpmWellType > & wells,
        const std::unordered_map< std::string, std::set< int > > & possibleFutureConnections,
        const CpGrid & cpgrid)
Determines the wells that perforate cells on each process.
On the root process, computes for all processes the indices of the wells that will perforate local cells. Note that a well might perforate local cells of multiple processes.
- Parameters
    parts | The partition number for each cell
    wells | The eclipse information about the wells.
    possibleFutureConnections | Possible future connections of wells that might get added through an ACTIONX. The grid will then be partitioned such that these connections are on the same partition. If NULL, they will be neglected.
    cpgrid | The unbalanced grid we compute on.
- Returns
- On the rank that has the global grid a vector with the well indices for process i at index i.
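A sketch of inspecting the result on the root rank; variable names are illustrative and assumed to be set up as described above:

    #include <iostream>

    const auto wellsPerProc = Dune::cpgrid::perforatingWellIndicesOnProc(
        parts, wells, possibleFutureConnections, cpgrid);

    for (std::size_t p = 0; p < wellsPerProc.size(); ++p)
        for (int w : wellsPerProc[p])
            std::cout << "Well " << w << " perforates cells of rank " << p << '\n';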
◆ postProcessPartitioningForWells()
std::vector< std::vector< int > > Dune::cpgrid::postProcessPartitioningForWells(
        std::vector< int > & parts,
        std::function< int(int)> gid,
        const std::vector< OpmWellType > & wells,
        const WellConnections & well_connections,
        const std::vector< std::set< int > > & wellGraph,
        std::vector< std::tuple< int, int, char > > & exportList,
        std::vector< std::tuple< int, int, char, int > > & importList,
        const Communication< MPI_Comm > & cc)
Computes wells assigned to processes.
Computes for all processes the indices of the wells that will be assigned to them.
- Parameters
    parts | The partition number for each cell
    gid | Functor that turns a cell index into a global id.
    wells | The eclipse information about the wells.
    well_connections | The information about the perforations of each well.
    exportList | List of cells to be exported. Each entry is a tuple of the global cell index, the process rank to export to, and the attribute on that rank (assumed to be owner).
    importList | List of cells to be imported. Each entry is a tuple of the global cell index, the process rank that exports it, and the attribute on this rank (assumed to be owner).
    cc | Information about the parallelism together with the decomposition.
- Returns
- On rank 0 a vector with the well indices for process i at index i.
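A sketch of the call sequence suggested by these docs: post-process a raw partitioning so that no well is split, then derive the per-process well information. All variables are assumed to be set up as described in the parameter lists above:

    // 'parts', 'exportList' and 'importList' are non-const references and
    // are adjusted in place; the return value holds, on the root rank, the
    // well indices assigned to each process.
    auto wells_on_proc = Dune::cpgrid::postProcessPartitioningForWells(
        parts, gid, wells, well_connections, wellGraph, exportList, importList, cc);

    // Feed the result into computeParallelWells (documented above).
    auto parallel_wells = Dune::cpgrid::computeParallelWells(wells_on_proc,
                                                             wells, cc, root);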
◆ referenceElement()
template<int mydim, int cdim>
auto Dune::cpgrid::referenceElement(const cpgrid::Geometry< mydim, cdim > & geo) -> decltype(referenceElement< double, mydim >(geo.type()))
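No detailed description is given; the overload follows the usual Dune pattern of looking up the reference element belonging to a geometry. A sketch, where the centerOf helper is hypothetical:

    // Map the barycenter of the reference element to a global coordinate.
    template<int mydim, int cdim>
    auto centerOf(const Dune::cpgrid::Geometry< mydim, cdim >& geo)
    {
        const auto& ref = Dune::cpgrid::referenceElement(geo);
        return geo.global(ref.position(0, 0)); // subentity 0 of codim 0: the element itself
    }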
◆ scatterExportInformation()
template<class Id >
std::tuple< int, std::vector< Id > > Dune::cpgrid::scatterExportInformation(
        int numExport,
        const Id * exportGlobalGids,
        const int * exportToPart,
        int root,
        const Dune::Communication< MPI_Comm > & cc)
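No description is given. Judging from the name and signature alone, the export information held on root is scattered so that each rank receives its own share; this interpretation is an assumption, as is the scatterSketch wrapper below:

    template<class Id>
    void scatterSketch(int numExport, const Id* exportGlobalGids,
                       const int* exportToPart, int root,
                       const Dune::Communication< MPI_Comm >& cc)
    {
        // 'count' and 'gids' are presumed to be this rank's portion of the
        // export information (semantics assumed, not documented here).
        auto [count, gids] = Dune::cpgrid::scatterExportInformation(
            numExport, exportGlobalGids, exportToPart, root, cc);
    }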
◆ vanillaPartitionGridOnRoot()
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > Dune::cpgrid::vanillaPartitionGridOnRoot(
        const CpGrid & grid,
        const std::vector< cpgrid::OpmWellType > * wells,
        const std::unordered_map< std::string, std::set< int > > & possibleFutureConnections,
        const double * transmissibilities,
        bool allowDistributedWells)
Creates a vanilla partitioning without a real loadbalancer.
The loadbalancing will take place on the Cartesian grid.
- Parameters
    grid | The grid
    wells | Pointer to vector with all possible wells (all perforations) of the problem. nullptr is possible.
    possibleFutureConnections | Possible future connections of wells that might get added through an ACTIONX. The grid will then be partitioned such that these connections are on the same partition. If NULL, they will be neglected.
    transmissibilities | C-array with transmissibilities or nullptr.
    allowDistributedWells | Whether wells might be distributed to the interior of multiple processes.
- Returns
    A tuple consisting of
    - a vector that contains for each local cell of the original grid the number of the process that owns it after repartitioning,
    - a vector of pairs of a well name and a boolean indicating whether the well perforates cells local to this process,
    - a vector containing information for each exported cell (global id of the cell, process id to send to, attribute there),
    - a vector containing information for each imported cell (global index, process id that sends, attribute here, local index here), and
    - a WellConnections object containing information about the well connections (if the wells argument was not null and this is the root rank, this will contain the connections in the form of global indices).
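A sketch of using the vanilla partitioning as a simple fallback when no graph partitioner is available; variable names are illustrative and assumed to be set up as described above:

    auto [cellPart, parallelWells, exportList, importList, wellConns] =
        Dune::cpgrid::vanillaPartitionGridOnRoot(grid, wells,
                                                 possibleFutureConnections,
                                                 transmissibilities,
                                                 /*allowDistributedWells=*/false);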