|
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > | createZoltanListsFromParts (const CpGrid &grid, const std::vector< cpgrid::OpmWellType > *wells, const double *transmissibilities, const std::vector< int > &parts, bool allowDistributedWells) |
| Creates lists as Zoltan would return for vanilla / user specified partitioning. More...
|
|
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > | vanillaPartitionGridOnRoot (const CpGrid &grid, const std::vector< cpgrid::OpmWellType > *wells, const double *transmissibilities, bool allowDistributedWells) |
| Creates a vanilla partitioning without a real loadbalancer. More...
|
|
std::vector< std::vector< int > > | perforatingWellIndicesOnProc (const std::vector< int > &parts, const std::vector< Dune::cpgrid::OpmWellType > &wells, const CpGrid &cpgrid) |
| Determines the wells that have perforating cells for each process. More...
|
|
std::vector< std::vector< int > > | postProcessPartitioningForWells (std::vector< int > &parts, std::function< int(int)> gid, const std::vector< OpmWellType > &wells, const WellConnections &well_connections, const std::vector< std::set< int > > &wellGraph, std::vector< std::tuple< int, int, char > > &exportList, std::vector< std::tuple< int, int, char, int > > &importList, const Communication< MPI_Comm > &cc) |
| Computes wells assigned to processes. More...
|
|
std::vector< std::pair< std::string, bool > > | computeParallelWells (const std::vector< std::vector< int > > &wells_on_proc, const std::vector< OpmWellType > &wells, const Communication< MPI_Comm > &cc, int root) |
| Computes whether wells are perforating cells on this process. More...
|
|
template<class Id > |
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > | makeImportAndExportLists (const Dune::CpGrid &cpgrid, const Dune::Communication< MPI_Comm > &cc, const std::vector< Dune::cpgrid::OpmWellType > *wells, const Dune::cpgrid::CombinedGridWellGraph *gridAndWells, int root, int numExport, int numImport, const Id *exportLocalGids, const Id *exportGlobalGids, const int *exportToPart, const Id *importGlobalGids, bool allowDistributedWells=false) |
| Creates all the information needed from the partitioning. More...
|
|
template<class Id > |
std::tuple< int, std::vector< Id > > | scatterExportInformation (int numExport, const Id *exportGlobalGids, const int *exportToPart, int root, const Dune::Communication< MPI_Comm > &cc) |
|
◆ OpmWellType
◆ computeParallelWells()
std::vector< std::pair< std::string, bool > > Dune::cpgrid::computeParallelWells |
( |
const std::vector< std::vector< int > > & |
wells_on_proc, |
|
|
const std::vector< OpmWellType > & |
wells, |
|
|
const Communication< MPI_Comm > & |
cc, |
|
|
int |
root |
|
) |
| |
Computes whether wells are perforating cells on this process.
- Parameters
-
wells_on_proc | well indices assigned to each process |
wells | The eclipse information about the wells |
cc | The communicator |
root | The rank of the process that has the complete partitioning information. |
- Returns
- Vector of pairs of well name and a boolean indicating whether the well with this name perforates cells here. Sorted by well name!
◆ createZoltanListsFromParts()
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > Dune::cpgrid::createZoltanListsFromParts |
( |
const CpGrid & |
grid, |
|
|
const std::vector< cpgrid::OpmWellType > * |
wells, |
|
|
const double * |
transmissibilities, |
|
|
const std::vector< int > & |
parts, |
|
|
bool |
allowDistributedWells |
|
) |
| |
Creates lists as Zoltan would return for vanilla / user specified partitioning.
- Parameters
-
grid | The grid |
wells | Pointer to vector with all possible wells (all perforations) of the problem. nullptr is possible |
transmissibilities | C-array with transmissibilities or nullptr. |
parts | The partitioning information. Contains for each cell the partition number (zero-based, consecutive). |
- Returns
- A tuple consisting of a vector that contains for each local cell of the original grid the number of the process that owns it after repartitioning, a vector containing for each well a pair of its name and a boolean indicating whether this well has perforated cells local to the process, a vector containing information for each exported cell (global id of cell, process id to send to, attribute there), a vector containing information for each imported cell (global index, process id that sends, attribute here, local index here), and a WellConnections object containing information about the well connections (if argument wells was not null and this is the root rank this will contain connections in form of global indices)
◆ makeImportAndExportLists()
template<class Id >
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > Dune::cpgrid::makeImportAndExportLists |
( |
const Dune::CpGrid & |
cpgrid, |
|
|
const Dune::Communication< MPI_Comm > & |
cc, |
|
|
const std::vector< Dune::cpgrid::OpmWellType > * |
wells, |
|
|
const Dune::cpgrid::CombinedGridWellGraph * |
gridAndWells, |
|
|
int |
root, |
|
|
int |
numExport, |
|
|
int |
numImport, |
|
|
const Id * |
exportLocalGids, |
|
|
const Id * |
exportGlobalGids, |
|
|
const int * |
exportToPart, |
|
|
const Id * |
importGlobalGids, |
|
|
bool |
allowDistributedWells = false |
|
) |
| |
Creates all the information needed from the partitioning.
Note that if wells is not null and allowDistributedWells is false, then the partitioning is postprocessed such that all cells that are perforated by a well are part of the interior of exactly one process. Otherwise they might be spread between multiple processes.
- Parameters
-
grid | The grid |
cc | The MPI communicator used for the partitioning. |
wells | Pointer to vector with all possible wells (all perforations) of the problem. nullptr is possible |
gridAndWells | Graph representing grid and wells (must match the other parameters!) |
root | The rank that has the whole grid before loadbalancing. |
numExport | The number of exported cells (created e.g. by Zoltan) |
numImport | The number of imported cells (created e.g. by Zoltan) |
exportGlobalGids | C-array of the global ids of exported cells (created e.g. by Zoltan) |
exportLocalGids | C-array of the local ids of exported cells (created e.g. by Zoltan) |
exportToPart | C-array with target partition of exported cells (created e.g. by Zoltan) |
importGlobalGids | C-array of the global ids of imported cells (created e.g. by Zoltan) |
allowDistributedWells | Whether wells might be distributed to the interior of multiple processes. |
- Returns
- A tuple consisting of a vector that contains for each local cell of the original grid the number of the process that owns it after repartitioning, a vector containing for each well a pair of its name and a boolean indicating whether this well has perforated cells local to the process, a vector containing information for each exported cell (global id of cell, process id to send to, attribute there), a vector containing information for each imported cell (global index, process id that sends, attribute here, local index here), and a WellConnections object containing information about the well connections (if argument wells was not null and this is the root rank this will contain connections in form of global indices)
◆ perforatingWellIndicesOnProc()
std::vector< std::vector< int > > Dune::cpgrid::perforatingWellIndicesOnProc |
( |
const std::vector< int > & |
parts, |
|
|
const std::vector< Dune::cpgrid::OpmWellType > & |
wells, |
|
|
const CpGrid & |
cpgrid |
|
) |
| |
Determines the wells that have perforating cells for each process.
On the root process computes for all processes all indices of wells that will perforate local cells. Note that a well might perforate local cells of multiple processes.
- Parameters
-
parts | The partition number for each cell |
wells | The eclipse information about the wells. |
cpgrid | The unbalanced grid we compute on. |
- Returns
- On the rank that has the global grid a vector with the well indices for process i at index i.
◆ postProcessPartitioningForWells()
std::vector< std::vector< int > > Dune::cpgrid::postProcessPartitioningForWells |
( |
std::vector< int > & |
parts, |
|
|
std::function< int(int)> |
gid, |
|
|
const std::vector< OpmWellType > & |
wells, |
|
|
const WellConnections & |
well_connections, |
|
|
const std::vector< std::set< int > > & |
wellGraph, |
|
|
std::vector< std::tuple< int, int, char > > & |
exportList, |
|
|
std::vector< std::tuple< int, int, char, int > > & |
importList, |
|
|
const Communication< MPI_Comm > & |
cc |
|
) |
| |
Computes wells assigned to processes.
Computes for all processes all indices of wells that will be assigned to this process. - Parameters
-
parts | The partition number for each cell |
gid | Functor that turns cell index to global id. |
wells | The eclipse information about the wells |
well_connections | The information about the perforations of each well. |
exportList | List of cells to be exported. Each entry is a tuple of the global cell index, the process rank to export to, and the attribute on that rank (assumed to be owner) |
importList | List of cells to be imported. Each entry is a tuple of the global cell index, the process rank that exports it, and the attribute on this rank (assumed to be owner) |
cc | Information about the parallelism together with the decomposition. |
- Returns
- On rank 0 a vector with the well indices for process i at index i.
◆ scatterExportInformation()
template<class Id >
std::tuple< int, std::vector< Id > > Dune::cpgrid::scatterExportInformation |
( |
int |
numExport, |
|
|
const Id * |
exportGlobalGids, |
|
|
const int * |
exportToPart, |
|
|
int |
root, |
|
|
const Dune::Communication< MPI_Comm > & |
cc |
|
) |
| |
◆ vanillaPartitionGridOnRoot()
std::tuple< std::vector< int >, std::vector< std::pair< std::string, bool > >, std::vector< std::tuple< int, int, char > >, std::vector< std::tuple< int, int, char, int > >, WellConnections > Dune::cpgrid::vanillaPartitionGridOnRoot |
( |
const CpGrid & |
grid, |
|
|
const std::vector< cpgrid::OpmWellType > * |
wells, |
|
|
const double * |
transmissibilities, |
|
|
bool |
allowDistributedWells |
|
) |
| |
Creates a vanilla partitioning without a real loadbalancer.
The loadbalancing will take place on the cartesian grid. - Parameters
-
grid | The grid |
wells | Pointer to vector with all possible wells (all perforations) of the problem. nullptr is possible |
transmissibilities | C-array with transmissibilities or nullptr. |
- Returns
- A tuple consisting of a vector that contains for each local cell of the original grid the number of the process that owns it after repartitioning, a vector containing for each well a pair of its name and a boolean indicating whether this well has perforated cells local to the process, a vector containing information for each exported cell (global id of cell, process id to send to, attribute there), a vector containing information for each imported cell (global index, process id that sends, attribute here, local index here), and a WellConnections object containing information about the well connections (if argument wells was not null and this is the root rank this will contain connections in form of global indices)
|