28#ifndef TPFA_LINEARIZER_HH
29#define TPFA_LINEARIZER_HH
34#include <opm/common/Exceptions.hpp>
35#include <opm/common/TimingMacros.hpp>
37#include <opm/grid/utility/SparseTable.hpp>
39#include <opm/input/eclipse/EclipseState/Grid/FaceDir.hpp>
40#include <opm/input/eclipse/Schedule/BCProp.hpp>
44#include <dune/common/version.hh>
45#include <dune/common/fvector.hh>
46#include <dune/common/fmatrix.hh>
58 template<
class TypeTag,
class MyTypeTag>
68template<
class TypeTag>
80template<
class TypeTag>
100 using Element =
typename GridView::template Codim<0>::Entity;
101 using ElementIterator =
typename GridView::template Codim<0>::Iterator;
103 using Vector = GlobalEqVector;
105 enum { numEq = getPropValue<TypeTag, Properties::NumEq>() };
106 enum { historySize = getPropValue<TypeTag, Properties::TimeDiscHistorySize>() };
107 enum { dimWorld = GridView::dimensionworld };
109 using MatrixBlock =
typename SparseMatrixAdapter::MatrixBlock;
110 using VectorBlock = Dune::FieldVector<Scalar, numEq>;
113 static const bool linearizeNonLocalElements = getPropValue<TypeTag, Properties::LinearizeNonLocalElements>();
114 static const bool enableEnergy = getPropValue<TypeTag, Properties::EnableEnergy>();
115 static const bool enableDiffusion = getPropValue<TypeTag, Properties::EnableDiffusion>();
125 separateSparseSourceTerms_ = Parameters::get<TypeTag, Properties::SeparateSparseSourceTerms>();
137 Parameters::registerParam<TypeTag, Properties::SeparateSparseSourceTerms>
138 (
"Treat well source terms all in one go, instead of on a cell by cell basis.");
152 simulatorPtr_ = &simulator;
200 catch (
const std::exception& e)
202 std::cout <<
"rank " << simulator_().
gridView().comm().rank()
203 <<
" caught an exception while linearizing:" << e.what()
204 <<
"\n" << std::flush;
209 std::cout <<
"rank " << simulator_().
gridView().comm().rank()
210 <<
" caught an exception while linearizing"
211 <<
"\n" << std::flush;
214 succeeded = simulator_().
gridView().comm().min(succeeded);
217 throw NumericalProblem(
"A process did not succeed in linearizing the system");
230 template <
class SubDomainType>
238 initFirstIteration_();
241 if (domain.cells.size() == model_().numTotalDof()) {
252 { jacobian_->finalize(); }
260 OPM_TIMEBLOCK(linearizeAuxilaryEquations);
264 auto& model = model_();
265 const auto& comm = simulator_().
gridView().comm();
266 for (
unsigned auxModIdx = 0; auxModIdx < model.numAuxiliaryModules(); ++auxModIdx) {
267 bool succeeded =
true;
269 model.auxiliaryModule(auxModIdx)->linearize(*jacobian_, residual_);
271 catch (
const std::exception& e) {
274 std::cout <<
"rank " << simulator_().
gridView().comm().rank()
275 <<
" caught an exception while linearizing:" << e.what()
276 <<
"\n" << std::flush;
279 succeeded = comm.min(succeeded);
282 throw NumericalProblem(
"linearization of an auxiliary equation failed");
290 {
return *jacobian_; }
293 {
return *jacobian_; }
299 {
return residual_; }
302 {
return residual_; }
305 linearizationType_ = linearizationType;
309 return linearizationType_;
339 return velocityInfo_;
344 updateStoredTransmissibilities();
348 for (
auto& bdyInfo : boundaryInfo_) {
349 const auto [type, massrateAD] = problem_().boundaryCondition(bdyInfo.cell, bdyInfo.dir);
352 VectorBlock massrate(0.0);
353 for (
size_t ii = 0; ii < massrate.size(); ++ii) {
354 massrate[ii] = massrateAD[ii].value();
356 if (type != BCType::NONE) {
357 const auto& exFluidState = problem_().boundaryFluidState(bdyInfo.cell, bdyInfo.dir);
358 bdyInfo.bcdata.type = type;
359 bdyInfo.bcdata.massRate = massrate;
360 bdyInfo.bcdata.exFluidState = exFluidState;
373 template <
class SubDomainType>
377 initFirstIteration_();
379 for (
int globI : domain.cells) {
380 residual_[globI] = 0.0;
381 jacobian_->clearRow(globI, 0.0);
387 {
return *simulatorPtr_; }
388 const Simulator& simulator_()
const
389 {
return *simulatorPtr_; }
392 {
return simulator_().
problem(); }
393 const Problem& problem_()
const
394 {
return simulator_().
problem(); }
397 {
return simulator_().
model(); }
398 const Model& model_()
const
399 {
return simulator_().
model(); }
401 const GridView& gridView_()
const
402 {
return problem_().gridView(); }
404 void initFirstIteration_()
410 residual_.resize(model_().numTotalDof());
420 OPM_TIMEBLOCK(createMatrix);
421 if (!neighborInfo_.empty()) {
426 const auto& model = model_();
427 Stencil stencil(gridView_(), model_().dofMapper());
431 using NeighborSet = std::set< unsigned >;
432 std::vector<NeighborSet> sparsityPattern(model.numTotalDof());
433 const Scalar gravity = problem_().gravity()[dimWorld - 1];
434 unsigned numCells = model.numTotalDof();
435 neighborInfo_.reserve(numCells, 6 * numCells);
436 std::vector<NeighborInfo> loc_nbinfo;
437 for (
const auto& elem : elements(gridView_())) {
438 stencil.update(elem);
440 for (
unsigned primaryDofIdx = 0; primaryDofIdx < stencil.numPrimaryDof(); ++primaryDofIdx) {
441 unsigned myIdx = stencil.globalSpaceIndex(primaryDofIdx);
442 loc_nbinfo.resize(stencil.numDof() - 1);
444 for (
unsigned dofIdx = 0; dofIdx < stencil.numDof(); ++dofIdx) {
445 unsigned neighborIdx = stencil.globalSpaceIndex(dofIdx);
446 sparsityPattern[myIdx].insert(neighborIdx);
448 const Scalar trans = problem_().transmissibility(myIdx, neighborIdx);
449 const auto scvfIdx = dofIdx - 1;
450 const auto& scvf = stencil.interiorFace(scvfIdx);
451 const Scalar area = scvf.area();
452 const Scalar Vin = problem_().model().dofTotalVolume(myIdx);
453 const Scalar Vex = problem_().model().dofTotalVolume(neighborIdx);
454 const Scalar zIn = problem_().dofCenterDepth(myIdx);
455 const Scalar zEx = problem_().dofCenterDepth(neighborIdx);
456 const Scalar dZg = (zIn - zEx)*gravity;
457 const Scalar thpres = problem_().thresholdPressure(myIdx, neighborIdx);
459 Scalar outAlpha {0.};
460 Scalar diffusivity {0.};
461 Scalar dispersivity {0.};
462 if constexpr(enableEnergy){
463 inAlpha = problem_().thermalHalfTransmissibility(myIdx, neighborIdx);
464 outAlpha = problem_().thermalHalfTransmissibility(neighborIdx, myIdx);
466 if constexpr(enableDiffusion){
467 diffusivity = problem_().diffusivity(myIdx, neighborIdx);
469 if (simulator_().vanguard().eclState().getSimulationConfig().rock_config().dispersion()) {
470 dispersivity = problem_().dispersivity(myIdx, neighborIdx);
472 const auto dirId = scvf.dirId();
473 auto faceDir = dirId < 0 ? FaceDir::DirEnum::Unknown
474 : FaceDir::FromIntersectionIndex(dirId);
475 loc_nbinfo[dofIdx - 1] = NeighborInfo{neighborIdx, {trans, area, thpres, dZg, faceDir, Vin, Vex, inAlpha, outAlpha, diffusivity, dispersivity},
nullptr};
479 neighborInfo_.appendRow(loc_nbinfo.begin(), loc_nbinfo.end());
480 if (problem_().nonTrivialBoundaryConditions()) {
481 for (
unsigned bfIndex = 0; bfIndex < stencil.numBoundaryFaces(); ++bfIndex) {
482 const auto& bf = stencil.boundaryFace(bfIndex);
483 const int dir_id = bf.dirId();
487 const auto [type, massrateAD] = problem_().boundaryCondition(myIdx, dir_id);
489 VectorBlock massrate(0.0);
490 for (
size_t ii = 0; ii < massrate.size(); ++ii) {
491 massrate[ii] = massrateAD[ii].value();
493 const auto& exFluidState = problem_().boundaryFluidState(myIdx, dir_id);
494 BoundaryConditionData bcdata{type,
496 exFluidState.pvtRegionIndex(),
499 bf.integrationPos()[dimWorld - 1],
501 boundaryInfo_.push_back({myIdx, dir_id, bfIndex, bcdata});
509 size_t numAuxMod = model.numAuxiliaryModules();
510 for (
unsigned auxModIdx = 0; auxModIdx < numAuxMod; ++auxModIdx)
511 model.auxiliaryModule(auxModIdx)->addNeighbors(sparsityPattern);
514 jacobian_.reset(
new SparseMatrixAdapter(simulator_()));
515 diagMatAddress_.resize(numCells);
517 jacobian_->reserve(sparsityPattern);
518 for (
unsigned globI = 0; globI < numCells; globI++) {
519 const auto& nbInfos = neighborInfo_[globI];
520 diagMatAddress_[globI] = jacobian_->blockAddress(globI, globI);
521 for (
auto& nbInfo : nbInfos) {
522 nbInfo.matBlockAddress = jacobian_->blockAddress(nbInfo.neighbor, globI);
527 fullDomain_.cells.resize(numCells);
528 std::iota(fullDomain_.cells.begin(), fullDomain_.cells.end(), 0);
542 OPM_TIMEBLOCK(createFlows);
546 const bool anyFlows = simulator_().
problem().eclWriter()->outputModule().anyFlows();
547 const bool anyFlores = simulator_().
problem().eclWriter()->outputModule().anyFlores();
548 const bool enableDispersion = simulator_().
vanguard().eclState().getSimulationConfig().rock_config().dispersion();
549 if (((!anyFlows || !flowsInfo_.empty()) && (!anyFlores || !floresInfo_.empty())) && !enableDispersion) {
552 const auto& model = model_();
553 const auto& nncOutput = simulator_().
problem().eclWriter()->getOutputNnc();
554 Stencil stencil(gridView_(), model_().dofMapper());
555 unsigned numCells = model.numTotalDof();
556 std::unordered_multimap<int, std::pair<int, int>> nncIndices;
557 std::vector<FlowInfo> loc_flinfo;
558 std::vector<VelocityInfo> loc_vlinfo;
559 unsigned int nncId = 0;
560 VectorBlock flow(0.0);
563 for (
unsigned int nncIdx = 0; nncIdx < nncOutput.size(); ++nncIdx) {
564 const int ci1 = nncOutput[nncIdx].cell1;
565 const int ci2 = nncOutput[nncIdx].cell2;
566 nncIndices.emplace(ci1, std::make_pair(ci2, nncIdx));
570 flowsInfo_.reserve(numCells, 6 * numCells);
573 floresInfo_.reserve(numCells, 6 * numCells);
575 if (enableDispersion) {
576 velocityInfo_.reserve(numCells, 6 * numCells);
579 for (
const auto& elem : elements(gridView_())) {
580 stencil.update(elem);
581 for (
unsigned primaryDofIdx = 0; primaryDofIdx < stencil.numPrimaryDof(); ++primaryDofIdx) {
582 unsigned myIdx = stencil.globalSpaceIndex(primaryDofIdx);
583 int numFaces = stencil.numBoundaryFaces() + stencil.numInteriorFaces();
584 loc_flinfo.resize(numFaces);
585 loc_vlinfo.resize(stencil.numDof() - 1);
587 for (
unsigned dofIdx = 0; dofIdx < stencil.numDof(); ++dofIdx) {
588 unsigned neighborIdx = stencil.globalSpaceIndex(dofIdx);
590 const auto scvfIdx = dofIdx - 1;
591 const auto& scvf = stencil.interiorFace(scvfIdx);
592 int faceId = scvf.dirId();
593 const int cartMyIdx = simulator_().
vanguard().cartesianIndex(myIdx);
594 const int cartNeighborIdx = simulator_().
vanguard().cartesianIndex(neighborIdx);
595 const auto& range = nncIndices.equal_range(cartMyIdx);
596 for (
auto it = range.first; it != range.second; ++it) {
597 if (it->second.first == cartNeighborIdx){
601 nncId = it->second.second;
604 loc_flinfo[dofIdx - 1] = FlowInfo{faceId, flow, nncId};
605 loc_vlinfo[dofIdx - 1] = VelocityInfo{flow};
609 for (
unsigned bdfIdx = 0; bdfIdx < stencil.numBoundaryFaces(); ++bdfIdx) {
610 const auto& scvf = stencil.boundaryFace(bdfIdx);
611 int faceId = scvf.dirId();
612 loc_flinfo[stencil.numInteriorFaces() + bdfIdx] = FlowInfo{faceId, flow, nncId};
617 flowsInfo_.appendRow(loc_flinfo.begin(), loc_flinfo.end());
620 floresInfo_.appendRow(loc_flinfo.begin(), loc_flinfo.end());
622 if (enableDispersion) {
623 velocityInfo_.appendRow(loc_vlinfo.begin(), loc_vlinfo.end());
632 for (
unsigned eqIdx = 0; eqIdx < numEq; eqIdx++)
633 res[eqIdx] = resid[eqIdx].value();
635 for (
unsigned eqIdx = 0; eqIdx < numEq; eqIdx++) {
636 for (
unsigned pvIdx = 0; pvIdx < numEq; pvIdx++) {
641 bMat[eqIdx][pvIdx] = resid[eqIdx].derivative(pvIdx);
647 OPM_TIMEBLOCK(updateFlows);
648 const bool& enableFlows = simulator_().
problem().eclWriter()->outputModule().hasFlows() ||
649 simulator_().
problem().eclWriter()->outputModule().hasBlockFlows();
650 const bool& enableFlores = simulator_().
problem().eclWriter()->outputModule().hasFlores();
651 if (!enableFlows && !enableFlores) {
654 const unsigned int numCells = model_().numTotalDof();
657#pragma omp parallel for
659 for (
unsigned globI = 0; globI < numCells; ++globI) {
660 OPM_TIMEBLOCK_LOCAL(linearizationForEachCell);
661 const auto& nbInfos = neighborInfo_[globI];
662 ADVectorBlock adres(0.0);
663 ADVectorBlock darcyFlux(0.0);
664 const IntensiveQuantities& intQuantsIn = model_().intensiveQuantities(globI, 0);
667 OPM_TIMEBLOCK_LOCAL(fluxCalculationForEachCell);
669 for (
const auto& nbInfo : nbInfos) {
670 OPM_TIMEBLOCK_LOCAL(fluxCalculationForEachFace);
671 unsigned globJ = nbInfo.neighbor;
672 assert(globJ != globI);
675 const IntensiveQuantities& intQuantsEx = model_().intensiveQuantities(globJ, 0);
676 LocalResidual::computeFlux(adres,darcyFlux, globI, globJ, intQuantsIn, intQuantsEx, nbInfo.res_nbinfo);
677 adres *= nbInfo.res_nbinfo.faceArea;
679 for (
unsigned eqIdx = 0; eqIdx < numEq; ++ eqIdx) {
680 flowsInfo_[globI][loc].flow[eqIdx] = adres[eqIdx].value();
684 for (
unsigned eqIdx = 0; eqIdx < numEq; ++ eqIdx) {
685 floresInfo_[globI][loc].flow[eqIdx] = darcyFlux[eqIdx].value();
694 for (
const auto& bdyInfo : boundaryInfo_) {
695 if (bdyInfo.bcdata.type == BCType::NONE)
698 ADVectorBlock adres(0.0);
699 const unsigned globI = bdyInfo.cell;
700 const auto& nbInfos = neighborInfo_[globI];
701 const IntensiveQuantities& insideIntQuants = model_().intensiveQuantities(globI, 0);
702 LocalResidual::computeBoundaryFlux(adres, problem_(), bdyInfo.bcdata, insideIntQuants, globI);
703 adres *= bdyInfo.bcdata.faceArea;
704 const unsigned bfIndex = bdyInfo.bfIndex;
706 for (
unsigned eqIdx = 0; eqIdx < numEq; ++ eqIdx) {
707 flowsInfo_[globI][nbInfos.size() + bfIndex].flow[eqIdx] = adres[eqIdx].value();
715 template <
class SubDomainType>
716 void linearize_(
const SubDomainType& domain)
721 if (!problem_().recycleFirstIterationStorage()) {
722 if (!model_().storeIntensiveQuantities() && !model_().enableStorageCache()) {
723 OPM_THROW(std::runtime_error,
"Must have cached either IQs or storage when we cannot recycle.");
732 const bool& enableDispersion = simulator_().
vanguard().eclState().getSimulationConfig().rock_config().dispersion();
733 const unsigned int numCells = domain.cells.size();
734 const bool on_full_domain = (numCells == model_().numTotalDof());
737#pragma omp parallel for
739 for (
unsigned ii = 0; ii < numCells; ++ii) {
740 OPM_TIMEBLOCK_LOCAL(linearizationForEachCell);
741 const unsigned globI = domain.cells[ii];
742 const auto& nbInfos = neighborInfo_[globI];
743 VectorBlock res(0.0);
744 MatrixBlock bMat(0.0);
745 ADVectorBlock adres(0.0);
746 ADVectorBlock darcyFlux(0.0);
747 const IntensiveQuantities& intQuantsIn = model_().intensiveQuantities(globI, 0);
751 OPM_TIMEBLOCK_LOCAL(fluxCalculationForEachCell);
753 for (
const auto& nbInfo : nbInfos) {
754 OPM_TIMEBLOCK_LOCAL(fluxCalculationForEachFace);
755 unsigned globJ = nbInfo.neighbor;
756 assert(globJ != globI);
761 const IntensiveQuantities& intQuantsEx = model_().intensiveQuantities(globJ, 0);
762 LocalResidual::computeFlux(adres,darcyFlux, globI, globJ, intQuantsIn, intQuantsEx, nbInfo.res_nbinfo);
763 adres *= nbInfo.res_nbinfo.faceArea;
764 if (enableDispersion) {
765 for (
unsigned phaseIdx = 0; phaseIdx < numEq; ++ phaseIdx) {
766 velocityInfo_[globI][loc].velocity[phaseIdx] = darcyFlux[phaseIdx].value() / nbInfo.res_nbinfo.faceArea;
770 residual_[globI] += res;
772 *diagMatAddress_[globI] += bMat;
775 *nbInfo.matBlockAddress += bMat;
782 double volume = model_().dofTotalVolume(globI);
783 Scalar storefac = volume / dt;
786 OPM_TIMEBLOCK_LOCAL(computeStorage);
787 LocalResidual::computeStorage(adres, intQuantsIn);
791 if (model_().enableStorageCache()) {
795 model_().updateCachedStorage(globI, 0, res);
796 if (model_().newtonMethod().numIterations() == 0) {
798 if (problem_().recycleFirstIterationStorage()) {
801 if (on_full_domain) {
808 model_().updateCachedStorage(globI, 1, res);
811 Dune::FieldVector<Scalar, numEq> tmp;
812 IntensiveQuantities intQuantOld = model_().intensiveQuantities(globI, 1);
813 LocalResidual::computeStorage(tmp, intQuantOld);
814 model_().updateCachedStorage(globI, 1, tmp);
817 res -= model_().cachedStorage(globI, 1);
819 OPM_TIMEBLOCK_LOCAL(computeStorage0);
820 Dune::FieldVector<Scalar, numEq> tmp;
821 IntensiveQuantities intQuantOld = model_().intensiveQuantities(globI, 1);
822 LocalResidual::computeStorage(tmp, intQuantOld);
828 residual_[globI] += res;
830 *diagMatAddress_[globI] += bMat;
837 if (separateSparseSourceTerms_) {
838 LocalResidual::computeSourceDense(adres, problem_(), globI, 0);
840 LocalResidual::computeSource(adres, problem_(), globI, 0);
844 residual_[globI] += res;
846 *diagMatAddress_[globI] += bMat;
850 if (separateSparseSourceTerms_) {
851 problem_().wellModel().addReservoirSourceTerms(residual_, diagMatAddress_);
855 for (
const auto& bdyInfo : boundaryInfo_) {
856 if (bdyInfo.bcdata.type == BCType::NONE)
859 VectorBlock res(0.0);
860 MatrixBlock bMat(0.0);
861 ADVectorBlock adres(0.0);
862 const unsigned globI = bdyInfo.cell;
863 const IntensiveQuantities& insideIntQuants = model_().intensiveQuantities(globI, 0);
864 LocalResidual::computeBoundaryFlux(adres, problem_(), bdyInfo.bcdata, insideIntQuants, globI);
865 adres *= bdyInfo.bcdata.faceArea;
867 residual_[globI] += res;
869 *diagMatAddress_[globI] += bMat;
873 void updateStoredTransmissibilities()
875 if (neighborInfo_.empty()) {
879 initFirstIteration_();
881 unsigned numCells = model_().numTotalDof();
883#pragma omp parallel for
885 for (
unsigned globI = 0; globI < numCells; globI++) {
886 auto nbInfos = neighborInfo_[globI];
887 for (
auto& nbInfo : nbInfos) {
888 unsigned globJ = nbInfo.neighbor;
889 nbInfo.res_nbinfo.trans = problem_().transmissibility(globI, globJ);
895 Simulator *simulatorPtr_;
898 std::unique_ptr<SparseMatrixAdapter> jacobian_;
901 GlobalEqVector residual_;
903 LinearizationType linearizationType_;
905 using ResidualNBInfo =
typename LocalResidual::ResidualNBInfo;
908 unsigned int neighbor;
909 ResidualNBInfo res_nbinfo;
910 MatrixBlock* matBlockAddress;
912 SparseTable<NeighborInfo> neighborInfo_;
913 std::vector<MatrixBlock*> diagMatAddress_;
921 SparseTable<FlowInfo> flowsInfo_;
922 SparseTable<FlowInfo> floresInfo_;
926 VectorBlock velocity;
928 SparseTable<VelocityInfo> velocityInfo_;
930 using ScalarFluidState =
typename IntensiveQuantities::ScalarFluidState;
931 struct BoundaryConditionData
934 VectorBlock massRate;
935 unsigned pvtRegionIdx;
936 unsigned boundaryFaceIndex;
939 ScalarFluidState exFluidState;
945 unsigned int bfIndex;
946 BoundaryConditionData bcdata;
948 std::vector<BoundaryInfo> boundaryInfo_;
949 bool separateSparseSourceTerms_ =
false;
952 std::vector<int> cells;
953 std::vector<bool> interior;
955 FullDomain fullDomain_;
The base class for the element-centered finite-volume discretization scheme.
Definition: ecfvdiscretization.hh:147
Definition: matrixblock.hh:227
Manages the initialization and running of time-dependent problems.
Definition: simulator.hh:102
Scalar timeStepSize() const
Returns the time step length so that we don't miss the beginning of the next episode or cross the end of the simulation.
Definition: simulator.hh:455
Vanguard & vanguard()
Return a reference to the grid manager of simulation.
Definition: simulator.hh:275
Problem & problem()
Return the object which specifies the physical setup of the simulation.
Definition: simulator.hh:306
const GridView & gridView() const
Return the grid view for which the simulation is done.
Definition: simulator.hh:287
Model & model()
Return the physical model used in the simulation.
Definition: simulator.hh:293
The common code for the linearizers of non-linear systems of equations.
Definition: tpfalinearizer.hh:82
const auto & getFloresInfo() const
Return constant reference to the floresInfo.
Definition: tpfalinearizer.hh:327
const LinearizationType & getLinearizationType() const
Definition: tpfalinearizer.hh:308
void updateBoundaryConditionData()
Definition: tpfalinearizer.hh:347
void linearize()
Linearize the full system of non-linear equations.
Definition: tpfalinearizer.hh:177
SparseMatrixAdapter & jacobian()
Definition: tpfalinearizer.hh:292
const auto & getFlowsInfo() const
Return constant reference to the flowsInfo.
Definition: tpfalinearizer.hh:317
TpfaLinearizer()
Definition: tpfalinearizer.hh:121
void linearizeDomain(const SubDomainType &domain)
Linearize the part of the non-linear system of equations that is associated with a part of the spatial domain.
Definition: tpfalinearizer.hh:231
void finalize()
Definition: tpfalinearizer.hh:251
void init(Simulator &simulator)
Initialize the linearizer.
Definition: tpfalinearizer.hh:150
void updateFlowsInfo()
Definition: tpfalinearizer.hh:646
void setResAndJacobi(VectorBlock &res, MatrixBlock &bMat, const ADVectorBlock &resid) const
Definition: tpfalinearizer.hh:630
~TpfaLinearizer()
Definition: tpfalinearizer.hh:128
static void registerParameters()
Register all run-time parameters for the Jacobian linearizer.
Definition: tpfalinearizer.hh:135
void linearizeDomain()
Linearize the part of the non-linear system of equations that is associated with the spatial domain.
Definition: tpfalinearizer.hh:193
const SparseMatrixAdapter & jacobian() const
Return constant reference to global Jacobian matrix backend.
Definition: tpfalinearizer.hh:289
void setLinearizationType(LinearizationType linearizationType)
Definition: tpfalinearizer.hh:304
GlobalEqVector & residual()
Definition: tpfalinearizer.hh:301
void eraseMatrix()
Causes the Jacobian matrix to be recreated from scratch before the next iteration.
Definition: tpfalinearizer.hh:163
const auto & getVelocityInfo() const
Return constant reference to the velocityInfo.
Definition: tpfalinearizer.hh:337
void updateDiscretizationParameters()
Definition: tpfalinearizer.hh:342
void resetSystem_(const SubDomainType &domain)
Definition: tpfalinearizer.hh:374
const std::map< unsigned, Constraints > constraintsMap() const
Returns the map of constraint degrees of freedom.
Definition: tpfalinearizer.hh:370
void linearizeAuxiliaryEquations()
Linearize the part of the non-linear system of equations that is associated with the spatial domain.
Definition: tpfalinearizer.hh:258
const GlobalEqVector & residual() const
Return constant reference to global residual vector.
Definition: tpfalinearizer.hh:298
Declare the properties used by the infrastructure code of the finite volume discretizations.
Definition: blackoilmodel.hh:72
Definition: blackoilboundaryratevector.hh:37
typename Properties::Detail::GetPropImpl< TypeTag, Property >::type::type GetPropType
Get the type alias defined in the property (equivalent to the old macro GET_PROP_TYPE(TypeTag, Property)).
Definition: propertysystem.hh:242
Definition: linearizationtype.hh:35
Definition: tpfalinearizer.hh:59
bool type
Definition: tpfalinearizer.hh:60
static constexpr type value
Definition: tpfalinearizer.hh:61