fvbaselinearizer.hh
// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
#ifndef EWOMS_FV_BASE_LINEARIZER_HH
#define EWOMS_FV_BASE_LINEARIZER_HH

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/common/version.hh>

#include <dune/grid/common/gridenums.hh>

#include <opm/common/Exceptions.hpp>
#include <opm/common/TimingMacros.hpp>

#include <opm/grid/utility/SparseTable.hpp>

#include <opm/material/common/MathToolbox.hpp>

#include <opm/models/discretization/common/baseauxiliarymodule.hh>
#include <opm/models/discretization/common/fvbaseproperties.hh>
#include <opm/models/discretization/common/linearizationtype.hh>

#include <opm/models/parallel/gridcommhandles.hh>
#include <opm/models/parallel/threadmanager.hpp>
#include <opm/models/parallel/threadedentityiterator.hh>

#include <cstddef>
#include <exception> // current_exception, rethrow_exception
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <vector>

namespace Opm {

// forward declarations
template<class TypeTag>
class EcfvDiscretization;

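/*!
 * \brief The common code for the linearizers of non-linear systems of equations.
 *
 * A minimal usage sketch: \c MyTypeTag is a hypothetical type tag and
 * \c simulator an already initialized Simulator instance.
 * \code
 * FvBaseLinearizer<MyTypeTag> linearizer;
 * linearizer.init(simulator);
 * linearizer.linearizeDomain();
 * linearizer.linearizeAuxiliaryEquations();
 * linearizer.finalize();
 * const auto& jacobian = linearizer.jacobian();
 * const auto& residual = linearizer.residual();
 * \endcode
 */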
template<class TypeTag>
class FvBaseLinearizer
{
    using Model = GetPropType<TypeTag, Properties::Model>;
    using Problem = GetPropType<TypeTag, Properties::Problem>;
    using Simulator = GetPropType<TypeTag, Properties::Simulator>;
    using GridView = GetPropType<TypeTag, Properties::GridView>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using Evaluation = GetPropType<TypeTag, Properties::Evaluation>;
    using DofMapper = GetPropType<TypeTag, Properties::DofMapper>;
    using ElementMapper = GetPropType<TypeTag, Properties::ElementMapper>;

    using GlobalEqVector = GetPropType<TypeTag, Properties::GlobalEqVector>;
    using SparseMatrixAdapter = GetPropType<TypeTag, Properties::SparseMatrixAdapter>;
    using Constraints = GetPropType<TypeTag, Properties::Constraints>;
    using Stencil = GetPropType<TypeTag, Properties::Stencil>;
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;

    using Toolbox = MathToolbox<Evaluation>;

    using Element = typename GridView::template Codim<0>::Entity;
    using ElementIterator = typename GridView::template Codim<0>::Iterator;

    using Vector = GlobalEqVector;

    using IstlMatrix = typename SparseMatrixAdapter::IstlMatrix;

    enum { numEq = getPropValue<TypeTag, Properties::NumEq>() };
    enum { historySize = getPropValue<TypeTag, Properties::TimeDiscHistorySize>() };

    using MatrixBlock = typename SparseMatrixAdapter::MatrixBlock;
    using VectorBlock = Dune::FieldVector<Scalar, numEq>;

    static constexpr bool linearizeNonLocalElements =
        getPropValue<TypeTag, Properties::LinearizeNonLocalElements>();

public:
    FvBaseLinearizer() = default;

    // copying the linearizer is not a good idea
    FvBaseLinearizer(const FvBaseLinearizer&) = delete;

    /*!
     * \brief Register all run-time parameters for the Jacobian linearizer.
     */
    static void registerParameters()
    {}

    /*!
     * \brief Initialize the linearizer.
     */
    void init(Simulator& simulator)
    {
        simulatorPtr_ = &simulator;
        eraseMatrix();
        elementCtx_.clear();
        fullDomain_ = std::make_unique<FullDomain>(simulator.gridView());
    }

    /*!
     * \brief Causes the Jacobian matrix to be recreated from scratch
     *        before the next iteration.
     *
     * This method is usually called if the sparsity pattern has changed
     * for some reason (e.g., by modifications of the grid or changes to
     * the auxiliary equations).
     */
    void eraseMatrix()
    {
        jacobian_.reset();
    }

    /*!
     * \brief Linearize the full system of non-linear equations.
     *
     * This means the spatial domain plus all auxiliary equations.
     */
    void linearize()
    {
        linearizeDomain();
        linearizeAuxiliaryEquations();
    }

    /*!
     * \brief Linearize the part of the non-linear system of equations
     *        that is associated with the spatial domain.
     *
     * That means that the global Jacobian of the residual is assembled
     * and the residual is evaluated for the current solution.
     */
    void linearizeDomain()
    {
        linearizeDomain(*fullDomain_);
    }

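    /*!
     * \brief Linearize the part of the non-linear system of equations
     *        that is associated with a sub-domain of the spatial domain.
     *
     * The sub-domain type only needs to expose the grid view to iterate
     * over as a member called \c view, mirroring the FullDomain helper
     * defined at the bottom of this class. Hypothetical sketch:
     * \code
     * struct MySubDomain { GridView view; };
     * linearizer.linearizeDomain(MySubDomain{someView});
     * \endcode
     */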
    template <class SubDomainType>
    void linearizeDomain(const SubDomainType& domain)
    {
        OPM_TIMEBLOCK(linearizeDomain);
        // we defer the initialization of the Jacobian matrix until here because the
        // auxiliary modules usually assume the problem, model and grid to be fully
        // initialized...
        if (!jacobian_) {
            initFirstIteration_();
        }

        // Called here because it is no longer called from linearize_().
        if (static_cast<std::size_t>(domain.view.size(0)) == model_().numTotalDof()) {
            // We are on the full domain.
            resetSystem_();
        }
        else {
            resetSystem_(domain);
        }

        int succeeded;
        try {
            linearize_(domain);
            succeeded = 1;
        }
        catch (const std::exception& e) {
            std::cout << "rank " << simulator_().gridView().comm().rank()
                      << " caught an exception while linearizing: " << e.what()
                      << "\n" << std::flush;
            succeeded = 0;
        }
        catch (...) {
            std::cout << "rank " << simulator_().gridView().comm().rank()
                      << " caught an exception while linearizing"
                      << "\n" << std::flush;
            succeeded = 0;
        }
        succeeded = simulator_().gridView().comm().min(succeeded);

        if (!succeeded) {
            throw NumericalProblem("A process did not succeed in linearizing the system");
        }
    }

    void finalize()
    { jacobian_->finalize(); }

    /*!
     * \brief Linearize the part of the non-linear system of equations
     *        that is associated with the auxiliary equations.
     */
    void linearizeAuxiliaryEquations()
    {
        OPM_TIMEBLOCK(linearizeAuxiliaryEquations);
        // flush possible local caches into matrix structure
        jacobian_->commit();

        auto& model = model_();
        const auto& comm = simulator_().gridView().comm();
        for (unsigned auxModIdx = 0; auxModIdx < model.numAuxiliaryModules(); ++auxModIdx) {
            bool succeeded = true;
            try {
                model.auxiliaryModule(auxModIdx)->linearize(*jacobian_, residual_);
            }
            catch (const std::exception& e) {
                succeeded = false;

                std::cout << "rank " << simulator_().gridView().comm().rank()
                          << " caught an exception while linearizing: " << e.what()
                          << "\n" << std::flush;
            }

            succeeded = comm.min(succeeded);

            if (!succeeded) {
                throw NumericalProblem("linearization of an auxiliary equation failed");
            }
        }
    }

    /*!
     * \brief Return constant reference to global Jacobian matrix backend.
     */
    const SparseMatrixAdapter& jacobian() const
    { return *jacobian_; }

    SparseMatrixAdapter& jacobian()
    { return *jacobian_; }

    /*!
     * \brief Return constant reference to global residual vector.
     */
    const GlobalEqVector& residual() const
    { return residual_; }

    GlobalEqVector& residual()
    { return residual_; }

    void setLinearizationType(LinearizationType linearizationType)
    { linearizationType_ = linearizationType; }

    const LinearizationType& getLinearizationType() const
    { return linearizationType_; }

    void updateDiscretizationParameters()
    {
        // This linearizer stores no such parameters.
    }

    void updateBoundaryConditionData()
    {
        // This linearizer stores no such data.
    }

    void updateFlowsInfo()
    {
        // This linearizer stores no such data.
    }

    /*!
     * \brief Returns the map of constraint degrees of freedom.
     *
     * (This object is only non-empty if the EnableConstraints property
     * is true.)
     */
    const std::map<unsigned, Constraints>& constraintsMap() const
    { return constraintsMap_; }

    /*!
     * \brief Return constant reference to the flowsInfo.
     */
    const auto& getFlowsInfo() const
    { return flowsInfo_; }

    /*!
     * \brief Return constant reference to the floresInfo.
     */
    const auto& getFloresInfo() const
    { return floresInfo_; }

    template <class SubDomainType>
    void resetSystem_(const SubDomainType& domain)
    {
        if (!jacobian_) {
            initFirstIteration_();
        }

        // loop over selected elements
        using GridViewType = decltype(domain.view);
        ThreadedEntityIterator<GridViewType, /*codim=*/0> threadedElemIt(domain.view);
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            const unsigned threadId = ThreadManager::threadId();
            auto elemIt = threadedElemIt.beginParallel();
            MatrixBlock zeroBlock;
            zeroBlock = 0.0;
            for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
                const Element& elem = *elemIt;
                ElementContext& elemCtx = *elementCtx_[threadId];
                elemCtx.updatePrimaryStencil(elem);
                // Set the relevant residual and Jacobian parts to zero.
                for (unsigned primaryDofIdx = 0;
                     primaryDofIdx < elemCtx.numPrimaryDof(/*timeIdx=*/0);
                     ++primaryDofIdx)
                {
                    const unsigned globI = elemCtx.globalSpaceIndex(primaryDofIdx, /*timeIdx=*/0);
                    residual_[globI] = 0.0;
                    jacobian_->clearRow(globI, 0.0);
                }
            }
        }
    }

private:
    Simulator& simulator_()
    { return *simulatorPtr_; }

    const Simulator& simulator_() const
    { return *simulatorPtr_; }

    Problem& problem_()
    { return simulator_().problem(); }

    const Problem& problem_() const
    { return simulator_().problem(); }

    Model& model_()
    { return simulator_().model(); }

    const Model& model_() const
    { return simulator_().model(); }

    const GridView& gridView_() const
    { return problem_().gridView(); }

    const ElementMapper& elementMapper_() const
    { return model_().elementMapper(); }

    const DofMapper& dofMapper_() const
    { return model_().dofMapper(); }

    void initFirstIteration_()
    {
        // initialize the BCRS matrix for the Jacobian of the residual function
        createMatrix_();

        // initialize the Jacobian matrix and the vector for the residual function
        residual_.resize(model_().numTotalDof());
        resetSystem_();

        // create the per-thread context objects
        elementCtx_.clear();
        elementCtx_.reserve(ThreadManager::maxThreads());
        for (unsigned threadId = 0; threadId != ThreadManager::maxThreads(); ++threadId) {
            elementCtx_.push_back(std::make_unique<ElementContext>(simulator_()));
        }
    }

    // Construct the BCRS matrix for the Jacobian of the residual function
    void createMatrix_()
    {
        const auto& model = model_();
        Stencil stencil(gridView_(), model_().dofMapper());

        // for the main model, find out the global indices of the neighboring degrees of
        // freedom of each primary degree of freedom
        sparsityPattern_.clear();
        sparsityPattern_.resize(model.numTotalDof());

        for (const auto& elem : elements(gridView_())) {
            stencil.update(elem);

            for (unsigned primaryDofIdx = 0; primaryDofIdx < stencil.numPrimaryDof(); ++primaryDofIdx) {
                const unsigned myIdx = stencil.globalSpaceIndex(primaryDofIdx);

                for (unsigned dofIdx = 0; dofIdx < stencil.numDof(); ++dofIdx) {
                    const unsigned neighborIdx = stencil.globalSpaceIndex(dofIdx);
                    sparsityPattern_[myIdx].insert(neighborIdx);
                }
            }
        }
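
        // For illustration (an assumed two-point flux stencil, not a statement
        // about any particular discretization): on a 1D grid with three cells,
        // each cell is coupled to itself and to its direct neighbors, so the
        // pattern would be
        //   row 0: {0, 1}
        //   row 1: {0, 1, 2}
        //   row 2: {1, 2}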

        // add the additional neighbors and degrees of freedom caused by the auxiliary
        // equations
        const std::size_t numAuxMod = model.numAuxiliaryModules();
        for (unsigned auxModIdx = 0; auxModIdx < numAuxMod; ++auxModIdx) {
            model.auxiliaryModule(auxModIdx)->addNeighbors(sparsityPattern_);
        }

        // allocate raw matrix
        jacobian_ = std::make_unique<SparseMatrixAdapter>(simulator_());

        // create matrix structure based on sparsity pattern
        jacobian_->reserve(sparsityPattern_);
    }

    // Reset the global linear system of equations.
    void resetSystem_()
    {
        residual_ = 0.0;
        // zero all matrix entries
        jacobian_->clear();
    }

    // Query the problem for all constraint degrees of freedom. Note that this
    // method is quite involved and is thus relatively slow.
    void updateConstraintsMap_()
    {
        if (!enableConstraints_()) {
            // constraints are not explicitly enabled, so we don't need to consider them!
            return;
        }

        constraintsMap_.clear();

        // loop over all elements...
        ThreadedEntityIterator<GridView, /*codim=*/0> threadedElemIt(gridView_());
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            const unsigned threadId = ThreadManager::threadId();
            ElementIterator elemIt = threadedElemIt.beginParallel();
            for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
                // create an element context (the solution-based quantities are not
                // available here!)
                const Element& elem = *elemIt;
                ElementContext& elemCtx = *elementCtx_[threadId];
                elemCtx.updateStencil(elem);

                // check if the problem wants to constrain any degree of freedom of the
                // current element. if yes, add the constraint to the map.
                for (unsigned primaryDofIdx = 0;
                     primaryDofIdx < elemCtx.numPrimaryDof(/*timeIdx=*/0);
                     ++primaryDofIdx)
                {
                    Constraints constraints;
                    elemCtx.problem().constraints(constraints,
                                                  elemCtx,
                                                  primaryDofIdx,
                                                  /*timeIdx=*/0);
                    if (constraints.isActive()) {
                        const unsigned globI = elemCtx.globalSpaceIndex(primaryDofIdx, /*timeIdx=*/0);
                        constraintsMap_[globI] = constraints;
                    }
                }
            }
        }
    }

    // linearize the whole or part of the system
    template <class SubDomainType>
    void linearize_(const SubDomainType& domain)
    {
        OPM_TIMEBLOCK(linearize_);

        // We do not call resetSystem_() here, since that will set
        // the full system to zero, not just our part.
        // Instead, that must be called before starting the linearization.

        // before the first iteration of each time step, we need to update the
        // constraints. (i.e., we assume that constraints can be time dependent, but they
        // can't depend on the solution.)
        if (model_().newtonMethod().numIterations() == 0) {
            updateConstraintsMap_();
        }

        applyConstraintsToSolution_();

        // to avoid a race condition if two threads handle an exception at the same time,
        // we use an explicit lock to control access to the exception storage object
        // amongst thread-local handlers
        std::mutex exceptionLock;

        // storage for any exception that needs to be bridged out of the
        // parallel block below. initialized to null to indicate no exception
        std::exception_ptr exceptionPtr = nullptr;

        // relinearize the elements...
        using GridViewType = decltype(domain.view);
        ThreadedEntityIterator<GridViewType, /*codim=*/0> threadedElemIt(domain.view);
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            auto elemIt = threadedElemIt.beginParallel();
            auto nextElemIt = elemIt;
            try {
                for (; !threadedElemIt.isFinished(elemIt); elemIt = nextElemIt) {
                    // give the model and the problem a chance to prefetch the data required
                    // to linearize the next element, but only if we need to consider it
                    nextElemIt = threadedElemIt.increment();
                    if (!threadedElemIt.isFinished(nextElemIt)) {
                        const auto& nextElem = *nextElemIt;
                        if (linearizeNonLocalElements ||
                            nextElem.partitionType() == Dune::InteriorEntity)
                        {
                            model_().prefetch(nextElem);
                            problem_().prefetch(nextElem);
                        }
                    }

                    const auto& elem = *elemIt;
                    if (!linearizeNonLocalElements && elem.partitionType() != Dune::InteriorEntity) {
                        continue;
                    }

                    linearizeElement_(elem);
                }
            }
            // If an exception occurs in the parallel block, it won't escape the
            // block; terminate() is called instead of a handler outside! Hence, we
            // tuck any exceptions that occur away in the pointer. If an exception
            // occurs in more than one thread at the same time, we must pick one of
            // them to be rethrown as we cannot have two active exceptions at the
            // same time. This solution essentially picks one at random. This will
            // only be a problem if two different kinds of exceptions are thrown, for
            // instance if one thread experiences a (recoverable) numerical issue
            // while another is out of memory.
            catch (...) {
                std::lock_guard<std::mutex> take(exceptionLock);
                exceptionPtr = std::current_exception();
                threadedElemIt.setFinished();
            }
        } // parallel block

        // after reduction from the parallel block, exceptionPtr will point to
        // a valid exception if one occurred in one of the threads; rethrow
        // it here to let the outer handler take care of it properly
        if (exceptionPtr) {
            std::rethrow_exception(exceptionPtr);
        }

        applyConstraintsToLinearization_();
    }
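
    // The exception-forwarding pattern used by linearize_() above, shown in
    // isolation as a sketch (not compiled here): each thread parks the first
    // exception it catches behind a mutex, and a single rethrow happens once
    // the parallel region has been left.
    //
    //     std::mutex exceptionLock;
    //     std::exception_ptr exceptionPtr = nullptr;
    //     #pragma omp parallel
    //     {
    //         try { /* thread-local work */ }
    //         catch (...) {
    //             std::lock_guard<std::mutex> guard(exceptionLock);
    //             exceptionPtr = std::current_exception();
    //         }
    //     }
    //     if (exceptionPtr) {
    //         std::rethrow_exception(exceptionPtr);
    //     }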

    // linearize an element in the interior of the process' grid partition
    template <class ElementType>
    void linearizeElement_(const ElementType& elem)
    {
        const unsigned threadId = ThreadManager::threadId();

        ElementContext& elementCtx = *elementCtx_[threadId];
        auto& localLinearizer = model_().localLinearizer(threadId);

        // the actual work of linearization is done by the local linearizer class
        localLinearizer.linearize(elementCtx, elem);

        // update the right hand side and the Jacobian matrix
        if (getPropValue<TypeTag, Properties::UseLinearizationLock>()) {
            globalMatrixMutex_.lock();
        }

        const std::size_t numPrimaryDof = elementCtx.numPrimaryDof(/*timeIdx=*/0);
        for (unsigned primaryDofIdx = 0; primaryDofIdx < numPrimaryDof; ++primaryDofIdx) {
            const unsigned globI = elementCtx.globalSpaceIndex(/*spaceIdx=*/primaryDofIdx, /*timeIdx=*/0);

            // update the right hand side
            residual_[globI] += localLinearizer.residual(primaryDofIdx);

            // update the global Jacobian matrix
            for (unsigned dofIdx = 0; dofIdx < elementCtx.numDof(/*timeIdx=*/0); ++dofIdx) {
                const unsigned globJ = elementCtx.globalSpaceIndex(/*spaceIdx=*/dofIdx, /*timeIdx=*/0);

                jacobian_->addToBlock(globJ, globI, localLinearizer.jacobian(dofIdx, primaryDofIdx));
            }
        }

        if (getPropValue<TypeTag, Properties::UseLinearizationLock>()) {
            globalMatrixMutex_.unlock();
        }
    }

    // apply the constraints to the solution. (i.e., the solution of constrained degrees
    // of freedom is set to the value of the constraint.)
    void applyConstraintsToSolution_()
    {
        if (!enableConstraints_()) {
            return;
        }

        // TODO: assuming a history size of 2 only works for Euler time discretizations!
        auto& sol = model_().solution(/*timeIdx=*/0);
        auto& oldSol = model_().solution(/*timeIdx=*/1);

        for (const auto& constraint : constraintsMap_) {
            sol[constraint.first] = constraint.second;
            oldSol[constraint.first] = constraint.second;
        }
    }

    // apply the constraints to the linearization. (i.e., for constrained degrees of
    // freedom the Jacobian matrix maps to identity and the residual is zero)
    void applyConstraintsToLinearization_()
    {
        if (!enableConstraints_()) {
            return;
        }

        for (const auto& constraint : constraintsMap_) {
            // reset the row of the Jacobian matrix and put an identity block
            // on its main diagonal
            jacobian_->clearRow(constraint.first, Scalar(1.0));

            // make the right-hand side of constraint DOFs zero
            residual_[constraint.first] = 0.0;
        }
    }

    static bool enableConstraints_()
    { return getPropValue<TypeTag, Properties::EnableConstraints>(); }

    Simulator* simulatorPtr_{};
    std::vector<std::unique_ptr<ElementContext>> elementCtx_;

    // The constraint equations (only non-empty if the
    // EnableConstraints property is true)
    std::map<unsigned, Constraints> constraintsMap_;

    struct FlowInfo
    {
        int faceId;
        VectorBlock flow;
        unsigned int nncId;
    };
    SparseTable<FlowInfo> flowsInfo_;
    SparseTable<FlowInfo> floresInfo_;

    // the Jacobian matrix
    std::unique_ptr<SparseMatrixAdapter> jacobian_;

    // the right-hand side
    GlobalEqVector residual_;

    LinearizationType linearizationType_;

    std::mutex globalMatrixMutex_;

    std::vector<std::set<unsigned int>> sparsityPattern_;

    // Simple domain object used for full-domain linearization; it allows
    // us to have the same interface for sub-domain and full-domain work.
    struct FullDomain
    {
        explicit FullDomain(const GridView& v) : view(v) {}

        GridView view;
        std::vector<bool> interior; // Should remain empty.
    };
    // Held by pointer since construction must be deferred: the GridView
    // member is only available once init() has been called.
    std::unique_ptr<FullDomain> fullDomain_;
};

} // namespace Opm

#endif