fvbaselinearizer.hh
// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
/*!
 * \file
 *
 * \copydoc Opm::FvBaseLinearizer
 */
#ifndef EWOMS_FV_BASE_LINEARIZER_HH
#define EWOMS_FV_BASE_LINEARIZER_HH

#include "fvbaseproperties.hh"
#include "linearizationtype.hh"

#include <opm/common/Exceptions.hpp>
#include <opm/common/TimingMacros.hpp>
#include <opm/grid/utility/SparseTable.hpp>

#include <opm/material/common/MathToolbox.hpp>

#include <opm/models/parallel/threadmanager.hh>
#include <opm/models/parallel/threadedentityiterator.hh>

#include <dune/common/version.hh>
#include <dune/common/fvector.hh>
#include <dune/common/fmatrix.hh>

#include <cstddef>
#include <exception> // current_exception, rethrow_exception
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <thread>
#include <type_traits>
#include <vector>

namespace Opm {

// forward declarations
template<class TypeTag>
class EcfvDiscretization;

/*!
 * \ingroup FiniteVolumeDiscretizations
 *
 * \brief The common code for the linearizers of non-linear systems of equations
 *
 * This class assumes that the system of equations to be linearized stems from a
 * model that uses a finite volume scheme for spatial discretization and an Euler
 * scheme for time discretization.
 */
template<class TypeTag>
class FvBaseLinearizer
{
    using Model = GetPropType<TypeTag, Properties::Model>;
    using Problem = GetPropType<TypeTag, Properties::Problem>;
    using Simulator = GetPropType<TypeTag, Properties::Simulator>;
    using GridView = GetPropType<TypeTag, Properties::GridView>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using Evaluation = GetPropType<TypeTag, Properties::Evaluation>;
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
    using Stencil = GetPropType<TypeTag, Properties::Stencil>;
    using ThreadManager = GetPropType<TypeTag, Properties::ThreadManager>;

    using GlobalEqVector = GetPropType<TypeTag, Properties::GlobalEqVector>;
    using SparseMatrixAdapter = GetPropType<TypeTag, Properties::SparseMatrixAdapter>;
    using Constraints = GetPropType<TypeTag, Properties::Constraints>;
    using ElementMapper = GetPropType<TypeTag, Properties::ElementMapper>;
    using DofMapper = GetPropType<TypeTag, Properties::DofMapper>;

    using Toolbox = MathToolbox<Evaluation>;

    using Element = typename GridView::template Codim<0>::Entity;
    using ElementIterator = typename GridView::template Codim<0>::Iterator;

    using Vector = GlobalEqVector;

    using IstlMatrix = typename SparseMatrixAdapter::IstlMatrix;

    enum { numEq = getPropValue<TypeTag, Properties::NumEq>() };
    enum { historySize = getPropValue<TypeTag, Properties::TimeDiscHistorySize>() };

    using MatrixBlock = typename SparseMatrixAdapter::MatrixBlock;
    using VectorBlock = Dune::FieldVector<Scalar, numEq>;

    static const bool linearizeNonLocalElements = getPropValue<TypeTag, Properties::LinearizeNonLocalElements>();

    // copying the linearizer is not a good idea
    FvBaseLinearizer(const FvBaseLinearizer&) = delete;

public:
    FvBaseLinearizer()
        : jacobian_()
    {
        simulatorPtr_ = nullptr;
    }

    ~FvBaseLinearizer()
    {
        for (auto* ctx : elementCtx_)
            delete ctx;
    }

    /*!
     * \brief Register all run-time parameters for the Jacobian linearizer.
     */
    static void registerParameters()
    { }

    /*!
     * \brief Initialize the linearizer.
     *
     * At this point we can assume that all objects in the simulator
     * have been allocated. We cannot assume that they are fully
     * initialized, though.
     */
    void init(Simulator& simulator)
    {
        simulatorPtr_ = &simulator;
        eraseMatrix();
        for (auto* ctx : elementCtx_)
            delete ctx;
        elementCtx_.clear();
        fullDomain_ = std::make_unique<FullDomain>(simulator.gridView());
    }

    /*!
     * \brief Causes the Jacobian matrix to be recreated from scratch before the
     *        next iteration.
     *
     * This method is usually called if the sparsity pattern has changed for some
     * reason (e.g. by modifications of the grid or changes of the auxiliary
     * equations).
     */
    void eraseMatrix()
    {
        jacobian_.reset();
    }

    /*!
     * \brief Linearize the full system of non-linear equations.
     *
     * This means the spatial domain plus all auxiliary equations.
     */
    void linearize()
    {
        linearizeDomain();
        linearizeAuxiliaryEquations();
    }

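    // A minimal usage sketch for the methods above (illustrative only; `sim`, `x`
    // and `solve()` are placeholders, not part of this header). A Newton iteration
    // would typically do:
    //
    //     FvBaseLinearizer<TypeTag> linearizer;
    //     linearizer.init(sim);       // once, after the simulator is fully set up
    //     linearizer.linearize();     // assemble the Jacobian and the residual
    //     linearizer.finalize();      // flush the matrix backend
    //     solve(linearizer.jacobian(), x, linearizer.residual());
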
    /*!
     * \brief Linearize the part of the non-linear system of equations that is
     *        associated with the spatial domain.
     *
     * That means that the global Jacobian of the residual is assembled and the
     * residual is evaluated for the current solution.
     *
     * The current state of affairs (esp. the previous and the current solutions)
     * is represented by the model object.
     */
    void linearizeDomain()
    {
        linearizeDomain(*fullDomain_);
    }

    template <class SubDomainType>
    void linearizeDomain(const SubDomainType& domain)
    {
        OPM_TIMEBLOCK(linearizeDomain);
        // we defer the initialization of the Jacobian matrix until here because the
        // auxiliary modules usually assume the problem, model and grid to be fully
        // initialized...
        if (!jacobian_)
            initFirstIteration_();

        // Called here because it is no longer called from linearize_().
        if (static_cast<std::size_t>(domain.view.size(0)) == model_().numTotalDof()) {
            // We are on the full domain.
            resetSystem_();
        }
        else {
            resetSystem_(domain);
        }

        int succeeded;
        try {
            linearize_(domain);
            succeeded = 1;
        }
        catch (const std::exception& e) {
            std::cout << "rank " << simulator_().gridView().comm().rank()
                      << " caught an exception while linearizing: " << e.what()
                      << "\n" << std::flush;
            succeeded = 0;
        }
        catch (...) {
            std::cout << "rank " << simulator_().gridView().comm().rank()
                      << " caught an exception while linearizing"
                      << "\n" << std::flush;
            succeeded = 0;
        }
        succeeded = simulator_().gridView().comm().min(succeeded);

        if (!succeeded)
            throw NumericalProblem("A process did not succeed in linearizing the system");
    }

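    // Worked example of the collective reduction above: with four ranks where only
    // rank 2 throws, the local flags are {1, 1, 0, 1}. After comm().min(succeeded)
    // every rank holds 0, so *all* ranks throw NumericalProblem together and no
    // rank deadlocks waiting for the others in a later collective call.
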
    void finalize()
    { jacobian_->finalize(); }

    /*!
     * \brief Linearize the part of the non-linear system of equations that is
     *        associated with the auxiliary equations.
     */
    void linearizeAuxiliaryEquations()
    {
        OPM_TIMEBLOCK(linearizeAuxiliaryEquations);
        // flush possible local caches into the matrix structure
        jacobian_->commit();

        auto& model = model_();
        const auto& comm = simulator_().gridView().comm();
        for (unsigned auxModIdx = 0; auxModIdx < model.numAuxiliaryModules(); ++auxModIdx) {
            bool succeeded = true;
            try {
                model.auxiliaryModule(auxModIdx)->linearize(*jacobian_, residual_);
            }
            catch (const std::exception& e) {
                succeeded = false;

                std::cout << "rank " << simulator_().gridView().comm().rank()
                          << " caught an exception while linearizing: " << e.what()
                          << "\n" << std::flush;
            }

            succeeded = comm.min(succeeded);

            if (!succeeded)
                throw NumericalProblem("linearization of an auxiliary equation failed");
        }
    }

    /*!
     * \brief Return constant reference to global Jacobian matrix backend.
     */
    const SparseMatrixAdapter& jacobian() const
    { return *jacobian_; }

    SparseMatrixAdapter& jacobian()
    { return *jacobian_; }

    /*!
     * \brief Return constant reference to global residual vector.
     */
    const GlobalEqVector& residual() const
    { return residual_; }

    GlobalEqVector& residual()
    { return residual_; }

    void setLinearizationType(LinearizationType linearizationType)
    {
        linearizationType_ = linearizationType;
    }

    const LinearizationType& getLinearizationType() const
    {
        return linearizationType_;
    }

    void updateDiscretizationParameters()
    {
        // This linearizer stores no such parameters.
    }

    void updateBoundaryConditionData()
    {
        // This linearizer stores no such data.
    }

    void updateFlowsInfo()
    {
        // This linearizer stores no such data.
    }

    /*!
     * \brief Returns the map of constraint degrees of freedom.
     *
     * (This object is only non-empty if the EnableConstraints property is true.)
     */
    const std::map<unsigned, Constraints>& constraintsMap() const
    { return constraintsMap_; }

    /*!
     * \brief Return constant reference to the flowsInfo.
     *
     * (This object is only non-empty if the FLOWS keyword is enabled.)
     */
    const auto& getFlowsInfo() const
    { return flowsInfo_; }

    /*!
     * \brief Return constant reference to the floresInfo.
     *
     * (This object is only non-empty if the FLORES keyword is enabled.)
     */
    const auto& getFloresInfo() const
    { return floresInfo_; }

    template <class SubDomainType>
    void resetSystem_(const SubDomainType& domain)
    {
        if (!jacobian_) {
            initFirstIteration_();
        }

        // loop over selected elements
        using GridViewType = decltype(domain.view);
        ThreadedEntityIterator<GridViewType, /*codim=*/0> threadedElemIt(domain.view);
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            unsigned threadId = ThreadManager::threadId();
            auto elemIt = threadedElemIt.beginParallel();
            MatrixBlock zeroBlock;
            zeroBlock = 0.0;
            for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
                const Element& elem = *elemIt;
                ElementContext& elemCtx = *elementCtx_[threadId];
                elemCtx.updatePrimaryStencil(elem);
                // Set to zero the relevant residual and Jacobian parts.
                for (unsigned primaryDofIdx = 0;
                     primaryDofIdx < elemCtx.numPrimaryDof(/*timeIdx=*/0);
                     ++primaryDofIdx)
                {
                    unsigned globI = elemCtx.globalSpaceIndex(primaryDofIdx, /*timeIdx=*/0);
                    residual_[globI] = 0.0;
                    jacobian_->clearRow(globI, 0.0);
                }
            }
        }
    }

private:
    Simulator& simulator_()
    { return *simulatorPtr_; }
    const Simulator& simulator_() const
    { return *simulatorPtr_; }

    Problem& problem_()
    { return simulator_().problem(); }
    const Problem& problem_() const
    { return simulator_().problem(); }

    Model& model_()
    { return simulator_().model(); }
    const Model& model_() const
    { return simulator_().model(); }

    const GridView& gridView_() const
    { return problem_().gridView(); }

    const ElementMapper& elementMapper_() const
    { return model_().elementMapper(); }

    const DofMapper& dofMapper_() const
    { return model_().dofMapper(); }

    void initFirstIteration_()
    {
        // initialize the BCRS matrix for the Jacobian of the residual function
        createMatrix_();

        // initialize the Jacobian matrix and the vector for the residual function
        residual_.resize(model_().numTotalDof());
        resetSystem_();

        // create the per-thread context objects
        elementCtx_.resize(ThreadManager::maxThreads());
        for (unsigned threadId = 0; threadId != ThreadManager::maxThreads(); ++threadId)
            elementCtx_[threadId] = new ElementContext(simulator_());
    }

    // Construct the BCRS matrix for the Jacobian of the residual function
    void createMatrix_()
    {
        const auto& model = model_();
        Stencil stencil(gridView_(), model_().dofMapper());

        // for the main model, find out the global indices of the neighboring degrees of
        // freedom of each primary degree of freedom
        sparsityPattern_.clear();
        sparsityPattern_.resize(model.numTotalDof());

        for (const auto& elem : elements(gridView_())) {
            stencil.update(elem);

            for (unsigned primaryDofIdx = 0; primaryDofIdx < stencil.numPrimaryDof(); ++primaryDofIdx) {
                unsigned myIdx = stencil.globalSpaceIndex(primaryDofIdx);

                for (unsigned dofIdx = 0; dofIdx < stencil.numDof(); ++dofIdx) {
                    unsigned neighborIdx = stencil.globalSpaceIndex(dofIdx);
                    sparsityPattern_[myIdx].insert(neighborIdx);
                }
            }
        }

        // add the additional neighbors and degrees of freedom caused by the auxiliary
        // equations
        std::size_t numAuxMod = model.numAuxiliaryModules();
        for (unsigned auxModIdx = 0; auxModIdx < numAuxMod; ++auxModIdx)
            model.auxiliaryModule(auxModIdx)->addNeighbors(sparsityPattern_);

        // allocate raw matrix
        jacobian_ = std::make_unique<SparseMatrixAdapter>(simulator_());

        // create matrix structure based on sparsity pattern
        jacobian_->reserve(sparsityPattern_);
    }

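    // Worked example of the pattern built above (illustrative): on a 1D grid with
    // three cells and a two-point stencil, as used by element-centered finite
    // volumes, each cell couples to itself and its face neighbors, so
    // sparsityPattern_ ends up as
    //
    //     dof 0: {0, 1}
    //     dof 1: {0, 1, 2}
    //     dof 2: {1, 2}
    //
    // i.e. reserve() allocates a block-tridiagonal BCRS matrix with numEq-by-numEq
    // blocks per entry.
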
    // reset the global linear system of equations.
    void resetSystem_()
    {
        residual_ = 0.0;
        // zero all matrix entries
        jacobian_->clear();
    }

    // query the problem for all constraint degrees of freedom. note that this method is
    // quite involved and is thus relatively slow.
    void updateConstraintsMap_()
    {
        if (!enableConstraints_())
            // constraints are not explicitly enabled, so we don't need to consider them!
            return;

        constraintsMap_.clear();

        // loop over all elements...
        ThreadedEntityIterator<GridView, /*codim=*/0> threadedElemIt(gridView_());
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            unsigned threadId = ThreadManager::threadId();
            ElementIterator elemIt = threadedElemIt.beginParallel();
            for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
                // create an element context (the solution-based quantities are not
                // available here!)
                const Element& elem = *elemIt;
                ElementContext& elemCtx = *elementCtx_[threadId];
                elemCtx.updateStencil(elem);

                // check if the problem wants to constrain any degree of freedom of the
                // current element. if yes, add the constraint to the map.
                for (unsigned primaryDofIdx = 0;
                     primaryDofIdx < elemCtx.numPrimaryDof(/*timeIdx=*/0);
                     ++primaryDofIdx)
                {
                    Constraints constraints;
                    elemCtx.problem().constraints(constraints,
                                                  elemCtx,
                                                  primaryDofIdx,
                                                  /*timeIdx=*/0);
                    if (constraints.isActive()) {
                        unsigned globI = elemCtx.globalSpaceIndex(primaryDofIdx, /*timeIdx=*/0);
                        constraintsMap_[globI] = constraints;
                    }
                }
            }
        }
    }

    // linearize the whole or part of the system
    template <class SubDomainType>
    void linearize_(const SubDomainType& domain)
    {
        OPM_TIMEBLOCK(linearize_);

        // We do not call resetSystem_() here, since that will set
        // the full system to zero, not just our part.
        // Instead, that must be called before starting the linearization.

        // before the first iteration of each time step, we need to update the
        // constraints. (i.e., we assume that constraints can be time dependent, but they
        // can't depend on the solution.)
        if (model_().newtonMethod().numIterations() == 0)
            updateConstraintsMap_();

        applyConstraintsToSolution_();

        // to avoid a race condition if two threads handle an exception at the same time,
        // we use an explicit lock to control access to the exception storage object
        // amongst thread-local handlers
        std::mutex exceptionLock;

        // storage for any exception that needs to be bridged out of the
        // parallel block below. initialized to null to indicate no exception
        std::exception_ptr exceptionPtr = nullptr;

        // relinearize the elements...
        using GridViewType = decltype(domain.view);
        ThreadedEntityIterator<GridViewType, /*codim=*/0> threadedElemIt(domain.view);
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            auto elemIt = threadedElemIt.beginParallel();
            auto nextElemIt = elemIt;
            try {
                for (; !threadedElemIt.isFinished(elemIt); elemIt = nextElemIt) {
                    // give the model and the problem a chance to prefetch the data required
                    // to linearize the next element, but only if we need to consider it
                    nextElemIt = threadedElemIt.increment();
                    if (!threadedElemIt.isFinished(nextElemIt)) {
                        const auto& nextElem = *nextElemIt;
                        if (linearizeNonLocalElements
                            || nextElem.partitionType() == Dune::InteriorEntity)
                        {
                            model_().prefetch(nextElem);
                            problem_().prefetch(nextElem);
                        }
                    }

                    const auto& elem = *elemIt;
                    if (!linearizeNonLocalElements && elem.partitionType() != Dune::InteriorEntity)
                        continue;

                    linearizeElement_(elem);
                }
            }
            // If an exception occurs in the parallel block, it won't escape the
            // block; terminate() is called instead of a handler outside! hence, we
            // tuck any exceptions that occur away in the pointer. If an exception
            // occurs in more than one thread at the same time, we must pick one of
            // them to be rethrown as we cannot have two active exceptions at the
            // same time. This solution essentially picks one at random. This will
            // only be a problem if two different kinds of exceptions are thrown, for
            // instance if one thread experiences a (recoverable) numerical issue
            // while another is out of memory.
            catch (...) {
                std::lock_guard<std::mutex> take(exceptionLock);
                exceptionPtr = std::current_exception();
                threadedElemIt.setFinished();
            }
        } // parallel block

        // after reduction from the parallel block, exceptionPtr will point to
        // a valid exception if one occurred in one of the threads; rethrow
        // it here to let the outer handler take care of it properly
        if (exceptionPtr) {
            std::rethrow_exception(exceptionPtr);
        }

        applyConstraintsToLinearization_();
    }

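    // The exception-bridging pattern above, reduced to a minimal standalone sketch
    // (illustrative; `work()` is a placeholder):
    //
    //     std::mutex lock;
    //     std::exception_ptr eptr = nullptr;
    //     #pragma omp parallel
    //     {
    //         try { work(); }
    //         catch (...) {
    //             std::lock_guard<std::mutex> g(lock);
    //             eptr = std::current_exception();  // capture inside the worker
    //         }
    //     }
    //     if (eptr)
    //         std::rethrow_exception(eptr);         // rethrow on the calling thread
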
    // linearize an element in the interior of the process' grid partition
    template <class ElementType>
    void linearizeElement_(const ElementType& elem)
    {
        unsigned threadId = ThreadManager::threadId();

        ElementContext* elementCtx = elementCtx_[threadId];
        auto& localLinearizer = model_().localLinearizer(threadId);

        // the actual work of linearization is done by the local linearizer class
        localLinearizer.linearize(*elementCtx, elem);

        // update the right hand side and the Jacobian matrix
        if (getPropValue<TypeTag, Properties::UseLinearizationLock>())
            globalMatrixMutex_.lock();

        std::size_t numPrimaryDof = elementCtx->numPrimaryDof(/*timeIdx=*/0);
        for (unsigned primaryDofIdx = 0; primaryDofIdx < numPrimaryDof; ++primaryDofIdx) {
            unsigned globI = elementCtx->globalSpaceIndex(/*spaceIdx=*/primaryDofIdx, /*timeIdx=*/0);

            // update the right hand side
            residual_[globI] += localLinearizer.residual(primaryDofIdx);

            // update the global Jacobian matrix
            for (unsigned dofIdx = 0; dofIdx < elementCtx->numDof(/*timeIdx=*/0); ++dofIdx) {
                unsigned globJ = elementCtx->globalSpaceIndex(/*spaceIdx=*/dofIdx, /*timeIdx=*/0);

                jacobian_->addToBlock(globJ, globI, localLinearizer.jacobian(dofIdx, primaryDofIdx));
            }
        }

        if (getPropValue<TypeTag, Properties::UseLinearizationLock>())
            globalMatrixMutex_.unlock();
    }

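    // Note on the lock above: with element-centered discretizations each element
    // owns its primary DOFs, so the scatter into residual_ and jacobian_ is
    // race-free. Discretizations where neighboring elements share degrees of
    // freedom (e.g. vertex-centered schemes) can have two threads write to the
    // same global rows concurrently; the UseLinearizationLock property serializes
    // the scatter phase for such cases.
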
    // apply the constraints to the solution. (i.e., the solution of constraint degrees
    // of freedom is set to the value of the constraint.)
    void applyConstraintsToSolution_()
    {
        if (!enableConstraints_())
            return;

        // TODO: assuming a history size of 2 only works for Euler time discretizations!
        auto& sol = model_().solution(/*timeIdx=*/0);
        auto& oldSol = model_().solution(/*timeIdx=*/1);

        for (const auto& [dofIdx, constraint] : constraintsMap_) {
            sol[dofIdx] = constraint;
            oldSol[dofIdx] = constraint;
        }
    }

    // apply the constraints to the linearization. (i.e., for constrained degrees of
    // freedom the Jacobian matrix maps to identity and the residual is zero)
    void applyConstraintsToLinearization_()
    {
        if (!enableConstraints_())
            return;

        for (const auto& constraint : constraintsMap_) {
            unsigned constraintDofIdx = constraint.first;

            // reset the row of the Jacobian matrix and put an identity block
            // on its main diagonal
            jacobian_->clearRow(constraintDofIdx, Scalar(1.0));

            // make the right-hand side of constraint DOFs zero
            residual_[constraintDofIdx] = 0.0;
        }
    }

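    // Worked example (illustrative): for two DOFs where DOF 0 is constrained, the
    // two steps above turn the linearized system
    //
    //     [ J00 J01 ] [ dx0 ]   [ r0 ]         [ I   0   ] [ dx0 ]   [ 0  ]
    //     [ J10 J11 ] [ dx1 ] = [ r1 ]  into   [ J10 J11 ] [ dx1 ] = [ r1 ]
    //
    // so the Newton update dx0 is forced to zero and DOF 0 keeps the value that
    // applyConstraintsToSolution_() wrote into the solution beforehand.
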
    static bool enableConstraints_()
    { return getPropValue<TypeTag, Properties::EnableConstraints>(); }

    Simulator* simulatorPtr_;
    std::vector<ElementContext*> elementCtx_;

    // The constraint equations (only non-empty if the
    // EnableConstraints property is true)
    std::map<unsigned, Constraints> constraintsMap_;

    struct FlowInfo
    {
        int faceId;
        VectorBlock flow;
        unsigned int nncId;
    };
    SparseTable<FlowInfo> flowsInfo_;
    SparseTable<FlowInfo> floresInfo_;

    // the Jacobian matrix
    std::unique_ptr<SparseMatrixAdapter> jacobian_;

    // the right-hand side
    GlobalEqVector residual_;

    LinearizationType linearizationType_;

    std::mutex globalMatrixMutex_;

    std::vector<std::set<unsigned int>> sparsityPattern_;

    struct FullDomain
    {
        explicit FullDomain(const GridView& v) : view(v) {}
        GridView view;
        std::vector<bool> interior; // Should remain empty.
    };
    // Simple domain object used for full-domain linearization; it allows
    // us to have the same interface for sub-domain and full-domain work.
    // Held by pointer since construction must be deferred until the
    // GridView is available.
    std::unique_ptr<FullDomain> fullDomain_;
};

} // namespace Opm

#endif