fvbaselinearizer.hh
// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
#ifndef EWOMS_FV_BASE_LINEARIZER_HH
#define EWOMS_FV_BASE_LINEARIZER_HH

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/common/version.hh>

#include <dune/grid/common/gridenums.hh>

#include <opm/common/Exceptions.hpp>
#include <opm/common/TimingMacros.hpp>

#include <opm/grid/utility/SparseTable.hpp>

#include <opm/material/common/MathToolbox.hpp>

#include <opm/models/discretization/common/fvbaseproperties.hh>
#include <opm/models/discretization/common/linearizationtype.hh>

#include <opm/models/parallel/gridcommhandles.hh>
#include <opm/models/parallel/threadedentityiterator.hh>
#include <opm/models/parallel/threadmanager.hpp>

#include <cstddef>
#include <exception>   // current_exception, rethrow_exception
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <vector>
namespace Opm {

// forward declarations
template<class TypeTag>
class EcfvDiscretization;

/*!
 * \brief The common code for the linearizers of non-linear systems of equations.
 */
template<class TypeTag>
class FvBaseLinearizer
{
    // type aliases obtained from the property system; this list is
    // reconstructed from the types used throughout this class
    using Model = GetPropType<TypeTag, Properties::Model>;
    using Problem = GetPropType<TypeTag, Properties::Problem>;
    using Simulator = GetPropType<TypeTag, Properties::Simulator>;
    using GridView = GetPropType<TypeTag, Properties::GridView>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using Evaluation = GetPropType<TypeTag, Properties::Evaluation>;
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
    using Stencil = GetPropType<TypeTag, Properties::Stencil>;
    using ElementMapper = GetPropType<TypeTag, Properties::ElementMapper>;
    using DofMapper = GetPropType<TypeTag, Properties::DofMapper>;
    using GlobalEqVector = GetPropType<TypeTag, Properties::GlobalEqVector>;
    using SparseMatrixAdapter = GetPropType<TypeTag, Properties::SparseMatrixAdapter>;
    using Constraints = GetPropType<TypeTag, Properties::Constraints>;

    using Toolbox = MathToolbox<Evaluation>;

    using Element = typename GridView::template Codim<0>::Entity;
    using ElementIterator = typename GridView::template Codim<0>::Iterator;

    using Vector = GlobalEqVector;

    using IstlMatrix = typename SparseMatrixAdapter::IstlMatrix;

    enum { numEq = getPropValue<TypeTag, Properties::NumEq>() };
    enum { historySize = getPropValue<TypeTag, Properties::TimeDiscHistorySize>() };

    using MatrixBlock = typename SparseMatrixAdapter::MatrixBlock;
    using VectorBlock = Dune::FieldVector<Scalar, numEq>;

    static constexpr bool linearizeNonLocalElements =
        getPropValue<TypeTag, Properties::LinearizeNonLocalElements>();

public:
    FvBaseLinearizer() = default;

    // copying the linearizer is not a good idea
    FvBaseLinearizer(const FvBaseLinearizer&) = delete;

    /*!
     * \brief Register all run-time parameters for the Jacobian linearizer.
     */
    static void registerParameters()
    {}

    /*!
     * \brief Initialize the linearizer.
     */
    void init(Simulator& simulator)
    {
        simulatorPtr_ = &simulator;
        eraseMatrix();
        elementCtx_.clear();
        fullDomain_ = std::make_unique<FullDomain>(simulator.gridView());
    }
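
    // Illustrative sketch of the intended call sequence (assumes a
    // hypothetical `linearizer` instance and a fully set-up `simulator`;
    // not code from this header):
    //
    //     linearizer.init(simulator);
    //     linearizer.linearize();           // assemble Jacobian and residual
    //     linearizer.finalize();
    //     auto& J = linearizer.jacobian();  // hand off to the linear solver
    //     auto& r = linearizer.residual();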

    /*!
     * \brief Causes the Jacobian matrix to be recreated from scratch before
     *        the next iteration.
     */
    void eraseMatrix()
    {
        jacobian_.reset();
    }

    /*!
     * \brief Linearize the full system of non-linear equations.
     *
     * This linearizes the spatial domain and all auxiliary equations.
     */
    void linearize()
    {
        linearizeDomain();
        linearizeAuxiliaryEquations();
    }

    /*!
     * \brief Linearize the part of the non-linear system of equations that
     *        is associated with the spatial domain.
     */
    void linearizeDomain()
    {
        linearizeDomain(*fullDomain_);
    }

    template <class SubDomainType>
    void linearizeDomain(const SubDomainType& domain, bool isNlddLocalSolve = false)
    {
        OPM_TIMEBLOCK(linearizeDomain);
        // we defer the initialization of the Jacobian matrix until here because the
        // auxiliary modules usually assume the problem, model and grid to be fully
        // initialized...
        if (!jacobian_) {
            initFirstIteration_();
        }

        // Called here because it is no longer called from linearize_().
        if (isNlddLocalSolve) {
            resetSystem_(domain);
        }
        else {
            resetSystem_();
        }

        int succeeded;
        try {
            linearize_(domain);
            succeeded = 1;
        }
        catch (const std::exception& e) {
            std::cout << "rank " << simulator_().gridView().comm().rank()
                      << " caught an exception while linearizing: " << e.what()
                      << "\n" << std::flush;
            succeeded = 0;
        }
        catch (...) {
            std::cout << "rank " << simulator_().gridView().comm().rank()
                      << " caught an exception while linearizing"
                      << "\n" << std::flush;
            succeeded = 0;
        }
        succeeded = simulator_().gridView().comm().min(succeeded);

        if (!succeeded) {
            throw NumericalProblem("A process did not succeed in linearizing the system");
        }
    }
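
    // Note: any SubDomainType works here as long as it exposes a grid view
    // member named `view` (compare the FullDomain helper at the bottom of
    // this class). Hypothetical NLDD-style local solve:
    //
    //     linearizer.linearizeDomain(subDomain, /*isNlddLocalSolve=*/true);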

    void finalize()
    { jacobian_->finalize(); }

    /*!
     * \brief Linearize the part of the non-linear system of equations that
     *        is associated with the auxiliary equations.
     */
    void linearizeAuxiliaryEquations()
    {
        OPM_TIMEBLOCK(linearizeAuxiliaryEquations);
        // flush possible local caches into the matrix structure
        jacobian_->commit();

        auto& model = model_();
        const auto& comm = simulator_().gridView().comm();
        for (unsigned auxModIdx = 0; auxModIdx < model.numAuxiliaryModules(); ++auxModIdx) {
            bool succeeded = true;
            try {
                model.auxiliaryModule(auxModIdx)->linearize(*jacobian_, residual_);
            }
            catch (const std::exception& e) {
                succeeded = false;

                std::cout << "rank " << simulator_().gridView().comm().rank()
                          << " caught an exception while linearizing: " << e.what()
                          << "\n" << std::flush;
            }

            succeeded = comm.min(succeeded);

            if (!succeeded) {
                throw NumericalProblem("linearization of an auxiliary equation failed");
            }
        }
    }

    /*!
     * \brief Return a constant reference to the global Jacobian matrix backend.
     */
    const SparseMatrixAdapter& jacobian() const
    { return *jacobian_; }

    SparseMatrixAdapter& jacobian()
    { return *jacobian_; }

    /*!
     * \brief Return a constant reference to the global residual vector.
     */
    const GlobalEqVector& residual() const
    { return residual_; }

    GlobalEqVector& residual()
    { return residual_; }
    // Debugging stubs: these do nothing by default but provide hooks for
    // inspecting and dumping the linear system.
    void printVector(GlobalEqVector&, const char* /*name*/ = "x")
    {}

    void printResidual(const char* name = "r")
    { printVector(residual_, name); }

    void printSparsity(const char* /*name*/ = "s")
    {}

    void printNonzeros(const char* /*name*/ = "d")
    {}

    void printJacobian()
    {}

    void exportSystem(int /*idx*/, const char* /*tag*/, const char* /*path*/ = "export")
    {}

    void exportVector(GlobalEqVector& x, const char* /*tag*/ = "", const char* /*name*/ = "export/x")
    { std::cout << "n = " << x.dim() << "\n"; }

    void exportSparsity(const char* /*path*/ = ".")
    {}

    void exportNonzeros(const char* /*tag*/ = "", const char* /*path*/ = ".")
    {}

    void setLinearizationType(LinearizationType linearizationType)
    { linearizationType_ = linearizationType; }

    const LinearizationType& getLinearizationType() const
    { return linearizationType_; }

    void updateDiscretizationParameters()
    {
        // This linearizer stores no such parameters.
    }

    void updateBoundaryConditionData()
    {
        // This linearizer stores no such data.
    }

    void updateFlowsInfo()
    {
        // This linearizer stores no such data.
    }

    /*!
     * \brief Returns the map of constraint degrees of freedom.
     */
    const std::map<unsigned, Constraints>& constraintsMap() const
    { return constraintsMap_; }

    /*!
     * \brief Return a constant reference to the flowsInfo.
     */
    const auto& getFlowsInfo() const
    { return flowsInfo_; }

    /*!
     * \brief Return a constant reference to the floresInfo.
     */
    const auto& getFloresInfo() const
    { return floresInfo_; }

    template <class SubDomainType>
    void resetSystem_(const SubDomainType& domain)
    {
        if (!jacobian_) {
            initFirstIteration_();
        }

        // loop over the selected elements
        using GridViewType = decltype(domain.view);
        ThreadedEntityIterator<GridViewType, /*codim=*/0> threadedElemIt(domain.view);
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            const unsigned threadId = ThreadManager::threadId();
            auto elemIt = threadedElemIt.beginParallel();
            for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
                const Element& elem = *elemIt;
                ElementContext& elemCtx = *elementCtx_[threadId];
                elemCtx.updatePrimaryStencil(elem);
                // zero the relevant residual and Jacobian parts
                for (unsigned primaryDofIdx = 0;
                     primaryDofIdx < elemCtx.numPrimaryDof(/*timeIdx=*/0);
                     ++primaryDofIdx)
                {
                    const unsigned globI = elemCtx.globalSpaceIndex(primaryDofIdx, /*timeIdx=*/0);
                    residual_[globI] = 0.0;
                    jacobian_->clearRow(globI, 0.0);
                }
            }
        }
    }

private:
    Simulator& simulator_()
    { return *simulatorPtr_; }

    const Simulator& simulator_() const
    { return *simulatorPtr_; }

    Problem& problem_()
    { return simulator_().problem(); }

    const Problem& problem_() const
    { return simulator_().problem(); }

    Model& model_()
    { return simulator_().model(); }

    const Model& model_() const
    { return simulator_().model(); }

    const GridView& gridView_() const
    { return problem_().gridView(); }

    const ElementMapper& elementMapper_() const
    { return model_().elementMapper(); }

    const DofMapper& dofMapper_() const
    { return model_().dofMapper(); }

    void initFirstIteration_()
    {
        // initialize the BCRS matrix for the Jacobian of the residual function
        createMatrix_();

        // initialize the Jacobian matrix and the vector for the residual function
        residual_.resize(model_().numTotalDof());
        resetSystem_();

        // create the per-thread context objects
        elementCtx_.clear();
        elementCtx_.reserve(ThreadManager::maxThreads());
        for (unsigned threadId = 0; threadId != ThreadManager::maxThreads(); ++threadId) {
            elementCtx_.push_back(std::make_unique<ElementContext>(simulator_()));
        }
    }

    // Construct the BCRS matrix for the Jacobian of the residual function.
    void createMatrix_()
    {
        const auto& model = model_();
        Stencil stencil(gridView_(), model_().dofMapper());

        // for the main model, find out the global indices of the neighboring degrees of
        // freedom of each primary degree of freedom
        sparsityPattern_.clear();
        sparsityPattern_.resize(model.numTotalDof());

        for (const auto& elem : elements(gridView_())) {
            stencil.update(elem);

            for (unsigned primaryDofIdx = 0; primaryDofIdx < stencil.numPrimaryDof(); ++primaryDofIdx) {
                const unsigned myIdx = stencil.globalSpaceIndex(primaryDofIdx);

                for (unsigned dofIdx = 0; dofIdx < stencil.numDof(); ++dofIdx) {
                    const unsigned neighborIdx = stencil.globalSpaceIndex(dofIdx);
                    sparsityPattern_[myIdx].insert(neighborIdx);
                }
            }
        }

        // add the additional neighbors and degrees of freedom caused by the auxiliary
        // equations
        const std::size_t numAuxMod = model.numAuxiliaryModules();
        for (unsigned auxModIdx = 0; auxModIdx < numAuxMod; ++auxModIdx) {
            model.auxiliaryModule(auxModIdx)->addNeighbors(sparsityPattern_);
        }

        // allocate the raw matrix
        jacobian_ = std::make_unique<SparseMatrixAdapter>(simulator_());

        // create the matrix structure based on the sparsity pattern
        jacobian_->reserve(sparsityPattern_);
    }
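
    // Worked example: for an element-centered FV discretization of a 1D grid
    // with three cells 0-1-2, each element's stencil contains the cell itself
    // plus its face neighbors, so the loops above produce
    //
    //     sparsityPattern_[0] = {0, 1}
    //     sparsityPattern_[1] = {0, 1, 2}
    //     sparsityPattern_[2] = {1, 2}
    //
    // i.e. a block-tridiagonal matrix of numEq x numEq blocks.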

    // Reset the global linear system of equations.
    void resetSystem_()
    {
        residual_ = 0.0;
        // zero all matrix entries
        jacobian_->clear();
    }

    // Query the problem for all constraint degrees of freedom. Note that this
    // method is quite involved and thus relatively slow.
    void updateConstraintsMap_()
    {
        if (!enableConstraints_()) {
            // constraints are not explicitly enabled, so we do not need to consider them
            return;
        }

        constraintsMap_.clear();

        // loop over all elements...
        ThreadedEntityIterator<GridView, /*codim=*/0> threadedElemIt(gridView_());
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            const unsigned threadId = ThreadManager::threadId();
            ElementIterator elemIt = threadedElemIt.beginParallel();
            for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
                // create an element context (the solution-based quantities are not
                // available here!)
                const Element& elem = *elemIt;
                ElementContext& elemCtx = *elementCtx_[threadId];
                elemCtx.updateStencil(elem);

                // check whether the problem wants to constrain any degree of freedom
                // of the current element. If so, add the constraint to the map.
                for (unsigned primaryDofIdx = 0;
                     primaryDofIdx < elemCtx.numPrimaryDof(/*timeIdx=*/0);
                     ++primaryDofIdx)
                {
                    Constraints constraints;
                    elemCtx.problem().constraints(constraints,
                                                  elemCtx,
                                                  primaryDofIdx,
                                                  /*timeIdx=*/0);
                    if (constraints.isActive()) {
                        const unsigned globI = elemCtx.globalSpaceIndex(primaryDofIdx, /*timeIdx=*/0);
                        constraintsMap_[globI] = constraints;
                    }
                }
            }
        }
    }

    // linearize the whole system or a part of it
    template <class SubDomainType>
    void linearize_(const SubDomainType& domain)
    {
        OPM_TIMEBLOCK(linearize_);

        // We do not call resetSystem_() here, since that would set the full
        // system to zero, not just our part. Instead, it must be called
        // before starting the linearization.

        // before the first iteration of each time step, we need to update the
        // constraints. (i.e., we assume that constraints can be time dependent,
        // but they cannot depend on the solution.)
        if (model_().newtonMethod().numIterations() == 0) {
            updateConstraintsMap_();
        }

        applyConstraintsToSolution_();

        // to avoid a race condition if two threads handle an exception at the same
        // time, we use an explicit lock to control access to the exception storage
        // object amongst thread-local handlers
        std::mutex exceptionLock;

        // storage for any exception that needs to be bridged out of the
        // parallel block below. initialized to null to indicate no exception
        std::exception_ptr exceptionPtr = nullptr;

        // relinearize the elements...
        using GridViewType = decltype(domain.view);
        ThreadedEntityIterator<GridViewType, /*codim=*/0> threadedElemIt(domain.view);
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            auto elemIt = threadedElemIt.beginParallel();
            auto nextElemIt = elemIt;
            try {
                for (; !threadedElemIt.isFinished(elemIt); elemIt = nextElemIt) {
                    // give the model and the problem a chance to prefetch the data
                    // required to linearize the next element, but only if we need
                    // to consider it
                    nextElemIt = threadedElemIt.increment();
                    if (!threadedElemIt.isFinished(nextElemIt)) {
                        const auto& nextElem = *nextElemIt;
                        if (linearizeNonLocalElements ||
                            nextElem.partitionType() == Dune::InteriorEntity)
                        {
                            model_().prefetch(nextElem);
                            problem_().prefetch(nextElem);
                        }
                    }

                    const auto& elem = *elemIt;
                    if (!linearizeNonLocalElements && elem.partitionType() != Dune::InteriorEntity) {
                        continue;
                    }

                    linearizeElement_(elem);
                }
            }
            // If an exception occurs in the parallel block, it cannot escape the
            // block: terminate() would be called instead of an outside handler.
            // Hence we tuck any exception that occurs away in the pointer. If an
            // exception occurs in more than one thread at the same time, we must
            // pick one of them to be rethrown as we cannot have two active
            // exceptions at the same time. This solution essentially picks one at
            // random. This will only be a problem if two different kinds of
            // exceptions are thrown, for instance if one thread experiences a
            // (recoverable) numerical issue while another is out of memory.
            catch (...) {
                std::lock_guard<std::mutex> take(exceptionLock);
                exceptionPtr = std::current_exception();
                threadedElemIt.setFinished();
            }
        } // parallel block

        // after reduction from the parallel block, exceptionPtr will point to
        // a valid exception if one occurred in one of the threads; rethrow
        // it here to let the outer handler take care of it properly
        if (exceptionPtr) {
            std::rethrow_exception(exceptionPtr);
        }

        applyConstraintsToLinearization_();
    }

    // linearize an element in the interior of the process's grid partition
    template <class ElementType>
    void linearizeElement_(const ElementType& elem)
    {
        const unsigned threadId = ThreadManager::threadId();

        ElementContext& elementCtx = *elementCtx_[threadId];
        auto& localLinearizer = model_().localLinearizer(threadId);

        // the actual work of linearization is done by the local linearizer class
        localLinearizer.linearize(elementCtx, elem);

        // update the right hand side and the Jacobian matrix
        if (getPropValue<TypeTag, Properties::UseLinearizationLock>()) {
            globalMatrixMutex_.lock();
        }

        const std::size_t numPrimaryDof = elementCtx.numPrimaryDof(/*timeIdx=*/0);
        for (unsigned primaryDofIdx = 0; primaryDofIdx < numPrimaryDof; ++primaryDofIdx) {
            const unsigned globI = elementCtx.globalSpaceIndex(/*spaceIdx=*/primaryDofIdx, /*timeIdx=*/0);

            // update the right hand side
            residual_[globI] += localLinearizer.residual(primaryDofIdx);

            // update the global Jacobian matrix
            for (unsigned dofIdx = 0; dofIdx < elementCtx.numDof(/*timeIdx=*/0); ++dofIdx) {
                const unsigned globJ = elementCtx.globalSpaceIndex(/*spaceIdx=*/dofIdx, /*timeIdx=*/0);

                jacobian_->addToBlock(globJ, globI, localLinearizer.jacobian(dofIdx, primaryDofIdx));
            }
        }

        if (getPropValue<TypeTag, Properties::UseLinearizationLock>()) {
            globalMatrixMutex_.unlock();
        }
    }
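
    // Each element contributes one residual block per primary DOF and one
    // numEq x numEq Jacobian block per (dofIdx, primaryDofIdx) pair of its
    // stencil. Neighboring elements share degrees of freedom, so several
    // threads may accumulate into the same global entries; the optional
    // UseLinearizationLock property serializes those writes.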

    // Apply the constraints to the solution, i.e., the solution of constrained
    // degrees of freedom is set to the value of the constraint.
    void applyConstraintsToSolution_()
    {
        if (!enableConstraints_()) {
            return;
        }

        // TODO: assuming a history size of 2 only works for Euler time discretizations!
        auto& sol = model_().solution(/*timeIdx=*/0);
        auto& oldSol = model_().solution(/*timeIdx=*/1);

        for (const auto& constraint : constraintsMap_) {
            sol[constraint.first] = constraint.second;
            oldSol[constraint.first] = constraint.second;
        }
    }

    // Apply the constraints to the linearization, i.e., for constrained degrees
    // of freedom the corresponding row of the Jacobian matrix is replaced by an
    // identity block and the residual is set to zero.
    void applyConstraintsToLinearization_()
    {
        if (!enableConstraints_()) {
            return;
        }

        for (const auto& constraint : constraintsMap_) {
            // reset the row of the Jacobian matrix and put an identity block on
            // its main diagonal
            jacobian_->clearRow(constraint.first, Scalar(1.0));

            // make the right-hand side of constrained DOFs zero
            residual_[constraint.first] = 0.0;
        }
    }
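
    // Consequence: after clearRow(i, 1.0) the i-th block row of the Jacobian
    // is zero except for an identity block on the diagonal, and residual_[i]
    // is zero. The Newton step solving J*delta = residual therefore yields
    // delta_i = 0, so a constrained DOF keeps the value prescribed by
    // applyConstraintsToSolution_().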

    static bool enableConstraints_()
    { return getPropValue<TypeTag, Properties::EnableConstraints>(); }

    Simulator* simulatorPtr_{};
    std::vector<std::unique_ptr<ElementContext>> elementCtx_;

    // the constraint equations (only non-empty if the
    // EnableConstraints property is true)
    std::map<unsigned, Constraints> constraintsMap_;

    struct FlowInfo
    {
        int faceId;
        VectorBlock flow;
        unsigned int nncId;
    };
    SparseTable<FlowInfo> flowsInfo_;
    SparseTable<FlowInfo> floresInfo_;

    // the Jacobian matrix
    std::unique_ptr<SparseMatrixAdapter> jacobian_;

    // the right-hand side
    GlobalEqVector residual_;

    LinearizationType linearizationType_;

    std::mutex globalMatrixMutex_;

    std::vector<std::set<unsigned int>> sparsityPattern_;

    // Simple domain object used for full-domain linearization. It allows us to
    // have the same interface for sub-domain and full-domain work. Stored behind
    // a pointer since its construction must be deferred until the GridView is
    // available.
    struct FullDomain
    {
        explicit FullDomain(const GridView& v) : view(v) {}
        GridView view;
        std::vector<bool> interior; // Should remain empty.
    };
    std::unique_ptr<FullDomain> fullDomain_;
};

} // namespace Opm

#endif // EWOMS_FV_BASE_LINEARIZER_HH