EclGenericWriter_impl.hpp
Go to the documentation of this file.
1// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2// vi: set et ts=4 sw=4 sts=4:
3/*
4 This file is part of the Open Porous Media project (OPM).
5
6 OPM is free software: you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation, either version 2 of the License, or
9 (at your option) any later version.
10
11 OPM is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with OPM. If not, see <http://www.gnu.org/licenses/>.
18
19 Consult the COPYING file in the top-level source directory of this
20 module for the precise wording of the license and the list of
21 copyright holders.
22*/
23#ifndef OPM_ECL_GENERIC_WRITER_IMPL_HPP
24#define OPM_ECL_GENERIC_WRITER_IMPL_HPP
25
26#include <dune/grid/common/mcmgmapper.hh>
27
28#include <opm/grid/cpgrid/LgrOutputHelpers.hpp>
29#include <opm/grid/GridHelpers.hpp>
30#include <opm/grid/utility/cartesianToCompressed.hpp>
31
32#include <opm/input/eclipse/EclipseState/EclipseState.hpp>
33#include <opm/input/eclipse/EclipseState/Grid/RegionSetMatcher.hpp>
34#include <opm/input/eclipse/EclipseState/SummaryConfig/SummaryConfig.hpp>
35
36#include <opm/input/eclipse/Schedule/Action/State.hpp>
37#include <opm/input/eclipse/Schedule/RPTConfig.hpp>
38#include <opm/input/eclipse/Schedule/Schedule.hpp>
39#include <opm/input/eclipse/Schedule/SummaryState.hpp>
40#include <opm/input/eclipse/Schedule/UDQ/UDQConfig.hpp>
41#include <opm/input/eclipse/Schedule/UDQ/UDQState.hpp>
42#include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
43#include <opm/input/eclipse/Schedule/Well/WellMatcher.hpp>
44
45#include <opm/input/eclipse/Units/UnitSystem.hpp>
46
47#include <opm/output/eclipse/EclipseIO.hpp>
48#include <opm/output/eclipse/RestartValue.hpp>
49#include <opm/output/eclipse/Summary.hpp>
50
52
53#if HAVE_MPI
55#endif
56
57#if HAVE_MPI
58#include <mpi.h>
59#endif
60
#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
72
73namespace {
/// \brief Whether two cells lie in the same vertical column with only
///        inactive cells between them.
///
/// Cells are identified by their global (Cartesian) indices using natural
/// ordering (I fastest, then J, then K).
///
/// \param cartDims          Cartesian dimensions (NX, NY, NZ) of the grid.
/// \param cartesianToActive Map from global Cartesian index to active cell
///                          index; presence of a key means the cell is active.
/// \param smallGlobalIndex  Global index of the upper cell (must be <= the
///                          lower one).
/// \param largeGlobalIndex  Global index of the lower cell.
/// \return True if both cells share (I,J), are more than one layer apart,
///         and every cell strictly between them in the column is inactive.
bool directVerticalNeighbors(const std::array<int, 3>& cartDims,
                             const std::unordered_map<int,int>& cartesianToActive,
                             int smallGlobalIndex, int largeGlobalIndex)
{
    assert(smallGlobalIndex <= largeGlobalIndex);

    // Decompose a global index into (I,J,K) assuming natural ordering.
    auto globalToIjk = [&cartDims](int gc) {
        std::array<int, 3> ijk;
        ijk[0] = gc % cartDims[0];
        gc /= cartDims[0];
        ijk[1] = gc % cartDims[1];
        ijk[2] = gc / cartDims[1];
        return ijk;
    };

    const auto ijk1 = globalToIjk(smallGlobalIndex);
    const auto ijk2 = globalToIjk(largeGlobalIndex);
    assert(ijk2[2] >= ijk1[2]);

    if (ijk1[0] != ijk2[0] || ijk1[1] != ijk2[1] || (ijk2[2] - ijk1[2]) <= 1) {
        // Different column, or the layers are directly adjacent (no gap).
        return false;
    }

    assert((largeGlobalIndex - smallGlobalIndex) % (cartDims[0] * cartDims[1]) == 0);

    // Walk the column one layer at a time; any active cell strictly between
    // the two endpoints means they are not direct vertical neighbours.
    const int layerStride = cartDims[0] * cartDims[1];
    for (int gi = smallGlobalIndex + layerStride; gi < largeGlobalIndex; gi += layerStride)
    {
        if (cartesianToActive.find(gi) != cartesianToActive.end())
        {
            return false;
        }
    }

    return true;
}
122
123std::unordered_map<std::string, Opm::data::InterRegFlowMap>
124getInterRegFlowsAsMap(const Opm::InterRegFlowMap& map)
125{
126 auto maps = std::unordered_map<std::string, Opm::data::InterRegFlowMap>{};
127
128 const auto& regionNames = map.names();
129 auto flows = map.getInterRegFlows();
130 const auto nmap = regionNames.size();
131
132 maps.reserve(nmap);
133 for (auto mapID = 0*nmap; mapID < nmap; ++mapID) {
134 maps.emplace(regionNames[mapID], std::move(flows[mapID]));
135 }
136
137 return maps;
138}
140struct EclWriteTasklet : public Opm::TaskletInterface
141{
142 Opm::Action::State actionState_;
143 Opm::WellTestState wtestState_;
144 Opm::SummaryState summaryState_;
145 Opm::UDQState udqState_;
146 Opm::EclipseIO& eclIO_;
147 int reportStepNum_;
148 std::optional<int> timeStepNum_;
149 bool isSubStep_;
150 double secondsElapsed_;
151 std::vector<Opm::RestartValue> restartValue_;
152 bool writeDoublePrecision_;
153
154 explicit EclWriteTasklet(const Opm::Action::State& actionState,
155 const Opm::WellTestState& wtestState,
156 const Opm::SummaryState& summaryState,
157 const Opm::UDQState& udqState,
158 Opm::EclipseIO& eclIO,
159 int reportStepNum,
160 std::optional<int> timeStepNum,
161 bool isSubStep,
162 double secondsElapsed,
163 std::vector<Opm::RestartValue> restartValue,
164 bool writeDoublePrecision)
165 : actionState_(actionState)
166 , wtestState_(wtestState)
167 , summaryState_(summaryState)
168 , udqState_(udqState)
169 , eclIO_(eclIO)
170 , reportStepNum_(reportStepNum)
171 , timeStepNum_(timeStepNum)
172 , isSubStep_(isSubStep)
173 , secondsElapsed_(secondsElapsed)
174 , restartValue_(std::move(restartValue))
175 , writeDoublePrecision_(writeDoublePrecision)
176 {}
177
178 // callback to eclIO serial writeTimeStep method
179 void run() override
180 {
181 if (this->restartValue_.size() == 1) {
182 this->eclIO_.writeTimeStep(this->actionState_,
183 this->wtestState_,
184 this->summaryState_,
185 this->udqState_,
186 this->reportStepNum_,
187 this->isSubStep_,
188 this->secondsElapsed_,
189 std::move(this->restartValue_.back()),
190 this->writeDoublePrecision_,
191 this->timeStepNum_);
192 }
193 else{
194 this->eclIO_.writeTimeStep(this->actionState_,
195 this->wtestState_,
196 this->summaryState_,
197 this->udqState_,
198 this->reportStepNum_,
199 this->isSubStep_,
200 this->secondsElapsed_,
201 std::move(this->restartValue_),
202 this->writeDoublePrecision_,
203 this->timeStepNum_);
204 }
205 }
206};
207
208}
209
210namespace Opm {
211
212template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
214EclGenericWriter(const Schedule& schedule,
215 const EclipseState& eclState,
216 const SummaryConfig& summaryConfig,
217 const Grid& grid,
218 const EquilGrid* equilGrid,
219 const GridView& gridView,
220 const Dune::CartesianIndexMapper<Grid>& cartMapper,
221 const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper,
222 bool enableAsyncOutput,
223 bool enableEsmry )
224 : collectOnIORank_(grid,
225 equilGrid,
226 gridView,
227 cartMapper,
228 equilCartMapper,
229 summaryConfig.fip_regions_interreg_flow())
230 , grid_ (grid)
231 , gridView_ (gridView)
232 , schedule_ (schedule)
233 , eclState_ (eclState)
234 , cartMapper_ (cartMapper)
235 , equilCartMapper_(equilCartMapper)
236 , equilGrid_ (equilGrid)
237{
238 // Make sure outputNnc_ vector has at least 1 entry in all ranks.
239 outputNnc_.resize(1);
240
241 if (this->collectOnIORank_.isIORank()) {
242 this->eclIO_ = std::make_unique<EclipseIO>
243 (this->eclState_,
244 UgGridHelpers::createEclipseGrid(*equilGrid, eclState_.getInputGrid()),
245 this->schedule_, summaryConfig, "", enableEsmry);
246 }
247
248 // create output thread if enabled and rank is I/O rank
249 // async output is enabled by default if pthread are enabled
250 int numWorkerThreads = 0;
251 if (enableAsyncOutput && collectOnIORank_.isIORank()) {
252 numWorkerThreads = 1;
253 }
254
255 this->taskletRunner_.reset(new TaskletRunner(numWorkerThreads));
256}
257
258template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
260eclIO() const
261{
262 assert(eclIO_);
263 return *eclIO_;
264}
265
266template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
268writeInit()
269{
270 if (collectOnIORank_.isIORank()) {
271 std::map<std::string, std::vector<int>> integerVectors;
272 if (collectOnIORank_.isParallel()) {
273 integerVectors.emplace("MPI_RANK", collectOnIORank_.globalRanks());
274 }
275
276 eclIO_->writeInitial(*this->outputTrans_,
277 integerVectors,
278 this->outputNnc_.front());
279 this->outputTrans_.reset();
280 }
281}
282
283template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
284void
286extractOutputTransAndNNC(const std::function<unsigned int(unsigned int)>& map)
287{
288 if (collectOnIORank_.isIORank()) {
289 constexpr bool equilGridIsCpGrid = std::is_same_v<EquilGrid, Dune::CpGrid>;
290
291 const auto levelCartMapp = this->createLevelCartMapp_<equilGridIsCpGrid>();
292 const auto levelCartToLevelCompressed = this->createCartesianToActiveMaps_<equilGridIsCpGrid>(levelCartMapp);
293 auto computeLevelIndices = this->computeLevelIndices_<equilGridIsCpGrid>();
294 auto computeLevelCartIdx = this->computeLevelCartIdx_<equilGridIsCpGrid>(levelCartMapp, *(this->equilCartMapper_));
295 auto computeLevelCartDimensions = this->computeLevelCartDimensions_<equilGridIsCpGrid>(levelCartMapp, *(this->equilCartMapper_));
296 auto computeOriginIndices = this->computeOriginIndices_<equilGridIsCpGrid>();
297
298 computeTrans_(levelCartToLevelCompressed, map, computeLevelIndices,
299 computeLevelCartIdx, computeLevelCartDimensions, computeOriginIndices);
300 exportNncStructure_(levelCartToLevelCompressed, map, computeLevelIndices, computeLevelCartIdx,
301 computeLevelCartDimensions, computeOriginIndices);
302 }
303
304#if HAVE_MPI
305 if (collectOnIORank_.isParallel()) {
306 const auto& comm = grid_.comm();
307 Parallel::MpiSerializer ser(comm);
308 ser.broadcast(Parallel::RootRank{0}, outputNnc_);
309 }
310#endif
311}
312
313template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
314bool
316isNumAquCell_(const std::size_t cartIdx) const
317{
318 const auto& numAquCell = this->eclState_.aquifer().hasNumericalAquifer()
319 ? this->eclState_.aquifer().numericalAquifers().allAquiferCellIds()
320 : std::vector<std::size_t>{};
321
322 return std::ranges::binary_search(numAquCell.begin(), numAquCell.end(), cartIdx);
323}
324
325template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
326bool
327EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
328isNumAquConn_(const std::size_t cartIdx1,
329 const std::size_t cartIdx2) const
330{
331 return isNumAquCell_(cartIdx1) || isNumAquCell_(cartIdx2);
332}
333
334template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
335template<bool equilGridIsCpGrid>
337EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
338createLevelCartMapp_() const
339{
340 if constexpr (equilGridIsCpGrid) {
341 return Opm::LevelCartesianIndexMapper<EquilGrid>(*this->equilGrid_);
342 } else {
343 return Opm::LevelCartesianIndexMapper<EquilGrid>(*equilCartMapper_); }
344}
345
346template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
347template<bool equilGridIsCpGrid>
348std::vector<std::unordered_map<int,int>>
349EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
350createCartesianToActiveMaps_(const Opm::LevelCartesianIndexMapper<EquilGrid>& levelCartMapp) const
351{
352 if constexpr (equilGridIsCpGrid) {
353 if (this->equilGrid_->maxLevel()) {
354 return Opm::Lgr::levelCartesianToLevelCompressedMaps(*this->equilGrid_, levelCartMapp); }
355 else {
356 return std::vector<std::unordered_map<int,int>>{ cartesianToCompressed(equilGrid_->size(0), UgGridHelpers::globalCell(*equilGrid_)) };
357 }
358 }
359 return std::vector<std::unordered_map<int,int>>{ cartesianToCompressed(equilGrid_->size(0), UgGridHelpers::globalCell(*equilGrid_)) };
360}
361
template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
template<bool equilGridIsCpGrid>
std::function<std::array<int,3>(int)>
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
computeLevelCartDimensions_(const Opm::LevelCartesianIndexMapper<EquilGrid>& levelCartMapp,
                            const Dune::CartesianIndexMapper<EquilGrid>& equilCartMapp) const
{
    // Build a callable mapping a level number to the Cartesian dimensions
    // (NX, NY, NZ) of that level grid.
    //
    // NOTE(review): both returned lambdas capture the reference parameters
    // by reference; the caller must keep 'levelCartMapp' / 'equilCartMapp'
    // alive for as long as the returned std::function is used (true for the
    // current caller, extractOutputTransAndNNC).
    if constexpr (equilGridIsCpGrid) {
        return [&](int level)
        {
            return levelCartMapp.cartesianDimensions(level);
        };
    }
    else {
        return [&](int level)
        {
            assert(level == 0); // refinement only supported for CpGrid for now
            return equilCartMapp.cartesianDimensions();
        };
    }
}
383
template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
template<bool equilGridIsCpGrid>
std::function<int(int, int)>
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
computeLevelCartIdx_(const Opm::LevelCartesianIndexMapper<EquilGrid>& levelCartMapp,
                     const Dune::CartesianIndexMapper<EquilGrid>& equilCartMapp) const
{
    // Build a callable mapping (level-compressed cell index, level) to the
    // Cartesian index of that cell within its level grid.
    //
    // NOTE(review): both returned lambdas capture the reference parameters
    // by reference; the caller must keep 'levelCartMapp' / 'equilCartMapp'
    // alive for as long as the returned std::function is used.
    if constexpr (equilGridIsCpGrid) {
        return [&](int levelCompressedIdx,
                   int level)
        {
            return levelCartMapp.cartesianIndex(levelCompressedIdx, level);
        };
    }
    else {
        return [&](int levelCompressedIdx,
                   int level)
        {
            assert(level == 0); // refinement only supported for CpGrid for now
            return equilCartMapp.cartesianIndex(levelCompressedIdx);
        };
    }
}
407
408template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
409template <bool equilGridIsCpGrid>
410auto
411EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
412computeLevelIndices_() const
413{
414 if constexpr (equilGridIsCpGrid) {
415 return [](const auto& intersection,
416 const auto&,
417 const auto&)
418 {
419 return std::pair{intersection.inside().getLevelElem().index(), intersection.outside().getLevelElem().index()};
420 };
421 }
422 else {
423 return [](const auto&,
424 const auto& intersectionInsideLeafIdx,
425 const auto& intersectionOutsideLeafIdx)
426 {
427 return std::pair{intersectionInsideLeafIdx, intersectionOutsideLeafIdx};
428 };
429 }
430}
431
432template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
433template <bool equilGridIsCpGrid>
434auto
435EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
436computeOriginIndices_() const
437{
438 if constexpr (equilGridIsCpGrid) {
439 return [](const auto& intersection,
440 const auto&,
441 const auto&)
442 {
443 return std::pair{intersection.inside().getOrigin().index(), intersection.outside().getOrigin().index()};
444 };
445 }
446 else {
447 return [](const auto&,
448 const auto& intersectionInsideLeafIdx,
449 const auto& intersectionOutsideLeafIdx)
450 {
451 return std::pair{intersectionInsideLeafIdx, intersectionOutsideLeafIdx};
452 };
453 }
454}
455
456template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
457void
458EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
459allocateLevelTrans_(const std::array<int,3>& levelCartDims,
460 data::Solution& levelTrans) const
461{
462 auto createLevelCellData = [&levelCartDims]() {
463 return Opm::data::CellData{
464 Opm::UnitSystem::measure::transmissibility,
465 std::vector<double>(levelCartDims[0] * levelCartDims[1] * levelCartDims[2], 0.0),
466 Opm::data::TargetType::INIT
467 };
468 };
469
470 levelTrans.clear();
471 levelTrans.emplace("TRANX", createLevelCellData());
472 levelTrans.emplace("TRANY", createLevelCellData());
473 levelTrans.emplace("TRANZ", createLevelCellData());
474}
475
template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
template<typename LevelIndicesFunction, typename OriginIndicesFunction>
void
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
computeTrans_(const std::vector<std::unordered_map<int,int>>& levelCartToLevelCompressed,
              const std::function<unsigned int(unsigned int)>& map,
              const LevelIndicesFunction& computeLevelIndices,
              const std::function<int(int, int)>& computeLevelCartIdx,
              const std::function<std::array<int,3>(int)>& computeLevelCartDims,
              const OriginIndicesFunction& computeOriginIndices) const
{
    // Fill outputTrans_ (one data::Solution per level grid) with the
    // TRANX/TRANY/TRANZ arrays for all same-level cell connections.
    // Connections across different levels and connections involving
    // numerical aquifer cells are handled as NNCs elsewhere.
    if (!outputTrans_) {
        outputTrans_ = std::make_unique<std::vector<data::Solution>>(std::vector<data::Solution>{});
    }

    using GlobalGridView = typename EquilGrid::LeafGridView;
    using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
    const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
    const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };

    // Refinement supported only for CpGrid for now.
    int maxLevel = this->equilGrid_->maxLevel();

    outputTrans_->resize(maxLevel+1); // including level zero grid

    for (int level = 0; level <= maxLevel; ++level) {
        allocateLevelTrans_(computeLevelCartDims(level), this->outputTrans_->at(level));
    }

    for (const auto& elem : elements(globalGridView)) {
        for (const auto& is : intersections(globalGridView, elem)) {
            if (!is.neighbor())
                continue; // intersection is on the domain boundary

            if ( is.inside().level() != is.outside().level() ) // Those are treated as NNCs
                continue;

            // Not 'const' because remapped if 'map' is non-null.
            unsigned c1 = globalElemMapper.index(is.inside());
            unsigned c2 = globalElemMapper.index(is.outside());

            if (c1 > c2)
                continue; // we only need to handle each connection once, thank you.

            int level = is.inside().level();

            // For CpGrid with LGRs, level*Idx and c* do not coincide.
            const auto& [levelInIdx, levelOutIdx] = computeLevelIndices(is, c1, c2);

            const int levelCartIdxIn = computeLevelCartIdx(levelInIdx, level);
            const int levelCartIdxOut = computeLevelCartIdx(levelOutIdx, level);

            // For CpGrid with LGRs, the origin cell index refers to the coarsest
            // ancestor cell when the cell is refined. For cells not involved in
            // any refinement, it corresponds to the geometrically equivalent
            // cell in the level-zero grid.
            const auto [originInIdx, originOutIdx] = computeOriginIndices(is, c1, c2);

            const auto originCartIdxIn = computeLevelCartIdx(originInIdx, /* level = */ 0);
            const auto originCartIdxOut = computeLevelCartIdx(originOutIdx, /* level = */ 0);

            // For level-zero grid, level Cartesian indices coincide with the grid Cartesian indices.
            if (isNumAquCell_(originCartIdxIn) || isNumAquCell_(originCartIdxOut)) {
                // Connections involving numerical aquifers are always NNCs
                // for the purpose of file output. This holds even for
                // connections between cells like (I,J,K) and (I+1,J,K)
                // which are nominally neighbours in the Cartesian grid.
                continue;
            }

            const auto minLevelCartIdx = std::min(levelCartIdxIn, levelCartIdxOut);
            const auto maxLevelCartIdx = std::max(levelCartIdxIn, levelCartIdxOut);

            const auto& levelCartDims = computeLevelCartDims(level);

            // Re-ordering in case of non-empty mapping between equilGrid to grid
            if (map) {
                c1 = map(c1); // equilGridToGrid map
                c2 = map(c2);
            }

            // Classify the connection by the index gap in the level's
            // Cartesian box: +1 -> X neighbour, +NX -> Y neighbour,
            // +NX*NY (possibly across inactive layers) -> Z neighbour.
            if (maxLevelCartIdx - minLevelCartIdx == 1 && levelCartDims[0] > 1 ) {
                outputTrans_->at(level).at("TRANX").template data<double>()[minLevelCartIdx] = globalTrans().transmissibility(c1, c2);
                continue; // skip other if clauses as they are false, last one needs some computation
            }

            if (maxLevelCartIdx - minLevelCartIdx == levelCartDims[0] && levelCartDims[1] > 1) {
                outputTrans_->at(level).at("TRANY").template data<double>()[minLevelCartIdx] = globalTrans().transmissibility(c1, c2);
                continue; // skipt next if clause as it needs some computation
            }

            if ( maxLevelCartIdx - minLevelCartIdx == levelCartDims[0]*levelCartDims[1] ||
                 directVerticalNeighbors(levelCartDims,
                                         levelCartToLevelCompressed[level],
                                         minLevelCartIdx,
                                         maxLevelCartIdx)) {
                outputTrans_->at(level).at("TRANZ").template data<double>()[minLevelCartIdx] = globalTrans().transmissibility(c1, c2);
            }
        }
    }
}
577
578template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
579bool
580EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
581isCartesianNeighbour_(const std::array<int,3>& levelCartDims,
582 const std::size_t levelCartIdx1,
583 const std::size_t levelCartIdx2) const
584{
585 const int diff = levelCartIdx2 - levelCartIdx1;
586
587 return (diff == 1)
588 || (diff == levelCartDims[0])
589 || (diff == (levelCartDims[0] * levelCartDims[1]));
590}
591
592template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
593bool
594EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
595isDirectNeighbours_(const std::unordered_map<int,int>& levelCartesianToActive,
596 const std::array<int,3>& levelCartDims,
597 const std::size_t levelCartIdx1,
598 const std::size_t levelCartIdx2) const
599{
600 return isCartesianNeighbour_(levelCartDims, levelCartIdx1, levelCartIdx2)
601 || directVerticalNeighbors(levelCartDims, levelCartesianToActive, levelCartIdx1, levelCartIdx2);
602}
603
604template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
605auto
606EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
607activeCell_(const std::unordered_map<int,int>& levelCartToLevelCompressed,
608 const std::size_t levelCartIdx) const
609{
610 auto pos = levelCartToLevelCompressed.find(levelCartIdx);
611 return (pos == levelCartToLevelCompressed.end()) ? -1 : pos->second;
612}
613
614template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
615void
616EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
617allocateAllNncs_(int maxLevel) const
618{
619 this->outputNnc_.resize(maxLevel+1); // level 0,1,..., maxLevel
620
621 if (maxLevel) {
622 // NNCs between main (level zero) grid and LGRs: level 1, ...., maxLevel.
623 // Example: grid with maxLevel == 3, outputNncGlobalLocal_.size() is maxLevel = 3
624 // outputNncGlobalLocal_[0] -> NNCs between level 0 and level 1
625 // outputNncGlobalLocal_[1] -> NNCs between level 0 and level 2
626 // outputAmalgamatedNnc_[2] -> NNCs between level 0 and level 3
627 this->outputNncGlobalLocal_.resize(maxLevel);
628
629 // NNCs between different refined level grids: (level1, level2)
630 // with 0 < level1 < level2 <= maxLevel
631 // Example: grid with maxLevel == 3, outputAmalgamatedNnc_.size() is maxLevel-1 = 2
632 // outputAmalgamatedNnc_[0][0] -> NNCs between level 1 and level 2
633 // outputAmalgamatedNnc_[0][1] -> NNCs between level 1 and level 3
634 // outputAmalgamatedNnc_[1][2] -> NNCs between level 2 and level 3
635 this->outputAmalgamatedNnc_.resize(maxLevel-1);
636 for (int i = 0; i < maxLevel-1; ++i) {
637 this->outputAmalgamatedNnc_[i].resize(maxLevel-1-i);
638 }
639 }
640}
641
template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
template<typename LevelIndicesFunction, typename OriginIndicesFunction>
std::vector<std::vector<NNCdata>>
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
exportNncStructure_(const std::vector<std::unordered_map<int,int>>& levelCartToLevelCompressed,
                    const std::function<unsigned int(unsigned int)>& map,
                    const LevelIndicesFunction& computeLevelIndices,
                    const std::function<int(int, int)>& computeLevelCartIdx,
                    const std::function<std::array<int,3>(int)>& computeLevelCartDims,
                    const OriginIndicesFunction& computeOriginIndices) const
{
    // Populate the NNC output containers (outputNnc_, outputNncGlobalLocal_,
    // outputAmalgamatedNnc_) from both explicit deck NNC entries and
    // geometric non-neighbouring connections found on the grid, then return
    // outputNnc_.
    const auto& nncData = this->eclState_.getInputNNC().input();
    const auto& unitSystem = this->eclState_.getDeckUnitSystem();

    // Cartesian index mapper for the serial I/O grid
    const auto& equilCartMapper = *equilCartMapper_;

    const auto& level0CartDims = equilCartMapper.cartesianDimensions();

    int maxLevel = this->equilGrid_->maxLevel();
    allocateAllNncs_(maxLevel);

    // The NNC keyword in the deck is defined only for faces in the level-0 grid.
    // The same limitation applies to aquifer data.
    for (const auto& entry : nncData) {
        // Ignore most explicit NNCs between otherwise neighbouring cells.
        // We keep NNCs that involve cells with numerical aquifers even if
        // these might be between neighbouring cells in the Cartesian
        // grid--e.g., between cells (I,J,K) and (I+1,J,K). All such
        // connections should be written to NNC output arrays provided the
        // transmissibility value is sufficiently large.
        //
        // The condition cell2 >= cell1 holds by construction of nncData.
        assert (entry.cell2 >= entry.cell1);

        if (! isCartesianNeighbour_(level0CartDims, entry.cell1, entry.cell2) ||
            isNumAquConn_(entry.cell1, entry.cell2))
        {
            // Pick up transmissibility value from 'globalTrans()' since
            // multiplier keywords like MULTREGT might have impacted the
            // values entered in primary sources like NNC/EDITNNC/EDITNNCR.
            const auto c1 = activeCell_(levelCartToLevelCompressed[/* level */0], entry.cell1);
            const auto c2 = activeCell_(levelCartToLevelCompressed[/* level */0], entry.cell2);

            if ((c1 < 0) || (c2 < 0)) {
                // Connection between inactive cells? Unexpected at this
                // level. Might consider 'throw'ing if this happens...
                continue;
            }

            const auto trans = this->globalTrans().transmissibility(c1, c2);
            const auto tt = unitSystem
                .from_si(UnitSystem::measure::transmissibility, trans);

            // ECLIPSE ignores NNCs (with EDITNNC/EDITNNCR applied) with
            // small transmissibility values. Seems like the threshold is
            // 1.0e-6 in output units.
            if (std::isnormal(tt) && ! (tt < 1.0e-6)) {
                this->outputNnc_[0].emplace_back(entry.cell1, entry.cell2, trans);
            }
        }
    }

    using GlobalGridView = typename EquilGrid::LeafGridView;
    using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
    const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
    const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };

    // Second pass: walk every intersection of the serial I/O grid and pick
    // up the geometric NNCs (faults, pinch-outs, LGR boundaries).
    for (const auto& elem : elements(globalGridView)) {
        for (const auto& is : intersections(globalGridView, elem)) {
            if (!is.neighbor())
                continue; // intersection is on the domain boundary

            // Not 'const' because remapped if 'map' is non-null.
            unsigned c1 = globalElemMapper.index(is.inside());
            unsigned c2 = globalElemMapper.index(is.outside());

            if (c1 > c2)
                continue; // we only need to handle each connection once, thank you.

            if ( is.inside().level() != is.outside().level() ) { // TRANGL and TRANLL
                // For CpGrid with LGRs, level*Idx and c* do not coincide.
                const auto& [levelInIdx, levelOutIdx] = computeLevelIndices(is, c1, c2);

                const int levelIn = is.inside().level();
                const int levelOut = is.outside().level();

                auto levelCartIdxIn = computeLevelCartIdx(levelInIdx, levelIn);
                auto levelCartIdxOut = computeLevelCartIdx(levelOutIdx, levelOut);

                // To store correctly and only once the corresponding NNC
                std::pair<int,int> smallerPair = {levelIn, levelCartIdxIn},
                                   largerPair = {levelOut, levelCartIdxOut};
                if (smallerPair.first > largerPair.first) {
                    std::swap(smallerPair, largerPair);
                }

                const auto& [smallerLevel, smallerLevelCartIdx] = smallerPair;
                const auto& [largerLevel, largerLevelCartIdx] = largerPair;

                auto t = this->globalTrans().transmissibility(c1, c2);

                // ECLIPSE ignores NNCs with zero transmissibility
                // (different threshold than for NNC with corresponding
                // EDITNNC above). In addition we do set small
                // transmissibilities to zero when setting up the simulator.
                // These will be ignored here, too.
                const auto tt = unitSystem
                    .from_si(UnitSystem::measure::transmissibility, t);

                if (std::isnormal(tt) && (tt > 1.0e-12)) {
                    // Store always FIRST the level Cartesian index of the cell belonging to the smaller level grid involved.
                    if (smallerLevel == 0) { // NNC between main (level zero) grid and a refined level/local grid
                        this->outputNncGlobalLocal_[largerLevel-1].emplace_back(smallerLevelCartIdx, largerLevelCartIdx, t);
                    }
                    else { // NNC between different refined level/local grids -> amlgamated NNC
                        assert(smallerLevel >= 1);
                        this->outputAmalgamatedNnc_[smallerLevel-1][largerLevel-smallerLevel-1].emplace_back(smallerLevelCartIdx, largerLevelCartIdx, t);
                    }
                }
            }
            else {
                // the cells sharing the intersection belong to the same level
                assert(is.inside().level() == is.outside().level());
                const int level = is.inside().level();

                // For CpGrid with LGRs, the origin cell index refers to the coarsest
                // ancestor cell when the cell is refined. For cells not involved in
                // any refinement, it corresponds to the geometrically equivalent
                // cell in the level-zero grid.
                const auto [originInIdx, originOutIdx] = computeOriginIndices(is, c1, c2);

                const std::size_t originCartIdxIn = computeLevelCartIdx(originInIdx, /* level = */ 0);
                const std::size_t originCartIdxOut = computeLevelCartIdx(originOutIdx, /* level = */ 0);

                // For CpGrid with LGRs, level*Idx and c* do not coincide.
                const auto& [levelInIdx, levelOutIdx] = computeLevelIndices(is, c1, c2);

                auto levelCartIdxIn = computeLevelCartIdx(levelInIdx, level);
                auto levelCartIdxOut = computeLevelCartIdx(levelOutIdx, level);

                if ( levelCartIdxOut < levelCartIdxIn )
                    std::swap(levelCartIdxIn, levelCartIdxOut);

                // Re-ordering in case of non-empty mapping between equilGrid to grid
                if (map) {
                    c1 = map(c1); // equilGridToGrid map
                    c2 = map(c2);
                }

                const auto& levelCartDims = computeLevelCartDims(level);

                if (isNumAquConn_(originCartIdxIn, originCartIdxOut) ||
                    ! isDirectNeighbours_(levelCartToLevelCompressed[level],
                                          levelCartDims,
                                          levelCartIdxIn, levelCartIdxOut)) {
                    // We need to check whether an NNC for this face was also
                    // specified via the NNC keyword in the deck.
                    auto t = this->globalTrans().transmissibility(c1, c2);

                    if (level == 0) {
                        // Subtract any deck-specified NNC contribution so it
                        // is not double counted.  NOTE(review): lower_bound
                        // assumes nncData is sorted by (cell1, cell2) --
                        // presumed guaranteed by getInputNNC(); confirm.
                        auto candidate = std::lower_bound(nncData.begin(), nncData.end(),
                                                          NNCdata { originCartIdxIn, originCartIdxOut, 0.0 });

                        while ((candidate != nncData.end()) &&
                               (candidate->cell1 == originCartIdxIn) &&
                               (candidate->cell2 == originCartIdxOut))
                        {
                            t -= candidate->trans;
                            ++candidate;
                        }
                    }

                    // ECLIPSE ignores NNCs with zero transmissibility
                    // (different threshold than for NNC with corresponding
                    // EDITNNC above). In addition we do set small
                    // transmissibilities to zero when setting up the simulator.
                    // These will be ignored here, too.
                    const auto tt = unitSystem
                        .from_si(UnitSystem::measure::transmissibility, t);

                    if (std::isnormal(tt) && (tt > 1.0e-12)) {
                        this->outputNnc_[level].emplace_back(levelCartIdxIn, levelCartIdxOut, t);
                    }
                }
            }
        }
    }

    return this->outputNnc_;
}
833
834template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
836doWriteOutput(const int reportStepNum,
837 const std::optional<int> timeStepNum,
838 const bool isSubStep,
839 data::Solution&& localCellData,
840 data::Wells&& localWellData,
841 data::GroupAndNetworkValues&& localGroupAndNetworkData,
842 data::Aquifers&& localAquiferData,
843 WellTestState&& localWTestState,
844 const Action::State& actionState,
845 const UDQState& udqState,
846 const SummaryState& summaryState,
847 const std::vector<Scalar>& thresholdPressure,
848 Scalar curTime,
849 Scalar nextStepSize,
850 bool doublePrecision,
851 bool isFlowsn,
852 std::array<FlowsData<double>, 3>&& flowsn,
853 bool isFloresn,
854 std::array<FlowsData<double>, 3>&& floresn)
855{
856 const auto isParallel = this->collectOnIORank_.isParallel();
857 const bool needsReordering = this->collectOnIORank_.doesNeedReordering();
858
859 RestartValue restartValue {
860 (isParallel || needsReordering)
861 ? this->collectOnIORank_.globalCellData()
862 : std::move(localCellData),
863
864 isParallel ? this->collectOnIORank_.globalWellData()
865 : std::move(localWellData),
866
867 isParallel ? this->collectOnIORank_.globalGroupAndNetworkData()
868 : std::move(localGroupAndNetworkData),
869
870 isParallel ? this->collectOnIORank_.globalAquiferData()
871 : std::move(localAquiferData)
872 };
873
874 if (eclState_.getSimulationConfig().useThresholdPressure()) {
875 restartValue.addExtra("THRESHPR", UnitSystem::measure::pressure,
876 thresholdPressure);
877 }
878
879 // Add suggested next timestep to extra data.
880 if (! isSubStep) {
881 restartValue.addExtra("OPMEXTRA", std::vector<double>(1, nextStepSize));
882 }
883
884 // Add nnc flows and flores.
885 if (isFlowsn) {
886 const auto flowsn_global = isParallel ? this->collectOnIORank_.globalFlowsn() : std::move(flowsn);
887 for (const auto& flows : flowsn_global) {
888 if (flows.name.empty())
889 continue;
890 if (flows.name == "FLOGASN+") {
891 restartValue.addExtra(flows.name, UnitSystem::measure::gas_surface_rate, flows.values);
892 } else {
893 restartValue.addExtra(flows.name, UnitSystem::measure::liquid_surface_rate, flows.values);
894 }
895 }
896 }
897 if (isFloresn) {
898 const auto floresn_global = isParallel ? this->collectOnIORank_.globalFloresn() : std::move(floresn);
899 for (const auto& flores : floresn_global) {
900 if (flores.name.empty()) {
901 continue;
902 }
903 restartValue.addExtra(flores.name, UnitSystem::measure::rate, flores.values);
904 }
905 }
906
907 std::vector<Opm::RestartValue> restartValues{};
908 // only serial, only CpGrid (for now)
909 if ( !isParallel && !needsReordering && (this->eclState_.getLgrs().size()>0) && (this->grid_.maxLevel()>0) ) {
910 // Level cells that appear on the leaf grid view get the data::Solution values from there.
911 // Other cells (i.e., parent cells that vanished due to refinement) get rubbish values for now.
912 // Only data::Solution is restricted to the level grids. Well, GroupAndNetwork, Aquifer are
913 // not modified in this method.
914 Opm::Lgr::extractRestartValueLevelGrids<Grid>(this->grid_, restartValue, restartValues);
915 }
916 else {
917 restartValues.reserve(1); // minimum size
918 restartValues.push_back(std::move(restartValue)); // no LGRs-> only one restart value
919 }
920
921 // make sure that the previous I/O request has been completed
922 // and the number of incomplete tasklets does not increase between
923 // time steps
924 this->taskletRunner_->barrier();
925
926 // check if there might have been a failure in the TaskletRunner
927 if (this->taskletRunner_->failure()) {
928 throw std::runtime_error("Failure in the TaskletRunner while writing output.");
929 }
930
931 // create a tasklet to write the data for the current time step to disk
932 auto eclWriteTasklet = std::make_shared<EclWriteTasklet>(
933 actionState,
934 isParallel ? this->collectOnIORank_.globalWellTestState() : std::move(localWTestState),
935 summaryState, udqState, *this->eclIO_,
936 reportStepNum, timeStepNum, isSubStep, curTime, std::move(restartValues), doublePrecision);
937
938 // finally, start a new output writing job
939 this->taskletRunner_->dispatch(std::move(eclWriteTasklet));
940}
941
942template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
944evalSummary(const int reportStepNum,
945 const Scalar curTime,
946 const data::Wells& localWellData,
947 const data::WellBlockAveragePressures& localWBPData,
948 const data::GroupAndNetworkValues& localGroupAndNetworkData,
949 const std::map<int,data::AquiferData>& localAquiferData,
950 const std::map<std::pair<std::string, int>, double>& blockData,
951 const std::map<std::string, double>& miscSummaryData,
952 const std::map<std::string, std::vector<double>>& regionData,
953 const Inplace& inplace,
954 const Inplace* initialInPlace,
955 const InterRegFlowMap& interRegFlows,
956 SummaryState& summaryState,
957 UDQState& udqState)
958{
959 if (collectOnIORank_.isIORank()) {
960 const auto& summary = eclIO_->summary();
961
962 const auto& wellData = this->collectOnIORank_.isParallel()
963 ? this->collectOnIORank_.globalWellData()
964 : localWellData;
965
966 const auto& wbpData = this->collectOnIORank_.isParallel()
967 ? this->collectOnIORank_.globalWBPData()
968 : localWBPData;
969
970 const auto& groupAndNetworkData = this->collectOnIORank_.isParallel()
971 ? this->collectOnIORank_.globalGroupAndNetworkData()
972 : localGroupAndNetworkData;
973
974 const auto& aquiferData = this->collectOnIORank_.isParallel()
975 ? this->collectOnIORank_.globalAquiferData()
976 : localAquiferData;
977
978 const auto interreg_flows = getInterRegFlowsAsMap(interRegFlows);
979
980 const auto values = out::Summary::DynamicSimulatorState {
981 /* well_solution = */ &wellData,
982 /* wbp = */ &wbpData,
983 /* group_and_nwrk_solution = */ &groupAndNetworkData,
984 /* single_values = */ &miscSummaryData,
985 /* region_values = */ &regionData,
986 /* block_values = */ &blockData,
987 /* aquifer_values = */ &aquiferData,
988 /* interreg_flows = */ &interreg_flows,
989 /* inplace = */ {
990 /* current = */ &inplace,
991 /* initial = */ initialInPlace
992 }
993 };
994
995 summary.eval(reportStepNum, curTime, values, summaryState);
996
997 // Off-by-one-fun: The reportStepNum argument corresponds to the
998 // report step these results will be written to, whereas the
999 // argument to UDQ function evaluation corresponds to the report
1000 // step we are currently on.
1001 const auto udq_step = reportStepNum - 1;
1002
1003 this->schedule_[udq_step].udq()
1004 .eval(udq_step,
1005 this->schedule_.wellMatcher(udq_step),
1006 this->schedule_[udq_step].group_order(),
1007 this->schedule_.segmentMatcherFactory(udq_step),
1008 [es = std::cref(this->eclState_)]() {
1009 return std::make_unique<RegionSetMatcher>
1010 (es.get().fipRegionStatistics());
1011 },
1012 summaryState, udqState);
1013 }
1014
1015#if HAVE_MPI
1016 if (collectOnIORank_.isParallel()) {
1017 Parallel::MpiSerializer ser(grid_.comm());
1018 ser.append(summaryState);
1019 }
1020#endif
1021}
1022
1023template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
1026globalTrans() const
1027{
1028 assert (globalTrans_);
1029 return *globalTrans_;
1030}
1031
1032} // namespace Opm
1033
1034#endif // OPM_ECL_GENERIC_WRITER_IMPL_HPP
Definition: CollectDataOnIORank.hpp:49
bool isIORank() const
Definition: CollectDataOnIORank.hpp:126
Definition: EclGenericWriter.hpp:67
std::vector< std::vector< NNCdata > > outputNnc_
Definition: EclGenericWriter.hpp:174
const EclipseState & eclState_
Definition: EclGenericWriter.hpp:158
void doWriteOutput(const int reportStepNum, const std::optional< int > timeStepNum, const bool isSubStep, data::Solution &&localCellData, data::Wells &&localWellData, data::GroupAndNetworkValues &&localGroupAndNetworkData, data::Aquifers &&localAquiferData, WellTestState &&localWTestState, const Action::State &actionState, const UDQState &udqState, const SummaryState &summaryState, const std::vector< Scalar > &thresholdPressure, Scalar curTime, Scalar nextStepSize, bool doublePrecision, bool isFlowsn, std::array< FlowsData< double >, 3 > &&flowsn, bool isFloresn, std::array< FlowsData< double >, 3 > &&floresn)
Definition: EclGenericWriter_impl.hpp:836
CollectDataOnIORankType collectOnIORank_
Definition: EclGenericWriter.hpp:154
void extractOutputTransAndNNC(const std::function< unsigned int(unsigned int)> &map)
Definition: EclGenericWriter_impl.hpp:286
std::unique_ptr< TaskletRunner > taskletRunner_
Definition: EclGenericWriter.hpp:160
void writeInit()
Definition: EclGenericWriter_impl.hpp:268
EclGenericWriter(const Schedule &schedule, const EclipseState &eclState, const SummaryConfig &summaryConfig, const Grid &grid, const EquilGrid *equilGrid, const GridView &gridView, const Dune::CartesianIndexMapper< Grid > &cartMapper, const Dune::CartesianIndexMapper< EquilGrid > *equilCartMapper, bool enableAsyncOutput, bool enableEsmry)
Definition: EclGenericWriter_impl.hpp:214
const EclipseIO & eclIO() const
Definition: EclGenericWriter_impl.hpp:260
void evalSummary(int reportStepNum, Scalar curTime, const data::Wells &localWellData, const data::WellBlockAveragePressures &localWBPData, const data::GroupAndNetworkValues &localGroupAndNetworkData, const std::map< int, data::AquiferData > &localAquiferData, const std::map< std::pair< std::string, int >, double > &blockData, const std::map< std::string, double > &miscSummaryData, const std::map< std::string, std::vector< double > > &regionData, const Inplace &inplace, const Inplace *initialInPlace, const InterRegFlowMap &interRegFlows, SummaryState &summaryState, UDQState &udqState)
Definition: EclGenericWriter_impl.hpp:944
const TransmissibilityType & globalTrans() const
Definition: EclGenericWriter_impl.hpp:1026
std::unique_ptr< EclipseIO > eclIO_
Definition: EclGenericWriter.hpp:159
Inter-region flow accumulation maps for all region definition arrays.
Definition: InterRegFlows.hpp:179
std::vector< data::InterRegFlowMap > getInterRegFlows() const
const std::vector< std::string > & names() const
Definition: EclGenericWriter.hpp:50
Class for serializing and broadcasting data using MPI.
Definition: MPISerializer.hpp:38
void append(T &data, int root=0)
Serialize and broadcast on root process, de-serialize and append on others.
Definition: MPISerializer.hpp:82
void broadcast(RootRank rootrank, Args &&... args)
Definition: MPISerializer.hpp:47
The base class for tasklets.
Definition: tasklets.hpp:45
virtual void run()=0
Handles where a given tasklet is run.
Definition: tasklets.hpp:93
Definition: Transmissibility.hpp:54
Definition: blackoilbioeffectsmodules.hh:45
Avoid mistakes in calls to broadcast() by wrapping the root argument in an explicit type.
Definition: MPISerializer.hpp:33