EclGenericWriter_impl.hpp
Go to the documentation of this file.
1// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2// vi: set et ts=4 sw=4 sts=4:
3/*
4 This file is part of the Open Porous Media project (OPM).
5
6 OPM is free software: you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation, either version 2 of the License, or
9 (at your option) any later version.
10
11 OPM is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with OPM. If not, see <http://www.gnu.org/licenses/>.
18
19 Consult the COPYING file in the top-level source directory of this
20 module for the precise wording of the license and the list of
21 copyright holders.
22*/
#ifndef OPM_ECL_GENERIC_WRITER_IMPL_HPP
#define OPM_ECL_GENERIC_WRITER_IMPL_HPP

#include <dune/grid/common/mcmgmapper.hh>

#include <opm/grid/cpgrid/LgrOutputHelpers.hpp>
#include <opm/grid/GridHelpers.hpp>
#include <opm/grid/utility/cartesianToCompressed.hpp>

#include <opm/input/eclipse/EclipseState/EclipseState.hpp>
#include <opm/input/eclipse/EclipseState/Grid/RegionSetMatcher.hpp>
#include <opm/input/eclipse/EclipseState/SummaryConfig/SummaryConfig.hpp>

#include <opm/input/eclipse/Schedule/Action/State.hpp>
#include <opm/input/eclipse/Schedule/RPTConfig.hpp>
#include <opm/input/eclipse/Schedule/Schedule.hpp>
#include <opm/input/eclipse/Schedule/SummaryState.hpp>
#include <opm/input/eclipse/Schedule/UDQ/UDQConfig.hpp>
#include <opm/input/eclipse/Schedule/UDQ/UDQState.hpp>
#include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
#include <opm/input/eclipse/Schedule/Well/WellMatcher.hpp>

#include <opm/input/eclipse/Units/UnitSystem.hpp>

#include <opm/output/eclipse/EclipseIO.hpp>
#include <opm/output/eclipse/RestartValue.hpp>
#include <opm/output/eclipse/Summary.hpp>

#if HAVE_MPI
// NOTE(review): the include that belongs inside this guard was lost during
// extraction; Parallel::MpiSerializer (used below) is declared in
// MPISerializer.hpp -- confirm the exact project path.
#include <opm/simulators/utils/MPISerializer.hpp>
#endif

#if HAVE_MPI
#include <mpi.h>
#endif

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
73namespace {
74
/// \brief Whether two cells lie in the same vertical (I,J) column with only
///        inactive cells between them.
///
/// Used when classifying connections between cells that are not adjacent in
/// the Cartesian grid: such a connection still counts as a direct vertical
/// neighbour connection if every cell strictly between the two is inactive.
///
/// \param cartDims          Cartesian dimensions (nx, ny, nz) of the grid.
/// \param cartesianToActive Map from Cartesian to active (compressed) cell
///                          index; a Cartesian index is active iff present.
/// \param smallGlobalIndex  Smaller Cartesian index of the cell pair.
/// \param largeGlobalIndex  Larger Cartesian index of the cell pair.
/// \return True if the cells share (i, j), are more than one layer apart,
///         and all cells strictly between them are inactive.
bool directVerticalNeighbors(const std::array<int, 3>& cartDims,
                             const std::unordered_map<int,int>& cartesianToActive,
                             int smallGlobalIndex, int largeGlobalIndex)
{
    assert(smallGlobalIndex <= largeGlobalIndex);

    // Decompose a global Cartesian index gc = i + nx*(j + ny*k) into (i,j,k).
    auto globalToIjk = [cartDims](int gc) {
        std::array<int, 3> ijk;
        ijk[0] = gc % cartDims[0];
        gc /= cartDims[0];
        ijk[1] = gc % cartDims[1];
        ijk[2] = gc / cartDims[1];
        return ijk;
    };

    const auto ijk1 = globalToIjk(smallGlobalIndex);
    const auto ijk2 = globalToIjk(largeGlobalIndex);
    assert(ijk2[2] >= ijk1[2]);

    // Must be the same (i, j) column and more than one layer apart --
    // otherwise the cells are either not vertically aligned or already
    // ordinary Cartesian neighbours.
    if (ijk1[0] != ijk2[0] || ijk1[1] != ijk2[1] || (ijk2[2] - ijk1[2]) <= 1) {
        return false;
    }

    assert((largeGlobalIndex - smallGlobalIndex) % (cartDims[0] * cartDims[1]) == 0);

    // Any active cell strictly between the pair breaks the direct connection.
    for (int gi = smallGlobalIndex + cartDims[0] * cartDims[1];
         gi < largeGlobalIndex;
         gi += cartDims[0] * cartDims[1])
    {
        if (cartesianToActive.find(gi) != cartesianToActive.end()) {
            return false;
        }
    }

    return true;
}
122
123std::unordered_map<std::string, Opm::data::InterRegFlowMap>
124getInterRegFlowsAsMap(const Opm::InterRegFlowMap& map)
125{
126 auto maps = std::unordered_map<std::string, Opm::data::InterRegFlowMap>{};
127
128 const auto& regionNames = map.names();
129 auto flows = map.getInterRegFlows();
130 const auto nmap = regionNames.size();
131
132 maps.reserve(nmap);
133 for (auto mapID = 0*nmap; mapID < nmap; ++mapID) {
134 maps.emplace(regionNames[mapID], std::move(flows[mapID]));
135 }
136
137 return maps;
139
140struct EclWriteTasklet : public Opm::TaskletInterface
141{
142 Opm::Action::State actionState_;
143 Opm::WellTestState wtestState_;
144 Opm::SummaryState summaryState_;
145 Opm::UDQState udqState_;
146 Opm::EclipseIO& eclIO_;
147 int reportStepNum_;
148 std::optional<int> timeStepNum_;
149 bool isSubStep_;
150 double secondsElapsed_;
151 std::vector<Opm::RestartValue> restartValue_;
152 bool writeDoublePrecision_;
153
154 explicit EclWriteTasklet(const Opm::Action::State& actionState,
155 const Opm::WellTestState& wtestState,
156 const Opm::SummaryState& summaryState,
157 const Opm::UDQState& udqState,
158 Opm::EclipseIO& eclIO,
159 int reportStepNum,
160 std::optional<int> timeStepNum,
161 bool isSubStep,
162 double secondsElapsed,
163 std::vector<Opm::RestartValue> restartValue,
164 bool writeDoublePrecision)
165 : actionState_(actionState)
166 , wtestState_(wtestState)
167 , summaryState_(summaryState)
168 , udqState_(udqState)
169 , eclIO_(eclIO)
170 , reportStepNum_(reportStepNum)
171 , timeStepNum_(timeStepNum)
172 , isSubStep_(isSubStep)
173 , secondsElapsed_(secondsElapsed)
174 , restartValue_(std::move(restartValue))
175 , writeDoublePrecision_(writeDoublePrecision)
176 {}
177
178 // callback to eclIO serial writeTimeStep method
179 void run() override
180 {
181 if (this->restartValue_.size() == 1) {
182 this->eclIO_.writeTimeStep(this->actionState_,
183 this->wtestState_,
184 this->summaryState_,
185 this->udqState_,
186 this->reportStepNum_,
187 this->isSubStep_,
188 this->secondsElapsed_,
189 std::move(this->restartValue_.back()),
190 this->writeDoublePrecision_,
191 this->timeStepNum_);
192 }
193 else{
194 this->eclIO_.writeTimeStep(this->actionState_,
195 this->wtestState_,
196 this->summaryState_,
197 this->udqState_,
198 this->reportStepNum_,
199 this->isSubStep_,
200 this->secondsElapsed_,
201 std::move(this->restartValue_),
202 this->writeDoublePrecision_,
203 this->timeStepNum_);
204 }
205 }
206};
207
208}
209
210namespace Opm {
211
212template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
214EclGenericWriter(const Schedule& schedule,
215 const EclipseState& eclState,
216 const SummaryConfig& summaryConfig,
217 const Grid& grid,
218 const EquilGrid* equilGrid,
219 const GridView& gridView,
220 const Dune::CartesianIndexMapper<Grid>& cartMapper,
221 const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper,
222 bool enableAsyncOutput,
223 bool enableEsmry )
224 : collectOnIORank_(grid,
225 equilGrid,
226 gridView,
227 cartMapper,
228 equilCartMapper,
229 summaryConfig.fip_regions_interreg_flow())
230 , grid_ (grid)
231 , gridView_ (gridView)
232 , schedule_ (schedule)
233 , eclState_ (eclState)
234 , cartMapper_ (cartMapper)
235 , equilCartMapper_(equilCartMapper)
236 , equilGrid_ (equilGrid)
237{
238 if (this->collectOnIORank_.isIORank()) {
239 this->eclIO_ = std::make_unique<EclipseIO>
240 (this->eclState_,
241 UgGridHelpers::createEclipseGrid(*equilGrid, eclState_.getInputGrid()),
242 this->schedule_, summaryConfig, "", enableEsmry);
243 }
244
245 // create output thread if enabled and rank is I/O rank
246 // async output is enabled by default if pthread are enabled
247 int numWorkerThreads = 0;
248 if (enableAsyncOutput && collectOnIORank_.isIORank()) {
249 numWorkerThreads = 1;
250 }
251
252 this->taskletRunner_.reset(new TaskletRunner(numWorkerThreads));
253}
254
255template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
257eclIO() const
258{
259 assert(eclIO_);
260 return *eclIO_;
261}
262
263template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
265writeInit()
266{
267 if (collectOnIORank_.isIORank()) {
268 std::map<std::string, std::vector<int>> integerVectors;
269 if (collectOnIORank_.isParallel()) {
270 integerVectors.emplace("MPI_RANK", collectOnIORank_.globalRanks());
271 }
272
273 eclIO_->writeInitial(*this->outputTrans_,
274 integerVectors,
275 this->outputNnc_);
276 this->outputTrans_.reset();
277 }
278}
279template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
280void
282extractOutputTransAndNNC(const std::function<unsigned int(unsigned int)>& map)
283{
284 if (collectOnIORank_.isIORank()) {
285 auto cartMap = cartesianToCompressed(equilGrid_->size(0), UgGridHelpers::globalCell(*equilGrid_));
286 computeTrans_(cartMap, map);
287 exportNncStructure_(cartMap, map);
288 }
289
290#if HAVE_MPI
291 if (collectOnIORank_.isParallel()) {
292 const auto& comm = grid_.comm();
293 Parallel::MpiSerializer ser(comm);
294 ser.broadcast(Parallel::RootRank{0}, outputNnc_);
295 }
296#endif
297}
298
299template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
300void
302computeTrans_(const std::unordered_map<int,int>& cartesianToActive,
303 const std::function<unsigned int(unsigned int)>& map) const
304{
305 if (!outputTrans_) {
306 outputTrans_ = std::make_unique<data::Solution>();
307 }
308
309 const auto& cartMapper = *equilCartMapper_;
310 const auto& cartDims = cartMapper.cartesianDimensions();
311
312 auto createCellData = [&cartDims]() {
313 return data::CellData{
314 UnitSystem::measure::transmissibility,
315 std::vector<double>(cartDims[0] * cartDims[1] * cartDims[2], 0.0),
316 data::TargetType::INIT
317 };
318 };
319
320 outputTrans_->clear();
321 outputTrans_->emplace("TRANX", createCellData());
322 outputTrans_->emplace("TRANY", createCellData());
323 outputTrans_->emplace("TRANZ", createCellData());
324
325 auto& tranx = this->outputTrans_->at("TRANX");
326 auto& trany = this->outputTrans_->at("TRANY");
327 auto& tranz = this->outputTrans_->at("TRANZ");
328
329 using GlobalGridView = typename EquilGrid::LeafGridView;
330 using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
331 const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
332 const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };
333
334 auto isNumAquCell = [numAquCell = this->eclState_.aquifer().hasNumericalAquifer()
335 ? this->eclState_.aquifer().numericalAquifers().allAquiferCellIds()
336 : std::vector<std::size_t>{}]
337 (const std::size_t cellIdx)
338 {
339 return std::binary_search(numAquCell.begin(), numAquCell.end(), cellIdx);
340 };
341
342 for (const auto& elem : elements(globalGridView)) {
343 for (const auto& is : intersections(globalGridView, elem)) {
344 if (!is.neighbor())
345 continue; // intersection is on the domain boundary
346
347 if ( (is.inside().level()>0) || (is.outside().level()>0))
348 continue; // for CpGrid with LGRs, we only care about level zero cells, for now.
349
350 // Not 'const' because remapped if 'map' is non-null.
351 unsigned c1 = globalElemMapper.index(is.inside());
352 unsigned c2 = globalElemMapper.index(is.outside());
353
354 if (c1 > c2)
355 continue; // we only need to handle each connection once, thank you.
356
357 // Ordering of compressed and uncompressed index should be the same
358 const int cartIdx1 = cartMapper.cartesianIndex( c1 );
359 const int cartIdx2 = cartMapper.cartesianIndex( c2 );
360
361 if (isNumAquCell(cartIdx1) || isNumAquCell(cartIdx2)) {
362 // Connections involving numerical aquifers are always NNCs
363 // for the purpose of file output. This holds even for
364 // connections between cells like (I,J,K) and (I+1,J,K)
365 // which are nominally neighbours in the Cartesian grid.
366 continue;
367 }
368
369 // Ordering of compressed and uncompressed index should be the same
370 assert(cartIdx1 <= cartIdx2);
371 int gc1 = std::min(cartIdx1, cartIdx2);
372 int gc2 = std::max(cartIdx1, cartIdx2);
373
374 // Re-ordering in case of non-empty mapping between equilGrid to grid
375 if (map) {
376 c1 = map(c1); // equilGridToGrid map
377 c2 = map(c2);
378 }
379
380 if (gc2 - gc1 == 1 && cartDims[0] > 1 ) {
381 tranx.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
382 continue; // skip other if clauses as they are false, last one needs some computation
383 }
384
385 if (gc2 - gc1 == cartDims[0] && cartDims[1] > 1) {
386 trany.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
387 continue; // skipt next if clause as it needs some computation
388 }
389
390 if ( gc2 - gc1 == cartDims[0]*cartDims[1] ||
391 directVerticalNeighbors(cartDims, cartesianToActive, gc1, gc2))
392 tranz.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
393 }
394 }
395}
396
397template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
398std::vector<NNCdata>
399EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
400exportNncStructure_(const std::unordered_map<int,int>& cartesianToActive,
401 const std::function<unsigned int(unsigned int)>& map) const
402{
403 auto isNumAquCell = [numAquCell = this->eclState_.aquifer().hasNumericalAquifer()
404 ? this->eclState_.aquifer().numericalAquifers().allAquiferCellIds()
405 : std::vector<std::size_t>{}]
406 (const std::size_t cellIdx)
407 {
408 return std::binary_search(numAquCell.begin(), numAquCell.end(), cellIdx);
409 };
410
411 auto isNumAquConn = [&isNumAquCell](const std::size_t cellIdx1,
412 const std::size_t cellIdx2)
413 {
414 return isNumAquCell(cellIdx1) || isNumAquCell(cellIdx2);
415 };
416
417 auto isCartesianNeighbour = [nx = this->eclState_.getInputGrid().getNX(),
418 ny = this->eclState_.getInputGrid().getNY()]
419 (const std::size_t cellIdx1, const std::size_t cellIdx2)
420 {
421 const auto cellDiff = cellIdx2 - cellIdx1;
422
423 return (cellDiff == 1)
424 || (cellDiff == nx)
425 || (cellDiff == nx * ny);
426 };
427
428 auto activeCell = [&cartesianToActive](const std::size_t cellIdx)
429 {
430 auto pos = cartesianToActive.find(cellIdx);
431 return (pos == cartesianToActive.end()) ? -1 : pos->second;
432 };
433
434 const auto& nncData = this->eclState_.getInputNNC().input();
435 const auto& unitSystem = this->eclState_.getDeckUnitSystem();
436
437 for (const auto& entry : nncData) {
438 // Ignore most explicit NNCs between otherwise neighbouring cells.
439 // We keep NNCs that involve cells with numerical aquifers even if
440 // these might be between neighbouring cells in the Cartesian
441 // grid--e.g., between cells (I,J,K) and (I+1,J,K). All such
442 // connections should be written to NNC output arrays provided the
443 // transmissibility value is sufficiently large.
444 //
445 // The condition cell2 >= cell1 holds by construction of nncData.
446 assert (entry.cell2 >= entry.cell1);
447
448 if (! isCartesianNeighbour(entry.cell1, entry.cell2) ||
449 isNumAquConn(entry.cell1, entry.cell2))
450 {
451 // Pick up transmissibility value from 'globalTrans()' since
452 // multiplier keywords like MULTREGT might have impacted the
453 // values entered in primary sources like NNC/EDITNNC/EDITNNCR.
454 const auto c1 = activeCell(entry.cell1);
455 const auto c2 = activeCell(entry.cell2);
456
457 if ((c1 < 0) || (c2 < 0)) {
458 // Connection between inactive cells? Unexpected at this
459 // level. Might consider 'throw'ing if this happens...
460 continue;
461 }
462
463 const auto trans = this->globalTrans().transmissibility(c1, c2);
464 const auto tt = unitSystem
465 .from_si(UnitSystem::measure::transmissibility, trans);
466
467 // ECLIPSE ignores NNCs (with EDITNNC/EDITNNCR applied) with
468 // small transmissibility values. Seems like the threshold is
469 // 1.0e-6 in output units.
470 if (std::isnormal(tt) && ! (tt < 1.0e-6)) {
471 this->outputNnc_.emplace_back(entry.cell1, entry.cell2, trans);
472 }
473 }
474 }
475
476 auto isDirectNeighbours = [&isCartesianNeighbour, &cartesianToActive,
477 cartDims = &this->cartMapper_.cartesianDimensions()]
478 (const std::size_t cellIdx1, const std::size_t cellIdx2)
479 {
480 return isCartesianNeighbour(cellIdx1, cellIdx2)
481 || directVerticalNeighbors(*cartDims, cartesianToActive, cellIdx1, cellIdx2);
482 };
483
484 using GlobalGridView = typename EquilGrid::LeafGridView;
485 using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
486 const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
487 const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };
488
489 // Cartesian index mapper for the serial I/O grid
490 const auto& equilCartMapper = *equilCartMapper_;
491 for (const auto& elem : elements(globalGridView)) {
492 for (const auto& is : intersections(globalGridView, elem)) {
493 if (!is.neighbor())
494 continue; // intersection is on the domain boundary
495
496 if ( (is.inside().level()>0) || (is.outside().level()>0))
497 continue; // for CpGrid with LGRs, we only care about level zero cells, for now.
498
499 // Not 'const' because remapped if 'map' is non-null.
500 unsigned c1 = globalElemMapper.index(is.inside());
501 unsigned c2 = globalElemMapper.index(is.outside());
502
503 if (c1 > c2)
504 continue; // we only need to handle each connection once, thank you.
505
506 std::size_t cc1 = equilCartMapper.cartesianIndex( c1 );
507 std::size_t cc2 = equilCartMapper.cartesianIndex( c2 );
508
509 if ( cc2 < cc1 )
510 std::swap(cc1, cc2);
511
512 // Re-ordering in case of non-empty mapping between equilGrid to grid
513 if (map) {
514 c1 = map(c1); // equilGridToGrid map
515 c2 = map(c2);
516 }
517
518 if (isNumAquConn(cc1, cc2) || ! isDirectNeighbours(cc1, cc2)) {
519 // We need to check whether an NNC for this face was also
520 // specified via the NNC keyword in the deck.
521 auto t = this->globalTrans().transmissibility(c1, c2);
522 auto candidate = std::lower_bound(nncData.begin(), nncData.end(),
523 NNCdata { cc1, cc2, 0.0 });
524
525 while ((candidate != nncData.end()) &&
526 (candidate->cell1 == cc1) &&
527 (candidate->cell2 == cc2))
528 {
529 t -= candidate->trans;
530 ++candidate;
531 }
532
533 // ECLIPSE ignores NNCs with zero transmissibility
534 // (different threshold than for NNC with corresponding
535 // EDITNNC above). In addition we do set small
536 // transmissibilities to zero when setting up the simulator.
537 // These will be ignored here, too.
538 const auto tt = unitSystem
539 .from_si(UnitSystem::measure::transmissibility, t);
540
541 if (std::isnormal(tt) && (tt > 1.0e-12)) {
542 this->outputNnc_.emplace_back(cc1, cc2, t);
543 }
544 }
545 }
546 }
547
548 return this->outputNnc_;
549}
550
551template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
553doWriteOutput(const int reportStepNum,
554 const std::optional<int> timeStepNum,
555 const bool isSubStep,
556 data::Solution&& localCellData,
557 data::Wells&& localWellData,
558 data::GroupAndNetworkValues&& localGroupAndNetworkData,
559 data::Aquifers&& localAquiferData,
560 WellTestState&& localWTestState,
561 const Action::State& actionState,
562 const UDQState& udqState,
563 const SummaryState& summaryState,
564 const std::vector<Scalar>& thresholdPressure,
565 Scalar curTime,
566 Scalar nextStepSize,
567 bool doublePrecision,
568 bool isFlowsn,
569 std::array<FlowsData<double>, 3>&& flowsn,
570 bool isFloresn,
571 std::array<FlowsData<double>, 3>&& floresn)
572{
573 const auto isParallel = this->collectOnIORank_.isParallel();
574 const bool needsReordering = this->collectOnIORank_.doesNeedReordering();
575
576 RestartValue restartValue {
577 (isParallel || needsReordering)
578 ? this->collectOnIORank_.globalCellData()
579 : std::move(localCellData),
580
581 isParallel ? this->collectOnIORank_.globalWellData()
582 : std::move(localWellData),
583
584 isParallel ? this->collectOnIORank_.globalGroupAndNetworkData()
585 : std::move(localGroupAndNetworkData),
586
587 isParallel ? this->collectOnIORank_.globalAquiferData()
588 : std::move(localAquiferData)
589 };
590
591 if (eclState_.getSimulationConfig().useThresholdPressure()) {
592 restartValue.addExtra("THRESHPR", UnitSystem::measure::pressure,
593 thresholdPressure);
594 }
595
596 // Add suggested next timestep to extra data.
597 if (! isSubStep) {
598 restartValue.addExtra("OPMEXTRA", std::vector<double>(1, nextStepSize));
599 }
600
601 // Add nnc flows and flores.
602 if (isFlowsn) {
603 const auto flowsn_global = isParallel ? this->collectOnIORank_.globalFlowsn() : std::move(flowsn);
604 for (const auto& flows : flowsn_global) {
605 if (flows.name.empty())
606 continue;
607 if (flows.name == "FLOGASN+") {
608 restartValue.addExtra(flows.name, UnitSystem::measure::gas_surface_rate, flows.values);
609 } else {
610 restartValue.addExtra(flows.name, UnitSystem::measure::liquid_surface_rate, flows.values);
611 }
612 }
613 }
614 if (isFloresn) {
615 const auto floresn_global = isParallel ? this->collectOnIORank_.globalFloresn() : std::move(floresn);
616 for (const auto& flores : floresn_global) {
617 if (flores.name.empty()) {
618 continue;
619 }
620 restartValue.addExtra(flores.name, UnitSystem::measure::rate, flores.values);
621 }
622 }
623
624 std::vector<Opm::RestartValue> restartValues{};
625 // only serial, only CpGrid (for now)
626 if ( !isParallel && !needsReordering && (this->eclState_.getLgrs().size()>0) && (this->grid_.maxLevel()>0) ) {
627 // Level cells that appear on the leaf grid view get the data::Solution values from there.
628 // Other cells (i.e., parent cells that vanished due to refinement) get rubbish values for now.
629 // Only data::Solution is restricted to the level grids. Well, GroupAndNetwork, Aquifer are
630 // not modified in this method.
631 Opm::Lgr::extractRestartValueLevelGrids<Grid>(this->grid_, restartValue, restartValues);
632 }
633 else {
634 restartValues.reserve(1); // minimum size
635 restartValues.push_back(std::move(restartValue)); // no LGRs-> only one restart value
636 }
637
638 // make sure that the previous I/O request has been completed
639 // and the number of incomplete tasklets does not increase between
640 // time steps
641 this->taskletRunner_->barrier();
642
643 // check if there might have been a failure in the TaskletRunner
644 if (this->taskletRunner_->failure()) {
645 throw std::runtime_error("Failure in the TaskletRunner while writing output.");
646 }
647
648 // create a tasklet to write the data for the current time step to disk
649 auto eclWriteTasklet = std::make_shared<EclWriteTasklet>(
650 actionState,
651 isParallel ? this->collectOnIORank_.globalWellTestState() : std::move(localWTestState),
652 summaryState, udqState, *this->eclIO_,
653 reportStepNum, timeStepNum, isSubStep, curTime, std::move(restartValues), doublePrecision);
654
655 // finally, start a new output writing job
656 this->taskletRunner_->dispatch(std::move(eclWriteTasklet));
657}
658
659template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
661evalSummary(const int reportStepNum,
662 const Scalar curTime,
663 const data::Wells& localWellData,
664 const data::WellBlockAveragePressures& localWBPData,
665 const data::GroupAndNetworkValues& localGroupAndNetworkData,
666 const std::map<int,data::AquiferData>& localAquiferData,
667 const std::map<std::pair<std::string, int>, double>& blockData,
668 const std::map<std::string, double>& miscSummaryData,
669 const std::map<std::string, std::vector<double>>& regionData,
670 const Inplace& inplace,
671 const Inplace* initialInPlace,
672 const InterRegFlowMap& interRegFlows,
673 SummaryState& summaryState,
674 UDQState& udqState)
675{
676 if (collectOnIORank_.isIORank()) {
677 const auto& summary = eclIO_->summary();
678
679 const auto& wellData = this->collectOnIORank_.isParallel()
680 ? this->collectOnIORank_.globalWellData()
681 : localWellData;
682
683 const auto& wbpData = this->collectOnIORank_.isParallel()
684 ? this->collectOnIORank_.globalWBPData()
685 : localWBPData;
686
687 const auto& groupAndNetworkData = this->collectOnIORank_.isParallel()
688 ? this->collectOnIORank_.globalGroupAndNetworkData()
689 : localGroupAndNetworkData;
690
691 const auto& aquiferData = this->collectOnIORank_.isParallel()
692 ? this->collectOnIORank_.globalAquiferData()
693 : localAquiferData;
694
695 const auto interreg_flows = getInterRegFlowsAsMap(interRegFlows);
696
697 const auto values = out::Summary::DynamicSimulatorState {
698 /* well_solution = */ &wellData,
699 /* wbp = */ &wbpData,
700 /* group_and_nwrk_solution = */ &groupAndNetworkData,
701 /* single_values = */ &miscSummaryData,
702 /* region_values = */ &regionData,
703 /* block_values = */ &blockData,
704 /* aquifer_values = */ &aquiferData,
705 /* interreg_flows = */ &interreg_flows,
706 /* inplace = */ {
707 /* current = */ &inplace,
708 /* initial = */ initialInPlace
709 }
710 };
711
712 summary.eval(reportStepNum, curTime, values, summaryState);
713
714 // Off-by-one-fun: The reportStepNum argument corresponds to the
715 // report step these results will be written to, whereas the
716 // argument to UDQ function evaluation corresponds to the report
717 // step we are currently on.
718 const auto udq_step = reportStepNum - 1;
719
720 this->schedule_[udq_step].udq()
721 .eval(udq_step,
722 this->schedule_.wellMatcher(udq_step),
723 this->schedule_[udq_step].group_order(),
724 this->schedule_.segmentMatcherFactory(udq_step),
725 [es = std::cref(this->eclState_)]() {
726 return std::make_unique<RegionSetMatcher>
727 (es.get().fipRegionStatistics());
728 },
729 summaryState, udqState);
730 }
731
732#if HAVE_MPI
733 if (collectOnIORank_.isParallel()) {
734 Parallel::MpiSerializer ser(grid_.comm());
735 ser.append(summaryState);
736 }
737#endif
738}
739
740template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
743globalTrans() const
744{
745 assert (globalTrans_);
746 return *globalTrans_;
747}
748
749} // namespace Opm
750
751#endif // OPM_ECL_GENERIC_WRITER_IMPL_HPP
Definition: CollectDataOnIORank.hpp:49
bool isIORank() const
Definition: CollectDataOnIORank.hpp:126
Definition: EclGenericWriter.hpp:66
const EclipseState & eclState_
Definition: EclGenericWriter.hpp:157
void doWriteOutput(const int reportStepNum, const std::optional< int > timeStepNum, const bool isSubStep, data::Solution &&localCellData, data::Wells &&localWellData, data::GroupAndNetworkValues &&localGroupAndNetworkData, data::Aquifers &&localAquiferData, WellTestState &&localWTestState, const Action::State &actionState, const UDQState &udqState, const SummaryState &summaryState, const std::vector< Scalar > &thresholdPressure, Scalar curTime, Scalar nextStepSize, bool doublePrecision, bool isFlowsn, std::array< FlowsData< double >, 3 > &&flowsn, bool isFloresn, std::array< FlowsData< double >, 3 > &&floresn)
Definition: EclGenericWriter_impl.hpp:553
CollectDataOnIORankType collectOnIORank_
Definition: EclGenericWriter.hpp:153
void extractOutputTransAndNNC(const std::function< unsigned int(unsigned int)> &map)
Definition: EclGenericWriter_impl.hpp:282
std::unique_ptr< TaskletRunner > taskletRunner_
Definition: EclGenericWriter.hpp:159
void writeInit()
Definition: EclGenericWriter_impl.hpp:265
EclGenericWriter(const Schedule &schedule, const EclipseState &eclState, const SummaryConfig &summaryConfig, const Grid &grid, const EquilGrid *equilGrid, const GridView &gridView, const Dune::CartesianIndexMapper< Grid > &cartMapper, const Dune::CartesianIndexMapper< EquilGrid > *equilCartMapper, bool enableAsyncOutput, bool enableEsmry)
Definition: EclGenericWriter_impl.hpp:214
const EclipseIO & eclIO() const
Definition: EclGenericWriter_impl.hpp:257
void evalSummary(int reportStepNum, Scalar curTime, const data::Wells &localWellData, const data::WellBlockAveragePressures &localWBPData, const data::GroupAndNetworkValues &localGroupAndNetworkData, const std::map< int, data::AquiferData > &localAquiferData, const std::map< std::pair< std::string, int >, double > &blockData, const std::map< std::string, double > &miscSummaryData, const std::map< std::string, std::vector< double > > &regionData, const Inplace &inplace, const Inplace *initialInPlace, const InterRegFlowMap &interRegFlows, SummaryState &summaryState, UDQState &udqState)
Definition: EclGenericWriter_impl.hpp:661
const TransmissibilityType & globalTrans() const
Definition: EclGenericWriter_impl.hpp:743
Inter-region flow accumulation maps for all region definition arrays.
Definition: InterRegFlows.hpp:179
std::vector< data::InterRegFlowMap > getInterRegFlows() const
const std::vector< std::string > & names() const
Class for serializing and broadcasting data using MPI.
Definition: MPISerializer.hpp:38
void append(T &data, int root=0)
Serialize and broadcast on root process, de-serialize and append on others.
Definition: MPISerializer.hpp:82
void broadcast(RootRank rootrank, Args &&... args)
Definition: MPISerializer.hpp:47
The base class for tasklets.
Definition: tasklets.hpp:45
virtual void run()=0
Handles where a given tasklet is run.
Definition: tasklets.hpp:93
Definition: Transmissibility.hpp:54
Definition: blackoilbioeffectsmodules.hh:43
Avoid mistakes in calls to broadcast() by wrapping the root argument in an explicit type.
Definition: MPISerializer.hpp:33