EclGenericWriter_impl.hpp
Go to the documentation of this file.
1// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2// vi: set et ts=4 sw=4 sts=4:
3/*
4 This file is part of the Open Porous Media project (OPM).
5
6 OPM is free software: you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation, either version 2 of the License, or
9 (at your option) any later version.
10
11 OPM is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with OPM. If not, see <http://www.gnu.org/licenses/>.
18
19 Consult the COPYING file in the top-level source directory of this
20 module for the precise wording of the license and the list of
21 copyright holders.
22*/
23#ifndef OPM_ECL_GENERIC_WRITER_IMPL_HPP
24#define OPM_ECL_GENERIC_WRITER_IMPL_HPP
25
26#include <dune/grid/common/mcmgmapper.hh>
27
28#include <opm/grid/GridHelpers.hpp>
29#include <opm/grid/utility/cartesianToCompressed.hpp>
30
31#include <opm/input/eclipse/EclipseState/EclipseState.hpp>
32#include <opm/input/eclipse/EclipseState/Grid/RegionSetMatcher.hpp>
33#include <opm/input/eclipse/EclipseState/SummaryConfig/SummaryConfig.hpp>
34
35#include <opm/input/eclipse/Schedule/Action/State.hpp>
36#include <opm/input/eclipse/Schedule/RPTConfig.hpp>
37#include <opm/input/eclipse/Schedule/Schedule.hpp>
38#include <opm/input/eclipse/Schedule/SummaryState.hpp>
39#include <opm/input/eclipse/Schedule/UDQ/UDQConfig.hpp>
40#include <opm/input/eclipse/Schedule/UDQ/UDQState.hpp>
41#include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
42#include <opm/input/eclipse/Schedule/Well/WellMatcher.hpp>
43
44#include <opm/input/eclipse/Units/UnitSystem.hpp>
45
46#include <opm/output/eclipse/EclipseIO.hpp>
47#include <opm/output/eclipse/RestartValue.hpp>
48#include <opm/output/eclipse/Summary.hpp>
49
51
52#if HAVE_MPI
54#endif
55
56#if HAVE_MPI
57#include <mpi.h>
58#endif
59
#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
71
72namespace {
73
/// Decide whether two Cartesian cells in the same (I,J) column, but more
/// than one layer apart in K, are still "direct" vertical neighbours.
///
/// This is the case when every Cartesian cell strictly between them is
/// inactive, i.e., the active cells touch once inactive cells collapse.
///
/// \param cartDims          Cartesian dimensions (NX, NY, NZ) of the grid.
/// \param cartesianToActive Map from global Cartesian index to active cell
///                          index; key presence marks the cell as active.
/// \param smallGlobalIndex  Lower global Cartesian index of the pair.
/// \param largeGlobalIndex  Higher global Cartesian index of the pair.
///
/// \return True if the cells are in the same column, at least two layers
///         apart, and separated only by inactive cells.
bool directVerticalNeighbors(const std::array<int, 3>& cartDims,
                             const std::unordered_map<int,int>& cartesianToActive,
                             int smallGlobalIndex, int largeGlobalIndex)
{
    assert(smallGlobalIndex <= largeGlobalIndex);

    // Decompose a global Cartesian index into (i,j,k) coordinates.
    auto globalToIjk = [&cartDims](int gc) {
        std::array<int, 3> ijk;
        ijk[0] = gc % cartDims[0];
        gc /= cartDims[0];
        ijk[1] = gc % cartDims[1];
        ijk[2] = gc / cartDims[1];
        return ijk;
    };

    const auto ijk1 = globalToIjk(smallGlobalIndex);
    const auto ijk2 = globalToIjk(largeGlobalIndex);
    assert(ijk2[2] >= ijk1[2]);

    // Same (I,J) column with at least one full layer between the cells?
    if (ijk1[0] == ijk2[0] && ijk1[1] == ijk2[1] && (ijk2[2] - ijk1[2]) > 1) {
        assert((largeGlobalIndex - smallGlobalIndex) % (cartDims[0]*cartDims[1]) == 0);

        // Direct neighbours only if every cell in between is inactive.
        for (int gi = smallGlobalIndex + cartDims[0] * cartDims[1];
             gi < largeGlobalIndex;
             gi += cartDims[0] * cartDims[1])
        {
            if (cartesianToActive.find(gi) != cartesianToActive.end()) {
                return false;
            }
        }
        return true;
    }

    return false;
}
121
122std::unordered_map<std::string, Opm::data::InterRegFlowMap>
123getInterRegFlowsAsMap(const Opm::InterRegFlowMap& map)
124{
125 auto maps = std::unordered_map<std::string, Opm::data::InterRegFlowMap>{};
126
127 const auto& regionNames = map.names();
128 auto flows = map.getInterRegFlows();
129 const auto nmap = regionNames.size();
130
131 maps.reserve(nmap);
132 for (auto mapID = 0*nmap; mapID < nmap; ++mapID) {
133 maps.emplace(regionNames[mapID], std::move(flows[mapID]));
134 }
135
136 return maps;
138
139struct EclWriteTasklet : public Opm::TaskletInterface
140{
141 Opm::Action::State actionState_;
142 Opm::WellTestState wtestState_;
143 Opm::SummaryState summaryState_;
144 Opm::UDQState udqState_;
145 Opm::EclipseIO& eclIO_;
146 int reportStepNum_;
147 std::optional<int> timeStepNum_;
148 bool isSubStep_;
149 double secondsElapsed_;
150 Opm::RestartValue restartValue_;
151 bool writeDoublePrecision_;
152
153 explicit EclWriteTasklet(const Opm::Action::State& actionState,
154 const Opm::WellTestState& wtestState,
155 const Opm::SummaryState& summaryState,
156 const Opm::UDQState& udqState,
157 Opm::EclipseIO& eclIO,
158 int reportStepNum,
159 std::optional<int> timeStepNum,
160 bool isSubStep,
161 double secondsElapsed,
162 Opm::RestartValue restartValue,
163 bool writeDoublePrecision)
164 : actionState_(actionState)
165 , wtestState_(wtestState)
166 , summaryState_(summaryState)
167 , udqState_(udqState)
168 , eclIO_(eclIO)
169 , reportStepNum_(reportStepNum)
170 , timeStepNum_(timeStepNum)
171 , isSubStep_(isSubStep)
172 , secondsElapsed_(secondsElapsed)
173 , restartValue_(std::move(restartValue))
174 , writeDoublePrecision_(writeDoublePrecision)
175 {}
176
177 // callback to eclIO serial writeTimeStep method
178 void run() override
179 {
180 this->eclIO_.writeTimeStep(this->actionState_,
181 this->wtestState_,
182 this->summaryState_,
183 this->udqState_,
184 this->reportStepNum_,
185 this->isSubStep_,
186 this->secondsElapsed_,
187 std::move(this->restartValue_),
188 this->writeDoublePrecision_,
189 this->timeStepNum_
190);
191 }
192};
193
194}
195
196namespace Opm {
197
198template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
200EclGenericWriter(const Schedule& schedule,
201 const EclipseState& eclState,
202 const SummaryConfig& summaryConfig,
203 const Grid& grid,
204 const EquilGrid* equilGrid,
205 const GridView& gridView,
206 const Dune::CartesianIndexMapper<Grid>& cartMapper,
207 const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper,
208 bool enableAsyncOutput,
209 bool enableEsmry )
210 : collectOnIORank_(grid,
211 equilGrid,
212 gridView,
213 cartMapper,
214 equilCartMapper,
215 summaryConfig.fip_regions_interreg_flow())
216 , grid_ (grid)
217 , gridView_ (gridView)
218 , schedule_ (schedule)
219 , eclState_ (eclState)
220 , cartMapper_ (cartMapper)
221 , equilCartMapper_(equilCartMapper)
222 , equilGrid_ (equilGrid)
223{
224 if (this->collectOnIORank_.isIORank()) {
225 this->eclIO_ = std::make_unique<EclipseIO>
226 (this->eclState_,
227 UgGridHelpers::createEclipseGrid(*equilGrid, eclState_.getInputGrid()),
228 this->schedule_, summaryConfig, "", enableEsmry);
229 }
230
231 // create output thread if enabled and rank is I/O rank
232 // async output is enabled by default if pthread are enabled
233 int numWorkerThreads = 0;
234 if (enableAsyncOutput && collectOnIORank_.isIORank()) {
235 numWorkerThreads = 1;
236 }
237
238 this->taskletRunner_.reset(new TaskletRunner(numWorkerThreads));
239}
240
241template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
243eclIO() const
244{
245 assert(eclIO_);
246 return *eclIO_;
247}
248
249template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
251writeInit()
252{
253 if (collectOnIORank_.isIORank()) {
254 std::map<std::string, std::vector<int>> integerVectors;
255 if (collectOnIORank_.isParallel()) {
256 integerVectors.emplace("MPI_RANK", collectOnIORank_.globalRanks());
257 }
258
259 eclIO_->writeInitial(*this->outputTrans_,
260 integerVectors,
261 this->outputNnc_);
262 this->outputTrans_.reset();
263 }
264}
265template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
266void
268extractOutputTransAndNNC(const std::function<unsigned int(unsigned int)>& map)
269{
270 if (collectOnIORank_.isIORank()) {
271 auto cartMap = cartesianToCompressed(equilGrid_->size(0), UgGridHelpers::globalCell(*equilGrid_));
272 computeTrans_(cartMap, map);
273 exportNncStructure_(cartMap, map);
274 }
275
276#if HAVE_MPI
277 if (collectOnIORank_.isParallel()) {
278 const auto& comm = grid_.comm();
279 Parallel::MpiSerializer ser(comm);
280 ser.broadcast(Parallel::RootRank{0}, outputNnc_);
281 }
282#endif
283}
284
285template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
286void
288computeTrans_(const std::unordered_map<int,int>& cartesianToActive,
289 const std::function<unsigned int(unsigned int)>& map) const
290{
291 if (!outputTrans_) {
292 outputTrans_ = std::make_unique<data::Solution>();
293 }
294
295 const auto& cartMapper = *equilCartMapper_;
296 const auto& cartDims = cartMapper.cartesianDimensions();
297
298 auto createCellData = [&cartDims]() {
299 return data::CellData{
300 UnitSystem::measure::transmissibility,
301 std::vector<double>(cartDims[0] * cartDims[1] * cartDims[2], 0.0),
302 data::TargetType::INIT
303 };
304 };
305
306 outputTrans_->clear();
307 outputTrans_->emplace("TRANX", createCellData());
308 outputTrans_->emplace("TRANY", createCellData());
309 outputTrans_->emplace("TRANZ", createCellData());
310
311 auto& tranx = this->outputTrans_->at("TRANX");
312 auto& trany = this->outputTrans_->at("TRANY");
313 auto& tranz = this->outputTrans_->at("TRANZ");
314
315 using GlobalGridView = typename EquilGrid::LeafGridView;
316 using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
317 const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
318 const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };
319
320 auto isNumAquCell = [numAquCell = this->eclState_.aquifer().hasNumericalAquifer()
321 ? this->eclState_.aquifer().numericalAquifers().allAquiferCellIds()
322 : std::vector<std::size_t>{}]
323 (const std::size_t cellIdx)
324 {
325 return std::binary_search(numAquCell.begin(), numAquCell.end(), cellIdx);
326 };
327
328 for (const auto& elem : elements(globalGridView)) {
329 for (const auto& is : intersections(globalGridView, elem)) {
330 if (!is.neighbor())
331 continue; // intersection is on the domain boundary
332
333 // Not 'const' because remapped if 'map' is non-null.
334 unsigned c1 = globalElemMapper.index(is.inside());
335 unsigned c2 = globalElemMapper.index(is.outside());
336
337 if (c1 > c2)
338 continue; // we only need to handle each connection once, thank you.
339
340 // Ordering of compressed and uncompressed index should be the same
341 const int cartIdx1 = cartMapper.cartesianIndex( c1 );
342 const int cartIdx2 = cartMapper.cartesianIndex( c2 );
343
344 if (isNumAquCell(cartIdx1) || isNumAquCell(cartIdx2)) {
345 // Connections involving numerical aquifers are always NNCs
346 // for the purpose of file output. This holds even for
347 // connections between cells like (I,J,K) and (I+1,J,K)
348 // which are nominally neighbours in the Cartesian grid.
349 continue;
350 }
351
352 // Ordering of compressed and uncompressed index should be the same
353 assert(cartIdx1 <= cartIdx2);
354 int gc1 = std::min(cartIdx1, cartIdx2);
355 int gc2 = std::max(cartIdx1, cartIdx2);
356
357 // Re-ordering in case of non-empty mapping between equilGrid to grid
358 if (map) {
359 c1 = map(c1); // equilGridToGrid map
360 c2 = map(c2);
361 }
362
363 if (gc2 - gc1 == 1 && cartDims[0] > 1 ) {
364 tranx.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
365 continue; // skip other if clauses as they are false, last one needs some computation
366 }
367
368 if (gc2 - gc1 == cartDims[0] && cartDims[1] > 1) {
369 trany.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
370 continue; // skipt next if clause as it needs some computation
371 }
372
373 if ( gc2 - gc1 == cartDims[0]*cartDims[1] ||
374 directVerticalNeighbors(cartDims, cartesianToActive, gc1, gc2))
375 tranz.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
376 }
377 }
378}
379
380template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
381std::vector<NNCdata>
382EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
383exportNncStructure_(const std::unordered_map<int,int>& cartesianToActive,
384 const std::function<unsigned int(unsigned int)>& map) const
385{
386 auto isNumAquCell = [numAquCell = this->eclState_.aquifer().hasNumericalAquifer()
387 ? this->eclState_.aquifer().numericalAquifers().allAquiferCellIds()
388 : std::vector<std::size_t>{}]
389 (const std::size_t cellIdx)
390 {
391 return std::binary_search(numAquCell.begin(), numAquCell.end(), cellIdx);
392 };
393
394 auto isNumAquConn = [&isNumAquCell](const std::size_t cellIdx1,
395 const std::size_t cellIdx2)
396 {
397 return isNumAquCell(cellIdx1) || isNumAquCell(cellIdx2);
398 };
399
400 auto isCartesianNeighbour = [nx = this->eclState_.getInputGrid().getNX(),
401 ny = this->eclState_.getInputGrid().getNY()]
402 (const std::size_t cellIdx1, const std::size_t cellIdx2)
403 {
404 const auto cellDiff = cellIdx2 - cellIdx1;
405
406 return (cellDiff == 1)
407 || (cellDiff == nx)
408 || (cellDiff == nx * ny);
409 };
410
411 auto activeCell = [&cartesianToActive](const std::size_t cellIdx)
412 {
413 auto pos = cartesianToActive.find(cellIdx);
414 return (pos == cartesianToActive.end()) ? -1 : pos->second;
415 };
416
417 const auto& nncData = this->eclState_.getInputNNC().input();
418 const auto& unitSystem = this->eclState_.getDeckUnitSystem();
419
420 for (const auto& entry : nncData) {
421 // Ignore most explicit NNCs between otherwise neighbouring cells.
422 // We keep NNCs that involve cells with numerical aquifers even if
423 // these might be between neighbouring cells in the Cartesian
424 // grid--e.g., between cells (I,J,K) and (I+1,J,K). All such
425 // connections should be written to NNC output arrays provided the
426 // transmissibility value is sufficiently large.
427 //
428 // The condition cell2 >= cell1 holds by construction of nncData.
429 assert (entry.cell2 >= entry.cell1);
430
431 if (! isCartesianNeighbour(entry.cell1, entry.cell2) ||
432 isNumAquConn(entry.cell1, entry.cell2))
433 {
434 // Pick up transmissibility value from 'globalTrans()' since
435 // multiplier keywords like MULTREGT might have impacted the
436 // values entered in primary sources like NNC/EDITNNC/EDITNNCR.
437 const auto c1 = activeCell(entry.cell1);
438 const auto c2 = activeCell(entry.cell2);
439
440 if ((c1 < 0) || (c2 < 0)) {
441 // Connection between inactive cells? Unexpected at this
442 // level. Might consider 'throw'ing if this happens...
443 continue;
444 }
445
446 const auto trans = this->globalTrans().transmissibility(c1, c2);
447 const auto tt = unitSystem
448 .from_si(UnitSystem::measure::transmissibility, trans);
449
450 // ECLIPSE ignores NNCs (with EDITNNC/EDITNNCR applied) with
451 // small transmissibility values. Seems like the threshold is
452 // 1.0e-6 in output units.
453 if (std::isnormal(tt) && ! (tt < 1.0e-6)) {
454 this->outputNnc_.emplace_back(entry.cell1, entry.cell2, trans);
455 }
456 }
457 }
458
459 auto isDirectNeighbours = [&isCartesianNeighbour, &cartesianToActive,
460 cartDims = &this->cartMapper_.cartesianDimensions()]
461 (const std::size_t cellIdx1, const std::size_t cellIdx2)
462 {
463 return isCartesianNeighbour(cellIdx1, cellIdx2)
464 || directVerticalNeighbors(*cartDims, cartesianToActive, cellIdx1, cellIdx2);
465 };
466
467 using GlobalGridView = typename EquilGrid::LeafGridView;
468 using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
469 const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
470 const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };
471
472 // Cartesian index mapper for the serial I/O grid
473 const auto& equilCartMapper = *equilCartMapper_;
474 for (const auto& elem : elements(globalGridView)) {
475 for (const auto& is : intersections(globalGridView, elem)) {
476 if (!is.neighbor())
477 continue; // intersection is on the domain boundary
478
479 // Not 'const' because remapped if 'map' is non-null.
480 unsigned c1 = globalElemMapper.index(is.inside());
481 unsigned c2 = globalElemMapper.index(is.outside());
482
483 if (c1 > c2)
484 continue; // we only need to handle each connection once, thank you.
485
486 std::size_t cc1 = equilCartMapper.cartesianIndex( c1 );
487 std::size_t cc2 = equilCartMapper.cartesianIndex( c2 );
488
489 if ( cc2 < cc1 )
490 std::swap(cc1, cc2);
491
492 // Re-ordering in case of non-empty mapping between equilGrid to grid
493 if (map) {
494 c1 = map(c1); // equilGridToGrid map
495 c2 = map(c2);
496 }
497
498 if (isNumAquConn(cc1, cc2) || ! isDirectNeighbours(cc1, cc2)) {
499 // We need to check whether an NNC for this face was also
500 // specified via the NNC keyword in the deck.
501 auto t = this->globalTrans().transmissibility(c1, c2);
502 auto candidate = std::lower_bound(nncData.begin(), nncData.end(),
503 NNCdata { cc1, cc2, 0.0 });
504
505 while ((candidate != nncData.end()) &&
506 (candidate->cell1 == cc1) &&
507 (candidate->cell2 == cc2))
508 {
509 t -= candidate->trans;
510 ++candidate;
511 }
512
513 // ECLIPSE ignores NNCs with zero transmissibility
514 // (different threshold than for NNC with corresponding
515 // EDITNNC above). In addition we do set small
516 // transmissibilities to zero when setting up the simulator.
517 // These will be ignored here, too.
518 const auto tt = unitSystem
519 .from_si(UnitSystem::measure::transmissibility, t);
520
521 if (std::isnormal(tt) && (tt > 1.0e-12)) {
522 this->outputNnc_.emplace_back(cc1, cc2, t);
523 }
524 }
525 }
526 }
527
528 return this->outputNnc_;
529}
530
531template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
533doWriteOutput(const int reportStepNum,
534 const std::optional<int> timeStepNum,
535 const bool isSubStep,
536 data::Solution&& localCellData,
537 data::Wells&& localWellData,
538 data::GroupAndNetworkValues&& localGroupAndNetworkData,
539 data::Aquifers&& localAquiferData,
540 WellTestState&& localWTestState,
541 const Action::State& actionState,
542 const UDQState& udqState,
543 const SummaryState& summaryState,
544 const std::vector<Scalar>& thresholdPressure,
545 Scalar curTime,
546 Scalar nextStepSize,
547 bool doublePrecision,
548 bool isFlowsn,
549 std::array<FlowsData<double>, 3>&& flowsn,
550 bool isFloresn,
551 std::array<FlowsData<double>, 3>&& floresn)
552{
553 const auto isParallel = this->collectOnIORank_.isParallel();
554 const bool needsReordering = this->collectOnIORank_.doesNeedReordering();
555
556 RestartValue restartValue {
557 (isParallel || needsReordering)
558 ? this->collectOnIORank_.globalCellData()
559 : std::move(localCellData),
560
561 isParallel ? this->collectOnIORank_.globalWellData()
562 : std::move(localWellData),
563
564 isParallel ? this->collectOnIORank_.globalGroupAndNetworkData()
565 : std::move(localGroupAndNetworkData),
566
567 isParallel ? this->collectOnIORank_.globalAquiferData()
568 : std::move(localAquiferData)
569 };
570
571 if (eclState_.getSimulationConfig().useThresholdPressure()) {
572 restartValue.addExtra("THRESHPR", UnitSystem::measure::pressure,
573 thresholdPressure);
574 }
575
576 // Add suggested next timestep to extra data.
577 if (! isSubStep) {
578 restartValue.addExtra("OPMEXTRA", std::vector<double>(1, nextStepSize));
579 }
580
581 // Add nnc flows and flores.
582 if (isFlowsn) {
583 const auto flowsn_global = isParallel ? this->collectOnIORank_.globalFlowsn() : std::move(flowsn);
584 for (const auto& flows : flowsn_global) {
585 if (flows.name.empty())
586 continue;
587 if (flows.name == "FLOGASN+") {
588 restartValue.addExtra(flows.name, UnitSystem::measure::gas_surface_rate, flows.values);
589 } else {
590 restartValue.addExtra(flows.name, UnitSystem::measure::liquid_surface_rate, flows.values);
591 }
592 }
593 }
594 if (isFloresn) {
595 const auto floresn_global = isParallel ? this->collectOnIORank_.globalFloresn() : std::move(floresn);
596 for (const auto& flores : floresn_global) {
597 if (flores.name.empty()) {
598 continue;
599 }
600 restartValue.addExtra(flores.name, UnitSystem::measure::rate, flores.values);
601 }
602 }
603 // make sure that the previous I/O request has been completed
604 // and the number of incomplete tasklets does not increase between
605 // time steps
606 this->taskletRunner_->barrier();
607
608 // check if there might have been a failure in the TaskletRunner
609 if (this->taskletRunner_->failure()) {
610 throw std::runtime_error("Failure in the TaskletRunner while writing output.");
611 }
612
613 // create a tasklet to write the data for the current time step to disk
614 auto eclWriteTasklet = std::make_shared<EclWriteTasklet>(
615 actionState,
616 isParallel ? this->collectOnIORank_.globalWellTestState() : std::move(localWTestState),
617 summaryState, udqState, *this->eclIO_,
618 reportStepNum, timeStepNum, isSubStep, curTime, std::move(restartValue), doublePrecision);
619
620 // finally, start a new output writing job
621 this->taskletRunner_->dispatch(std::move(eclWriteTasklet));
622}
623
624template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
626evalSummary(const int reportStepNum,
627 const Scalar curTime,
628 const data::Wells& localWellData,
629 const data::WellBlockAveragePressures& localWBPData,
630 const data::GroupAndNetworkValues& localGroupAndNetworkData,
631 const std::map<int,data::AquiferData>& localAquiferData,
632 const std::map<std::pair<std::string, int>, double>& blockData,
633 const std::map<std::string, double>& miscSummaryData,
634 const std::map<std::string, std::vector<double>>& regionData,
635 const Inplace& inplace,
636 const std::optional<Inplace>& initialInPlace,
637 const InterRegFlowMap& interRegFlows,
638 SummaryState& summaryState,
639 UDQState& udqState)
640{
641 if (collectOnIORank_.isIORank()) {
642 const auto& summary = eclIO_->summary();
643
644 const auto& wellData = this->collectOnIORank_.isParallel()
645 ? this->collectOnIORank_.globalWellData()
646 : localWellData;
647
648 const auto& wbpData = this->collectOnIORank_.isParallel()
649 ? this->collectOnIORank_.globalWBPData()
650 : localWBPData;
651
652 const auto& groupAndNetworkData = this->collectOnIORank_.isParallel()
653 ? this->collectOnIORank_.globalGroupAndNetworkData()
654 : localGroupAndNetworkData;
655
656 const auto& aquiferData = this->collectOnIORank_.isParallel()
657 ? this->collectOnIORank_.globalAquiferData()
658 : localAquiferData;
659
660 summary.eval(summaryState,
661 reportStepNum,
662 curTime,
663 wellData,
664 wbpData,
665 groupAndNetworkData,
666 miscSummaryData,
667 initialInPlace,
668 inplace,
669 regionData,
670 blockData,
671 aquiferData,
672 getInterRegFlowsAsMap(interRegFlows));
673
674 // Off-by-one-fun: The reportStepNum argument corresponds to the
675 // report step these results will be written to, whereas the
676 // argument to UDQ function evaluation corresponds to the report
677 // step we are currently on.
678 const auto udq_step = reportStepNum - 1;
679
680 this->schedule_[udq_step].udq()
681 .eval(udq_step,
682 this->schedule_.wellMatcher(udq_step),
683 this->schedule_[udq_step].group_order(),
684 this->schedule_.segmentMatcherFactory(udq_step),
685 [es = std::cref(this->eclState_)]() {
686 return std::make_unique<RegionSetMatcher>
687 (es.get().fipRegionStatistics());
688 },
689 summaryState, udqState);
690 }
691
692#if HAVE_MPI
693 if (collectOnIORank_.isParallel()) {
694 Parallel::MpiSerializer ser(grid_.comm());
695 ser.append(summaryState);
696 }
697#endif
698}
699
700template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
703globalTrans() const
704{
705 assert (globalTrans_);
706 return *globalTrans_;
707}
708
709} // namespace Opm
710
711#endif // OPM_ECL_GENERIC_WRITER_IMPL_HPP
Definition: CollectDataOnIORank.hpp:49
bool isIORank() const
Definition: CollectDataOnIORank.hpp:126
Definition: EclGenericWriter.hpp:65
const EclipseState & eclState_
Definition: EclGenericWriter.hpp:156
void doWriteOutput(const int reportStepNum, const std::optional< int > timeStepNum, const bool isSubStep, data::Solution &&localCellData, data::Wells &&localWellData, data::GroupAndNetworkValues &&localGroupAndNetworkData, data::Aquifers &&localAquiferData, WellTestState &&localWTestState, const Action::State &actionState, const UDQState &udqState, const SummaryState &summaryState, const std::vector< Scalar > &thresholdPressure, Scalar curTime, Scalar nextStepSize, bool doublePrecision, bool isFlowsn, std::array< FlowsData< double >, 3 > &&flowsn, bool isFloresn, std::array< FlowsData< double >, 3 > &&floresn)
Definition: EclGenericWriter_impl.hpp:533
CollectDataOnIORankType collectOnIORank_
Definition: EclGenericWriter.hpp:152
void evalSummary(int reportStepNum, Scalar curTime, const data::Wells &localWellData, const data::WellBlockAveragePressures &localWBPData, const data::GroupAndNetworkValues &localGroupAndNetworkData, const std::map< int, data::AquiferData > &localAquiferData, const std::map< std::pair< std::string, int >, double > &blockData, const std::map< std::string, double > &miscSummaryData, const std::map< std::string, std::vector< double > > &regionData, const Inplace &inplace, const std::optional< Inplace > &initialInPlace, const InterRegFlowMap &interRegFlows, SummaryState &summaryState, UDQState &udqState)
Definition: EclGenericWriter_impl.hpp:626
void extractOutputTransAndNNC(const std::function< unsigned int(unsigned int)> &map)
Definition: EclGenericWriter_impl.hpp:268
std::unique_ptr< TaskletRunner > taskletRunner_
Definition: EclGenericWriter.hpp:158
void writeInit()
Definition: EclGenericWriter_impl.hpp:251
EclGenericWriter(const Schedule &schedule, const EclipseState &eclState, const SummaryConfig &summaryConfig, const Grid &grid, const EquilGrid *equilGrid, const GridView &gridView, const Dune::CartesianIndexMapper< Grid > &cartMapper, const Dune::CartesianIndexMapper< EquilGrid > *equilCartMapper, bool enableAsyncOutput, bool enableEsmry)
Definition: EclGenericWriter_impl.hpp:200
const EclipseIO & eclIO() const
Definition: EclGenericWriter_impl.hpp:243
const TransmissibilityType & globalTrans() const
Definition: EclGenericWriter_impl.hpp:703
Inter-region flow accumulation maps for all region definition arrays.
Definition: InterRegFlows.hpp:179
std::vector< data::InterRegFlowMap > getInterRegFlows() const
const std::vector< std::string > & names() const
Class for serializing and broadcasting data using MPI.
Definition: MPISerializer.hpp:38
void append(T &data, int root=0)
Serialize and broadcast on root process, de-serialize and append on others.
Definition: MPISerializer.hpp:82
void broadcast(RootRank rootrank, Args &&... args)
Definition: MPISerializer.hpp:47
The base class for tasklets.
Definition: tasklets.hpp:45
virtual void run()=0
Handles where a given tasklet is run.
Definition: tasklets.hpp:93
Definition: Transmissibility.hpp:54
Definition: blackoilboundaryratevector.hh:39
Avoid mistakes in calls to broadcast() by wrapping the root argument in an explicit type.
Definition: MPISerializer.hpp:33