EclGenericWriter_impl.hpp
// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
#ifndef OPM_ECL_GENERIC_WRITER_IMPL_HPP
#define OPM_ECL_GENERIC_WRITER_IMPL_HPP

#include <dune/grid/common/mcmgmapper.hh>

#include <opm/grid/GridHelpers.hpp>
#include <opm/grid/utility/cartesianToCompressed.hpp>

#include <opm/input/eclipse/EclipseState/EclipseState.hpp>
#include <opm/input/eclipse/EclipseState/Grid/RegionSetMatcher.hpp>
#include <opm/input/eclipse/EclipseState/SummaryConfig/SummaryConfig.hpp>

#include <opm/input/eclipse/Schedule/Action/State.hpp>
#include <opm/input/eclipse/Schedule/Schedule.hpp>
#include <opm/input/eclipse/Schedule/SummaryState.hpp>
#include <opm/input/eclipse/Schedule/UDQ/UDQConfig.hpp>
#include <opm/input/eclipse/Schedule/UDQ/UDQState.hpp>
#include <opm/input/eclipse/Schedule/Well/WellMatcher.hpp>

#include <opm/input/eclipse/Units/UnitSystem.hpp>

#include <opm/output/eclipse/EclipseIO.hpp>
#include <opm/output/eclipse/RestartValue.hpp>
#include <opm/output/eclipse/Summary.hpp>

#if HAVE_MPI
#include <opm/simulators/utils/MPISerializer.hpp>
#endif

#if HAVE_MPI
#include <mpi.h>
#endif

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

namespace {

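// Returns true if the two cells, given by their global Cartesian indices,
// lie in the same (I,J) column, are more than one layer apart in K, and are
// separated only by inactive cells, i.e. they become direct vertical
// neighbours once the inactive cells are compressed away.  The first index
// must not be larger than the second.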
bool directVerticalNeighbors(const std::array<int, 3>& cartDims,
                             const std::unordered_map<int,int>& cartesianToActive,
                             int smallGlobalIndex, int largeGlobalIndex)
{
    assert(smallGlobalIndex <= largeGlobalIndex);
    std::array<int, 3> ijk1, ijk2;
    auto globalToIjk = [cartDims](int gc) {
        std::array<int, 3> ijk;
        ijk[0] = gc % cartDims[0];
        gc /= cartDims[0];
        ijk[1] = gc % cartDims[1];
        ijk[2] = gc / cartDims[1];
        return ijk;
    };
    ijk1 = globalToIjk(smallGlobalIndex);
    ijk2 = globalToIjk(largeGlobalIndex);
    assert(ijk2[2] >= ijk1[2]);

    if (ijk1[0] == ijk2[0] && ijk1[1] == ijk2[1] && (ijk2[2] - ijk1[2]) > 1)
    {
        assert((largeGlobalIndex - smallGlobalIndex) % (cartDims[0]*cartDims[1]) == 0);
        for (int gi = smallGlobalIndex + cartDims[0] * cartDims[1]; gi < largeGlobalIndex;
             gi += cartDims[0] * cartDims[1])
        {
            if (cartesianToActive.find(gi) != cartesianToActive.end()) {
                return false;
            }
        }
        return true;
    } else {
        return false;
    }
}

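// Repackage the accumulated inter-region flow rates as a map keyed by the
// name of each region set (the FIP region arrays requested for inter-region
// flow reporting), which is the form expected by the summary evaluator.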
std::unordered_map<std::string, Opm::data::InterRegFlowMap>
getInterRegFlowsAsMap(const Opm::InterRegFlowMap& map)
{
    auto maps = std::unordered_map<std::string, Opm::data::InterRegFlowMap>{};

    const auto& regionNames = map.names();
    auto flows = map.getInterRegFlows();
    const auto nmap = regionNames.size();

    maps.reserve(nmap);
    for (auto mapID = 0*nmap; mapID < nmap; ++mapID) {
        maps.emplace(regionNames[mapID], std::move(flows[mapID]));
    }

    return maps;
}

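// Tasklet that writes a single report (or time) step through EclipseIO.
// Everything needed for the write is copied (or moved) into the tasklet, so
// the actual file output can run asynchronously on the TaskletRunner's
// worker thread while the simulation proceeds.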
struct EclWriteTasklet : public Opm::TaskletInterface
{
    Opm::Action::State actionState_;
    Opm::WellTestState wtestState_;
    Opm::SummaryState summaryState_;
    Opm::UDQState udqState_;
    Opm::EclipseIO& eclIO_;
    int reportStepNum_;
    std::optional<int> timeStepNum_;
    bool isSubStep_;
    double secondsElapsed_;
    Opm::RestartValue restartValue_;
    bool writeDoublePrecision_;

    explicit EclWriteTasklet(const Opm::Action::State& actionState,
                             const Opm::WellTestState& wtestState,
                             const Opm::SummaryState& summaryState,
                             const Opm::UDQState& udqState,
                             Opm::EclipseIO& eclIO,
                             int reportStepNum,
                             std::optional<int> timeStepNum,
                             bool isSubStep,
                             double secondsElapsed,
                             Opm::RestartValue restartValue,
                             bool writeDoublePrecision)
        : actionState_(actionState)
        , wtestState_(wtestState)
        , summaryState_(summaryState)
        , udqState_(udqState)
        , eclIO_(eclIO)
        , reportStepNum_(reportStepNum)
        , timeStepNum_(timeStepNum)
        , isSubStep_(isSubStep)
        , secondsElapsed_(secondsElapsed)
        , restartValue_(std::move(restartValue))
        , writeDoublePrecision_(writeDoublePrecision)
    {}

    // callback to eclIO serial writeTimeStep method
    void run()
    {
        this->eclIO_.writeTimeStep(this->actionState_,
                                   this->wtestState_,
                                   this->summaryState_,
                                   this->udqState_,
                                   this->reportStepNum_,
                                   this->isSubStep_,
                                   this->secondsElapsed_,
                                   std::move(this->restartValue_),
                                   this->writeDoublePrecision_,
                                   this->timeStepNum_);
    }
};

} // anonymous namespace

namespace Opm {

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
EclGenericWriter(const Schedule& schedule,
                 const EclipseState& eclState,
                 const SummaryConfig& summaryConfig,
                 const Grid& grid,
                 const EquilGrid* equilGrid,
                 const GridView& gridView,
                 const Dune::CartesianIndexMapper<Grid>& cartMapper,
                 const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper,
                 bool enableAsyncOutput,
                 bool enableEsmry)
    : collectOnIORank_(grid,
                       equilGrid,
                       gridView,
                       cartMapper,
                       equilCartMapper,
                       summaryConfig.fip_regions_interreg_flow())
    , grid_           (grid)
    , gridView_       (gridView)
    , schedule_       (schedule)
    , eclState_       (eclState)
    , cartMapper_     (cartMapper)
    , equilCartMapper_(equilCartMapper)
    , equilGrid_      (equilGrid)
{
    if (this->collectOnIORank_.isIORank()) {
        this->eclIO_ = std::make_unique<EclipseIO>
            (this->eclState_,
             UgGridHelpers::createEclipseGrid(*equilGrid, eclState_.getInputGrid()),
             this->schedule_, summaryConfig, "", enableEsmry);
    }

    // Create an output worker thread if asynchronous output is enabled and
    // this rank is the I/O rank.  Asynchronous output is enabled by default
    // when pthreads are available.
    int numWorkerThreads = 0;
    if (enableAsyncOutput && collectOnIORank_.isIORank()) {
        numWorkerThreads = 1;
    }

    this->taskletRunner_.reset(new TaskletRunner(numWorkerThreads));
}

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
const EclipseIO& EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
eclIO() const
{
    assert(eclIO_);
    return *eclIO_;
}

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
void EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
writeInit()
{
    if (collectOnIORank_.isIORank()) {
        std::map<std::string, std::vector<int>> integerVectors;
        if (collectOnIORank_.isParallel()) {
            integerVectors.emplace("MPI_RANK", collectOnIORank_.globalRanks());
        }

        eclIO_->writeInitial(*this->outputTrans_,
                             integerVectors,
                             this->outputNnc_);
        this->outputTrans_.reset();
    }
}

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
void
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
extractOutputTransAndNNC(const std::function<unsigned int(unsigned int)>& map)
{
    if (collectOnIORank_.isIORank()) {
        auto cartMap = cartesianToCompressed(equilGrid_->size(0), UgGridHelpers::globalCell(*equilGrid_));
        computeTrans_(cartMap, map);
        exportNncStructure_(cartMap, map);
    }

#if HAVE_MPI
    if (collectOnIORank_.isParallel()) {
        const auto& comm = grid_.comm();
        Parallel::MpiSerializer ser(comm);
        ser.broadcast(outputNnc_);
    }
#endif
}

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
void
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
computeTrans_(const std::unordered_map<int,int>& cartesianToActive,
              const std::function<unsigned int(unsigned int)>& map) const
{
    if (!outputTrans_) {
        outputTrans_ = std::make_unique<data::Solution>();
    }

    const auto& cartMapper = *equilCartMapper_;
    const auto& cartDims = cartMapper.cartesianDimensions();

    auto createCellData = [&cartDims]() {
        return data::CellData{
            UnitSystem::measure::transmissibility,
            std::vector<double>(cartDims[0] * cartDims[1] * cartDims[2], 0.0),
            data::TargetType::INIT
        };
    };

    outputTrans_->clear();
    outputTrans_->emplace("TRANX", createCellData());
    outputTrans_->emplace("TRANY", createCellData());
    outputTrans_->emplace("TRANZ", createCellData());

    auto& tranx = this->outputTrans_->at("TRANX");
    auto& trany = this->outputTrans_->at("TRANY");
    auto& tranz = this->outputTrans_->at("TRANZ");

    using GlobalGridView = typename EquilGrid::LeafGridView;
    using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
    const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
    const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };

    auto isNumAquCell = [numAquCell = this->eclState_.aquifer().hasNumericalAquifer()
                             ? this->eclState_.aquifer().numericalAquifers().allAquiferCellIds()
                             : std::vector<std::size_t>{}]
        (const std::size_t cellIdx)
    {
        return std::binary_search(numAquCell.begin(), numAquCell.end(), cellIdx);
    };

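    // Visit every interior face of the I/O grid once and classify the
    // connection by the difference of the two cells' global Cartesian
    // indices: a difference of 1 is an I-direction neighbour (TRANX), a
    // difference of NX is a J-direction neighbour (TRANY), and a difference
    // of NX*NY, or a larger vertical gap bridged only by inactive cells, is
    // a K-direction neighbour (TRANZ).  Connections involving numerical
    // aquifer cells are skipped here; they are reported as NNCs instead.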
    for (const auto& elem : elements(globalGridView)) {
        for (const auto& is : intersections(globalGridView, elem)) {
            if (!is.neighbor())
                continue; // intersection is on the domain boundary

            // Not 'const' because remapped if 'map' is non-null.
            unsigned c1 = globalElemMapper.index(is.inside());
            unsigned c2 = globalElemMapper.index(is.outside());

            if (c1 > c2)
                continue; // we only need to handle each connection once, thank you.

            // Ordering of compressed and uncompressed index should be the same
            const int cartIdx1 = cartMapper.cartesianIndex( c1 );
            const int cartIdx2 = cartMapper.cartesianIndex( c2 );

            if (isNumAquCell(cartIdx1) || isNumAquCell(cartIdx2)) {
                // Connections involving numerical aquifers are always NNCs
                // for the purpose of file output.  This holds even for
                // connections between cells like (I,J,K) and (I+1,J,K)
                // which are nominally neighbours in the Cartesian grid.
                continue;
            }

            // Ordering of compressed and uncompressed index should be the same
            assert(cartIdx1 <= cartIdx2);
            int gc1 = std::min(cartIdx1, cartIdx2);
            int gc2 = std::max(cartIdx1, cartIdx2);

            // Re-ordering in case of non-empty mapping between equilGrid to grid
            if (map) {
                c1 = map(c1); // equilGridToGrid map
                c2 = map(c2);
            }

            if (gc2 - gc1 == 1 && cartDims[0] > 1) {
                tranx.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
                continue; // skip the other if clauses as they are false, last one needs some computation
            }

            if (gc2 - gc1 == cartDims[0] && cartDims[1] > 1) {
                trany.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
                continue; // skip the next if clause as it needs some computation
            }

            if (gc2 - gc1 == cartDims[0]*cartDims[1] ||
                directVerticalNeighbors(cartDims, cartesianToActive, gc1, gc2))
                tranz.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
        }
    }
}

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
std::vector<NNCdata>
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
exportNncStructure_(const std::unordered_map<int,int>& cartesianToActive,
                    const std::function<unsigned int(unsigned int)>& map) const
{
    auto isNumAquCell = [numAquCell = this->eclState_.aquifer().hasNumericalAquifer()
                             ? this->eclState_.aquifer().numericalAquifers().allAquiferCellIds()
                             : std::vector<std::size_t>{}]
        (const std::size_t cellIdx)
    {
        return std::binary_search(numAquCell.begin(), numAquCell.end(), cellIdx);
    };

    auto isNumAquConn = [&isNumAquCell](const std::size_t cellIdx1,
                                        const std::size_t cellIdx2)
    {
        return isNumAquCell(cellIdx1) || isNumAquCell(cellIdx2);
    };

    auto isCartesianNeighbour = [nx = this->eclState_.getInputGrid().getNX(),
                                 ny = this->eclState_.getInputGrid().getNY()]
        (const std::size_t cellIdx1, const std::size_t cellIdx2)
    {
        const auto cellDiff = cellIdx2 - cellIdx1;

        return (cellDiff == 1)
            || (cellDiff == nx)
            || (cellDiff == nx * ny);
    };

    auto activeCell = [&cartesianToActive](const std::size_t cellIdx)
    {
        auto pos = cartesianToActive.find(cellIdx);
        return (pos == cartesianToActive.end()) ? -1 : pos->second;
    };

    const auto& nncData = this->eclState_.getInputNNC().input();
    const auto& unitSystem = this->eclState_.getDeckUnitSystem();

    for (const auto& entry : nncData) {
        // Ignore most explicit NNCs between otherwise neighbouring cells.
        // We keep NNCs that involve cells with numerical aquifers even if
        // these might be between neighbouring cells in the Cartesian
        // grid--e.g., between cells (I,J,K) and (I+1,J,K).  All such
        // connections should be written to NNC output arrays provided the
        // transmissibility value is sufficiently large.
        //
        // The condition cell2 >= cell1 holds by construction of nncData.
        assert (entry.cell2 >= entry.cell1);

        if (! isCartesianNeighbour(entry.cell1, entry.cell2) ||
            isNumAquConn(entry.cell1, entry.cell2))
        {
            // Pick up transmissibility value from 'globalTrans()' since
            // multiplier keywords like MULTREGT might have impacted the
            // values entered in primary sources like NNC/EDITNNC/EDITNNCR.
            const auto c1 = activeCell(entry.cell1);
            const auto c2 = activeCell(entry.cell2);

            if ((c1 < 0) || (c2 < 0)) {
                // Connection between inactive cells?  Unexpected at this
                // level.  Might consider 'throw'ing if this happens...
                continue;
            }

            const auto trans = this->globalTrans().transmissibility(c1, c2);
            const auto tt = unitSystem
                .from_si(UnitSystem::measure::transmissibility, trans);

            // ECLIPSE ignores NNCs (with EDITNNC/EDITNNCR applied) with
            // small transmissibility values.  Seems like the threshold is
            // 1.0e-6 in output units.
            if (std::isnormal(tt) && ! (tt < 1.0e-6)) {
                this->outputNnc_.emplace_back(entry.cell1, entry.cell2, trans);
            }
        }
    }

    auto isDirectNeighbours = [&isCartesianNeighbour, &cartesianToActive,
                               cartDims = &this->cartMapper_.cartesianDimensions()]
        (const std::size_t cellIdx1, const std::size_t cellIdx2)
    {
        return isCartesianNeighbour(cellIdx1, cellIdx2)
            || directVerticalNeighbors(*cartDims, cartesianToActive, cellIdx1, cellIdx2);
    };

    using GlobalGridView = typename EquilGrid::LeafGridView;
    using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
    const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
    const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };

    // Cartesian index mapper for the serial I/O grid
    const auto& equilCartMapper = *equilCartMapper_;
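
    // Second pass: visit every interior face of the I/O grid and add NNC
    // entries for connections that are not direct (Cartesian or
    // vertical-across-inactive-cells) neighbours, or that involve numerical
    // aquifer cells.  Any transmissibility already specified through
    // explicit NNC keyword entries for the same cell pair is subtracted so
    // that it is not reported twice.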
    for (const auto& elem : elements(globalGridView)) {
        for (const auto& is : intersections(globalGridView, elem)) {
            if (!is.neighbor())
                continue; // intersection is on the domain boundary

            // Not 'const' because remapped if 'map' is non-null.
            unsigned c1 = globalElemMapper.index(is.inside());
            unsigned c2 = globalElemMapper.index(is.outside());

            if (c1 > c2)
                continue; // we only need to handle each connection once, thank you.

            std::size_t cc1 = equilCartMapper.cartesianIndex( c1 );
            std::size_t cc2 = equilCartMapper.cartesianIndex( c2 );

            if ( cc2 < cc1 )
                std::swap(cc1, cc2);

            // Re-ordering in case of non-empty mapping between equilGrid to grid
            if (map) {
                c1 = map(c1); // equilGridToGrid map
                c2 = map(c2);
            }

            if (isNumAquConn(cc1, cc2) || ! isDirectNeighbours(cc1, cc2)) {
                // We need to check whether an NNC for this face was also
                // specified via the NNC keyword in the deck.
                auto t = this->globalTrans().transmissibility(c1, c2);
                auto candidate = std::lower_bound(nncData.begin(), nncData.end(),
                                                  NNCdata { cc1, cc2, 0.0 });

                while ((candidate != nncData.end()) &&
                       (candidate->cell1 == cc1) &&
                       (candidate->cell2 == cc2))
                {
                    t -= candidate->trans;
                    ++candidate;
                }

                // ECLIPSE ignores NNCs with zero transmissibility
                // (different threshold than for NNC with corresponding
                // EDITNNC above).  In addition we do set small
                // transmissibilities to zero when setting up the simulator.
                // These will be ignored here, too.
                const auto tt = unitSystem
                    .from_si(UnitSystem::measure::transmissibility, t);

                if (std::isnormal(tt) && (tt > 1.0e-12)) {
                    this->outputNnc_.emplace_back(cc1, cc2, t);
                }
            }
        }
    }

    return this->outputNnc_;
}

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
void EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
doWriteOutput(const int reportStepNum,
              const std::optional<int> timeStepNum,
              const bool isSubStep,
              data::Solution&& localCellData,
              data::Wells&& localWellData,
              data::GroupAndNetworkValues&& localGroupAndNetworkData,
              data::Aquifers&& localAquiferData,
              WellTestState&& localWTestState,
              const Action::State& actionState,
              const UDQState& udqState,
              const SummaryState& summaryState,
              const std::vector<Scalar>& thresholdPressure,
              Scalar curTime,
              Scalar nextStepSize,
              bool doublePrecision,
              bool isFlowsn,
              std::array<FlowsData<double>, 3>&& flowsn,
              bool isFloresn,
              std::array<FlowsData<double>, 3>&& floresn)
{
    const auto isParallel = this->collectOnIORank_.isParallel();
    const bool needsReordering = this->collectOnIORank_.doesNeedReordering();

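    // Assemble the restart payload.  On the I/O rank of a parallel run (or
    // whenever cell data must be reordered) the globally collected data
    // gathered by collectOnIORank_ is used; otherwise the rank-local
    // containers are moved in directly to avoid copies.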
    RestartValue restartValue {
        (isParallel || needsReordering)
            ? this->collectOnIORank_.globalCellData()
            : std::move(localCellData),

        isParallel ? this->collectOnIORank_.globalWellData()
                   : std::move(localWellData),

        isParallel ? this->collectOnIORank_.globalGroupAndNetworkData()
                   : std::move(localGroupAndNetworkData),

        isParallel ? this->collectOnIORank_.globalAquiferData()
                   : std::move(localAquiferData)
    };

    if (eclState_.getSimulationConfig().useThresholdPressure()) {
        restartValue.addExtra("THRESHPR", UnitSystem::measure::pressure,
                              thresholdPressure);
    }

    // Add suggested next timestep to extra data.
    if (! isSubStep) {
        restartValue.addExtra("OPMEXTRA", std::vector<double>(1, nextStepSize));
    }

    // Add nnc flows and flores.
    if (isFlowsn) {
        const auto flowsn_global = isParallel ? this->collectOnIORank_.globalFlowsn() : std::move(flowsn);
        for (const auto& flows : flowsn_global) {
            if (flows.name.empty())
                continue;

            if (flows.name == "FLOGASN+") {
                restartValue.addExtra(flows.name, UnitSystem::measure::gas_surface_rate, flows.values);
            } else {
                restartValue.addExtra(flows.name, UnitSystem::measure::liquid_surface_rate, flows.values);
            }
        }
    }
    if (isFloresn) {
        const auto floresn_global = isParallel ? this->collectOnIORank_.globalFloresn() : std::move(floresn);
        for (const auto& flores : floresn_global) {
            if (flores.name.empty()) {
                continue;
            }
            restartValue.addExtra(flores.name, UnitSystem::measure::rate, flores.values);
        }
    }

    // make sure that the previous I/O request has been completed
    // and the number of incomplete tasklets does not increase between
    // time steps
    this->taskletRunner_->barrier();

    // check if there might have been a failure in the TaskletRunner
    if (this->taskletRunner_->failure()) {
        throw std::runtime_error("Failure in the TaskletRunner while writing output.");
    }

    // create a tasklet to write the data for the current time step to disk
    auto eclWriteTasklet = std::make_shared<EclWriteTasklet>(
        actionState,
        isParallel ? this->collectOnIORank_.globalWellTestState() : std::move(localWTestState),
        summaryState, udqState, *this->eclIO_,
        reportStepNum, timeStepNum, isSubStep, curTime, std::move(restartValue), doublePrecision);

    // finally, start a new output writing job
    this->taskletRunner_->dispatch(std::move(eclWriteTasklet));
}

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
void EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
evalSummary(const int reportStepNum,
            const Scalar curTime,
            const data::Wells& localWellData,
            const data::WellBlockAveragePressures& localWBPData,
            const data::GroupAndNetworkValues& localGroupAndNetworkData,
            const std::map<int, data::AquiferData>& localAquiferData,
            const std::map<std::pair<std::string, int>, double>& blockData,
            const std::map<std::string, double>& miscSummaryData,
            const std::map<std::string, std::vector<double>>& regionData,
            const Inplace& inplace,
            const Inplace& initialInPlace,
            const InterRegFlowMap& interRegFlows,
            SummaryState& summaryState,
            UDQState& udqState)
{
    if (collectOnIORank_.isIORank()) {
        const auto& summary = eclIO_->summary();

        const auto& wellData = this->collectOnIORank_.isParallel()
            ? this->collectOnIORank_.globalWellData()
            : localWellData;

        const auto& wbpData = this->collectOnIORank_.isParallel()
            ? this->collectOnIORank_.globalWBPData()
            : localWBPData;

        const auto& groupAndNetworkData = this->collectOnIORank_.isParallel()
            ? this->collectOnIORank_.globalGroupAndNetworkData()
            : localGroupAndNetworkData;

        const auto& aquiferData = this->collectOnIORank_.isParallel()
            ? this->collectOnIORank_.globalAquiferData()
            : localAquiferData;

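        // Evaluate the requested summary vectors on the I/O rank using the
        // globally collected data; the updated SummaryState is communicated
        // to the other ranks further below (MpiSerializer::append).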
        summary.eval(summaryState,
                     reportStepNum,
                     curTime,
                     wellData,
                     wbpData,
                     groupAndNetworkData,
                     miscSummaryData,
                     initialInPlace,
                     inplace,
                     regionData,
                     blockData,
                     aquiferData,
                     getInterRegFlowsAsMap(interRegFlows));

        // Off-by-one-fun: The reportStepNum argument corresponds to the
        // report step these results will be written to, whereas the
        // argument to UDQ function evaluation corresponds to the report
        // step we are currently on.
        const auto udq_step = reportStepNum - 1;

        this->schedule_.getUDQConfig(udq_step)
            .eval(udq_step,
                  this->schedule_,
                  this->schedule_.wellMatcher(udq_step),
                  this->schedule_.segmentMatcherFactory(udq_step),
                  [es = std::cref(this->eclState_)]() {
                      return std::make_unique<RegionSetMatcher>
                          (es.get().fipRegionStatistics());
                  },
                  summaryState, udqState);
    }

#if HAVE_MPI
    if (collectOnIORank_.isParallel()) {
        Parallel::MpiSerializer ser(grid_.comm());
        ser.append(summaryState);
    }
#endif
}

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
const typename EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::TransmissibilityType&
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
globalTrans() const
{
    assert (globalTrans_);
    return *globalTrans_;
}

} // namespace Opm

#endif // OPM_ECL_GENERIC_WRITER_IMPL_HPP