EclGenericWriter_impl.hpp
// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
#ifndef OPM_ECL_GENERIC_WRITER_IMPL_HPP
#define OPM_ECL_GENERIC_WRITER_IMPL_HPP

#include <dune/grid/common/mcmgmapper.hh>

#include <opm/grid/GridHelpers.hpp>
#include <opm/grid/utility/cartesianToCompressed.hpp>

#include <opm/input/eclipse/EclipseState/EclipseState.hpp>
#include <opm/input/eclipse/EclipseState/Grid/RegionSetMatcher.hpp>
#include <opm/input/eclipse/EclipseState/SummaryConfig/SummaryConfig.hpp>

#include <opm/input/eclipse/Schedule/Action/State.hpp>
#include <opm/input/eclipse/Schedule/Schedule.hpp>
#include <opm/input/eclipse/Schedule/SummaryState.hpp>
#include <opm/input/eclipse/Schedule/UDQ/UDQConfig.hpp>
#include <opm/input/eclipse/Schedule/UDQ/UDQState.hpp>
#include <opm/input/eclipse/Schedule/Well/WellMatcher.hpp>

#include <opm/input/eclipse/Units/UnitSystem.hpp>

#include <opm/output/eclipse/EclipseIO.hpp>
#include <opm/output/eclipse/RestartValue.hpp>
#include <opm/output/eclipse/Summary.hpp>

#if HAVE_MPI
#include <opm/simulators/utils/MPISerializer.hpp>
#endif

#if HAVE_MPI
#include <mpi.h>
#endif

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

namespace {

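// Return true if the two Cartesian cells lie in the same (I,J) column with at
// least one layer between them and every Cartesian cell strictly between them
// in the K direction is inactive.  Immediate vertical neighbours are handled
// by the callers directly.  Expects smallGlobalIndex <= largeGlobalIndex.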
bool directVerticalNeighbors(const std::array<int, 3>& cartDims,
                             const std::unordered_map<int,int>& cartesianToActive,
                             int smallGlobalIndex, int largeGlobalIndex)
{
    assert(smallGlobalIndex <= largeGlobalIndex);
    std::array<int, 3> ijk1, ijk2;
    auto globalToIjk = [cartDims](int gc) {
        std::array<int, 3> ijk;
        ijk[0] = gc % cartDims[0];
        gc /= cartDims[0];
        ijk[1] = gc % cartDims[1];
        ijk[2] = gc / cartDims[1];
        return ijk;
    };
    ijk1 = globalToIjk(smallGlobalIndex);
    ijk2 = globalToIjk(largeGlobalIndex);
    assert(ijk2[2] >= ijk1[2]);

    if (ijk1[0] == ijk2[0] && ijk1[1] == ijk2[1] && (ijk2[2] - ijk1[2]) > 1) {
        assert((largeGlobalIndex - smallGlobalIndex) % (cartDims[0]*cartDims[1]) == 0);
        for (int gi = smallGlobalIndex + cartDims[0]*cartDims[1]; gi < largeGlobalIndex;
             gi += cartDims[0]*cartDims[1])
        {
            if (cartesianToActive.find(gi) != cartesianToActive.end()) {
                return false;
            }
        }
        return true;
    }

    return false;
}

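// Unpack the accumulated inter-region flow rates into one data::InterRegFlowMap
// per region set (e.g., FIPNUM), keyed by region set name, in the form expected
// by the summary evaluation code.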
std::unordered_map<std::string, Opm::data::InterRegFlowMap>
getInterRegFlowsAsMap(const Opm::InterRegFlowMap& map)
{
    auto maps = std::unordered_map<std::string, Opm::data::InterRegFlowMap>{};

    const auto& regionNames = map.names();
    auto flows = map.getInterRegFlows();
    const auto nmap = regionNames.size();

    maps.reserve(nmap);
    for (auto mapID = 0*nmap; mapID < nmap; ++mapID) {
        maps.emplace(regionNames[mapID], std::move(flows[mapID]));
    }

    return maps;
}

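// Tasklet that runs EclipseIO::writeTimeStep() on the dedicated output thread.
// It stores copies of the relevant state objects so the simulation can proceed
// while the previous report step is still being written to disk.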
struct EclWriteTasklet : public Opm::TaskletInterface
{
    Opm::Action::State actionState_;
    Opm::WellTestState wtestState_;
    Opm::SummaryState summaryState_;
    Opm::UDQState udqState_;
    Opm::EclipseIO& eclIO_;
    int reportStepNum_;
    bool isSubStep_;
    double secondsElapsed_;
    Opm::RestartValue restartValue_;
    bool writeDoublePrecision_;

    explicit EclWriteTasklet(const Opm::Action::State& actionState,
                             const Opm::WellTestState& wtestState,
                             const Opm::SummaryState& summaryState,
                             const Opm::UDQState& udqState,
                             Opm::EclipseIO& eclIO,
                             int reportStepNum,
                             bool isSubStep,
                             double secondsElapsed,
                             Opm::RestartValue restartValue,
                             bool writeDoublePrecision)
        : actionState_(actionState)
        , wtestState_(wtestState)
        , summaryState_(summaryState)
        , udqState_(udqState)
        , eclIO_(eclIO)
        , reportStepNum_(reportStepNum)
        , isSubStep_(isSubStep)
        , secondsElapsed_(secondsElapsed)
        , restartValue_(std::move(restartValue))
        , writeDoublePrecision_(writeDoublePrecision)
    {}

    // callback to eclIO serial writeTimeStep method
    void run()
    {
        this->eclIO_.writeTimeStep(this->actionState_,
                                   this->wtestState_,
                                   this->summaryState_,
                                   this->udqState_,
                                   this->reportStepNum_,
                                   this->isSubStep_,
                                   this->secondsElapsed_,
                                   std::move(this->restartValue_),
                                   this->writeDoublePrecision_);
    }
};

}

namespace Opm {

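// The constructor sets up the EclipseIO backend on the I/O rank only and, if
// asynchronous output is enabled, a single-threaded TaskletRunner that performs
// the actual file writing in the background.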
template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
EclGenericWriter(const Schedule& schedule,
                 const EclipseState& eclState,
                 const SummaryConfig& summaryConfig,
                 const Grid& grid,
                 const EquilGrid* equilGrid,
                 const GridView& gridView,
                 const Dune::CartesianIndexMapper<Grid>& cartMapper,
                 const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper,
                 bool enableAsyncOutput,
                 bool enableEsmry)
    : collectOnIORank_(grid,
                       equilGrid,
                       gridView,
                       cartMapper,
                       equilCartMapper,
                       summaryConfig.fip_regions_interreg_flow())
    , grid_            (grid)
    , gridView_        (gridView)
    , schedule_        (schedule)
    , eclState_        (eclState)
    , cartMapper_      (cartMapper)
    , equilCartMapper_ (equilCartMapper)
    , equilGrid_       (equilGrid)
{
    if (this->collectOnIORank_.isIORank()) {
        this->eclIO_ = std::make_unique<EclipseIO>
            (this->eclState_,
             UgGridHelpers::createEclipseGrid(*equilGrid, eclState_.getInputGrid()),
             this->schedule_, summaryConfig, "", enableEsmry);
    }

    // Create an output thread only if asynchronous output is enabled and this
    // is the I/O rank.  Asynchronous output is enabled by default whenever
    // pthreads are available.
    int numWorkerThreads = 0;
    if (enableAsyncOutput && collectOnIORank_.isIORank()) {
        numWorkerThreads = 1;
    }

    this->taskletRunner_.reset(new TaskletRunner(numWorkerThreads));
}

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
const EclipseIO& EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
eclIO() const
{
    assert(eclIO_);
    return *eclIO_;
}

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
void EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
writeInit(const std::function<unsigned int(unsigned int)>& map)
{
    if (collectOnIORank_.isIORank()) {
        std::map<std::string, std::vector<int>> integerVectors;
        if (collectOnIORank_.isParallel()) {
            integerVectors.emplace("MPI_RANK", collectOnIORank_.globalRanks());
        }

        auto cartMap = cartesianToCompressed(equilGrid_->size(0),
                                             UgGridHelpers::globalCell(*equilGrid_));

        eclIO_->writeInitial(computeTrans_(cartMap, map),
                             integerVectors,
                             exportNncStructure_(cartMap, map));
    }

#if HAVE_MPI
    if (collectOnIORank_.isParallel()) {
        const auto& comm = grid_.comm();
        Parallel::MpiSerializer ser(comm);
        ser.broadcast(outputNnc_);
    }
#endif
}

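// Gather the TRANX/TRANY/TRANZ arrays on the Cartesian grid of the serial
// (EQUIL) grid for the INIT file.  Only transmissibilities between Cartesian
// neighbours are stored here; all remaining connections are exported as NNCs
// in exportNncStructure_().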
template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
data::Solution
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
computeTrans_(const std::unordered_map<int,int>& cartesianToActive,
              const std::function<unsigned int(unsigned int)>& map) const
{
    const auto& cartMapper = *equilCartMapper_;
    const auto& cartDims = cartMapper.cartesianDimensions();

    auto tranx = data::CellData {
        UnitSystem::measure::transmissibility,
        std::vector<double>(cartDims[0] * cartDims[1] * cartDims[2], 0.0),
        data::TargetType::INIT
    };

    auto trany = tranx;
    auto tranz = tranx;

    using GlobalGridView = typename EquilGrid::LeafGridView;
    using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
    const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
    const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };

    auto isNumAquCell = [numAquCell = this->eclState_.aquifer().hasNumericalAquifer()
                         ? this->eclState_.aquifer().numericalAquifers().allAquiferCellIds()
                         : std::vector<std::size_t>{}]
        (const std::size_t cellIdx)
    {
        return std::binary_search(numAquCell.begin(), numAquCell.end(), cellIdx);
    };

    for (const auto& elem : elements(globalGridView)) {
        for (const auto& is : intersections(globalGridView, elem)) {
            if (!is.neighbor())
                continue; // intersection is on the domain boundary

            // Not 'const' because remapped if 'map' is non-null.
            unsigned c1 = globalElemMapper.index(is.inside());
            unsigned c2 = globalElemMapper.index(is.outside());

            if (c1 > c2)
                continue; // we only need to handle each connection once

            // Ordering of compressed and uncompressed index should be the same
            const int cartIdx1 = cartMapper.cartesianIndex(c1);
            const int cartIdx2 = cartMapper.cartesianIndex(c2);

            if (isNumAquCell(cartIdx1) || isNumAquCell(cartIdx2)) {
                // Connections involving numerical aquifers are always NNCs
                // for the purpose of file output.  This holds even for
                // connections between cells like (I,J,K) and (I+1,J,K)
                // which are nominally neighbours in the Cartesian grid.
                continue;
            }

            // Ordering of compressed and uncompressed index should be the same
            assert(cartIdx1 <= cartIdx2);
            const int gc1 = std::min(cartIdx1, cartIdx2);
            const int gc2 = std::max(cartIdx1, cartIdx2);

            // Re-order in case of a non-empty mapping between equilGrid and grid
            if (map) {
                c1 = map(c1); // equilGridToGrid map
                c2 = map(c2);
            }

            if (gc2 - gc1 == 1 && cartDims[0] > 1) {
                tranx.data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
                continue; // the remaining checks cannot apply
            }

            if (gc2 - gc1 == cartDims[0] && cartDims[1] > 1) {
                trany.data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
                continue; // skip the final, more expensive check
            }

            if (gc2 - gc1 == cartDims[0]*cartDims[1] ||
                directVerticalNeighbors(cartDims, cartesianToActive, gc1, gc2))
            {
                tranz.data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
            }
        }
    }

    return {
        {"TRANX", tranx},
        {"TRANY", trany},
        {"TRANZ", tranz},
    };
}

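// Build the NNC output list for the INIT file.  This combines explicit NNC
// keyword data (with multipliers applied via the global transmissibility
// object) with connections detected from the grid topology that are not
// Cartesian neighbours, e.g., across faults or involving numerical aquifers.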
template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
std::vector<NNCdata>
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
exportNncStructure_(const std::unordered_map<int,int>& cartesianToActive,
                    const std::function<unsigned int(unsigned int)>& map) const
{
    auto isNumAquCell = [numAquCell = this->eclState_.aquifer().hasNumericalAquifer()
                         ? this->eclState_.aquifer().numericalAquifers().allAquiferCellIds()
                         : std::vector<std::size_t>{}]
        (const std::size_t cellIdx)
    {
        return std::binary_search(numAquCell.begin(), numAquCell.end(), cellIdx);
    };

    auto isNumAquConn = [&isNumAquCell](const std::size_t cellIdx1,
                                        const std::size_t cellIdx2)
    {
        return isNumAquCell(cellIdx1) || isNumAquCell(cellIdx2);
    };

    auto isCartesianNeighbour = [nx = this->eclState_.getInputGrid().getNX(),
                                 ny = this->eclState_.getInputGrid().getNY()]
        (const std::size_t cellIdx1, const std::size_t cellIdx2)
    {
        const auto cellDiff = cellIdx2 - cellIdx1;

        return (cellDiff == 1)
            || (cellDiff == nx)
            || (cellDiff == nx * ny);
    };

    auto activeCell = [&cartesianToActive](const std::size_t cellIdx)
    {
        auto pos = cartesianToActive.find(cellIdx);
        return (pos == cartesianToActive.end()) ? -1 : pos->second;
    };

    const auto& nncData = this->eclState_.getInputNNC().input();
    const auto& unitSystem = this->eclState_.getDeckUnitSystem();

    for (const auto& entry : nncData) {
        // Ignore most explicit NNCs between otherwise neighbouring cells.
        // We keep NNCs that involve cells with numerical aquifers even if
        // these might be between neighbouring cells in the Cartesian grid,
        // e.g., between cells (I,J,K) and (I+1,J,K).  All such connections
        // should be written to the NNC output arrays provided the
        // transmissibility value is sufficiently large.
        //
        // The condition cell2 >= cell1 holds by construction of nncData.
        assert(entry.cell2 >= entry.cell1);

        if (! isCartesianNeighbour(entry.cell1, entry.cell2) ||
            isNumAquConn(entry.cell1, entry.cell2))
        {
            // Pick up the transmissibility value from 'globalTrans()' since
            // multiplier keywords like MULTREGT might have impacted the
            // values entered in primary sources like NNC/EDITNNC/EDITNNCR.
            const auto c1 = activeCell(entry.cell1);
            const auto c2 = activeCell(entry.cell2);

            if ((c1 < 0) || (c2 < 0)) {
                // Connection between inactive cells?  Unexpected at this
                // level.  Might consider 'throw'ing if this happens...
                continue;
            }

            const auto trans = this->globalTrans().transmissibility(c1, c2);
            const auto tt = unitSystem
                .from_si(UnitSystem::measure::transmissibility, trans);

            // ECLIPSE ignores NNCs (with EDITNNC/EDITNNCR applied) with
            // small transmissibility values.  The threshold appears to be
            // 1.0e-6 in output units.
            if (std::isnormal(tt) && ! (tt < 1.0e-6)) {
                this->outputNnc_.emplace_back(entry.cell1, entry.cell2, trans);
            }
        }
    }

    auto isDirectNeighbours = [&isCartesianNeighbour, &cartesianToActive,
                               cartDims = &this->cartMapper_.cartesianDimensions()]
        (const std::size_t cellIdx1, const std::size_t cellIdx2)
    {
        return isCartesianNeighbour(cellIdx1, cellIdx2)
            || directVerticalNeighbors(*cartDims, cartesianToActive, cellIdx1, cellIdx2);
    };

    using GlobalGridView = typename EquilGrid::LeafGridView;
    using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
    const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
    const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };

    // Cartesian index mapper for the serial I/O grid
    const auto& equilCartMapper = *equilCartMapper_;
    for (const auto& elem : elements(globalGridView)) {
        for (const auto& is : intersections(globalGridView, elem)) {
            if (!is.neighbor())
                continue; // intersection is on the domain boundary

            // Not 'const' because remapped if 'map' is non-null.
            unsigned c1 = globalElemMapper.index(is.inside());
            unsigned c2 = globalElemMapper.index(is.outside());

            if (c1 > c2)
                continue; // we only need to handle each connection once

            std::size_t cc1 = equilCartMapper.cartesianIndex(c1);
            std::size_t cc2 = equilCartMapper.cartesianIndex(c2);

            if (cc2 < cc1)
                std::swap(cc1, cc2);

            // Re-order in case of a non-empty mapping between equilGrid and grid
            if (map) {
                c1 = map(c1); // equilGridToGrid map
                c2 = map(c2);
            }

            if (isNumAquConn(cc1, cc2) || ! isDirectNeighbours(cc1, cc2)) {
                // We need to check whether an NNC for this face was also
                // specified via the NNC keyword in the deck.
                auto t = this->globalTrans().transmissibility(c1, c2);
                auto candidate = std::lower_bound(nncData.begin(), nncData.end(),
                                                  NNCdata { cc1, cc2, 0.0 });

                while ((candidate != nncData.end()) &&
                       (candidate->cell1 == cc1) &&
                       (candidate->cell2 == cc2))
                {
                    t -= candidate->trans;
                    ++candidate;
                }

                // ECLIPSE ignores NNCs with zero transmissibility (a
                // different threshold than for NNCs with corresponding
                // EDITNNC above).  In addition, we set small
                // transmissibilities to zero when setting up the simulator;
                // these are ignored here, too.
                const auto tt = unitSystem
                    .from_si(UnitSystem::measure::transmissibility, t);

                if (std::isnormal(tt) && (tt > 1.0e-12)) {
                    this->outputNnc_.emplace_back(cc1, cc2, t);
                }
            }
        }
    }

    return this->outputNnc_;
}

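// Assemble a RestartValue from the (possibly rank-local) output data, attach
// optional extra vectors (threshold pressure, suggested next time step, NNC
// flows), and dispatch the actual file writing to the tasklet runner so that
// the simulation can continue while I/O is in progress.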
template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
void EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
doWriteOutput(const int reportStepNum,
              const bool isSubStep,
              data::Solution&& localCellData,
              data::Wells&& localWellData,
              data::GroupAndNetworkValues&& localGroupAndNetworkData,
              data::Aquifers&& localAquiferData,
              WellTestState&& localWTestState,
              const Action::State& actionState,
              const UDQState& udqState,
              const SummaryState& summaryState,
              const std::vector<Scalar>& thresholdPressure,
              Scalar curTime,
              Scalar nextStepSize,
              bool doublePrecision,
              bool isFlowsn,
              std::array<FlowsData<double>, 3>&& flowsn,
              bool isFloresn,
              std::array<FlowsData<double>, 3>&& floresn)
{
    const auto isParallel = this->collectOnIORank_.isParallel();
    const bool needsReordering = this->collectOnIORank_.doesNeedReordering();

    RestartValue restartValue {
        (isParallel || needsReordering)
            ? this->collectOnIORank_.globalCellData()
            : std::move(localCellData),

        isParallel ? this->collectOnIORank_.globalWellData()
                   : std::move(localWellData),

        isParallel ? this->collectOnIORank_.globalGroupAndNetworkData()
                   : std::move(localGroupAndNetworkData),

        isParallel ? this->collectOnIORank_.globalAquiferData()
                   : std::move(localAquiferData)
    };

    if (eclState_.getSimulationConfig().useThresholdPressure()) {
        restartValue.addExtra("THRESHPR", UnitSystem::measure::pressure,
                              thresholdPressure);
    }

    // Add the suggested next time step size to the extra data.
    if (! isSubStep) {
        restartValue.addExtra("OPMEXTRA", std::vector<double>(1, nextStepSize));
    }

    // Add NNC flows and flores.
    if (isFlowsn) {
        const auto flowsn_global = isParallel ? this->collectOnIORank_.globalFlowsn() : std::move(flowsn);
        for (const auto& flows : flowsn_global) {
            if (flows.name.empty()) {
                continue;
            }

            if (flows.name == "FLOGASN+") {
                restartValue.addExtra(flows.name, UnitSystem::measure::gas_surface_rate, flows.values);
            }
            else {
                restartValue.addExtra(flows.name, UnitSystem::measure::liquid_surface_rate, flows.values);
            }
        }
    }

    if (isFloresn) {
        const auto floresn_global = isParallel ? this->collectOnIORank_.globalFloresn() : std::move(floresn);
        for (const auto& flores : floresn_global) {
            if (flores.name.empty()) {
                continue;
            }

            restartValue.addExtra(flores.name, UnitSystem::measure::rate, flores.values);
        }
    }

    // First, create a tasklet that writes the data for the current time
    // step to disk.
    auto eclWriteTasklet = std::make_shared<EclWriteTasklet>(
        actionState,
        isParallel ? this->collectOnIORank_.globalWellTestState() : std::move(localWTestState),
        summaryState, udqState, *this->eclIO_,
        reportStepNum, isSubStep, curTime, std::move(restartValue), doublePrecision);

    // Then make sure that the previous I/O request has completed so the
    // number of incomplete tasklets does not grow between time steps.
    this->taskletRunner_->barrier();

    // Finally, start the new output writing job.
    this->taskletRunner_->dispatch(std::move(eclWriteTasklet));
}

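// Evaluate the summary vectors and user-defined quantities (UDQs) for the
// current report step on the I/O rank, then distribute the updated
// SummaryState to the other ranks.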
template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
void EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
evalSummary(const int reportStepNum,
            const Scalar curTime,
            const data::Wells& localWellData,
            const data::WellBlockAveragePressures& localWBPData,
            const data::GroupAndNetworkValues& localGroupAndNetworkData,
            const std::map<int,data::AquiferData>& localAquiferData,
            const std::map<std::pair<std::string, int>, double>& blockData,
            const std::map<std::string, double>& miscSummaryData,
            const std::map<std::string, std::vector<double>>& regionData,
            const Inplace& inplace,
            const Inplace& initialInPlace,
            const InterRegFlowMap& interRegFlows,
            SummaryState& summaryState,
            UDQState& udqState)
{
    if (collectOnIORank_.isIORank()) {
        const auto& summary = eclIO_->summary();

        const auto& wellData = this->collectOnIORank_.isParallel()
            ? this->collectOnIORank_.globalWellData()
            : localWellData;

        const auto& wbpData = this->collectOnIORank_.isParallel()
            ? this->collectOnIORank_.globalWBPData()
            : localWBPData;

        const auto& groupAndNetworkData = this->collectOnIORank_.isParallel()
            ? this->collectOnIORank_.globalGroupAndNetworkData()
            : localGroupAndNetworkData;

        const auto& aquiferData = this->collectOnIORank_.isParallel()
            ? this->collectOnIORank_.globalAquiferData()
            : localAquiferData;

        summary.eval(summaryState,
                     reportStepNum,
                     curTime,
                     wellData,
                     wbpData,
                     groupAndNetworkData,
                     miscSummaryData,
                     initialInPlace,
                     inplace,
                     regionData,
                     blockData,
                     aquiferData,
                     getInterRegFlowsAsMap(interRegFlows));

        // Off-by-one fun: the reportStepNum argument corresponds to the
        // report step these results will be written to, whereas the
        // argument to the UDQ function evaluation corresponds to the report
        // step we are currently on.
        const auto udq_step = reportStepNum - 1;

        this->schedule_.getUDQConfig(udq_step)
            .eval(udq_step,
                  this->schedule_,
                  this->schedule_.wellMatcher(udq_step),
                  this->schedule_.segmentMatcherFactory(udq_step),
                  [es = std::cref(this->eclState_)]() {
                      return std::make_unique<RegionSetMatcher>
                          (es.get().fipRegionStatistics());
                  },
                  summaryState, udqState);
    }

#if HAVE_MPI
    if (collectOnIORank_.isParallel()) {
        Parallel::MpiSerializer ser(grid_.comm());
        ser.append(summaryState);
    }
#endif
}

template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
const typename EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::TransmissibilityType&
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
globalTrans() const
{
    assert(globalTrans_);
    return *globalTrans_;
}

} // namespace Opm

#endif // OPM_ECL_GENERIC_WRITER_IMPL_HPP