DamarisWriter.hpp
// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  Copyright 2022 SINTEF Digital, Mathematics and Cybernetics.
  Copyright 2023 Inria, Bretagne–Atlantique Research Center

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
#ifndef OPM_DAMARIS_WRITER_HPP
#define OPM_DAMARIS_WRITER_HPP

#include <dune/grid/common/partitionset.hh>

#include <opm/common/OpmLog/OpmLog.hpp>

#include <fmt/format.h>

#include <algorithm>
#include <memory>
#include <numeric>
#include <string>
#include <vector>

namespace Opm {

namespace DamarisOutput {

int endIteration(int rank);
int setParameter(const char* field, int rank, int value);
int setPosition(const char* field, int rank, int64_t pos);
int write(const char* field, int rank, const void* data);
int setupWritingPars(Parallel::Communication comm,
                     const int n_elements_local_grid,
                     std::vector<unsigned long long>& elements_rank_offsets);
}
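
// The helpers above are typically used in the order shown in DamarisWriter::writeOutput()
// below (a sketch of that call sequence, not an additional API guarantee):
//
//   setupWritingPars(comm, n_elements_local, offsets); // once, to size the Damaris variables
//   setPosition("PRESSURE", rank, offsets[rank]);      // per report step: offset into the global array
//   write("PRESSURE", rank, data);                     // publish this rank's block of data
//   endIteration(rank);                                // tell Damaris the iteration is complete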

/*!
 * \brief Collects necessary output values and passes them to the Damaris server processes.
 */
template <class TypeTag>
class DamarisWriter : public EclGenericWriter<GetPropType<TypeTag, Properties::Grid>,
                                              GetPropType<TypeTag, Properties::EquilGrid>,
                                              GetPropType<TypeTag, Properties::GridView>,
                                              GetPropType<TypeTag, Properties::ElementMapper>,
                                              GetPropType<TypeTag, Properties::Scalar>>
{
    using Simulator = GetPropType<TypeTag, Properties::Simulator>;
    using GridView = GetPropType<TypeTag, Properties::GridView>;
    using Grid = GetPropType<TypeTag, Properties::Grid>;
    using EquilGrid = GetPropType<TypeTag, Properties::EquilGrid>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
    using Element = typename GridView::template Codim<0>::Entity;
    using ElementMapper = GetPropType<TypeTag, Properties::ElementMapper>;

    // Type aliases used by the member definitions below
    using BaseType = EclGenericWriter<Grid, EquilGrid, GridView, ElementMapper, Scalar>;
    using DamarisVarInt = DamarisOutput::DamarisVar<int>;
    using DamarisVarChar = DamarisOutput::DamarisVar<char>;
    using DamarisVarDbl = DamarisOutput::DamarisVar<double>;

public:
    static void registerParameters()
    {
        Parameters::registerParam<TypeTag, Properties::DamarisOutputHdfCollective>
            ("Write output via Damaris using parallel HDF5 to "
             "get single file and dataset per timestep instead "
             "of one per Damaris core with multiple datasets.");
        Parameters::registerParam<TypeTag, Properties::DamarisSaveToHdf>
            ("Set to false to prevent output to HDF5. "
             "Uses collective output by default, or "
             "set --enable-damaris-collective=false to "
             "use file per core (file per Damaris server).");
        Parameters::registerParam<TypeTag, Properties::DamarisSaveMeshToHdf>
            ("Saves the mesh data to the HDF5 file (1st iteration only). "
             "Will set --damaris-output-hdf-collective to false, "
             "so will use file per core (file per Damaris server) output "
             "(global sizes and offset values of mesh variables are not being provided as yet).");
        Parameters::registerParam<TypeTag, Properties::DamarisPythonScript>
            ("Set to the path and filename of a Python script to run on "
             "Damaris server resources with access to OPM flow data.");
        Parameters::registerParam<TypeTag, Properties::DamarisPythonParaviewScript>
            ("Set to the path and filename of a Paraview Python script "
             "to run on Paraview Catalyst (1 or 2) on Damaris server "
             "resources with access to OPM flow data.");
        Parameters::registerParam<TypeTag, Properties::DamarisSimName>
            ("The name of the simulation to be used by Damaris. "
             "If empty (the default) then Damaris uses \"opm-sim-<random-number>\". "
             "This name is used for the Damaris HDF5 file name prefix. "
             "Make unique if writing to the same output directory.");
        Parameters::registerParam<TypeTag, Properties::DamarisLogLevel>
            ("The log level for the Damaris logging system (boost log based). "
             "Levels are: [trace, debug, info, warning, error, fatal]. "
             "Currently debug and info are useful.");
        Parameters::registerParam<TypeTag, Properties::DamarisDaskFile>
            ("The name of a Dask JSON configuration file (if using Dask for processing).");
        Parameters::registerParam<TypeTag, Properties::DamarisDedicatedCores>
            ("Set the number of dedicated cores (MPI processes) "
             "that should be used for Damaris processing (per node). "
             "Must divide evenly into the number of simulation ranks (client ranks).");
        Parameters::registerParam<TypeTag, Properties::DamarisDedicatedNodes>
            ("Set the number of dedicated nodes (full nodes) "
             "that should be used for Damaris processing (per simulation). "
             "Must divide evenly into the number of simulation nodes.");
        Parameters::registerParam<TypeTag, Properties::DamarisSharedMemorySizeBytes>
            ("Set the size of the shared memory buffer used for IPC "
             "between the simulation and the Damaris resources. "
             "Needs to hold all the variables published, possibly over "
             "multiple simulation iterations.");
        Parameters::registerParam<TypeTag, Properties::DamarisSharedMemoryName>
            ("The name of the shared memory area to be used by Damaris for the current simulation. "
             "If empty (the default) then Damaris uses \"opm-damaris-<random-string>\". "
             "This name should be unique if multiple simulations are running on "
             "the same node/server, as it is used for the Damaris shmem name and by "
             "the Python Dask library to locate sections of variables.");
    }
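
    // Example invocation (hypothetical command line; flag spellings assume the usual
    // kebab-case mapping of the parameters registered above):
    //
    //   flow CASE.DATA --enable-damaris-output=true --damaris-output-hdf-collective=false --damaris-dedicated-cores=2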

    // The Simulator object would preferably be const; the only reason it is
    // not is the SummaryState object owned deep down by the vanguard.
    DamarisWriter(Simulator& simulator)
        : BaseType(simulator.vanguard().schedule(),
                   simulator.vanguard().eclState(),
                   simulator.vanguard().summaryConfig(),
                   simulator.vanguard().grid(),
                   ((simulator.vanguard().grid().comm().rank() == 0)
                    ? &simulator.vanguard().equilGrid()
                    : nullptr),
                   simulator.vanguard().gridView(),
                   simulator.vanguard().cartesianIndexMapper(),
                   ((simulator.vanguard().grid().comm().rank() == 0)
                    ? &simulator.vanguard().equilCartesianIndexMapper()
                    : nullptr),
                   false, false)
        , simulator_(simulator)
    {
        this->damarisUpdate_ = true;

        this->rank_ = this->simulator_.vanguard().grid().comm().rank();
        this->nranks_ = this->simulator_.vanguard().grid().comm().size();

        this->elements_rank_offsets_.resize(this->nranks_);

        // Get the number of unique elements on this rank (excludes the shared 'ghost' elements)
        //
        // Might possibly use
        //
        //   detail::countLocalInteriorCellsGridView(this->simulator_.gridView())
        //
        // from countGlobalCells.hpp instead of calling std::distance() directly.
        {
            const auto& gridView = this->simulator_.gridView();
            const auto& interior_elements = elements(gridView, Dune::Partitions::interior);

            this->numElements_ = std::distance(interior_elements.begin(), interior_elements.end());
        }

        if (this->nranks_ > 1) {
            auto smryCfg = (this->rank_ == 0)
                ? this->eclIO_->finalSummaryConfig()
                : SummaryConfig{};

            eclBroadcast(this->simulator_.vanguard().grid().comm(), smryCfg);

            this->damarisOutputModule_ = std::make_unique<OutputBlackOilModule<TypeTag>>
                (simulator, smryCfg, this->collectOnIORank_);
        }
        else {
            this->damarisOutputModule_ = std::make_unique<OutputBlackOilModule<TypeTag>>
                (simulator, this->eclIO_->finalSummaryConfig(), this->collectOnIORank_);
        }
    }

    /*!
     * \brief Writes localCellData through to the Damaris server processes.
     *
     * Sets up the unstructured mesh which is passed to Damaris.
     */
    void writeOutput(data::Solution& localCellData, bool isSubStep)
    {
        OPM_TIMEBLOCK(writeOutput);
        const int reportStepNum = simulator_.episodeIndex() + 1;

        // Invalidate the local data, as localCellData was otherwise not being written
        if (!isSubStep)
            this->damarisOutputModule_->invalidateLocalData();
        this->prepareLocalCellData(isSubStep, reportStepNum);
        this->damarisOutputModule_->outputErrorLog(simulator_.gridView().comm());

        // The DamarisWriter does not output well or aquifer data (yet)
        auto localWellData = simulator_.problem().wellModel().wellData(); // data::Well

        if (! isSubStep)
        {
            if (localCellData.size() == 0) {
                this->damarisOutputModule_->assignToSolution(localCellData);
            }

            // Add cell data to perforations for RFT output
            this->damarisOutputModule_->addRftDataToWells(localWellData, reportStepNum);

            // On the first call, and whenever the mesh or variable sizes change, damarisUpdate_ is set to true
            if (damarisUpdate_ == true) {
                // Sets the Damaris parameter values "n_elements_local" and "n_elements_total",
                // which define the sizes of the Damaris variables, per rank and globally (over all ranks).
                // Also sets the offsets to where a rank's array data sits within the global array.
                // This is useful for HDF5 output and for defining distributed arrays in Dask.
                dam_err_ = DamarisOutput::setupWritingPars(simulator_.vanguard().grid().comm(),
                                                           numElements_, elements_rank_offsets_);

                // Sets data for the non-time-varying variables MPI_RANK and GLOBAL_CELL_INDEX
                this->setGlobalIndexForDamaris();

                // Set the geometry data for the mesh model.
                // This function writes the mesh data directly to Damaris shared memory using Opm::DamarisOutput::DamarisVar objects.
                this->writeDamarisGridOutput();

                // Currently, by default, we assume a static mesh (the geometry is unchanged throughout the simulation).
                // Set damarisUpdate_ to true if the geometry sent to Damaris needs to be updated.
                this->damarisUpdate_ = false;
            }

            if (this->damarisOutputModule_->getPRESSURE_ptr() != nullptr)
            {
                dam_err_ = DamarisOutput::setPosition("PRESSURE", rank_,
                                                      this->elements_rank_offsets_[rank_]);
                dam_err_ = DamarisOutput::write("PRESSURE", rank_,
                                                this->damarisOutputModule_->getPRESSURE_ptr());

                dam_err_ = DamarisOutput::endIteration(rank_);
            }
        } // end of !isSubStep
    }

private:
    int dam_err_;     //!< Stores the return value of the most recent Damaris API call
    int rank_;        //!< MPI rank of this process in the simulation communicator
    int nranks_;      //!< Number of MPI ranks in the simulation communicator
    int numElements_; //!< Number of interior (non-ghost) elements on this rank

    Simulator& simulator_;
    std::unique_ptr<OutputBlackOilModule<TypeTag>> damarisOutputModule_;
    std::vector<unsigned long long> elements_rank_offsets_; //!< Per-rank offsets into the global element arrays
    bool damarisUpdate_ = false; //!< When true, the mesh geometry and variable sizes are (re)sent to Damaris

    static bool enableDamarisOutput_()
    {
        return Parameters::get<TypeTag, Properties::EnableDamarisOutput>();
    }

    void setGlobalIndexForDamaris()
    {
        // GLOBAL_CELL_INDEX is used to reorder variable data when writing to disk.
        // This is enabled using select-file="GLOBAL_CELL_INDEX" in the <variable> XML tag.
        if (this->collectOnIORank_.isParallel()) {
            const std::vector<int>& local_to_global =
                this->collectOnIORank_.localIdxToGlobalIdxMapping();
            dam_err_ = DamarisOutput::write("GLOBAL_CELL_INDEX", rank_, local_to_global.data());
        } else {
            std::vector<int> local_to_global_filled;
            local_to_global_filled.resize(this->numElements_);
            std::iota(local_to_global_filled.begin(), local_to_global_filled.end(), 0);
            dam_err_ = DamarisOutput::write("GLOBAL_CELL_INDEX", rank_, local_to_global_filled.data());
        }

        // This is an example of writing to the Damaris shared memory directly (i.e. not using
        // damaris_write() to copy data there).
        // We add the MPI rank value directly into shared memory using the DamarisVar
        // wrapper of the C-based Damaris API.
        // The shared memory is given back to Damaris when the DamarisVarInt goes out of scope.
        DamarisVarInt mpi_rank_var_test(1, {"n_elements_local"}, "MPI_RANK", rank_);
        mpi_rank_var_test.setDamarisParameterAndShmem({this->numElements_});
        // Fill the created memory area
        std::fill(mpi_rank_var_test.data(), mpi_rank_var_test.data() + numElements_, rank_);
    }

    void writeDamarisGridOutput()
    {
        const auto& gridView = simulator_.gridView();
        GridDataOutput::SimMeshDataAccessor geomData(gridView, Dune::Partitions::interior);

        try {
            const bool hasPolyCells = geomData.polyhedralCellPresent();
            if (hasPolyCells) {
                OpmLog::error(fmt::format("ERROR: rank {} - the DUNE geometry grid has polyhedral elements, which are currently not supported.", rank_));
            }

            // This is the template XML model for the x, y, z coordinates, defined in initDamarisXmlFile.cpp,
            // which is used to build the internally generated Damaris XML configuration file:
            // <parameter name="n_coords_local"  type="int" value="1" />
            // <parameter name="n_coords_global" type="int" value="1" comment="only needed if we need to write to HDF5 in Collective mode"/>
            // <layout    name="n_coords_layout" type="double" dimensions="n_coords_local" comment="For the individual x, y and z coordinates of the mesh vertices" />
            // <group name="coordset/coords/values">
            //     <variable name="x" layout="n_coords_layout" type="scalar" visualizable="false" unit="m" script="PythonConduitTest" time-varying="false" />
            //     <variable name="y" layout="n_coords_layout" type="scalar" visualizable="false" unit="m" script="PythonConduitTest" time-varying="false" />
            //     <variable name="z" layout="n_coords_layout" type="scalar" visualizable="false" unit="m" script="PythonConduitTest" time-varying="false" />
            // </group>

            DamarisVarDbl var_x(1, {"n_coords_local"}, "coordset/coords/values/x", rank_);
            // N.B. We have not set any position/offset values (using DamarisVar::SetDamarisPosition).
            // They are not needed for mesh data as each process has a local geometric model.
            // However, HDF5 collective and Dask arrays cannot be used for this data.
            var_x.setDamarisParameterAndShmem({geomData.getNVertices()});

            DamarisVarDbl var_y(1, {"n_coords_local"}, "coordset/coords/values/y", rank_);
            var_y.setDamarisParameterAndShmem({geomData.getNVertices()});

            DamarisVarDbl var_z(1, {"n_coords_local"}, "coordset/coords/values/z", rank_);
            var_z.setDamarisParameterAndShmem({geomData.getNVertices()});

            // Now we can use the shared memory area that Damaris has allocated to write the x, y, z coordinates
            if (geomData.writeGridPoints(var_x, var_y, var_z) < 0)
                DUNE_THROW(Dune::IOError, geomData.getError());

            // This is the template XML model for connectivity, offsets and types, as defined in initDamarisXmlFile.cpp,
            // which is used to build the internally generated Damaris XML configuration file:
            // <parameter name="n_connectivity_ph"       type="int" value="1" />
            // <layout    name="n_connections_layout_ph" type="int" dimensions="n_connectivity_ph" comment="Layout for connectivities" />
            // <parameter name="n_offsets_types_ph"      type="int" value="1" />
            // <layout    name="n_offsets_layout_ph"     type="int" dimensions="n_offsets_types_ph+1" comment="Layout for the offsets_ph" />
            // <layout    name="n_types_layout_ph"       type="char" dimensions="n_offsets_types_ph" comment="Layout for the types_ph" />
            // <group name="topologies/topo/elements">
            //     <variable name="connectivity" layout="n_connections_layout_ph" type="scalar" visualizable="false" unit="" script="PythonConduitTest" time-varying="false" />
            //     <variable name="offsets"      layout="n_offsets_layout_ph"     type="scalar" visualizable="false" unit="" script="PythonConduitTest" time-varying="false" />
            //     <variable name="types"        layout="n_types_layout_ph"       type="scalar" visualizable="false" unit="" script="PythonConduitTest" time-varying="false" />
            // </group>

            DamarisVarInt var_connectivity(1, {"n_connectivity_ph"},
                                           "topologies/topo/elements/connectivity", rank_);
            var_connectivity.setDamarisParameterAndShmem({geomData.getNCorners()});
            DamarisVarInt var_offsets(1, {"n_offsets_types_ph"},
                                      "topologies/topo/elements/offsets", rank_);
            var_offsets.setDamarisParameterAndShmem({geomData.getNCells() + 1});
            DamarisVarChar var_types(1, {"n_offsets_types_ph"},
                                     "topologies/topo/elements/types", rank_);
            var_types.setDamarisParameterAndShmem({geomData.getNCells()});

            // Copy the mesh data from the Dune grid
            long i = 0;
            GridDataOutput::ConnectivityVertexOrder vtkorder = GridDataOutput::VTK;

            i = geomData.writeConnectivity(var_connectivity, vtkorder);
            if (i != geomData.getNCorners())
                DUNE_THROW(Dune::IOError, geomData.getError());

            i = geomData.writeOffsetsCells(var_offsets);
            if (i != geomData.getNCells() + 1)
                DUNE_THROW(Dune::IOError, geomData.getError());

            i = geomData.writeCellTypes(var_types);
            if (i != geomData.getNCells())
                DUNE_THROW(Dune::IOError, geomData.getError());
        }
        catch (std::exception& e)
        {
            OpmLog::error(e.what());
        }
    }

    void prepareLocalCellData(const bool isSubStep,
                              const int reportStepNum)
    {
        OPM_TIMEBLOCK(prepareLocalCellData);
        if (damarisOutputModule_->localDataValid()) {
            return;
        }

        const auto& gridView = simulator_.vanguard().gridView();
        const int num_interior = detail::
            countLocalInteriorCellsGridView(gridView);
        const bool log = this->collectOnIORank_.isIORank();

        damarisOutputModule_->allocBuffers(num_interior, reportStepNum,
                                           isSubStep, log, /*isRestart*/ false);

        ElementContext elemCtx(simulator_);
        OPM_BEGIN_PARALLEL_TRY_CATCH();
        {
            OPM_TIMEBLOCK(prepareCellBasedData);
            for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
                elemCtx.updatePrimaryStencil(elem);
                elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);

                damarisOutputModule_->processElement(elemCtx);
            }
        }
        if (!simulator_.model().linearizer().getFlowsInfo().empty()) {
            OPM_TIMEBLOCK(prepareFlowsData);
            for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
                elemCtx.updatePrimaryStencil(elem);
                elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
                damarisOutputModule_->processElementFlows(elemCtx);
            }
        }
        {
            OPM_TIMEBLOCK(prepareBlockData);
            for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
                elemCtx.updatePrimaryStencil(elem);
                elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
                damarisOutputModule_->processElementBlockData(elemCtx);
            }
        }
        {
            OPM_TIMEBLOCK(prepareFluidInPlace);
#ifdef _OPENMP
#pragma omp parallel for
#endif
            for (int dofIdx = 0; dofIdx < num_interior; ++dofIdx) {
                const auto& intQuants = *(simulator_.model().cachedIntensiveQuantities(dofIdx, /*timeIdx=*/0));
                const auto totVolume = simulator_.model().dofTotalVolume(dofIdx);
                damarisOutputModule_->updateFluidInPlace(dofIdx, intQuants, totVolume);
            }
        }
        damarisOutputModule_->validateLocalData();
        OPM_END_PARALLEL_TRY_CATCH("DamarisWriter::prepareLocalCellData() failed: ", simulator_.vanguard().grid().comm());
    }
};

} // namespace Opm

#endif // OPM_DAMARIS_WRITER_HPP
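
// Minimal usage sketch (hypothetical driver code, not part of this header): the writer
// is intended to be driven by the simulator's output machinery, roughly as
//
//   DamarisWriter<TypeTag>::registerParameters();   // during parameter registration
//   DamarisWriter<TypeTag> writer(simulator);       // once the grid/vanguard is set up
//   ...
//   writer.writeOutput(localCellData, isSubStep);   // at the end of each time step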