DamarisWriter.hpp
// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  Copyright 2022 SINTEF Digital, Mathematics and Cybernetics.
  Copyright 2023 Inria, Bretagne–Atlantique Research Center

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
#ifndef OPM_DAMARIS_WRITER_HPP
#define OPM_DAMARIS_WRITER_HPP

#include <dune/grid/common/partitionset.hh>

#include <opm/common/OpmLog/OpmLog.hpp>

// The header names below come from this file's own cross-references; the
// include paths are assumed from the usual OPM source layout.
#include <opm/models/utils/propertysystem.hh>

#include <opm/simulators/flow/CollectDataOnIORank.hpp>
#include <opm/simulators/flow/EclGenericWriter.hpp>
#include <opm/simulators/flow/OutputBlackoilModule.hpp>
#include <opm/simulators/flow/countGlobalCells.hpp>

#include <opm/simulators/utils/DamarisVar.hpp>
#include <opm/simulators/utils/DeferredLoggingErrorHelpers.hpp>
#include <opm/simulators/utils/GridDataOutput.hpp>
#include <opm/simulators/utils/ParallelCommunication.hpp>

#include <fmt/format.h>

#include <algorithm>
#include <memory>
#include <numeric>
#include <string>
#include <unordered_set>
#include <vector>

namespace Opm {

namespace DamarisOutput {

int endIteration();
int setParameter(const char* field, int value);
int setPosition(const char* field, int64_t pos);
int write(const char* field, const void* data);
int setupWritingPars(Parallel::Communication comm,
                     const int n_elements_local_grid,
                     std::vector<unsigned long long>& elements_rank_offsets);
void handleError(const int dam_err, Parallel::Communication comm, const std::string& message);
} // namespace DamarisOutput
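
// A minimal sketch of how these low-level helpers chain together for one
// variable on each rank (illustrative only; the variable name "PRESSURE" and
// the inputs n_local_cells/pressure are assumed placeholders, not part of
// this header):
//
//     std::vector<unsigned long long> offsets(comm.size());
//     int err = DamarisOutput::setupWritingPars(comm, n_local_cells, offsets);
//     err = DamarisOutput::setPosition("PRESSURE", offsets[comm.rank()]);
//     err = DamarisOutput::write("PRESSURE", pressure.data());
//     DamarisOutput::handleError(err, comm, "write() for PRESSURE");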

/*!
 * \brief Collects the necessary output values and passes them to the
 *        Damaris server processes.
 */
template <class TypeTag>
class DamarisWriter : public EclGenericWriter<GetPropType<TypeTag, Properties::Grid>,
                                              GetPropType<TypeTag, Properties::EquilGrid>,
                                              GetPropType<TypeTag, Properties::GridView>,
                                              GetPropType<TypeTag, Properties::ElementMapper>,
                                              GetPropType<TypeTag, Properties::Scalar>>
{
    using Simulator = GetPropType<TypeTag, Properties::Simulator>;
    using Grid = GetPropType<TypeTag, Properties::Grid>;
    using EquilGrid = GetPropType<TypeTag, Properties::EquilGrid>;
    using GridView = GetPropType<TypeTag, Properties::GridView>;
    using ElementMapper = GetPropType<TypeTag, Properties::ElementMapper>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
    using Element = typename GridView::template Codim<0>::Entity;

    using BaseType = EclGenericWriter<Grid, EquilGrid, GridView, ElementMapper, Scalar>;

    using DamarisVarInt = DamarisOutput::DamarisVar<int>;
    using DamarisVarChar = DamarisOutput::DamarisVar<char>;
    using DamarisVarDbl = DamarisOutput::DamarisVar<double>;

public:
    static void registerParameters()
    {
        registerDamarisParameters();
    }

    // The Simulator object should preferably have been const - the
    // only reason that is not the case is due to the SummaryState
    // object owned deep down by the vanguard.
    DamarisWriter(Simulator& simulator)
        : BaseType(simulator.vanguard().schedule(),
                   simulator.vanguard().eclState(),
                   simulator.vanguard().summaryConfig(),
                   simulator.vanguard().grid(),
                   ((simulator.vanguard().grid().comm().rank() == 0)
                    ? &simulator.vanguard().equilGrid()
                    : nullptr),
                   simulator.vanguard().gridView(),
                   simulator.vanguard().cartesianIndexMapper(),
                   ((simulator.vanguard().grid().comm().rank() == 0)
                    ? &simulator.vanguard().equilCartesianIndexMapper()
                    : nullptr),
                   false, false)
        , simulator_(simulator)
    {
        this->damarisUpdate_ = true;

        this->rank_ = this->simulator_.vanguard().grid().comm().rank();
        this->nranks_ = this->simulator_.vanguard().grid().comm().size();

        this->elements_rank_offsets_.resize(this->nranks_);

        // Get the size of the unique vector elements (excludes the shared 'ghost' elements)
        //
        // Might possibly use
        //
        //     detail::countLocalInteriorCellsGridView(this->simulator_.gridView())
        //
        // from countGlobalCells.hpp instead of calling std::distance() directly.
        {
            const auto& gridView = this->simulator_.gridView();
            const auto& interior_elements = elements(gridView, Dune::Partitions::interior);

            this->numElements_ = std::distance(interior_elements.begin(), interior_elements.end());
        }

        if (this->nranks_ > 1) {
            auto smryCfg = (this->rank_ == 0)
                ? this->eclIO_->finalSummaryConfig()
                : SummaryConfig{};

            eclBroadcast(this->simulator_.vanguard().grid().comm(), smryCfg);

            this->damarisOutputModule_ = std::make_unique<OutputBlackOilModule<TypeTag>>
                (simulator, smryCfg, this->collectOnIORank_);
        }
        else {
            this->damarisOutputModule_ = std::make_unique<OutputBlackOilModule<TypeTag>>
                (simulator, this->eclIO_->finalSummaryConfig(), this->collectOnIORank_);
        }

        wanted_vars_set_ = Opm::DamarisOutput::getSetOfIncludedVariables<TypeTag>();
    }

    /*!
     * \brief Writes localCellData through to the Damaris servers. Also sets up
     *        the unstructured mesh which is passed to Damaris.
     */
    void writeOutput(data::Solution& localCellData, bool isSubStep)
    {
        OPM_TIMEBLOCK(writeOutput);
        const int reportStepNum = simulator_.episodeIndex() + 1;
        const auto& cc = simulator_.vanguard().grid().comm();

        // Invalidate the local data on report steps; without this,
        // localCellData was not being written.
        if (!isSubStep)
            this->damarisOutputModule_->invalidateLocalData();
        this->prepareLocalCellData(isSubStep, reportStepNum);
        this->damarisOutputModule_->outputErrorLog(cc);

        // The DamarisWriter does not output well or aquifer data (yet).
        auto localWellData = simulator_.problem().wellModel().wellData(); // data::Well

        if (!isSubStep)
        {
            if (localCellData.empty()) {
                this->damarisOutputModule_->assignToSolution(localCellData);
            }

            // Add cell data to perforations for RFT output
            this->damarisOutputModule_->addRftDataToWells(localWellData, reportStepNum);

            // damarisUpdate_ is true on the first call, and whenever the mesh
            // or variable sizes change.
            if (damarisUpdate_ == true) {
                // Sets the Damaris parameter values "n_elements_local" and "n_elements_total",
                // which define the sizes of the Damaris variables, per-rank and globally (over all ranks).
                // Also sets the offsets at which a rank's array data sits within the global array.
                // This is useful for HDF5 output and for defining distributed arrays in Dask.
                dam_err_ = DamarisOutput::setupWritingPars(cc, numElements_, elements_rank_offsets_);
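
                // For example (figures assumed purely for illustration): with
                // three ranks owning 100, 120 and 80 interior cells, this sets
                // n_elements_total = 300 and fills elements_rank_offsets_ with
                // the exclusive prefix sum {0, 100, 220}.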

                // Sets positions and data for the non-time-varying variables MPI_RANK and GLOBAL_CELL_INDEX
                this->setGlobalIndexForDamaris();

                // Set the geometry data for the mesh model.
                // This function writes the mesh data directly to Damaris shared memory
                // using Opm::DamarisOutput::DamarisVar objects.
                this->writeDamarisGridOutput();

                // Currently, by default, we assume a static mesh grid (the geometry is
                // unchanging throughout the simulation). Set damarisUpdate_ to true if
                // the geometry sent to Damaris needs to be updated.
                this->damarisUpdate_ = false;
            }

            int cell_data_written = 0;
            // Call damaris_write() for all available variables
            for (const auto& damVar : localCellData)
            {
                // localCellData is a std::map<std::string, data::CellData>
                const std::string& name = damVar.first;
                // If the wanted_vars_set_ set is empty then we default to passing
                // through all the variables.
                if (wanted_vars_set_.count(name) || wanted_vars_set_.empty())
                {
                    const data::CellData& dataCol = damVar.second;
                    OpmLog::debug(fmt::format("Name of Damaris variable: (rank:{}) name: {}", rank_, name));

                    // Call damaris_set_position() for all available variables.
                    // There is an assumption that all variables are the same size, with the same offset;
                    // see initDamarisTemplateXmlFile.cpp for the Damaris XML descriptions.
                    dam_err_ = DamarisOutput::setPosition(name.c_str(), this->elements_rank_offsets_[rank_]);

                    // There does not seem to be a way to query which type of data (double or int)
                    // the std::variant within the data::CellData holds, so use a try/catch block.
                    try {
                        if (dataCol.data<double>().size() >= static_cast<std::vector<double>::size_type>(this->numElements_)) {
                            dam_err_ = DamarisOutput::write(name.c_str(), dataCol.data<double>().data());
                        } else {
                            OpmLog::info(fmt::format("(rank:{}) The variable \"{}\" was found to be of a different size {} (not {}).",
                                                     rank_, name, dataCol.data<double>().size(), this->numElements_));
                        }
                    }
                    catch (const std::bad_variant_access&) {
                        // Not a std::vector<double>, must be a std::vector<int>
                        if (dataCol.data<int>().size() >= static_cast<std::vector<int>::size_type>(this->numElements_)) {
                            dam_err_ = DamarisOutput::write(name.c_str(), dataCol.data<int>().data());
                        } else {
                            OpmLog::info(fmt::format("(rank:{}) The variable \"{}\" was found to be of a different size {} (not {}).",
                                                     rank_, name, dataCol.data<int>().size(), this->numElements_));
                        }
                    }
                    ++cell_data_written;
                }
            }
            DamarisOutput::handleError(dam_err_, cc, "setPosition() and write() for available variables");

            if (!cell_data_written) {
                OpmLog::info(fmt::format("(rank:{}) No simulation data written to the Damaris server - check that the "
                                         "--damaris-limit-variables command line option (if used) has valid variable "
                                         "name(s) and that the Damaris XML file contains variable names that are "
                                         "available in your simulation.", rank_));
            } else {
                OpmLog::debug(fmt::format("(rank:{}) {} Damaris variables written to the Damaris servers",
                                          rank_, cell_data_written));
            }

            /*
               Code for when we want to pass the single-cell 'block' data variables to Damaris:

               auto mybloc = damarisOutputModule_->getBlockData();
               for (auto damVar : mybloc) {
                   // std::map<std::string, data::CellData>
                   const std::string name = std::get<0>(damVar.first);
                   const int part = std::get<1>(damVar.first);
                   double dataCol = damVar.second;
                   std::cout << "Name of Damaris block variable: (" << rank_ << ") " << name
                             << " part : " << part << " Value : " << dataCol << std::endl;
               }

               dam_err_ = DamarisOutput::endIteration();
            */
            if (this->damarisOutputModule_->getPRESSURE_ptr() != nullptr)
            {
                dam_err_ = DamarisOutput::endIteration();
            }
            DamarisOutput::handleError(dam_err_, cc, "endIteration()");

        } // end of !isSubStep
    }

private:
    int dam_err_;
    int rank_;
    int nranks_;
    int numElements_;
    std::unordered_set<std::string> wanted_vars_set_;

    Simulator& simulator_;
    std::unique_ptr<OutputBlackOilModule<TypeTag>> damarisOutputModule_;
    std::vector<unsigned long long> elements_rank_offsets_;
    bool damarisUpdate_ = false;

    static bool enableDamarisOutput_()
    {
        static bool enable = Parameters::Get<Parameters::EnableDamarisOutput>();
        return enable;
    }

    void setGlobalIndexForDamaris()
    {
        const auto& cc = simulator_.vanguard().grid().comm();
        // Use damaris_set_position to set the offset in the global size of the array.
        // This is used so that output functionality (e.g. HDF5Store) knows the global
        // offsets of each rank's data.
        dam_err_ = DamarisOutput::setPosition("GLOBAL_CELL_INDEX", elements_rank_offsets_[rank_]);
        DamarisOutput::handleError(dam_err_, cc, "setPosition() for GLOBAL_CELL_INDEX");

        // This is an example of writing to the Damaris shared memory directly (i.e. we allocate the
        // variable directly in the shared memory region and do not use damaris_write() to copy data there).
        // The shared memory is given back to Damaris when the DamarisVarInt goes out of scope.
        // N.B. MPI_RANK is only saved to HDF5 if --damaris-save-mesh-to-hdf=true is specified.
        DamarisVarInt mpi_rank_var(1, {"n_elements_local"}, "MPI_RANK", rank_);
        mpi_rank_var.setDamarisPosition({static_cast<int64_t>(elements_rank_offsets_[rank_])});

        // GLOBAL_CELL_INDEX is used to reorder variable data when writing to disk.
        // This is enabled using select-file="GLOBAL_CELL_INDEX" in the <variable> XML tag.
        if (this->collectOnIORank_.isParallel()) {
            const std::vector<int>& local_to_global =
                this->collectOnIORank_.localIdxToGlobalIdxMapping();
            dam_err_ = DamarisOutput::write("GLOBAL_CELL_INDEX", local_to_global.data());
        } else {
            std::vector<int> local_to_global_filled(this->numElements_);
            std::iota(local_to_global_filled.begin(), local_to_global_filled.end(), 0);
            dam_err_ = DamarisOutput::write("GLOBAL_CELL_INDEX", local_to_global_filled.data());
        }
        DamarisOutput::handleError(dam_err_, cc, "write() for GLOBAL_CELL_INDEX");

        mpi_rank_var.setDamarisParameterAndShmem({this->numElements_});
        // Fill the created memory area
        std::fill(mpi_rank_var.data(), mpi_rank_var.data() + numElements_, rank_);

        // Pass the output directory string through as a Damaris variable so that
        // Python code (as an example) can use the path as required.
        const auto& outputDir = simulator_.vanguard().eclState().cfg().io().getOutputDir();
        if (!outputDir.empty()) {
            dam_err_ = DamarisOutput::setParameter("path_string_length", outputDir.size());
            DamarisOutput::handleError(dam_err_, cc, "setParameter() for path_string_length");
            dam_err_ = DamarisOutput::write("OUTPUTDIR", outputDir.c_str());
            DamarisOutput::handleError(dam_err_, cc, "write() for OUTPUTDIR");
        }
    }

    void writeDamarisGridOutput()
    {
        const auto& gridView = simulator_.gridView();
        GridDataOutput::SimMeshDataAccessor geomData(gridView, Dune::Partitions::interior);

        try {
            const bool hasPolyCells = geomData.polyhedralCellPresent();
            if (hasPolyCells) {
                OpmLog::error(fmt::format("rank {}: The DUNE geometry grid has polyhedral elements; "
                                          "these elements are currently not supported.", rank_));
            }

            // This is the template XML model for the x, y, z coordinates, defined in initDamarisXmlFile.cpp,
            // which is used to build the internally generated Damaris XML configuration file:
            // <parameter name="n_coords_local" type="int" value="1" />
            // <parameter name="n_coords_global" type="int" value="1" comment="only needed if we need to write to HDF5 in Collective mode"/>
            // <layout name="n_coords_layout" type="double" dimensions="n_coords_local" comment="For the individual x, y and z coordinates of the mesh vertices" />
            // <group name="coordset/coords/values">
            //     <variable name="x" layout="n_coords_layout" type="scalar" visualizable="false" unit="m" script="PythonConduitTest" time-varying="false" />
            //     <variable name="y" layout="n_coords_layout" type="scalar" visualizable="false" unit="m" script="PythonConduitTest" time-varying="false" />
            //     <variable name="z" layout="n_coords_layout" type="scalar" visualizable="false" unit="m" script="PythonConduitTest" time-varying="false" />
            // </group>

            DamarisVarDbl var_x(1, {"n_coords_local"}, "coordset/coords/values/x", rank_);
            // N.B. We have not set any position/offset values (using DamarisVar::SetDamarisPosition).
            // They are not needed for mesh data as each process has a local geometric model.
            // However, HDF5 collective and Dask arrays cannot be used for this data.
            var_x.setDamarisParameterAndShmem({geomData.getNVertices()});

            DamarisVarDbl var_y(1, {"n_coords_local"}, "coordset/coords/values/y", rank_);
            var_y.setDamarisParameterAndShmem({geomData.getNVertices()});

            DamarisVarDbl var_z(1, {"n_coords_local"}, "coordset/coords/values/z", rank_);
            var_z.setDamarisParameterAndShmem({geomData.getNVertices()});

            // Now we can use the shared memory area that Damaris has allocated and use it to write the x, y, z coordinates
            if (geomData.writeGridPoints(var_x, var_y, var_z) < 0)
                DUNE_THROW(Dune::IOError, geomData.getError());

            // This is the template XML model for connectivity, offsets and types, as defined in initDamarisXmlFile.cpp,
            // which is used to build the internally generated Damaris XML configuration file:
            // <parameter name="n_connectivity_ph" type="int" value="1" />
            // <layout name="n_connections_layout_ph" type="int" dimensions="n_connectivity_ph" comment="Layout for connectivities" />
            // <parameter name="n_offsets_types_ph" type="int" value="1" />
            // <layout name="n_offsets_layout_ph" type="int" dimensions="n_offsets_types_ph+1" comment="Layout for the offsets_ph" />
            // <layout name="n_types_layout_ph" type="char" dimensions="n_offsets_types_ph" comment="Layout for the types_ph" />
            // <group name="topologies/topo/elements">
            //     <variable name="connectivity" layout="n_connections_layout_ph" type="scalar" visualizable="false" unit="" script="PythonConduitTest" time-varying="false" />
            //     <variable name="offsets" layout="n_offsets_layout_ph" type="scalar" visualizable="false" unit="" script="PythonConduitTest" time-varying="false" />
            //     <variable name="types" layout="n_types_layout_ph" type="scalar" visualizable="false" unit="" script="PythonConduitTest" time-varying="false" />
            // </group>

            DamarisVarInt var_connectivity(1, {"n_connectivity_ph"},
                                           "topologies/topo/elements/connectivity", rank_);
            var_connectivity.setDamarisParameterAndShmem({geomData.getNCorners()});
            DamarisVarInt var_offsets(1, {"n_offsets_types_ph"},
                                      "topologies/topo/elements/offsets", rank_);
            var_offsets.setDamarisParameterAndShmem({geomData.getNCells() + 1});
            DamarisVarChar var_types(1, {"n_offsets_types_ph"},
                                     "topologies/topo/elements/types", rank_);
            var_types.setDamarisParameterAndShmem({geomData.getNCells()});

            // Copy the mesh data from the Dune grid
            long i = 0;
            GridDataOutput::ConnectivityVertexOrder vtkorder = GridDataOutput::VTK;

            i = geomData.writeConnectivity(var_connectivity, vtkorder);
            if (i != geomData.getNCorners())
                DUNE_THROW(Dune::IOError, geomData.getError());

            i = geomData.writeOffsetsCells(var_offsets);
            if (i != geomData.getNCells() + 1)
                DUNE_THROW(Dune::IOError, geomData.getError());

            i = geomData.writeCellTypes(var_types);
            if (i != geomData.getNCells())
                DUNE_THROW(Dune::IOError, geomData.getError());
        }
        catch (std::exception& e)
        {
            OpmLog::error(e.what());
        }
    }

    void prepareLocalCellData(const bool isSubStep,
                              const int reportStepNum)
    {
        OPM_TIMEBLOCK(prepareLocalCellData);
        if (damarisOutputModule_->localDataValid()) {
            return;
        }

        const auto& gridView = simulator_.vanguard().gridView();
        const int num_interior = detail::countLocalInteriorCellsGridView(gridView);
        const bool log = this->collectOnIORank_.isIORank();

        damarisOutputModule_->allocBuffers(num_interior, reportStepNum,
                                           isSubStep, log, /*isRestart*/ false);

        ElementContext elemCtx(simulator_);
        OPM_BEGIN_PARALLEL_TRY_CATCH();
        {
            OPM_TIMEBLOCK(prepareCellBasedData);
            for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
                elemCtx.updatePrimaryStencil(elem);
                elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);

                damarisOutputModule_->processElement(elemCtx);
            }
        }
        if (!simulator_.model().linearizer().getFlowsInfo().empty()) {
            OPM_TIMEBLOCK(prepareFlowsData);
            for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
                elemCtx.updatePrimaryStencil(elem);
                elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
                damarisOutputModule_->processElementFlows(elemCtx);
            }
        }
        {
            OPM_TIMEBLOCK(prepareBlockData);
            for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
                elemCtx.updatePrimaryStencil(elem);
                elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
                damarisOutputModule_->processElementBlockData(elemCtx);
            }
        }
        {
            OPM_TIMEBLOCK(prepareFluidInPlace);
#ifdef _OPENMP
#pragma omp parallel for
#endif
            for (int dofIdx = 0; dofIdx < num_interior; ++dofIdx) {
                const auto& intQuants = *(simulator_.model().cachedIntensiveQuantities(dofIdx, /*timeIdx=*/0));
                const auto totVolume = simulator_.model().dofTotalVolume(dofIdx);
                damarisOutputModule_->updateFluidInPlace(dofIdx, intQuants, totVolume);
            }
        }
        damarisOutputModule_->validateLocalData();
        OPM_END_PARALLEL_TRY_CATCH("DamarisWriter::prepareLocalCellData() failed: ",
                                   simulator_.vanguard().grid().comm());
    }

};
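
// Sketch of intended driver-side use, assuming a configured TypeTag, a running
// Simulator and a data::Solution map of cell data (names here are illustrative,
// not part of this header):
//
//     Opm::DamarisWriter<TypeTag> damarisWriter(simulator);
//     // ... at the end of each report step:
//     damarisWriter.writeOutput(localCellData, /*isSubStep=*/false);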

} // namespace Opm

#endif // OPM_DAMARIS_WRITER_HPP