ParallelDebugOutput.hpp
Go to the documentation of this file.
/*
  Copyright 2015 IRIS AS

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_PARALLELDEBUGOUTPUT_HEADER_INCLUDED
#define OPM_PARALLELDEBUGOUTPUT_HEADER_INCLUDED

#include <opm/core/grid.h>
#include <opm/core/simulator/SimulatorState.hpp>
#include <opm/core/simulator/WellState.hpp>
#include <opm/core/wells/WellsManager.hpp>

#include <cassert>
#include <map>
#include <set>
#include <string>
#include <vector>

#if HAVE_DUNE_CORNERPOINT
#include <dune/grid/common/p2pcommunicator.hh>
#endif

34 namespace Opm
35 {
36 
38  {
39  protected:
41  public:
43 
44  // gather solution to rank 0 for EclipseWriter
45  virtual bool collectToIORank( const SimulatorState& localReservoirState,
46  const WellState& localWellState ) = 0;
47 
48  virtual const SimulatorState& globalReservoirState() const = 0 ;
49  virtual const WellState& globalWellState() const = 0 ;
50  virtual bool isIORank() const = 0;
51  virtual bool isParallel() const = 0;
52  virtual int numCells() const = 0 ;
53  virtual const int* globalCell() const = 0;
54  };
55 
56  template <class GridImpl>
58  {
59  protected:
60  const GridImpl& grid_;
61 
62  const SimulatorState* globalState_;
63  const WellState* wellState_;
64 
65  public:
66  ParallelDebugOutput ( const GridImpl& grid,
67  Opm::EclipseStateConstPtr /* eclipseState */,
68  const int,
69  const double* )
70  : grid_( grid ) {}
71 
72  // gather solution to rank 0 for EclipseWriter
73  virtual bool collectToIORank( const SimulatorState& localReservoirState,
74  const WellState& localWellState )
75  {
76  globalState_ = &localReservoirState;
77  wellState_ = &localWellState;
78  return true ;
79  }
80 
81  virtual const SimulatorState& globalReservoirState() const { return *globalState_; }
82  virtual const WellState& globalWellState() const { return *wellState_; }
83  virtual bool isIORank () const { return true; }
84  virtual bool isParallel () const { return false; }
85  virtual int numCells() const { return grid_.number_of_cells; }
86  virtual const int* globalCell() const { return grid_.global_cell; }
87  };
88 
89 #if HAVE_DUNE_CORNERPOINT
90  template <>
91  class ParallelDebugOutput< Dune::CpGrid> : public ParallelDebugOutputInterface
92  {
93  public:
94  typedef Dune::CpGrid Grid;
95  typedef typename Grid :: CollectiveCommunication CollectiveCommunication;
96 
97  // global id
98  class GlobalCellIndex
99  {
100  int globalId_;
101  int localIndex_;
102  bool isInterior_;
103  public:
104  GlobalCellIndex() : globalId_(-1), localIndex_(-1), isInterior_(true) {}
105  void setGhost() { isInterior_ = false; }
106 
107  void setId( const int globalId ) { globalId_ = globalId; }
108  void setIndex( const int localIndex ) { localIndex_ = localIndex; }
109 
110  int localIndex () const { return localIndex_; }
111  int id () const { return globalId_; }
112  bool isInterior() const { return isInterior_; }
113  };
114 
115  typedef typename Dune::PersistentContainer< Grid, GlobalCellIndex > GlobalIndexContainer;
116 
117  static const int dimension = Grid :: dimension ;
118 
119  typedef typename Grid :: LeafGridView GridView;
120  typedef GridView AllGridView;
121 
122  typedef Dune :: Point2PointCommunicator< Dune :: SimpleMessageBuffer > P2PCommunicatorType;
123  typedef typename P2PCommunicatorType :: MessageBufferType MessageBufferType;
124 
125  typedef std::vector< GlobalCellIndex > LocalIndexMapType;
126 
127  typedef std::vector<int> IndexMapType;
128  typedef std::vector< IndexMapType > IndexMapStorageType;
129 
130  class DistributeIndexMapping : public P2PCommunicatorType::DataHandleInterface
131  {
132  protected:
133  const std::vector<int>& distributedGlobalIndex_;
134  IndexMapType& localIndexMap_;
135  IndexMapStorageType& indexMaps_;
136  std::map< const int, const int > globalPosition_;
137 #ifndef NDEBUG
138  std::set< int > checkPosition_;
139 #endif
140 
141  public:
142  DistributeIndexMapping( const std::vector<int>& globalIndex,
143  const std::vector<int>& distributedGlobalIndex,
144  IndexMapType& localIndexMap,
145  IndexMapStorageType& indexMaps )
146  : distributedGlobalIndex_( distributedGlobalIndex ),
147  localIndexMap_( localIndexMap ),
148  indexMaps_( indexMaps ),
149  globalPosition_()
150  {
151  const size_t size = globalIndex.size();
152  // create mapping globalIndex --> localIndex
153  for ( size_t index = 0; index < size; ++index )
154  {
155  globalPosition_.insert( std::make_pair( globalIndex[ index ], index ) );
156  }
157 
158  // on I/O rank we need to create a mapping from local to global
159  if( ! indexMaps_.empty() )
160  {
161  // for the ioRank create a localIndex to index in global state map
162  IndexMapType& indexMap = indexMaps_.back();
163  const size_t localSize = localIndexMap_.size();
164  indexMap.resize( localSize );
165  for( size_t i=0; i<localSize; ++i )
166  {
167  const int id = distributedGlobalIndex_[ localIndexMap_[ i ] ];
168  indexMap[ i ] = globalPosition_[ id ] ;
169 #ifndef NDEBUG
170  assert( checkPosition_.find( id ) == checkPosition_.end() );
171  checkPosition_.insert( id );
172 #endif
173  }
174  }
175  }
176 
177  void pack( const int link, MessageBufferType& buffer )
178  {
179  // we should only get one link
180  if( link != 0 ) {
181  OPM_THROW(std::logic_error,"link in method pack is not 0 as execpted");
182  }
183 
184  // pack all interior global cell id's
185  const int size = localIndexMap_.size();
186  buffer.write( size );
187 
188  for( int index = 0; index < size; ++index )
189  {
190  const int globalIdx = distributedGlobalIndex_[ localIndexMap_[ index ] ];
191  buffer.write( globalIdx );
192  }
193  }
194 
195  void unpack( const int link, MessageBufferType& buffer )
196  {
197  // get index map for current link
198  IndexMapType& indexMap = indexMaps_[ link ];
199  assert( ! globalPosition_.empty() );
200 
201  // unpack all interior global cell id's
202  int numCells = 0;
203  buffer.read( numCells );
204  indexMap.resize( numCells );
205  for( int index = 0; index < numCells; ++index )
206  {
207  int globalId = -1;
208  buffer.read( globalId );
209  assert( globalPosition_.find( globalId ) != globalPosition_.end() );
210  indexMap[ index ] = globalPosition_[ globalId ];
211 #ifndef NDEBUG
212  assert( checkPosition_.find( globalId ) == checkPosition_.end() );
213  checkPosition_.insert( globalId );
214 #endif
215  }
216  }
217  };
218 
219  enum { ioRank = 0 };
220 
221  ParallelDebugOutput( const Dune::CpGrid& otherGrid,
222  Opm::EclipseStateConstPtr eclipseState,
223  const int numPhases,
224  const double* permeability )
225  : toIORankComm_( otherGrid.comm() ),
226  isIORank_( otherGrid.comm().rank() == ioRank )
227  {
228  const CollectiveCommunication& comm = otherGrid.comm();
229  if( comm.size() > 1 )
230  {
231  std::set< int > send, recv;
232  // the I/O rank receives from all other ranks
233  if( isIORank() )
234  {
235  Dune::CpGrid globalGrid( otherGrid );
236  globalGrid.switchToGlobalView();
237 
238  // initialize global state with correct sizes
239  globalReservoirState_.init( globalGrid.numCells(), globalGrid.numFaces(), numPhases );
240  // TODO init well state
241 
242  // Create wells and well state.
243  WellsManager wells_manager(eclipseState,
244  0,
245  Opm::UgGridHelpers::numCells( globalGrid ),
246  Opm::UgGridHelpers::globalCell( globalGrid ),
247  Opm::UgGridHelpers::cartDims( globalGrid ),
248  Opm::UgGridHelpers::dimensions( globalGrid ),
249  Opm::UgGridHelpers::cell2Faces( globalGrid ),
250  Opm::UgGridHelpers::beginFaceCentroids( globalGrid ),
251  permeability,
252  false);
253 
254  const Wells* wells = wells_manager.c_wells();
255  globalWellState_.init(wells, globalReservoirState_, globalWellState_ );
256 
257  // copy global cartesian index
258  globalIndex_ = globalGrid.globalCell();
259 
260  unsigned int count = 0;
261  auto gridView = globalGrid.leafGridView();
262  for( auto it = gridView.begin< 0 >(),
263  end = gridView.end< 0 >(); it != end; ++it, ++count )
264  {
265  }
266  assert( count == globalIndex_.size() );
267 
268  for(int i=0; i<comm.size(); ++i)
269  {
270  if( i != ioRank )
271  {
272  recv.insert( i );
273  }
274  }
275  }
276  else // all other simply send to the I/O rank
277  {
278  send.insert( ioRank );
279  }
280 
281  localIndexMap_.clear();
282  localIndexMap_.reserve( otherGrid.size( 0 ) );
283 
284  unsigned int index = 0;
285  auto localView = otherGrid.leafGridView();
286  for( auto it = localView.begin< 0 >(),
287  end = localView.end< 0 >(); it != end; ++it, ++index )
288  {
289  const auto element = *it ;
290  // only store interior element for collection
291  if( element.partitionType() == Dune :: InteriorEntity )
292  {
293  localIndexMap_.push_back( index );
294  }
295  }
296 
297  // insert send and recv linkage to communicator
298  toIORankComm_.insertRequest( send, recv );
299 
300  if( isIORank() )
301  {
302  // need an index map for each rank
303  indexMaps_.clear();
304  indexMaps_.resize( comm.size() );
305  }
306 
307  // distribute global id's to io rank for later association of dof's
308  DistributeIndexMapping distIndexMapping( globalIndex_, otherGrid.globalCell(), localIndexMap_, indexMaps_ );
309  toIORankComm_.exchange( distIndexMapping );
310  }
311  else // serial run
312  {
313  // copy global cartesian index
314  globalIndex_ = otherGrid.globalCell();
315  }
316  }
317 
318  class PackUnPackSimulatorState : public P2PCommunicatorType::DataHandleInterface
319  {
320  const SimulatorState& localState_;
321  SimulatorState& globalState_;
322  const WellState& localWellState_;
323  WellState& globalWellState_;
324  const IndexMapType& localIndexMap_;
325  const IndexMapStorageType& indexMaps_;
326 
327  public:
328  PackUnPackSimulatorState( const SimulatorState& localState,
329  SimulatorState& globalState,
330  const WellState& localWellState,
331  WellState& globalWellState,
332  const IndexMapType& localIndexMap,
333  const IndexMapStorageType& indexMaps,
334  const bool isIORank )
335  : localState_( localState ),
336  globalState_( globalState ),
337  localWellState_( localWellState ),
338  globalWellState_( globalWellState ),
339  localIndexMap_( localIndexMap ),
340  indexMaps_( indexMaps )
341  {
342  if( isIORank )
343  {
344  // add missing data to global state
345  for( size_t i=globalState_.cellData().size();
346  i<localState.cellData().size(); ++i )
347  {
348  const size_t components = localState.cellData()[ i ].size() / localState.numCells();
349  assert( components * localState.numCells() == localState.cellData()[ i ].size() );
350  globalState_.registerCellData( localState.cellDataNames()[ i ], components );
351  }
352 
353  MessageBufferType buffer;
354  pack( 0, buffer );
355  // the last index map is the local one
356  doUnpack( indexMaps.back(), buffer );
357  }
358  }
359 
360  // pack all data associated with link
361  void pack( const int link, MessageBufferType& buffer )
362  {
363  // we should only get one link
364  if( link != 0 ) {
365  OPM_THROW(std::logic_error,"link in method pack is not 0 as execpted");
366  }
367 
368  // write all cell data registered in local state
369  const size_t numCells = localState_.numCells();
370  const size_t numCellData = localState_.cellData().size();
371  for( size_t d=0; d<numCellData; ++d )
372  {
373  const std::vector< double >& data = localState_.cellData()[ d ];
374  const size_t stride = data.size() / numCells ;
375  assert( numCells * stride == data.size() );
376 
377  for( size_t i=0; i<stride; ++i )
378  {
379  // write all data from local state to buffer
380  write( buffer, localIndexMap_, data, i, stride );
381  }
382  }
383 
384  // write all data from local well state to buffer
385  writeWells( buffer );
386  }
387 
388  void doUnpack( const IndexMapType& indexMap, MessageBufferType& buffer )
389  {
390  // read all cell data registered in local state
391  const size_t numCells = globalState_.numCells();
392  const size_t numCellData = globalState_.cellData().size();
393  for( size_t d=0; d<numCellData; ++d )
394  {
395  std::vector< double >& data = globalState_.cellData()[ d ];
396  const size_t stride = data.size() / numCells ;
397  assert( numCells * stride == data.size() );
398 
399  for( size_t i=0; i<stride; ++i )
400  {
401  // write all data from local state to buffer
402  read( buffer, indexMap, data, i, stride );
403  }
404  }
405 
406  // read well data from buffer
407  readWells( buffer );
408  }
409 
410  // unpack all data associated with link
411  void unpack( const int link, MessageBufferType& buffer )
412  {
413  doUnpack( indexMaps_[ link ], buffer );
414  }
415 
416  protected:
417  template <class Vector>
418  void write( MessageBufferType& buffer, const IndexMapType& localIndexMap,
419  const Vector& vector,
420  const unsigned int offset = 0, const unsigned int stride = 1 ) const
421  {
422  unsigned int size = localIndexMap.size();
423  buffer.write( size );
424  assert( vector.size() >= stride * size );
425  for( unsigned int i=0; i<size; ++i )
426  {
427  const unsigned int index = localIndexMap[ i ] * stride + offset;
428  assert( index < vector.size() );
429  buffer.write( vector[ index ] );
430  }
431  }
432 
433  template <class Vector>
434  void read( MessageBufferType& buffer,
435  const IndexMapType& indexMap,
436  Vector& vector,
437  const unsigned int offset = 0, const unsigned int stride = 1 ) const
438  {
439  unsigned int size = 0;
440  buffer.read( size );
441  assert( size == indexMap.size() );
442  for( unsigned int i=0; i<size; ++i )
443  {
444  const unsigned int index = indexMap[ i ] * stride + offset;
445  assert( index < vector.size() );
446  buffer.read( vector[ index ] );
447  }
448  }
449 
450  void writeString( MessageBufferType& buffer, const std::string& s) const
451  {
452  const int size = s.size();
453  buffer.write( size );
454  for( int i=0; i<size; ++i )
455  {
456  buffer.write( s[ i ] );
457  }
458  }
459 
460  void readString( MessageBufferType& buffer, std::string& s) const
461  {
462  int size = -1;
463  buffer.read( size );
464  s.resize( size );
465  for( int i=0; i<size; ++i )
466  {
467  buffer.read( s[ i ] );
468  }
469  }
470 
471  void writeWells( MessageBufferType& buffer ) const
472  {
473  int nWells = localWellState_.wellMap().size();
474  buffer.write( nWells );
475  auto end = localWellState_.wellMap().end();
476  for( auto it = localWellState_.wellMap().begin(); it != end; ++it )
477  {
478  const std::string& name = it->first;
479  const int wellIdx = it->second[ 0 ];
480 
481  // write well name
482  writeString( buffer, name );
483 
484  // write well data
485  buffer.write( localWellState_.bhp()[ wellIdx ] );
486  buffer.write( localWellState_.thp()[ wellIdx ] );
487  const int wellRateIdx = wellIdx * localWellState_.numPhases();
488  for( int np=0; np<localWellState_.numPhases(); ++np )
489  buffer.write( localWellState_.wellRates()[ wellRateIdx + np ] );
490 
491  // TODO: perfRates and perfPress, need to figure out the index
492  // mapping there.
493  }
494  }
495 
496  void readWells( MessageBufferType& buffer )
497  {
498  int nWells = -1;
499  buffer.read( nWells );
500  // unpack all wells that have been sent
501  std::string name ;
502  for( int well = 0; well < nWells ; ++well )
503  {
504  // read well name for local identification
505  readString( buffer, name );
506 
507  // unpack values
508  auto it = globalWellState_.wellMap().find( name );
509  if( it == globalWellState_.wellMap().end() )
510  {
511  OPM_THROW(std::logic_error,"global state does not contain well " << name );
512  }
513  const int wellIdx = it->second[ 0 ];
514 
515  buffer.read( globalWellState_.bhp()[ wellIdx ] );
516  buffer.read( globalWellState_.thp()[ wellIdx ] );
517  const int wellRateIdx = wellIdx * globalWellState_.numPhases();
518  for( int np=0; np<globalWellState_.numPhases(); ++np )
519  buffer.read( globalWellState_.wellRates()[ wellRateIdx + np ] );
520 
521  // TODO: perfRates and perfPress, need to figure out the index
522  // mapping there.
523  }
524  }
525  };
526 
527  // gather solution to rank 0 for EclipseWriter
528  bool collectToIORank( const SimulatorState& localReservoirState,
529  const WellState& localWellState )
530  {
531  PackUnPackSimulatorState packUnpack( localReservoirState, globalReservoirState_,
532  localWellState, globalWellState_,
533  localIndexMap_, indexMaps_, isIORank() );
534 
535  //toIORankComm_.exchangeCached( packUnpack );
536  toIORankComm_.exchange( packUnpack );
537 #ifndef NDEBUG
538  // mkae sure every process is on the same page
539  toIORankComm_.barrier();
540 #endif
541  return isIORank();
542  }
543 
544  const SimulatorState& globalReservoirState() const { return globalReservoirState_; }
545  const WellState& globalWellState() const { return globalWellState_; }
546 
547  bool isIORank() const
548  {
549  return isIORank_;
550  }
551 
552  bool isParallel() const
553  {
554  return toIORankComm_.size() > 1;
555  }
556 
557  int numCells () const { return globalIndex_.size(); }
558  const int* globalCell () const
559  {
560  assert( ! globalIndex_.empty() );
561  return globalIndex_.data();
562  }
563 
564  protected:
565  P2PCommunicatorType toIORankComm_;
566  IndexMapType globalIndex_;
567  IndexMapType localIndexMap_;
568  IndexMapStorageType indexMaps_;
569  SimulatorState globalReservoirState_;
570  // this needs to be revised
571  WellStateFullyImplicitBlackoil globalWellState_;
572  // true if we are on I/O rank
573  const bool isIORank_;
574  };
575 #endif // #if HAVE_DUNE_CORNERPOINT
576 
577 } // end namespace Opm
578 #endif
/*
  Doxygen cross-reference index — residue of the HTML export of this header,
  kept verbatim below (commented out so the file stays valid C++):

  virtual bool isIORank() const
  Definition: ParallelDebugOutput.hpp:83
  virtual bool isParallel() const =0
  Definition: AdditionalObjectDeleter.hpp:22
  virtual const WellState & globalWellState() const =0
  Definition: ParallelDebugOutput.hpp:37
  virtual const int * globalCell() const =0
  virtual const SimulatorState & globalReservoirState() const =0
  virtual bool isIORank() const =0
  virtual const SimulatorState & globalReservoirState() const
  Definition: ParallelDebugOutput.hpp:81
  virtual int numCells() const
  Definition: ParallelDebugOutput.hpp:85
  virtual int numCells() const =0
  const WellState * wellState_
  Definition: ParallelDebugOutput.hpp:63
  virtual ~ParallelDebugOutputInterface()
  Definition: ParallelDebugOutput.hpp:42
  virtual bool collectToIORank(const SimulatorState &localReservoirState, const WellState &localWellState)
  Definition: ParallelDebugOutput.hpp:73
  Definition: ParallelDebugOutput.hpp:57
  virtual bool isParallel() const
  Definition: ParallelDebugOutput.hpp:84
  const GridImpl & grid_
  Definition: ParallelDebugOutput.hpp:60
  virtual const WellState & globalWellState() const
  Definition: ParallelDebugOutput.hpp:82
  virtual bool collectToIORank(const SimulatorState &localReservoirState, const WellState &localWellState)=0
  const SimulatorState * globalState_
  Definition: ParallelDebugOutput.hpp:62
  ParallelDebugOutput(const GridImpl &grid, Opm::EclipseStateConstPtr, const int, const double *)
  Definition: ParallelDebugOutput.hpp:66
  ParallelDebugOutputInterface()
  Definition: ParallelDebugOutput.hpp:40
  virtual const int * globalCell() const
  Definition: ParallelDebugOutput.hpp:86
*/