CpGridData.hpp
Go to the documentation of this file.
1//===========================================================================
2//
3// File: CpGridData.hpp
4//
5// Created: Sep 17 21:11:41 2013
6//
7// Author(s): Atgeirr F Rasmussen <atgeirr@sintef.no>
8// Bård Skaflestad <bard.skaflestad@sintef.no>
9// Markus Blatt <markus@dr-blatt.de>
10// Antonella Ritorto <antonella.ritorto@opm-op.com>
11//
12// Comment: Major parts of this file originated in dune/grid/CpGrid.hpp
13// and got transferred here during refactoring for the parallelization.
14//
15// $Date$
16//
17// $Revision$
18//
19//===========================================================================
20
21/*
22 Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
23 Copyright 2009, 2010, 2013, 2022-2023 Equinor ASA.
24 Copyright 2013 Dr. Blatt - HPC-Simulation-Software & Services
25
26 This file is part of The Open Porous Media project (OPM).
27
28 OPM is free software: you can redistribute it and/or modify
29 it under the terms of the GNU General Public License as published by
30 the Free Software Foundation, either version 3 of the License, or
31 (at your option) any later version.
32
33 OPM is distributed in the hope that it will be useful,
34 but WITHOUT ANY WARRANTY; without even the implied warranty of
35 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
36 GNU General Public License for more details.
37
38 You should have received a copy of the GNU General Public License
39 along with OPM. If not, see <http://www.gnu.org/licenses/>.
40*/
48#ifndef OPM_CPGRIDDATA_HEADER
49#define OPM_CPGRIDDATA_HEADER
50
51
52#include <dune/common/parallel/mpihelper.hh>
53#ifdef HAVE_DUNE_ISTL
54#include <dune/istl/owneroverlapcopy.hh>
55#endif
56
57#include <dune/common/parallel/communication.hh>
58#include <dune/common/parallel/variablesizecommunicator.hh>
59#include <dune/grid/common/gridenums.hh>
60
61#if HAVE_OPM_COMMON
62#include <opm/input/eclipse/EclipseState/Grid/EclipseGrid.hpp>
63#include <opm/input/eclipse/EclipseState/Grid/NNC.hpp>
64#endif
65
67
69#include "CpGridDataTraits.hpp"
70//#include "DataHandleWrappers.hpp"
71//#include "GlobalIdMapping.hpp"
72#include "Geometry.hpp"
73
74#include <array>
75#include <initializer_list>
76#include <set>
77#include <vector>
78
79namespace Opm
80{
81class EclipseState;
82}
83namespace Dune
84{
85class CpGrid;
86
87namespace cpgrid
88{
89
90class IndexSet;
91class IdSet;
92class LevelGlobalIdSet;
93class PartitionTypeIndicator;
94template<int,int> class Geometry;
95template<int> class Entity;
96template<int> class EntityRep;
97}
98}
99
101 const std::array<int, 3>&,
102 bool);
103
104namespace Dune
105{
106namespace cpgrid
107{
108namespace mover
109{
110template<class T, int i> struct Mover;
111}
112
118{
119 template<class T, int i> friend struct mover::Mover;
120 friend class GlobalIdSet;
121 friend class HierarchicIterator;
125
126 friend
128 const std::array<int, 3>&,
129 bool);
130
131private:
132 CpGridData(const CpGridData& g);
133
134public:
135 enum{
136#ifndef MAX_DATA_COMMUNICATED_PER_ENTITY
144#else
149 MAX_DATA_PER_CELL = MAX_DATA_COMMUNICATED_PER_ENTITY
150#endif
151 };
152
153 CpGridData() = delete;
154
159 explicit CpGridData(MPIHelper::MPICommunicator comm, std::vector<std::shared_ptr<CpGridData>>& data);
160
161
162
164 explicit CpGridData(std::vector<std::shared_ptr<CpGridData>>& data);
167
168
169
170
172 int size(int codim) const;
173
175 int size (GeometryType type) const
176 {
177 if (type.isCube()) {
178 return size(3 - type.dim());
179 } else {
180 return 0;
181 }
182 }
183
199 void readEclipseFormat(const std::string& filename,
200 bool periodic_extension,
201 bool turn_normals = false,
202 bool edge_conformal = false);
203
204#if HAVE_OPM_COMMON
226 void processEclipseFormat(const Opm::Deck& deck,
227 bool periodic_extension,
228 bool turn_normals = false,
229 bool clip_z = false,
230 const std::vector<double>& poreVolume = std::vector<double>{},
231 bool edge_conformal = false);
232
267 std::vector<std::size_t>
268 processEclipseFormat(const Opm::EclipseGrid* ecl_grid,
269 Opm::EclipseState* ecl_state,
270 bool periodic_extension,
271 bool turn_normals = false,
272 bool clip_z = false,
273 bool pinchActive = true,
274 bool edge_conformal = false);
275#endif
276
305 void processEclipseFormat(const grdecl& input_data,
306#if HAVE_OPM_COMMON
307 Opm::EclipseState* ecl_state,
308#endif
309 std::array<std::set<std::pair<int, int>>, 2>& nnc,
310 bool remove_ij_boundary,
311 bool turn_normals,
312 bool pinchActive,
313 double tolerance_unique_points,
314 bool edge_conformal);
315
323 void getIJK(int c, std::array<int,3>& ijk) const;
324
325 int cellFace(int cell, int local_index) const
326 {
327 return cell_to_face_[cpgrid::EntityRep<0>(cell, true)][local_index].index();
328 }
329
    /// \brief Row of (oriented) faces attached to cell cellIdx, as stored in cell_to_face_.
330 auto cellToFace(int cellIdx) const
331 {
332 return cell_to_face_[cpgrid::EntityRep<0>(cellIdx, true)];
333 }
334
    /// \brief Full cell-to-point table: for every cell, its 8 corner point indices.
335 const auto& cellToPoint() const
336 {
337 return cell_to_point_;
338 }
339
    /// \brief The 8 corner point indices of cell cellIdx (no bounds check).
340 const auto& cellToPoint(int cellIdx) const
341 {
342 return cell_to_point_[cellIdx];
343 }
344
345 int faceToCellSize(int face) const {
346 Dune::cpgrid::EntityRep<1> faceRep(face, true);
347 return face_to_cell_[faceRep].size();
348 }
349
350 auto faceTag(int faceIdx) const
351 {
352 Dune::cpgrid::EntityRep<1> faceRep(faceIdx, true);
353 return face_tag_[faceRep];
354 }
355
356 auto faceNormals(int faceIdx) const
357 {
358 Dune::cpgrid::EntityRep<1> faceRep(faceIdx, true);
359 return face_normals_[faceRep];
360 }
361
    /// \brief Row of point indices of face faceIdx, as stored in face_to_point_.
362 auto faceToPoint(int faceIdx) const
363 {
364 return face_to_point_[faceIdx];
365 }
366
    /// \brief Total number of faces, i.e. the number of rows of face_to_cell_.
367 int numFaces() const
368 {
369 return face_to_cell_.size();
370 }
371
    /// \brief Number of corner history records (zero for a grid without LGRs).
372 auto cornerHistorySize() const
373 {
374 return corner_history_.size();
375 }
376
377 const auto& getCornerHistory(int cornerIdx) const
378 {
379 if(cornerHistorySize()) {
380 return corner_history_[cornerIdx];
381 }
382 else {
383 OPM_THROW(std::logic_error, "Vertex has no history record.\n");
384 }
385 }
386
    /// \brief Per-active-cell global (Cartesian) cell index, as read from the input deck.
393 const std::vector<int>& globalCell() const
394 {
395 return global_cell_;
396 }
397
400 bool hasNNCs(const std::vector<int>& cellIndices) const;
401
415 bool mark(int refCount, const cpgrid::Entity<0>& element, bool throwOnFailure = false);
416
420 int getMark(const cpgrid::Entity<0>& element) const;
421
431 bool preAdapt();
432
434 bool adapt();
435
437 void postAdapt();
438
439private:
440 std::array<Dune::FieldVector<double,3>,8> getReferenceRefinedCorners(int idx_in_parent_cell, const std::array<int,3>& cells_per_dim) const;
441
442public:
    /// \brief Position of this CpGridData object inside level_data_ptr_.
    ///
    /// Returns level_ for actual level grids; for the leaf grid view of a
    /// refined grid it returns the last position in level_data_ptr_.
444 int getGridIdx() const {
445 // Not the nicest way of checking if "this" points at the leaf grid view of a mixed grid (with coarse and refined cells).
446 // 1. When the grid has been refined at least once, level_data_ptr_ ->size() >1. Therefore, there is a chance of "this" pointing at the leaf grid view.
447 // 2. Unfortunately, level_ is default initialized by 0. This implies, in particular, that if someone wants to check the value of
448 // "this->level_" when "this" points at the leaf grid view of a grid that has been refined, this value is - unfortunately - equal to 0.
449 // 3. Due to 2. we need an extra bool value to distinguish between the actual level 0 grid and such a leaf grid view (with incorrect level_ == 0). For this
450 // reason we check if child_to_parent_cells_.empty() [true for actual level 0 grid, false for the leaf grid view].
451 // --- TO BE IMPROVED ---
452 if ((level_data_ptr_ ->size() >1) && (level_ == 0) && (!child_to_parent_cells_.empty())) {
453 return level_data_ptr_->size() -1;
454 }
455 return level_;
456 }
    /// \brief All level grids (and leaf view) of this grid hierarchy.
    /// \throws std::logic_error if the level-data vector has not been initialized.
458 const std::vector<std::shared_ptr<Dune::cpgrid::CpGridData>>& levelData() const
459 {
460 if (level_data_ptr_->empty()) {
461 OPM_THROW(std::logic_error, "Level data has not been initialized\n");
462 }
463 return *level_data_ptr_;
464 }
465
    /// \brief {level of the children, child cell indices} of parent cell elemIdx (no bounds check).
473 const std::tuple<int,std::vector<int>>& getChildrenLevelAndIndexList(int elemIdx) const {
474 return parent_to_children_cells_[elemIdx];
475 }
476
    /// \brief Full parent-to-children table: per cell, {children level, children indices}.
477 const std::vector<std::tuple<int,std::vector<int>>>& getParentToChildren() const {
478 return parent_to_children_cells_;
479 }
480
482 {
483 return geometry_;
484 }
485
    /// \brief Map a cell index on this level grid to the corresponding leaf-view cell index.
    /// \throws std::logic_error if the grid has no LGRs (no mapping exists).
486 int getLeafIdxFromLevelIdx(int level_cell_idx) const
487 {
488 if (level_to_leaf_cells_.empty()) {
489 OPM_THROW(std::logic_error, "Grid has no LGRs. No mapping to the leaf.\n");
490 }
491 return level_to_leaf_cells_[level_cell_idx];
492 }
493
515 std::tuple< const std::shared_ptr<CpGridData>,
516 const std::vector<std::array<int,2>>> // parent_to_refined_corners(~boundary_old_to_new_corners)
517 refineSingleCell(const std::array<int,3>& cells_per_dim,
518 const int& parent_idx,
519 std::vector<std::vector<std::pair<int, std::vector<int>>>>& faceInMarkedElemAndRefinedFaces) const;
520
521 // @brief Compute center of an entity/element/cell in the Eclipse way:
522 // - Average of the 4 corners of the bottom face.
523 // - Average of the 4 corners of the top face.
524 // Return average of the previous computations.
525 // @param [in] int Index of a cell.
526 // @return 'eclipse centroid'
527 std::array<double,3> computeEclCentroid(const int idx) const;
528
529 // @brief Compute center of an entity/element/cell in the Eclipse way:
530 // - Average of the 4 corners of the bottom face.
531 // - Average of the 4 corners of the top face.
532 // Return average of the previous computations.
533 // @param [in] Entity<0> Entity
534 // @return 'eclipse centroid'
535 std::array<double,3> computeEclCentroid(const Entity<0>& elem) const;
536
537 // Make unique boundary ids for all intersections.
539
    /// \brief Whether unique boundary ids are in use (see setUniqueBoundaryIds()).
543 bool uniqueBoundaryIds() const
544 {
545 return use_unique_boundary_ids_;
546 }
547
550 void setUniqueBoundaryIds(bool uids)
551 {
552 use_unique_boundary_ids_ = uids;
553 if (use_unique_boundary_ids_ && unique_boundary_ids_.empty()) {
555 }
556 }
557
    /// \brief Raw ZCORN corner-depth data kept from the input deck.
561 const std::vector<double>& zcornData() const {
562 return zcorn;
563 }
564
565
    /// \brief The index set of this grid view.
568 const IndexSet& indexSet() const
569 {
570 return *index_set_;
571 }
572
575 {
576 return *local_id_set_;
577 }
578
581 {
582 return *global_id_set_;
583 }
584
    /// \brief Logical Cartesian dimensions {nx, ny, nz} of the underlying structured grid.
588 const std::array<int, 3>& logicalCartesianSize() const
589 {
590 return logical_cartesian_size_;
591 }
592
597 const CpGridData& view_data,
598 const std::vector<int>& cell_part);
599
605 template<class DataHandle>
606 void communicate(DataHandle& data, InterfaceType iftype, CommunicationDirection dir);
607
609
611
612 void computeCommunicationInterfaces(int noexistingPoints);
613
619
622#if HAVE_MPI
625
628
631
634
637
642 {
643 return cell_comm_;
644 }
645
650 {
651 return cell_comm_;
652 }
653
655 {
656 return cellCommunication().indexSet();
657 }
658
660 {
661 return cellCommunication().indexSet();
662 }
663
665 {
666 return cellCommunication().remoteIndices();
667 }
668
670 {
671 return cellCommunication().remoteIndices();
672 }
673#endif
674
    /// \brief Sorted list of aquifer cell indices.
676 const std::vector<int>& sortedNumAquiferCells() const
677 {
678 return aquifer_cells_;
679 }
680
681private:
682
684 void populateGlobalCellIndexSet();
685
686#if HAVE_MPI
687
693 template<class DataHandle>
694 void gatherData(DataHandle& data, CpGridData* global_view,
695 CpGridData* distributed_view);
696
697
704 template<int codim, class DataHandle>
705 void gatherCodimData(DataHandle& data, CpGridData* global_data,
706 CpGridData* distributed_data);
707
714 template<class DataHandle>
715 void scatterData(DataHandle& data, const CpGridData* global_data,
716 const CpGridData* distributed_data, const InterfaceMap& cell_inf,
717 const InterfaceMap& point_inf);
718
726 template<int codim, class DataHandle>
727 void scatterCodimData(DataHandle& data, CpGridData* global_data,
728 CpGridData* distributed_data);
729
738 template<int codim, class DataHandle>
739 void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
740 const Interface& interface);
741
750 template<int codim, class DataHandle>
751 void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
752 const InterfaceMap& interface);
753
754#endif
755
756 void computeGeometry(const CpGrid& grid,
757 const DefaultGeometryPolicy& globalGeometry,
758 const std::vector<int>& globalAquiferCells,
759 const OrientedEntityTable<0, 1>& globalCell2Faces,
760 DefaultGeometryPolicy& geometry,
761 std::vector<int>& aquiferCells,
763 const std::vector< std::array<int,8> >& cell2Points);
764
765 // Representing the topology
777 Opm::SparseTable<int> face_to_point_;
779 std::vector< std::array<int,8> > cell_to_point_;
786 std::array<int, 3> logical_cartesian_size_{};
793 std::vector<int> global_cell_;
799 typedef FieldVector<double, 3> PointType;
803 cpgrid::EntityVariable<int, 1> unique_boundary_ids_;
805 std::unique_ptr<cpgrid::IndexSet> index_set_;
807 std::shared_ptr<const cpgrid::IdSet> local_id_set_;
809 std::shared_ptr<LevelGlobalIdSet> global_id_set_;
811 std::shared_ptr<PartitionTypeIndicator> partition_type_indicator_;
813 std::vector<int> mark_;
815 int level_{0};
817 std::vector<std::shared_ptr<CpGridData>>* level_data_ptr_;
818 // SUITABLE FOR ALL LEVELS EXCEPT FOR LEAFVIEW
820 std::vector<int> level_to_leaf_cells_; // In entry 'level cell index', we store 'leafview cell index' // {level LGR, {child0, child1, ...}}
822 std::vector<std::tuple<int,std::vector<int>>> parent_to_children_cells_; // {# children in x-direction, ... y-, ... z-}
824 std::array<int,3> cells_per_dim_;
825 // SUITABLE ONLY FOR LEAFVIEW // {level, cell index in that level}
827 std::vector<std::array<int,2>> leaf_to_level_cells_;
829 std::vector<std::array<int,2>> corner_history_;
830 // SUITABLE FOR ALL LEVELS INCLUDING LEAFVIEW // {level parent cell, parent cell index}
832 std::vector<std::array<int,2>> child_to_parent_cells_;
835 std::vector<int> cell_to_idxInParentCell_;
837 int refinement_max_level_{0};
838
840 Communication ccobj_;
841
842 // Boundary information (optional).
843 bool use_unique_boundary_ids_;
844
850 std::vector<double> zcorn;
851
853 std::vector<int> aquifer_cells_;
854
855#if HAVE_MPI
856
858 CommunicationType cell_comm_;
859
861 std::tuple<Interface,Interface,Interface,Interface,Interface> cell_interfaces_;
862 /*
863 // code deactivated, because users cannot access face indices and therefore
864 // communication on faces makes no sense!
866 std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
867 face_interfaces_;
868 */
870 std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
871 point_interfaces_;
872
873#endif
874
    /// \brief Return the geometry vector (entity geometries) for the given codim
    /// (0 = cells, 1 = faces, 3 = points).
876 template <int codim>
877 const EntityVariable<Geometry<3 - codim, 3>, codim>& geomVector() const
878 {
879 return geometry_.geomVector<codim>();
880 }
881
882 friend class Dune::CpGrid;
883 template<int> friend class Entity;
884 template<int> friend class EntityRep;
885 friend class Intersection;
887};
888
889
890
891#if HAVE_MPI
892
893namespace
894{
899template<class T>
900T& getInterface(InterfaceType iftype,
901 std::tuple<T,T,T,T,T>& interfaces)
902{
903 switch(iftype)
904 {
905 case 0:
906 return std::get<0>(interfaces);
907 case 1:
908 return std::get<1>(interfaces);
909 case 2:
910 return std::get<2>(interfaces);
911 case 3:
912 return std::get<3>(interfaces);
913 case 4:
914 return std::get<4>(interfaces);
915 }
916 OPM_THROW(std::runtime_error, "Invalid Interface type was used during communication");
917}
918
919} // end unnamed namespace
920
/// \brief Communicate one codim over a Dune Interface by delegating to the
/// InterfaceMap overload with the interface's underlying map.
921template<int codim, class DataHandle>
922void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
923 const Interface& interface)
924{
925 this->template communicateCodim<codim>(data, dir, interface.interfaces());
926}
927
928template<int codim, class DataHandle>
929void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data_wrapper, CommunicationDirection dir,
930 const InterfaceMap& interface)
931{
932 Communicator comm(ccobj_, interface);
933
934 if(dir==ForwardCommunication)
935 comm.forward(data_wrapper);
936 else
937 comm.backward(data_wrapper);
938}
939#endif
940
/// \brief Communicate data attached to cells (codim 0) and/or points (codim 3)
/// over the interface selected by iftype, in direction dir.
/// Compiled without MPI this is a no-op.
941template<class DataHandle>
942void CpGridData::communicate(DataHandle& data, InterfaceType iftype,
943 CommunicationDirection dir)
944{
945#if HAVE_MPI
 // Cells: handle reports data for codim 0 of the 3d grid.
946 if(data.contains(3,0))
947 {
948 Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*this, data);
949 communicateCodim<0>(data_wrapper, dir, getInterface(iftype, cell_interfaces_));
950 }
 // Points: handle reports data for codim 3.
951 if(data.contains(3,3))
952 {
953 Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*this, data);
954 communicateCodim<3>(data_wrapper, dir, getInterface(iftype, point_interfaces_));
955 }
956#else
957 // Suppress warnings for unused arguments.
958 (void) data;
959 (void) iftype;
960 (void) dir;
961#endif
962}
963}}
964
965#if HAVE_MPI
968
969namespace Dune {
970namespace cpgrid {
971
972namespace mover
973{
// Sequential read/write buffer used by the movers and by gatherCodimData
// (referred to below as mover::MoveBuffer<T>; the class-name line is not
// shown in this rendering). Elements are written and read front-to-back via
// an internal cursor; reset() rewinds the cursor without freeing storage.
974template<class T>
976{
978public:
 // Read the next element and advance the cursor (no bounds check).
979 void read(T& data)
980 {
981 data=buffer_[index_++];
982 }
 // Write the next element and advance the cursor (no bounds check).
983 void write(const T& data)
984 {
985 buffer_[index_++]=data;
986 }
 // Rewind the cursor to the beginning; contents are kept.
987 void reset()
988 {
989 index_=0;
990 }
 // Reallocate to the given size and rewind the cursor.
991 void resize(std::size_t size)
992 {
993 buffer_.resize(size);
994 index_=0;
995 }
996private:
997 std::vector<T> buffer_;
998 typename std::vector<T>::size_type index_;
999};
// Primary Mover template: intentionally empty. Only the specializations for
// codim 0 (cells), 1 (faces) and 3 (points) below are usable.
1000template<class DataHandle,int codim>
1001struct Mover
1002{
1003};
1004
// Common base of the Mover specializations (BaseMover; its struct-header line
// is not shown in this rendering). Moves the user data of one entity to
// another by gathering into a MoveBuffer and scattering back out.
1005template<class DataHandle>
1007{
1008 explicit BaseMover(DataHandle& data)
1009 : data_(data)
1010 {}
 // Copy the data attached to entity `from` onto entity `to` via the buffer.
1011 template<class E>
1012 void moveData(const E& from, const E& to)
1013 {
1014 std::size_t size=data_.size(from);
1015 buffer.resize(size);
1016 data_.gather(buffer, from);
 // Rewind so scatter reads from the start of what gather just wrote.
1017 buffer.reset();
1018 data_.scatter(buffer, to, size);
1019 }
1020 DataHandle& data_;
1022};
1023
1024
// Mover specialization for cell (codim 0) data: moves the data of one cell of
// the gather view onto one cell of the scatter view.
1025template<class DataHandle>
1027{
1028 Mover(DataHandle& data, CpGridData* gatherView,
1029 CpGridData* scatterView)
1030 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1031 {}
1032
1033 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1034 {
1035 Entity<0> from_entity=Entity<0>(*gatherView_, from_cell_index, true);
1036 Entity<0> to_entity=Entity<0>(*scatterView_, to_cell_index, true);
1037 this->moveData(from_entity, to_entity);
1038 }
1041};
1042
// Mover specialization for face (codim 1) data: moves the data of all faces
// of a cell on the gather view onto the corresponding faces on the scatter
// view, assuming both cells list their faces in the same local order.
1043template<class DataHandle>
1045{
1046 Mover(DataHandle& data, CpGridData* gatherView,
1047 CpGridData* scatterView)
1048 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1049 {}
1050
1051 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1052 {
1053 typedef typename OrientedEntityTable<0,1>::row_type row_type;
1054 EntityRep<0> from_cell=EntityRep<0>(from_cell_index, true);
1055 EntityRep<0> to_cell=EntityRep<0>(to_cell_index, true);
1056 const OrientedEntityTable<0,1>& table = gatherView_->cell_to_face_;
1057 row_type from_faces=table.operator[](from_cell);
1058 row_type to_faces=scatterView_->cell_to_face_[to_cell];
1059
1060 for(int i=0; i<from_faces.size(); ++i)
1061 this->moveData(from_faces[i], to_faces[i]);
1062 }
1065};
1066
// Mover specialization for point (codim 3) data: moves the data of the 8
// corner points of a cell on the gather view onto the corresponding corners
// on the scatter view.
1067template<class DataHandle>
1069{
1070 Mover(DataHandle& data, CpGridData* gatherView,
1071 CpGridData* scatterView)
1072 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1073 {}
1074 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1075 {
1076 const std::array<int,8>& from_cell_points=
1077 gatherView_->cell_to_point_[from_cell_index];
1078 const std::array<int,8>& to_cell_points=
1079 scatterView_->cell_to_point_[to_cell_index];
1080 for(std::size_t i=0; i<8; ++i)
1081 {
1082 this->moveData(Entity<3>(*gatherView_, from_cell_points[i], true),
1083 Entity<3>(*scatterView_, to_cell_points[i], true));
1084 }
1085 }
1088};
1089
1090} // end mover namespace
1091
/// \brief Scatter user data from a global view to a distributed view, for
/// cells (codim 0) and/or points (codim 3), over the given cell/point
/// interface maps. No-op without MPI.
1092template<class DataHandle>
1093void CpGridData::scatterData(DataHandle& data, const CpGridData* global_data,
1094 const CpGridData* distributed_data, const InterfaceMap& cell_inf,
1095 const InterfaceMap& point_inf)
1096{
1097#if HAVE_MPI
1098 if(data.contains(3,0))
1099 {
1100 Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*global_data, *distributed_data, data);
1101 communicateCodim<0>(data_wrapper, ForwardCommunication, cell_inf);
1102 }
1103 if(data.contains(3,3))
1104 {
1105 Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*global_data, *distributed_data, data);
1106 communicateCodim<3>(data_wrapper, ForwardCommunication, point_inf);
1107 }
1108#endif
1109}
1110
1111template<int codim, class DataHandle>
1112void CpGridData::scatterCodimData(DataHandle& data, CpGridData* global_data,
1113 CpGridData* distributed_data)
1114{
1115 CpGridData *gather_view, *scatter_view;
1116 gather_view=global_data;
1117 scatter_view=distributed_data;
1118
1119 mover::Mover<DataHandle,codim> mover(data, gather_view, scatter_view);
1120
1121
1122 for(auto index=distributed_data->cellIndexSet().begin(),
1123 end = distributed_data->cellIndexSet().end();
1124 index!=end; ++index)
1125 {
1126 std::size_t from=index->global();
1127 std::size_t to=index->local();
1128 mover(from,to);
1129 }
1130}
1131
1132namespace
1133{
1134
1135template<int codim, class T, class F>
1136void visitInterior(CpGridData& distributed_data, T begin, T endit, F& func)
1137{
1138 for(T it=begin; it!=endit; ++it)
1139 {
1140 Entity<codim> entity(distributed_data, it-begin, true);
1141 PartitionType pt = entity.partitionType();
1142 if(pt==Dune::InteriorEntity)
1143 {
1144 func(*it, entity);
1145 }
1146 }
1147}
1148
// Functor for visitInterior: records, for each owned entity, its global index
// and the number of data items the DataHandle attaches to it.
1149template<class DataHandle>
1150struct GlobalIndexSizeGatherer
1151{
1152 GlobalIndexSizeGatherer(DataHandle& data_,
1153 std::vector<int>& ownedGlobalIndices_,
1154 std::vector<int>& ownedSizes_)
1155 : data(data_), ownedGlobalIndices(ownedGlobalIndices_), ownedSizes(ownedSizes_)
1156 {}
1157
1158 template<class T, class E>
1159 void operator()(T& i, E& entity)
1160 {
1161 ownedGlobalIndices.push_back(i);
1162 ownedSizes.push_back(data.size(entity));
1163 }
1164 DataHandle& data;
1165 std::vector<int>& ownedGlobalIndices;
1166 std::vector<int>& ownedSizes;
1167};
1168
// Functor for visitInterior: gathers the data attached to each visited entity
// into a shared MoveBuffer (the index value itself is unused).
1169template<class DataHandle>
1170struct DataGatherer
1171{
1172 DataGatherer(mover::MoveBuffer<typename DataHandle::DataType>& buffer_,
1173 DataHandle& data_)
1174 : buffer(buffer_), data(data_)
1175 {}
1176
1177 template<class T, class E>
1178 void operator()(T& /* it */, E& entity)
1179 {
1180 data.gather(buffer, entity);
1181 }
1182 mover::MoveBuffer<typename DataHandle::DataType>& buffer;
1183 DataHandle& data;
1184};
1185
1186}
1187
/// \brief Gather user data from a distributed view onto the global view, for
/// cells (codim 0) and/or points (codim 3). No-op without MPI.
1188template<class DataHandle>
1189void CpGridData::gatherData(DataHandle& data, CpGridData* global_data,
1190 CpGridData* distributed_data)
1191{
1192#if HAVE_MPI
1193 if(data.contains(3,0))
1194 gatherCodimData<0>(data, global_data, distributed_data);
1195 if(data.contains(3,3))
1196 gatherCodimData<3>(data, global_data, distributed_data);
1197#endif
1198}
1199
/// \brief Gather data of one codimension from all processes onto the global
/// view, using two MPI_Allgatherv rounds: first the owned global indices and
/// per-entity data sizes, then the actual data items. No-op without MPI.
1200template<int codim, class DataHandle>
1201void CpGridData::gatherCodimData(DataHandle& data, CpGridData* global_data,
1202 CpGridData* distributed_data)
1203{
1204#if HAVE_MPI
1205 // Get the mapping to global index from the global id set
1206 const std::vector<int>& mapping =
1207 distributed_data->global_id_set_->getMapping<codim>();
1208
1209 // Get the global indices and data size for the entities whose data is
1210 // to be sent, i.e. the ones that we own.
1211 std::vector<int> owned_global_indices;
1212 std::vector<int> owned_sizes;
1213 owned_global_indices.reserve(mapping.size());
1214 owned_sizes.reserve(mapping.size());
1215
1216 GlobalIndexSizeGatherer<DataHandle> gisg(data, owned_global_indices, owned_sizes);
1217 visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gisg);
1218
1219 // communicate the number of indices that each processor sends
1220 int no_indices=owned_sizes.size();
1221 // We will take the address of the first element for MPI_Allgather below.
1222 // Make sure the containers have such an element.
1223 if ( owned_global_indices.empty() )
1224 owned_global_indices.resize(1);
1225 if ( owned_sizes.empty() )
1226 owned_sizes.resize(1);
1227 std::vector<int> no_indices_to_recv(distributed_data->ccobj_.size());
1228 distributed_data->ccobj_.allgather(&no_indices, 1, &(no_indices_to_recv[0]));
1229 // compute size of the vector capable of receiving all indices
1230 // and allgather the global indices and the sizes.
1231 // calculate displacements (exclusive prefix sum of the receive counts)
1232 std::vector<int> displ(distributed_data->ccobj_.size()+1, 0);
1233 std::transform(displ.begin(), displ.end()-1, no_indices_to_recv.begin(), displ.begin()+1,
1234 std::plus<int>());
1235 int global_size=displ[displ.size()-1];//+no_indices_to_recv[displ.size()-1];
1236 std::vector<int> global_indices(global_size);
1237 std::vector<int> global_sizes(global_size);
1238 MPI_Allgatherv(&(owned_global_indices[0]), no_indices, MPITraits<int>::getType(),
1239 &(global_indices[0]), &(no_indices_to_recv[0]), &(displ[0]),
1240 MPITraits<int>::getType(),
1241 distributed_data->ccobj_);
1242 MPI_Allgatherv(&(owned_sizes[0]), no_indices, MPITraits<int>::getType(),
1243 &(global_sizes[0]), &(no_indices_to_recv[0]), &(displ[0]),
1244 MPITraits<int>::getType(),
1245 distributed_data->ccobj_);
1246 std::vector<int>().swap(owned_global_indices); // free data for reuse.
1247 // Compute the number of data items to send (sum of the sizes of each
1248 // rank's entities, delimited by the displacements computed above).
1248 std::vector<int> no_data_send(distributed_data->ccobj_.size());
1249 for(typename std::vector<int>::iterator begin=no_data_send.begin(),
1250 i=begin, end=no_data_send.end(); i!=end; ++i)
1251 *i = std::accumulate(global_sizes.begin()+displ[i-begin],
1252 global_sizes.begin()+displ[i-begin+1], std::size_t());
1253 // free at least some memory that can be reused.
1254 std::vector<int>().swap(owned_sizes);
1255 // compute the displacements for receiving with allgatherv
1256 displ[0]=0;
1257 std::transform(displ.begin(), displ.end()-1, no_data_send.begin(), displ.begin()+1,
1258 std::plus<std::size_t>());
1259 // Compute the number of data items we will receive
1260 int no_data_recv = displ[displ.size()-1];//+global_sizes[displ.size()-1];
1261
1262 // Collect the data to send, gather it
1263 mover::MoveBuffer<typename DataHandle::DataType> local_data_buffer, global_data_buffer;
 // As above, guarantee a first element whose address can be taken even
 // when this rank sends nothing.
1264 if ( no_data_send[distributed_data->ccobj_.rank()] )
1265 {
1266 local_data_buffer.resize(no_data_send[distributed_data->ccobj_.rank()]);
1267 }
1268 else
1269 {
1270 local_data_buffer.resize(1);
1271 }
1272 global_data_buffer.resize(no_data_recv);
1273
1274 DataGatherer<DataHandle> gatherer(local_data_buffer, data);
1275 visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gatherer);
1276 MPI_Allgatherv(&(local_data_buffer.buffer_[0]), no_data_send[distributed_data->ccobj_.rank()],
1277 MPITraits<typename DataHandle::DataType>::getType(),
1278 &(global_data_buffer.buffer_[0]), &(no_data_send[0]), &(displ[0]),
1279 MPITraits<typename DataHandle::DataType>::getType(),
1280 distributed_data->ccobj_);
1281 Entity2IndexDataHandle<DataHandle, codim> edata(*global_data, data);
 // NOTE(review): the received indices appear to be offset by the sizes of
 // all codims < codim; subtracting `offset` converts them to codim-local
 // indices — verify against getMapping<codim>().
1282 int offset=0;
1283 for(int i=0; i< codim; ++i)
1284 offset+=global_data->size(i);
1285
1286 typename std::vector<int>::const_iterator s=global_sizes.begin();
1287 for(typename std::vector<int>::const_iterator i=global_indices.begin(),
1288 end=global_indices.end();
1289 i!=end; ++s, ++i)
1290 {
1291 edata.scatter(global_data_buffer, *i-offset, *s);
1292 }
1293#endif
1294}
1295
1296} // end namespace cpgrid
1297} // end namespace Dune
1298
1299#endif
1300
1301#endif
DataHandle & data
Definition: CpGridData.hpp:1164
mover::MoveBuffer< typename DataHandle::DataType > & buffer
Definition: CpGridData.hpp:1182
std::vector< int > & ownedGlobalIndices
Definition: CpGridData.hpp:1165
void refine_and_check(const Dune::cpgrid::Geometry< 3, 3 > &, const std::array< int, 3 > &, bool)
std::vector< int > & ownedSizes
Definition: CpGridData.hpp:1166
#define OPM_THROW(Exception, message)
Definition: ErrorMacros.hpp:29
[ provides Dune::Grid ]
Definition: CpGrid.hpp:203
Struct that holds all the data needed to represent a CpGrid.
Definition: CpGridData.hpp:118
auto faceToPoint(int faceIdx) const
Definition: CpGridData.hpp:362
void postAdapt()
Clean up refinement/coarsening markers - set every element to the mark 0 which represents 'doing noth...
const cpgrid::LevelGlobalIdSet & globalIdSet() const
Get the global index set.
Definition: CpGridData.hpp:580
CpGridDataTraits::CommunicationType CommunicationType
type of OwnerOverlap communication for cells
Definition: CpGridData.hpp:630
@ MAX_DATA_PER_CELL
The maximum data items allowed per cell (DUNE < 2.5.2)
Definition: CpGridData.hpp:143
CpGridDataTraits::ParallelIndexSet ParallelIndexSet
The type of the parallel index set.
Definition: CpGridData.hpp:633
int size(GeometryType type) const
number of leaf entities per geometry type in this process
Definition: CpGridData.hpp:175
const std::array< int, 3 > & logicalCartesianSize() const
Definition: CpGridData.hpp:588
void communicate(DataHandle &data, InterfaceType iftype, CommunicationDirection dir)
communicate objects for all codims on a given level
Definition: CpGridData.hpp:942
auto faceTag(int faceIdx) const
Definition: CpGridData.hpp:350
bool uniqueBoundaryIds() const
Definition: CpGridData.hpp:543
const std::tuple< int, std::vector< int > > & getChildrenLevelAndIndexList(int elemIdx) const
Retrieves the level and child indices of a given parent cell.
Definition: CpGridData.hpp:473
std::array< double, 3 > computeEclCentroid(const Entity< 0 > &elem) const
void computeCommunicationInterfaces(int noexistingPoints)
int getLeafIdxFromLevelIdx(int level_cell_idx) const
Definition: CpGridData.hpp:486
const auto & getCornerHistory(int cornerIdx) const
Definition: CpGridData.hpp:377
RemoteIndices & cellRemoteIndices()
Definition: CpGridData.hpp:664
int size(int codim) const
number of leaf entities per codim in this process
void readEclipseFormat(const std::string &filename, bool periodic_extension, bool turn_normals=false, bool edge_conformal=false)
CpGridDataTraits::CollectiveCommunication CollectiveCommunication
Definition: CpGridData.hpp:618
const std::vector< int > & globalCell() const
Definition: CpGridData.hpp:393
CpGridDataTraits::InterfaceMap InterfaceMap
The type of the map describing communication interfaces.
Definition: CpGridData.hpp:627
CpGridDataTraits::Communication Communication
The type of the collective communication.
Definition: CpGridData.hpp:617
int numFaces() const
Definition: CpGridData.hpp:367
const std::vector< std::tuple< int, std::vector< int > > > & getParentToChildren() const
Definition: CpGridData.hpp:477
void getIJK(int c, std::array< int, 3 > &ijk) const
Extract Cartesian index triplet (i,j,k) of an active cell.
const IndexSet & indexSet() const
Definition: CpGridData.hpp:568
CommunicationType & cellCommunication()
Get the owner-overlap-copy communication for cells.
Definition: CpGridData.hpp:641
auto cellToFace(int cellIdx) const
Definition: CpGridData.hpp:330
auto faceNormals(int faceIdx) const
Definition: CpGridData.hpp:356
CpGridDataTraits::RemoteIndices RemoteIndices
The type of the remote indices information.
Definition: CpGridData.hpp:636
auto cornerHistorySize() const
Definition: CpGridData.hpp:372
ParallelIndexSet & cellIndexSet()
Definition: CpGridData.hpp:654
const auto & cellToPoint(int cellIdx) const
Definition: CpGridData.hpp:340
int getMark(const cpgrid::Entity< 0 > &element) const
Return refinement mark for entity.
const ParallelIndexSet & cellIndexSet() const
Definition: CpGridData.hpp:659
CpGridDataTraits::MPICommunicator MPICommunicator
The type of the mpi communicator.
Definition: CpGridData.hpp:615
bool mark(int refCount, const cpgrid::Entity< 0 > &element, bool throwOnFailure=false)
Mark entity for refinement or coarsening.
CpGridData(std::vector< std::shared_ptr< CpGridData > > &data)
Constructor.
void distributeGlobalGrid(CpGrid &grid, const CpGridData &view_data, const std::vector< int > &cell_part)
Redistribute a global grid.
const std::vector< std::shared_ptr< Dune::cpgrid::CpGridData > > & levelData() const
Add doc/or remove method and replace it with better approach.
Definition: CpGridData.hpp:458
const auto & cellToPoint() const
Definition: CpGridData.hpp:335
const std::vector< double > & zcornData() const
Definition: CpGridData.hpp:561
void setUniqueBoundaryIds(bool uids)
Definition: CpGridData.hpp:550
bool preAdapt()
Set mightVanish flags for elements that will be refined in the next adapt() call Need to be called af...
int getGridIdx() const
Add doc/or remove method and replace it with better approach.
Definition: CpGridData.hpp:444
void processEclipseFormat(const grdecl &input_data, std::array< std::set< std::pair< int, int > >, 2 > &nnc, bool remove_ij_boundary, bool turn_normals, bool pinchActive, double tolerance_unique_points, bool edge_conformal)
bool hasNNCs(const std::vector< int > &cellIndices) const
Check all cells selected for refinement have no NNCs (no neighbor connections). Assumption: all grid ...
const CommunicationType & cellCommunication() const
Get the owner-overlap-copy communication for cells.
Definition: CpGridData.hpp:649
int cellFace(int cell, int local_index) const
Definition: CpGridData.hpp:325
const cpgrid::IdSet & localIdSet() const
Get the local index set.
Definition: CpGridData.hpp:574
const cpgrid::DefaultGeometryPolicy getGeometry() const
Definition: CpGridData.hpp:481
bool adapt()
TO DO: Documentation. Triggers the grid refinement process - Currently, returns preAdapt()
int faceToCellSize(int face) const
Definition: CpGridData.hpp:345
CpGridDataTraits::Communicator Communicator
The type of the Communicator.
Definition: CpGridData.hpp:624
std::tuple< const std::shared_ptr< CpGridData >, const std::vector< std::array< int, 2 > > > refineSingleCell(const std::array< int, 3 > &cells_per_dim, const int &parent_idx, std::vector< std::vector< std::pair< int, std::vector< int > > > > &faceInMarkedElemAndRefinedFaces) const
Refine a single cell and return a shared pointer of CpGridData type.
std::array< double, 3 > computeEclCentroid(const int idx) const
const std::vector< int > & sortedNumAquiferCells() const
Get sorted active cell indices of numerical aquifer.
Definition: CpGridData.hpp:676
const RemoteIndices & cellRemoteIndices() const
Definition: CpGridData.hpp:669
CpGridData(MPIHelper::MPICommunicator comm, std::vector< std::shared_ptr< CpGridData > > &data)
Definition: DefaultGeometryPolicy.hpp:53
const EntityVariable< cpgrid::Geometry< 3 - codim, 3 >, codim > & geomVector() const
Definition: DefaultGeometryPolicy.hpp:86
Wrapper that turns a data handle suitable for dune-grid into one based on integers instead of entitie...
Definition: Entity2IndexDataHandle.hpp:56
Represents an entity of a given codim, with positive or negative orientation.
Definition: EntityRep.hpp:98
The global id set for Dune.
Definition: Indexsets.hpp:483
Only needs to provide interface for doing nothing.
Definition: Iterators.hpp:118
Definition: Indexsets.hpp:199
Definition: Indexsets.hpp:57
Definition: Intersection.hpp:63
Definition: Indexsets.hpp:367
OPM_HOST_DEVICE int size() const
Returns the number of rows in the table.
Definition: SparseTable.hpp:195
Definition: PartitionTypeIndicator.hpp:50
Definition: CpGridData.hpp:976
void write(const T &data)
Definition: CpGridData.hpp:983
void reset()
Definition: CpGridData.hpp:987
void read(T &data)
Definition: CpGridData.hpp:979
void resize(std::size_t size)
Definition: CpGridData.hpp:991
The namespace Dune is the main namespace for all Dune code.
Definition: common/CartesianIndexMapper.hpp:10
Dune::cpgrid::Cell2FacesContainer cell2Faces(const Dune::CpGrid &grid)
Get the cell to faces mapping of a grid.
Holds the implementation of the CpGrid as a pimple.
Definition: CellQuadrature.hpp:26
MPIHelper::MPICommunicator MPICommunicator
The type of the collective communication.
Definition: CpGridDataTraits.hpp:56
Dune::VariableSizeCommunicator<> Communicator
The type of the Communicator.
Definition: CpGridDataTraits.hpp:71
Dune::RemoteIndices< ParallelIndexSet > RemoteIndices
The type of the remote indices information.
Definition: CpGridDataTraits.hpp:83
typename CommunicationType::ParallelIndexSet ParallelIndexSet
The type of the parallel index set.
Definition: CpGridDataTraits.hpp:80
Dune::Communication< MPICommunicator > CollectiveCommunication
Definition: CpGridDataTraits.hpp:59
Dune::OwnerOverlapCopyCommunication< int, int > CommunicationType
type of OwnerOverlap communication for cells
Definition: CpGridDataTraits.hpp:77
Dune::Communication< MPICommunicator > Communication
Definition: CpGridDataTraits.hpp:58
AttributeSet
The type of the set of the attributes.
Definition: CpGridDataTraits.hpp:66
Communicator::InterfaceMap InterfaceMap
The type of the map describing communication interfaces.
Definition: CpGridDataTraits.hpp:74
Definition: CpGridData.hpp:1007
BaseMover(DataHandle &data)
Definition: CpGridData.hpp:1008
void moveData(const E &from, const E &to)
Definition: CpGridData.hpp:1012
MoveBuffer< typename DataHandle::DataType > buffer
Definition: CpGridData.hpp:1021
DataHandle & data_
Definition: CpGridData.hpp:1020
Definition: CpGridData.hpp:1027
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1033
CpGridData * scatterView_
Definition: CpGridData.hpp:1040
CpGridData * gatherView_
Definition: CpGridData.hpp:1039
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1028
Definition: CpGridData.hpp:1045
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1046
CpGridData * gatherView_
Definition: CpGridData.hpp:1063
CpGridData * scatterView_
Definition: CpGridData.hpp:1064
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1051
Definition: CpGridData.hpp:1069
CpGridData * scatterView_
Definition: CpGridData.hpp:1087
CpGridData * gatherView_
Definition: CpGridData.hpp:1086
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1070
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1074
Definition: CpGridData.hpp:1002
Definition: preprocess.h:56