CpGridData.hpp
Go to the documentation of this file.
1//===========================================================================
2//
3// File: CpGridData.hpp
4//
5// Created: Sep 17 21:11:41 2013
6//
7// Author(s): Atgeirr F Rasmussen <atgeirr@sintef.no>
8// Bård Skaflestad <bard.skaflestad@sintef.no>
9// Markus Blatt <markus@dr-blatt.de>
10// Antonella Ritorto <antonella.ritorto@opm-op.com>
11//
12// Comment: Major parts of this file originated in dune/grid/CpGrid.hpp
13// and got transferred here during refactoring for the parallelization.
14//
15// $Date$
16//
17// $Revision$
18//
19//===========================================================================
20
21/*
22 Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
23 Copyright 2009, 2010, 2013, 2022-2023 Equinor ASA.
24 Copyright 2013 Dr. Blatt - HPC-Simulation-Software & Services
25
26 This file is part of The Open Porous Media project (OPM).
27
28 OPM is free software: you can redistribute it and/or modify
29 it under the terms of the GNU General Public License as published by
30 the Free Software Foundation, either version 3 of the License, or
31 (at your option) any later version.
32
33 OPM is distributed in the hope that it will be useful,
34 but WITHOUT ANY WARRANTY; without even the implied warranty of
35 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
36 GNU General Public License for more details.
37
38 You should have received a copy of the GNU General Public License
39 along with OPM. If not, see <http://www.gnu.org/licenses/>.
40*/
48#ifndef OPM_CPGRIDDATA_HEADER
49#define OPM_CPGRIDDATA_HEADER
50
51
52#include <dune/common/parallel/mpihelper.hh>
53#ifdef HAVE_DUNE_ISTL
54#include <dune/istl/owneroverlapcopy.hh>
55#endif
56
57#include <dune/common/parallel/communication.hh>
58#include <dune/common/parallel/variablesizecommunicator.hh>
59#include <dune/grid/common/gridenums.hh>
60
61#if HAVE_ECL_INPUT
62#include <opm/input/eclipse/EclipseState/Grid/EclipseGrid.hpp>
63#include <opm/input/eclipse/EclipseState/Grid/NNC.hpp>
64#endif
65
67
69#include "CpGridDataTraits.hpp"
70//#include "DataHandleWrappers.hpp"
71//#include "GlobalIdMapping.hpp"
72#include "Geometry.hpp"
73
74#include <array>
75#include <initializer_list>
76#include <set>
77#include <vector>
78
79namespace Opm
80{
81class EclipseState;
82}
83namespace Dune
84{
85class CpGrid;
86
87namespace cpgrid
88{
89
90class IndexSet;
91class IdSet;
92class LevelGlobalIdSet;
93class PartitionTypeIndicator;
94template<int,int> class Geometry;
95template<int> class Entity;
96template<int> class EntityRep;
97}
98}
99
101 const std::array<int, 3>&,
102 bool);
103
104namespace Dune
105{
106namespace cpgrid
107{
108namespace mover
109{
110template<class T, int i> struct Mover;
111}
112
118{
119 template<class T, int i> friend struct mover::Mover;
120 friend class GlobalIdSet;
121 friend class HierarchicIterator;
125
126 friend
128 const std::array<int, 3>&,
129 bool);
130
131private:
132 CpGridData(const CpGridData& g);
133
134public:
135 enum{
136#ifndef MAX_DATA_COMMUNICATED_PER_ENTITY
144#else
149 MAX_DATA_PER_CELL = MAX_DATA_COMMUNICATED_PER_ENTITY
150#endif
151 };
152
153 CpGridData() = delete;
154
159 explicit CpGridData(MPIHelper::MPICommunicator comm, std::vector<std::shared_ptr<CpGridData>>& data);
160
161
162
164 explicit CpGridData(std::vector<std::shared_ptr<CpGridData>>& data);
167
168
169
170
172 int size(int codim) const;
173
175 int size (GeometryType type) const
176 {
177 if (type.isCube()) {
178 return size(3 - type.dim());
179 } else {
180 return 0;
181 }
182 }
183
199 void readEclipseFormat(const std::string& filename,
200 bool periodic_extension,
201 bool turn_normals = false,
202 bool edge_conformal = false);
203
204#if HAVE_ECL_INPUT
226 void processEclipseFormat(const Opm::Deck& deck,
227 bool periodic_extension,
228 bool turn_normals = false,
229 bool clip_z = false,
230 const std::vector<double>& poreVolume = std::vector<double>{},
231 bool edge_conformal = false);
232
267 std::vector<std::size_t>
268 processEclipseFormat(const Opm::EclipseGrid* ecl_grid,
269 Opm::EclipseState* ecl_state,
270 bool periodic_extension,
271 bool turn_normals = false,
272 bool clip_z = false,
273 bool pinchActive = true,
274 bool edge_conformal = false);
275#endif
276
305 void processEclipseFormat(const grdecl& input_data,
306#if HAVE_ECL_INPUT
307 Opm::EclipseState* ecl_state,
308#endif
309 std::array<std::set<std::pair<int, int>>, 2>& nnc,
310 bool remove_ij_boundary,
311 bool turn_normals,
312 bool pinchActive,
313 double tolerance_unique_points,
314 bool edge_conformal);
315
323 void getIJK(int c, std::array<int,3>& ijk) const;
324
 // Return the global index of the 'local_index'-th face of cell 'cell'.
 // The 'true' flag selects the positively oriented representation of the cell;
 // .index() strips the orientation information from the stored face reference.
 325 int cellFace(int cell, int local_index) const
 326 {
 327 return cell_to_face_[cpgrid::EntityRep<0>(cell, true)][local_index].index();
 328 }
329
 // Return the row of (oriented) face references attached to cell 'cellIdx'.
 330 auto cellToFace(int cellIdx) const
 331 {
 332 return cell_to_face_[cpgrid::EntityRep<0>(cellIdx, true)];
 333 }
334
 // Access the full cell-to-point topology (8 corner indices per cell).
 335 const auto& cellToPoint() const
 336 {
 337 return cell_to_point_;
 338 }
339
 // Return the 8 corner-point indices of cell 'cellIdx'.
 340 const auto& cellToPoint(int cellIdx) const
 341 {
 342 return cell_to_point_[cellIdx];
 343 }
344
345 int faceToCellSize(int face) const {
346 Dune::cpgrid::EntityRep<1> faceRep(face, true);
347 return face_to_cell_[faceRep].size();
348 }
349
 // Return the tag stored for face 'faceIdx' (positive orientation chosen).
 350 auto faceTag(int faceIdx) const
 351 {
 352 Dune::cpgrid::EntityRep<1> faceRep(faceIdx, true);
 353 return face_tag_[faceRep];
 354 }
355
 // Return the normal stored for face 'faceIdx' (positive orientation chosen).
 356 auto faceNormals(int faceIdx) const
 357 {
 358 Dune::cpgrid::EntityRep<1> faceRep(faceIdx, true);
 359 return face_normals_[faceRep];
 360 }
361
 // Return the row of corner-point indices that make up face 'faceIdx'.
 362 auto faceToPoint(int faceIdx) const
 363 {
 364 return face_to_point_[faceIdx];
 365 }
366
 // Total number of faces, taken as the number of rows in face_to_cell_.
 367 int numFaces() const
 368 {
 369 return face_to_cell_.size();
 370 }
371
 // Number of corner history records (zero when no refinement history exists).
 372 auto cornerHistorySize() const
 373 {
 374 return corner_history_.size();
 375 }
376
377 const auto& getCornerHistory(int cornerIdx) const
378 {
379 if(cornerHistorySize()) {
380 return corner_history_[cornerIdx];
381 }
382 else {
383 OPM_THROW(std::logic_error, "Vertex has no history record.\n");
384 }
385 }
386
 // Mapping from active cell index to the global (Cartesian) cell index.
 393 const std::vector<int>& globalCell() const
 394 {
 395 return global_cell_;
 396 }
397
400 bool hasNNCs(const std::vector<int>& cellIndices) const;
401
415 bool mark(int refCount, const cpgrid::Entity<0>& element, bool throwOnFailure = false);
416
420 int getMark(const cpgrid::Entity<0>& element) const;
421
431 bool preAdapt();
432
434 bool adapt();
435
437 void postAdapt();
438
439private:
440 std::array<Dune::FieldVector<double,3>,8> getReferenceRefinedCorners(int idx_in_parent_cell, const std::array<int,3>& cells_per_dim) const;
441
442public:
 444 int getGridIdx() const {
 445 // Not the nicest way of checking if "this" points at the leaf grid view of a mixed grid (with coarse and refined cells).
 446 // 1. When the grid has been refined at least once, level_data_ptr_ ->size() >1. Therefore, there is a chance of "this" pointing at the leaf grid view.
 447 // 2. Unfortunately, level_ is default initialized by 0. This implies, in particular, that if someone wants to check the value of
 448 // "this->level_" when "this" points at the leaf grid view of a grid that has been refined, this value is - unfortunately - equal to 0.
 449 // 3. Due to 2. we need an extra bool value to distinguish between the actual level 0 grid and such a leaf grid view (with incorrect level_ == 0). For this
 450 // reason we check if child_to_parent_cells_.empty() [true for actual level 0 grid, false for the leaf grid view].
 451 // --- TO BE IMPROVED ---
 452 if ((level_data_ptr_ ->size() >1) && (level_ == 0) && (!child_to_parent_cells_.empty())) {
 453 return level_data_ptr_->size() -1;
 454 }
 455 return level_;
 456 }
 // Access the per-level grid data; throws when the container was never filled.
 458 const std::vector<std::shared_ptr<Dune::cpgrid::CpGridData>>& levelData() const
 459 {
 460 if (level_data_ptr_->empty()) {
 461 OPM_THROW(std::logic_error, "Level data has not been initialized\n");
 462 }
 463 return *level_data_ptr_;
 464 }
465
 // Return the {child level, child cell indices} record of parent cell 'elemIdx'.
 473 const std::tuple<int,std::vector<int>>& getChildrenLevelAndIndexList(int elemIdx) const {
 474 return parent_to_children_cells_[elemIdx];
 475 }
476
 // Full parent-to-children mapping: one {level, children} tuple per cell.
 477 const std::vector<std::tuple<int,std::vector<int>>>& getParentToChildren() const {
 478 return parent_to_children_cells_;
 479 }
480
482 {
483 return geometry_;
484 }
485
 // Translate a level cell index into the corresponding leaf-view cell index.
 // Only valid when the grid has LGRs; otherwise the mapping does not exist.
 486 int getLeafIdxFromLevelIdx(int level_cell_idx) const
 487 {
 488 if (level_to_leaf_cells_.empty()) {
 489 OPM_THROW(std::logic_error, "Grid has no LGRs. No mapping to the leaf.\n");
 490 }
 491 return level_to_leaf_cells_[level_cell_idx];
 492 }
493
515 std::tuple< const std::shared_ptr<CpGridData>,
516 const std::vector<std::array<int,2>>, // parent_to_refined_corners(~boundary_old_to_new_corners)
517 const std::vector<std::tuple<int,std::vector<int>>>, // parent_to_children_faces (~boundary_old_to_new_faces)
518 const std::tuple<int, std::vector<int>>, // parent_to_children_cells
519 const std::vector<std::array<int,2>>, // child_to_parent_faces
520 const std::vector<std::array<int,2>>> // child_to_parent_cells
521 refineSingleCell(const std::array<int,3>& cells_per_dim, const int& parent_idx) const;
522
 523 // @brief Compute center of an entity/element/cell in the Eclipse way:
524 // - Average of the 4 corners of the bottom face.
525 // - Average of the 4 corners of the top face.
526 // Return average of the previous computations.
527 // @param [in] int Index of a cell.
528 // @return 'eclipse centroid'
529 std::array<double,3> computeEclCentroid(const int idx) const;
530
 531 // @brief Compute center of an entity/element/cell in the Eclipse way:
532 // - Average of the 4 corners of the bottom face.
533 // - Average of the 4 corners of the top face.
534 // Return average of the previous computations.
535 // @param [in] Entity<0> Entity
536 // @return 'eclipse centroid'
537 std::array<double,3> computeEclCentroid(const Entity<0>& elem) const;
538
539 // Make unique boundary ids for all intersections.
541
 // Whether unique boundary ids are in use for intersections.
 545 bool uniqueBoundaryIds() const
 546 {
 547 return use_unique_boundary_ids_;
 548 }
549
 // Enable/disable unique boundary ids; builds the id table on first enable.
 552 void setUniqueBoundaryIds(bool uids)
 553 {
 554 use_unique_boundary_ids_ = uids;
 555 if (use_unique_boundary_ids_ && unique_boundary_ids_.empty()) {
 // NOTE(review): the doxygen rendering stripped original line 556 here;
 // presumably it computes/assigns unique_boundary_ids_ — confirm in the
 // original header before relying on this listing.
 557 }
 558 }
559
 // Raw ZCORN values as read from the input deck.
 563 const std::vector<double>& zcornData() const {
 564 return zcorn;
 565 }
566
567
 // Access the index set of this grid view.
 570 const IndexSet& indexSet() const
 571 {
 572 return *index_set_;
 573 }
574
577 {
578 return *local_id_set_;
579 }
580
583 {
584 return *global_id_set_;
585 }
586
 // The (nx, ny, nz) dimensions of the underlying logical Cartesian grid.
 590 const std::array<int, 3>& logicalCartesianSize() const
 591 {
 592 return logical_cartesian_size_;
 593 }
594
599 const CpGridData& view_data,
600 const std::vector<int>& cell_part);
601
607 template<class DataHandle>
608 void communicate(DataHandle& data, InterfaceType iftype, CommunicationDirection dir);
609
611
613
614 void computeCommunicationInterfaces(int noexistingPoints);
615
621
624#if HAVE_MPI
627
630
633
636
639
644 {
645 return cell_comm_;
646 }
647
652 {
653 return cell_comm_;
654 }
655
657 {
658 return cellCommunication().indexSet();
659 }
660
662 {
663 return cellCommunication().indexSet();
664 }
665
667 {
668 return cellCommunication().remoteIndices();
669 }
670
672 {
673 return cellCommunication().remoteIndices();
674 }
675#endif
676
 // Sorted active cell indices belonging to numerical aquifers.
 678 const std::vector<int>& sortedNumAquiferCells() const
 679 {
 680 return aquifer_cells_;
 681 }
682
683private:
684
686 void populateGlobalCellIndexSet();
687
688#if HAVE_MPI
689
695 template<class DataHandle>
696 void gatherData(DataHandle& data, CpGridData* global_view,
697 CpGridData* distributed_view);
698
699
706 template<int codim, class DataHandle>
707 void gatherCodimData(DataHandle& data, CpGridData* global_data,
708 CpGridData* distributed_data);
709
716 template<class DataHandle>
717 void scatterData(DataHandle& data, const CpGridData* global_data,
718 const CpGridData* distributed_data, const InterfaceMap& cell_inf,
719 const InterfaceMap& point_inf);
720
728 template<int codim, class DataHandle>
729 void scatterCodimData(DataHandle& data, CpGridData* global_data,
730 CpGridData* distributed_data);
731
740 template<int codim, class DataHandle>
741 void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
742 const Interface& interface);
743
752 template<int codim, class DataHandle>
753 void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
754 const InterfaceMap& interface);
755
756#endif
757
758 void computeGeometry(const CpGrid& grid,
759 const DefaultGeometryPolicy& globalGeometry,
760 const std::vector<int>& globalAquiferCells,
761 const OrientedEntityTable<0, 1>& globalCell2Faces,
762 DefaultGeometryPolicy& geometry,
763 std::vector<int>& aquiferCells,
765 const std::vector< std::array<int,8> >& cell2Points);
766
767 // Representing the topology
779 Opm::SparseTable<int> face_to_point_;
781 std::vector< std::array<int,8> > cell_to_point_;
788 std::array<int, 3> logical_cartesian_size_{};
795 std::vector<int> global_cell_;
801 typedef FieldVector<double, 3> PointType;
805 cpgrid::EntityVariable<int, 1> unique_boundary_ids_;
807 std::unique_ptr<cpgrid::IndexSet> index_set_;
809 std::shared_ptr<const cpgrid::IdSet> local_id_set_;
811 std::shared_ptr<LevelGlobalIdSet> global_id_set_;
813 std::shared_ptr<PartitionTypeIndicator> partition_type_indicator_;
815 std::vector<int> mark_;
817 int level_{0};
819 std::vector<std::shared_ptr<CpGridData>>* level_data_ptr_;
820 // SUITABLE FOR ALL LEVELS EXCEPT FOR LEAFVIEW
822 std::vector<int> level_to_leaf_cells_; // In entry 'level cell index', we store 'leafview cell index' // {level LGR, {child0, child1, ...}}
824 std::vector<std::tuple<int,std::vector<int>>> parent_to_children_cells_; // {# children in x-direction, ... y-, ... z-}
826 std::array<int,3> cells_per_dim_;
827 // SUITABLE ONLY FOR LEAFVIEW // {level, cell index in that level}
829 std::vector<std::array<int,2>> leaf_to_level_cells_;
831 std::vector<std::array<int,2>> corner_history_;
832 // SUITABLE FOR ALL LEVELS INCLUDING LEAFVIEW // {level parent cell, parent cell index}
834 std::vector<std::array<int,2>> child_to_parent_cells_;
837 std::vector<int> cell_to_idxInParentCell_;
839 int refinement_max_level_{0};
840
842 Communication ccobj_;
843
844 // Boundary information (optional).
845 bool use_unique_boundary_ids_;
846
852 std::vector<double> zcorn;
853
855 std::vector<int> aquifer_cells_;
856
857#if HAVE_MPI
858
860 CommunicationType cell_comm_;
861
863 std::tuple<Interface,Interface,Interface,Interface,Interface> cell_interfaces_;
864 /*
865 // code deactivated, because users cannot access face indices and therefore
866 // communication on faces makes no sense!
868 std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
869 face_interfaces_;
870 */
872 std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
873 point_interfaces_;
874
875#endif
876
 877 // Return the geometry vector corresponding to the given codim.
 // The entity dimension is 3 - codim, i.e. codim 0 gives cell geometries,
 // codim 3 gives point geometries.
 878 template <int codim>
 879 const EntityVariable<Geometry<3 - codim, 3>, codim>& geomVector() const
 880 {
 881 return geometry_.geomVector<codim>();
 882 }
883
884 friend class Dune::CpGrid;
885 template<int> friend class Entity;
886 template<int> friend class EntityRep;
887 friend class Intersection;
889};
890
891
892
893#if HAVE_MPI
894
895namespace
896{
901template<class T>
902T& getInterface(InterfaceType iftype,
903 std::tuple<T,T,T,T,T>& interfaces)
904{
905 switch(iftype)
906 {
907 case 0:
908 return std::get<0>(interfaces);
909 case 1:
910 return std::get<1>(interfaces);
911 case 2:
912 return std::get<2>(interfaces);
913 case 3:
914 return std::get<3>(interfaces);
915 case 4:
916 return std::get<4>(interfaces);
917 }
918 OPM_THROW(std::runtime_error, "Invalid Interface type was used during communication");
919}
920
921} // end unnamed namespace
922
 // Forward to the InterfaceMap overload using the interface's underlying map.
 923template<int codim, class DataHandle>
 924void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
 925 const Interface& interface)
 926{
 927 this->template communicateCodim<codim>(data, dir, interface.interfaces());
 928}
929
 // Perform the actual communication for one codimension: build a communicator
 // over the given interface map and run it forward or backward.
 930template<int codim, class DataHandle>
 931void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data_wrapper, CommunicationDirection dir,
 932 const InterfaceMap& interface)
 933{
 934 Communicator comm(ccobj_, interface);
 935
 936 if(dir==ForwardCommunication)
 937 comm.forward(data_wrapper);
 938 else
 939 comm.backward(data_wrapper);
 940}
941#endif
942
 // Communicate user data for cells (codim 0) and points (codim 3) over the
 // interface selected by 'iftype'. A no-op in sequential (non-MPI) builds.
 943template<class DataHandle>
 944void CpGridData::communicate(DataHandle& data, InterfaceType iftype,
 945 CommunicationDirection dir)
 946{
 947#if HAVE_MPI
 948 if(data.contains(3,0))
 949 {
 950 Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*this, data);
 951 communicateCodim<0>(data_wrapper, dir, getInterface(iftype, cell_interfaces_));
 952 }
 953 if(data.contains(3,3))
 954 {
 955 Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*this, data);
 956 communicateCodim<3>(data_wrapper, dir, getInterface(iftype, point_interfaces_));
 957 }
 958#else
 959 // Suppress warnings for unused arguments.
 960 (void) data;
 961 (void) iftype;
 962 (void) dir;
 963#endif
 964}
965}}
966
967#if HAVE_MPI
970
971namespace Dune {
972namespace cpgrid {
973
974namespace mover
975{
 // Simple sequential read/write buffer used by the movers below.
 // NOTE(review): the doxygen rendering stripped original line 977 here,
 // presumably "class MoveBuffer" — confirm against the original header.
 976template<class T>
 978{
 980public:
 // Read the next element from the buffer and advance the cursor.
 981 void read(T& data)
 982 {
 983 data=buffer_[index_++];
 984 }
 // Append/overwrite the element at the cursor and advance.
 985 void write(const T& data)
 986 {
 987 buffer_[index_++]=data;
 988 }
 // Rewind the cursor to the start without touching the contents.
 989 void reset()
 990 {
 991 index_=0;
 992 }
 // Resize the storage and rewind the cursor.
 993 void resize(std::size_t size)
 994 {
 995 buffer_.resize(size);
 996 index_=0;
 997 }
 998private:
 999 std::vector<T> buffer_;
 // Current read/write position.
 1000 typename std::vector<T>::size_type index_;
 1001};
 // Primary template; only the specializations for codim 0, 1 and 3 below
 // are ever instantiated.
 1002template<class DataHandle,int codim>
 1003struct Mover
 1004{
 1005};
1006
 // Common base for all movers: gathers the data attached to one entity into a
 // buffer and scatters it onto another entity.
 // NOTE(review): the doxygen rendering stripped original line 1008 here,
 // presumably "struct BaseMover" — confirm against the original header.
 1007template<class DataHandle>
 1009{
 1010 explicit BaseMover(DataHandle& data)
 1011 : data_(data)
 1012 {}
 // Copy the data attached to 'from' onto 'to' via the internal buffer.
 1013 template<class E>
 1014 void moveData(const E& from, const E& to)
 1015 {
 1016 std::size_t size=data_.size(from);
 1017 buffer.resize(size);
 1018 data_.gather(buffer, from);
 1019 buffer.reset();
 1020 data_.scatter(buffer, to, size);
 1021 }
 1022 DataHandle& data_;
 // NOTE(review): the declaration of the 'buffer' member (original line 1023,
 // a mover::MoveBuffer) was stripped by the doxygen rendering.
 1024};
1025
1026
 // Mover specialization for cells (codim 0): moves the data attached to one
 // cell of the gather view onto the matching cell of the scatter view.
 // NOTE(review): the specialization header (original line 1028) was stripped
 // by the doxygen rendering.
 1027template<class DataHandle>
 1029{
 1030 Mover(DataHandle& data, CpGridData* gatherView,
 1031 CpGridData* scatterView)
 1032 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
 1033 {}
 1034
 1035 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
 1036 {
 1037 Entity<0> from_entity=Entity<0>(*gatherView_, from_cell_index, true);
 1038 Entity<0> to_entity=Entity<0>(*scatterView_, to_cell_index, true);
 1039 this->moveData(from_entity, to_entity);
 1040 }
 1043};
1044
 // Mover specialization for faces (codim 1): moves data for every face of a
 // cell, relying on both views enumerating the cell's faces in the same order.
 // NOTE(review): the specialization header (original line 1046) was stripped
 // by the doxygen rendering.
 1045template<class DataHandle>
 1047{
 1048 Mover(DataHandle& data, CpGridData* gatherView,
 1049 CpGridData* scatterView)
 1050 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
 1051 {}
 1052
 1053 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
 1054 {
 1055 typedef typename OrientedEntityTable<0,1>::row_type row_type;
 1056 EntityRep<0> from_cell=EntityRep<0>(from_cell_index, true);
 1057 EntityRep<0> to_cell=EntityRep<0>(to_cell_index, true);
 1058 const OrientedEntityTable<0,1>& table = gatherView_->cell_to_face_;
 1059 row_type from_faces=table.operator[](from_cell);
 1060 row_type to_faces=scatterView_->cell_to_face_[to_cell];
 1061
 1062 for(int i=0; i<from_faces.size(); ++i)
 1063 this->moveData(from_faces[i], to_faces[i]);
 1064 }
 1067};
1068
 // Mover specialization for points (codim 3): moves data for the 8 corner
 // points of a cell, matching corners positionally between the two views.
 // NOTE(review): the specialization header (original line 1070) was stripped
 // by the doxygen rendering.
 1069template<class DataHandle>
 1071{
 1072 Mover(DataHandle& data, CpGridData* gatherView,
 1073 CpGridData* scatterView)
 1074 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
 1075 {}
 1076 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
 1077 {
 1078 const std::array<int,8>& from_cell_points=
 1079 gatherView_->cell_to_point_[from_cell_index];
 1080 const std::array<int,8>& to_cell_points=
 1081 scatterView_->cell_to_point_[to_cell_index];
 1082 for(std::size_t i=0; i<8; ++i)
 1083 {
 1084 this->moveData(Entity<3>(*gatherView_, from_cell_points[i], true),
 1085 Entity<3>(*scatterView_, to_cell_points[i], true));
 1086 }
 1087 }
 1090};
1091
1092} // end mover namespace
1093
 // Scatter user data from the global view to the distributed view using the
 // precomputed cell and point communication interfaces. No-op without MPI.
 1094template<class DataHandle>
 1095void CpGridData::scatterData(DataHandle& data, const CpGridData* global_data,
 1096 const CpGridData* distributed_data, const InterfaceMap& cell_inf,
 1097 const InterfaceMap& point_inf)
 1098{
 1099#if HAVE_MPI
 1100 if(data.contains(3,0))
 1101 {
 1102 Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*global_data, *distributed_data, data);
 1103 communicateCodim<0>(data_wrapper, ForwardCommunication, cell_inf);
 1104 }
 1105 if(data.contains(3,3))
 1106 {
 1107 Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*global_data, *distributed_data, data);
 1108 communicateCodim<3>(data_wrapper, ForwardCommunication, point_inf);
 1109 }
 1110#endif
 1111}
1112
1113template<int codim, class DataHandle>
1114void CpGridData::scatterCodimData(DataHandle& data, CpGridData* global_data,
1115 CpGridData* distributed_data)
1116{
1117 CpGridData *gather_view, *scatter_view;
1118 gather_view=global_data;
1119 scatter_view=distributed_data;
1120
1121 mover::Mover<DataHandle,codim> mover(data, gather_view, scatter_view);
1122
1123
1124 for(auto index=distributed_data->cellIndexSet().begin(),
1125 end = distributed_data->cellIndexSet().end();
1126 index!=end; ++index)
1127 {
1128 std::size_t from=index->global();
1129 std::size_t to=index->local();
1130 mover(from,to);
1131 }
1132}
1133
1134namespace
1135{
1136
1137template<int codim, class T, class F>
1138void visitInterior(CpGridData& distributed_data, T begin, T endit, F& func)
1139{
1140 for(T it=begin; it!=endit; ++it)
1141 {
1142 Entity<codim> entity(distributed_data, it-begin, true);
1143 PartitionType pt = entity.partitionType();
1144 if(pt==Dune::InteriorEntity)
1145 {
1146 func(*it, entity);
1147 }
1148 }
1149}
1150
 // Functor for visitInterior: records, per owned entity, its global index and
 // the number of data items the handle attaches to it.
 1151template<class DataHandle>
 1152struct GlobalIndexSizeGatherer
 1153{
 1154 GlobalIndexSizeGatherer(DataHandle& data_,
 1155 std::vector<int>& ownedGlobalIndices_,
 1156 std::vector<int>& ownedSizes_)
 1157 : data(data_), ownedGlobalIndices(ownedGlobalIndices_), ownedSizes(ownedSizes_)
 1158 {}
 1159
 // 'i' is the global index of the visited entity, 'entity' the entity itself.
 1160 template<class T, class E>
 1161 void operator()(T& i, E& entity)
 1162 {
 1163 ownedGlobalIndices.push_back(i);
 1164 ownedSizes.push_back(data.size(entity));
 1165 }
 1166 DataHandle& data;
 1167 std::vector<int>& ownedGlobalIndices;
 1168 std::vector<int>& ownedSizes;
 1169};
1170
 // Functor for visitInterior: gathers the data attached to each visited
 // entity into a shared MoveBuffer.
 1171template<class DataHandle>
 1172struct DataGatherer
 1173{
 1174 DataGatherer(mover::MoveBuffer<typename DataHandle::DataType>& buffer_,
 1175 DataHandle& data_)
 1176 : buffer(buffer_), data(data_)
 1177 {}
 1178
 1179 template<class T, class E>
 1180 void operator()(T& /* it */, E& entity)
 1181 {
 1182 data.gather(buffer, entity);
 1183 }
 1184 mover::MoveBuffer<typename DataHandle::DataType>& buffer;
 1185 DataHandle& data;
 1186};
1187
1188}
1189
 // Gather distributed user data back onto the global view, for cells
 // (codim 0) and points (codim 3). No-op without MPI.
 1190template<class DataHandle>
 1191void CpGridData::gatherData(DataHandle& data, CpGridData* global_data,
 1192 CpGridData* distributed_data)
 1193{
 1194#if HAVE_MPI
 1195 if(data.contains(3,0))
 1196 gatherCodimData<0>(data, global_data, distributed_data);
 1197 if(data.contains(3,3))
 1198 gatherCodimData<3>(data, global_data, distributed_data);
 1199#endif
 1200}
1201
 // Gather the data of one codimension from all ranks onto the global view:
 // 1) collect global indices and per-entity data sizes of owned entities,
 // 2) allgather those indices/sizes, 3) allgather the actual data, and
 // 4) scatter the received data into the global view via an
 //    Entity2IndexDataHandle.
 1202template<int codim, class DataHandle>
 1203void CpGridData::gatherCodimData(DataHandle& data, CpGridData* global_data,
 1204 CpGridData* distributed_data)
 1205{
 1206#if HAVE_MPI
 1207 // Get the mapping to global index from the global id set
 1208 const std::vector<int>& mapping =
 1209 distributed_data->global_id_set_->getMapping<codim>();
 1210
 1211 // Get the global indices and data size for the entities whose data is
 1212 // to be sent, i.e. the ones that we own.
 1213 std::vector<int> owned_global_indices;
 1214 std::vector<int> owned_sizes;
 1215 owned_global_indices.reserve(mapping.size());
 1216 owned_sizes.reserve(mapping.size());
 1217
 1218 GlobalIndexSizeGatherer<DataHandle> gisg(data, owned_global_indices, owned_sizes);
 1219 visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gisg);
 1220
 1221 // communicate the number of indices that each processor sends
 1222 int no_indices=owned_sizes.size();
 1223 // We will take the address of the first element for MPI_Allgather below.
 1224 // Make sure the containers have such an element.
 1225 if ( owned_global_indices.empty() )
 1226 owned_global_indices.resize(1);
 1227 if ( owned_sizes.empty() )
 1228 owned_sizes.resize(1);
 1229 std::vector<int> no_indices_to_recv(distributed_data->ccobj_.size());
 1230 distributed_data->ccobj_.allgather(&no_indices, 1, &(no_indices_to_recv[0]));
 1231 // compute size of the vector capable for receiving all indices
 1232 // and allgather the global indices and the sizes.
 1233 // calculate displacements
 1234 std::vector<int> displ(distributed_data->ccobj_.size()+1, 0);
 1235 std::transform(displ.begin(), displ.end()-1, no_indices_to_recv.begin(), displ.begin()+1,
 1236 std::plus<int>());
 1237 int global_size=displ[displ.size()-1];//+no_indices_to_recv[displ.size()-1];
 1238 std::vector<int> global_indices(global_size);
 1239 std::vector<int> global_sizes(global_size);
 1240 MPI_Allgatherv(&(owned_global_indices[0]), no_indices, MPITraits<int>::getType(),
 1241 &(global_indices[0]), &(no_indices_to_recv[0]), &(displ[0]),
 1242 MPITraits<int>::getType(),
 1243 distributed_data->ccobj_);
 1244 MPI_Allgatherv(&(owned_sizes[0]), no_indices, MPITraits<int>::getType(),
 1245 &(global_sizes[0]), &(no_indices_to_recv[0]), &(displ[0]),
 1246 MPITraits<int>::getType(),
 1247 distributed_data->ccobj_);
 1248 std::vector<int>().swap(owned_global_indices); // free data for reuse.
 1249 // Compute the number of data items to send
 1250 std::vector<int> no_data_send(distributed_data->ccobj_.size());
 1251 for(typename std::vector<int>::iterator begin=no_data_send.begin(),
 1252 i=begin, end=no_data_send.end(); i!=end; ++i)
 1253 *i = std::accumulate(global_sizes.begin()+displ[i-begin],
 1254 global_sizes.begin()+displ[i-begin+1], std::size_t());
 1255 // free at least some memory that can be reused.
 1256 std::vector<int>().swap(owned_sizes);
 1257 // compute the displacements for receiving with allgatherv
 1258 displ[0]=0;
 1259 std::transform(displ.begin(), displ.end()-1, no_data_send.begin(), displ.begin()+1,
 1260 std::plus<std::size_t>());
 1261 // Compute the number of data items we will receive
 1262 int no_data_recv = displ[displ.size()-1];//+global_sizes[displ.size()-1];
 1263
 1264 // Collect the data to send, gather it
 1265 mover::MoveBuffer<typename DataHandle::DataType> local_data_buffer, global_data_buffer;
 // MPI_Allgatherv needs a valid send address even when we own nothing,
 // hence the minimum buffer size of 1 below.
 1266 if ( no_data_send[distributed_data->ccobj_.rank()] )
 1267 {
 1268 local_data_buffer.resize(no_data_send[distributed_data->ccobj_.rank()]);
 1269 }
 1270 else
 1271 {
 1272 local_data_buffer.resize(1);
 1273 }
 1274 global_data_buffer.resize(no_data_recv);
 1275
 1276 DataGatherer<DataHandle> gatherer(local_data_buffer, data);
 1277 visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gatherer);
 1278 MPI_Allgatherv(&(local_data_buffer.buffer_[0]), no_data_send[distributed_data->ccobj_.rank()],
 1279 MPITraits<typename DataHandle::DataType>::getType(),
 1280 &(global_data_buffer.buffer_[0]), &(no_data_send[0]), &(displ[0]),
 1281 MPITraits<typename DataHandle::DataType>::getType(),
 1282 distributed_data->ccobj_);
 1283 Entity2IndexDataHandle<DataHandle, codim> edata(*global_data, data);
 // Received indices are offset by the number of entities of lower codims
 // in the global view; subtract that before scattering.
 1284 int offset=0;
 1285 for(int i=0; i< codim; ++i)
 1286 offset+=global_data->size(i);
 1287
 1288 typename std::vector<int>::const_iterator s=global_sizes.begin();
 1289 for(typename std::vector<int>::const_iterator i=global_indices.begin(),
 1290 end=global_indices.end();
 1291 i!=end; ++s, ++i)
 1292 {
 1293 edata.scatter(global_data_buffer, *i-offset, *s);
 1294 }
 1295#endif
 1296}
1297
1298} // end namespace cpgrid
1299} // end namespace Dune
1300
1301#endif
1302
1303#endif
DataHandle & data
Definition: CpGridData.hpp:1166
mover::MoveBuffer< typename DataHandle::DataType > & buffer
Definition: CpGridData.hpp:1184
std::vector< int > & ownedGlobalIndices
Definition: CpGridData.hpp:1167
void refine_and_check(const Dune::cpgrid::Geometry< 3, 3 > &, const std::array< int, 3 > &, bool)
std::vector< int > & ownedSizes
Definition: CpGridData.hpp:1168
[ provides Dune::Grid ]
Definition: CpGrid.hpp:203
Struct that holds all the data needed to represent a CpGrid.
Definition: CpGridData.hpp:118
auto faceToPoint(int faceIdx) const
Definition: CpGridData.hpp:362
void postAdapt()
Clean up refinement/coarsening markers - set every element to the mark 0 which represents 'doing noth...
const cpgrid::LevelGlobalIdSet & globalIdSet() const
Get the global index set.
Definition: CpGridData.hpp:582
CpGridDataTraits::CommunicationType CommunicationType
type of OwnerOverlap communication for cells
Definition: CpGridData.hpp:632
@ MAX_DATA_PER_CELL
The maximum data items allowed per cell (DUNE < 2.5.2)
Definition: CpGridData.hpp:143
CpGridDataTraits::ParallelIndexSet ParallelIndexSet
The type of the parallel index set.
Definition: CpGridData.hpp:635
int size(GeometryType type) const
number of leaf entities per geometry type in this process
Definition: CpGridData.hpp:175
const std::array< int, 3 > & logicalCartesianSize() const
Definition: CpGridData.hpp:590
void communicate(DataHandle &data, InterfaceType iftype, CommunicationDirection dir)
communicate objects for all codims on a given level
Definition: CpGridData.hpp:944
auto faceTag(int faceIdx) const
Definition: CpGridData.hpp:350
bool uniqueBoundaryIds() const
Definition: CpGridData.hpp:545
const std::tuple< int, std::vector< int > > & getChildrenLevelAndIndexList(int elemIdx) const
Retrieves the level and child indices of a given parent cell.
Definition: CpGridData.hpp:473
std::array< double, 3 > computeEclCentroid(const Entity< 0 > &elem) const
void computeCommunicationInterfaces(int noexistingPoints)
int getLeafIdxFromLevelIdx(int level_cell_idx) const
Definition: CpGridData.hpp:486
const auto & getCornerHistory(int cornerIdx) const
Definition: CpGridData.hpp:377
RemoteIndices & cellRemoteIndices()
Definition: CpGridData.hpp:666
int size(int codim) const
number of leaf entities per codim in this process
void readEclipseFormat(const std::string &filename, bool periodic_extension, bool turn_normals=false, bool edge_conformal=false)
CpGridDataTraits::CollectiveCommunication CollectiveCommunication
Definition: CpGridData.hpp:620
const std::vector< int > & globalCell() const
Definition: CpGridData.hpp:393
CpGridDataTraits::InterfaceMap InterfaceMap
The type of the map describing communication interfaces.
Definition: CpGridData.hpp:629
CpGridDataTraits::Communication Communication
The type of the collective communication.
Definition: CpGridData.hpp:619
int numFaces() const
Definition: CpGridData.hpp:367
const std::vector< std::tuple< int, std::vector< int > > > & getParentToChildren() const
Definition: CpGridData.hpp:477
void getIJK(int c, std::array< int, 3 > &ijk) const
Extract Cartesian index triplet (i,j,k) of an active cell.
const IndexSet & indexSet() const
Definition: CpGridData.hpp:570
CommunicationType & cellCommunication()
Get the owner-overlap-copy communication for cells.
Definition: CpGridData.hpp:643
auto cellToFace(int cellIdx) const
Definition: CpGridData.hpp:330
auto faceNormals(int faceIdx) const
Definition: CpGridData.hpp:356
CpGridDataTraits::RemoteIndices RemoteIndices
The type of the remote indices information.
Definition: CpGridData.hpp:638
auto cornerHistorySize() const
Definition: CpGridData.hpp:372
ParallelIndexSet & cellIndexSet()
Definition: CpGridData.hpp:656
const auto & cellToPoint(int cellIdx) const
Definition: CpGridData.hpp:340
int getMark(const cpgrid::Entity< 0 > &element) const
Return refinement mark for entity.
const ParallelIndexSet & cellIndexSet() const
Definition: CpGridData.hpp:661
CpGridDataTraits::MPICommunicator MPICommunicator
The type of the mpi communicator.
Definition: CpGridData.hpp:617
bool mark(int refCount, const cpgrid::Entity< 0 > &element, bool throwOnFailure=false)
Mark entity for refinement or coarsening.
CpGridData(std::vector< std::shared_ptr< CpGridData > > &data)
Constructor.
std::tuple< const std::shared_ptr< CpGridData >, const std::vector< std::array< int, 2 > >, const std::vector< std::tuple< int, std::vector< int > > >, const std::tuple< int, std::vector< int > >, const std::vector< std::array< int, 2 > >, const std::vector< std::array< int, 2 > > > refineSingleCell(const std::array< int, 3 > &cells_per_dim, const int &parent_idx) const
Refine a single cell and return a shared pointer of CpGridData type.
void distributeGlobalGrid(CpGrid &grid, const CpGridData &view_data, const std::vector< int > &cell_part)
Redistribute a global grid.
const std::vector< std::shared_ptr< Dune::cpgrid::CpGridData > > & levelData() const
Add doc/or remove method and replace it with better approach.
Definition: CpGridData.hpp:458
const auto & cellToPoint() const
Definition: CpGridData.hpp:335
const std::vector< double > & zcornData() const
Definition: CpGridData.hpp:563
void setUniqueBoundaryIds(bool uids)
Definition: CpGridData.hpp:552
bool preAdapt()
Set mightVanish flags for elements that will be refined in the next adapt() call Need to be called af...
int getGridIdx() const
Add doc/or remove method and replace it with better approach.
Definition: CpGridData.hpp:444
void processEclipseFormat(const grdecl &input_data, std::array< std::set< std::pair< int, int > >, 2 > &nnc, bool remove_ij_boundary, bool turn_normals, bool pinchActive, double tolerance_unique_points, bool edge_conformal)
bool hasNNCs(const std::vector< int > &cellIndices) const
Check all cells selected for refinement have no NNCs (no neighbor connections). Assumption: all grid ...
const CommunicationType & cellCommunication() const
Get the owner-overlap-copy communication for cells.
Definition: CpGridData.hpp:651
int cellFace(int cell, int local_index) const
Definition: CpGridData.hpp:325
const cpgrid::IdSet & localIdSet() const
Get the local index set.
Definition: CpGridData.hpp:576
const cpgrid::DefaultGeometryPolicy getGeometry() const
Definition: CpGridData.hpp:481
bool adapt()
TO DO: Documentation. Triggers the grid refinement process - Currently, returns preAdapt()
int faceToCellSize(int face) const
Definition: CpGridData.hpp:345
CpGridDataTraits::Communicator Communicator
The type of the Communicator.
Definition: CpGridData.hpp:626
std::array< double, 3 > computeEclCentroid(const int idx) const
const std::vector< int > & sortedNumAquiferCells() const
Get sorted active cell indices of numerical aquifer.
Definition: CpGridData.hpp:678
const RemoteIndices & cellRemoteIndices() const
Definition: CpGridData.hpp:671
CpGridData(MPIHelper::MPICommunicator comm, std::vector< std::shared_ptr< CpGridData > > &data)
Definition: DefaultGeometryPolicy.hpp:53
const EntityVariable< cpgrid::Geometry< 3 - codim, 3 >, codim > & geomVector() const
Definition: DefaultGeometryPolicy.hpp:86
Wrapper that turns a data handle suitable for dune-grid into one based on integers instead of entitie...
Definition: Entity2IndexDataHandle.hpp:56
Represents an entity of a given codim, with positive or negative orientation.
Definition: EntityRep.hpp:99
The global id set for Dune.
Definition: Indexsets.hpp:483
Only needs to provide interface for doing nothing.
Definition: Iterators.hpp:118
Definition: Indexsets.hpp:199
Definition: Indexsets.hpp:57
Definition: Intersection.hpp:66
Definition: Indexsets.hpp:367
OPM_HOST_DEVICE int size() const
Returns the number of rows in the table.
Definition: SparseTable.hpp:191
Definition: PartitionTypeIndicator.hpp:50
Definition: CpGridData.hpp:978
void write(const T &data)
Definition: CpGridData.hpp:985
void reset()
Definition: CpGridData.hpp:989
void read(T &data)
Definition: CpGridData.hpp:981
void resize(std::size_t size)
Definition: CpGridData.hpp:993
The namespace Dune is the main namespace for all Dune code.
Definition: common/CartesianIndexMapper.hpp:10
Dune::cpgrid::Cell2FacesContainer cell2Faces(const Dune::CpGrid &grid)
Get the cell to faces mapping of a grid.
Holds the implementation of the CpGrid as a pimple.
Definition: CellQuadrature.hpp:26
MPIHelper::MPICommunicator MPICommunicator
The type of the collective communication.
Definition: CpGridDataTraits.hpp:56
Dune::VariableSizeCommunicator<> Communicator
The type of the Communicator.
Definition: CpGridDataTraits.hpp:71
Dune::RemoteIndices< ParallelIndexSet > RemoteIndices
The type of the remote indices information.
Definition: CpGridDataTraits.hpp:83
typename CommunicationType::ParallelIndexSet ParallelIndexSet
The type of the parallel index set.
Definition: CpGridDataTraits.hpp:80
Dune::Communication< MPICommunicator > CollectiveCommunication
Definition: CpGridDataTraits.hpp:59
Dune::OwnerOverlapCopyCommunication< int, int > CommunicationType
type of OwnerOverlap communication for cells
Definition: CpGridDataTraits.hpp:77
Dune::Communication< MPICommunicator > Communication
Definition: CpGridDataTraits.hpp:58
AttributeSet
The type of the set of the attributes.
Definition: CpGridDataTraits.hpp:66
Communicator::InterfaceMap InterfaceMap
The type of the map describing communication interfaces.
Definition: CpGridDataTraits.hpp:74
Definition: CpGridData.hpp:1009
BaseMover(DataHandle &data)
Definition: CpGridData.hpp:1010
void moveData(const E &from, const E &to)
Definition: CpGridData.hpp:1014
MoveBuffer< typename DataHandle::DataType > buffer
Definition: CpGridData.hpp:1023
DataHandle & data_
Definition: CpGridData.hpp:1022
Definition: CpGridData.hpp:1029
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1035
CpGridData * scatterView_
Definition: CpGridData.hpp:1042
CpGridData * gatherView_
Definition: CpGridData.hpp:1041
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1030
Definition: CpGridData.hpp:1047
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1048
CpGridData * gatherView_
Definition: CpGridData.hpp:1065
CpGridData * scatterView_
Definition: CpGridData.hpp:1066
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1053
Definition: CpGridData.hpp:1071
CpGridData * scatterView_
Definition: CpGridData.hpp:1089
CpGridData * gatherView_
Definition: CpGridData.hpp:1088
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1072
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1076
Definition: CpGridData.hpp:1004
Definition: preprocess.h:56