CpGridData.hpp
Go to the documentation of this file.
1//===========================================================================
2//
3// File: CpGridData.hpp
4//
5// Created: Sep 17 21:11:41 2013
6//
7// Author(s): Atgeirr F Rasmussen <atgeirr@sintef.no>
8// Bård Skaflestad <bard.skaflestad@sintef.no>
9// Markus Blatt <markus@dr-blatt.de>
10// Antonella Ritorto <antonella.ritorto@opm-op.com>
11//
12// Comment: Major parts of this file originated in dune/grid/CpGrid.hpp
13// and got transferred here during refactoring for the parallelization.
14//
15// $Date$
16//
17// $Revision$
18//
19//===========================================================================
20
21/*
22 Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
23 Copyright 2009, 2010, 2013, 2022-2023 Equinor ASA.
24 Copyright 2013 Dr. Blatt - HPC-Simulation-Software & Services
25
26 This file is part of The Open Porous Media project (OPM).
27
28 OPM is free software: you can redistribute it and/or modify
29 it under the terms of the GNU General Public License as published by
30 the Free Software Foundation, either version 3 of the License, or
31 (at your option) any later version.
32
33 OPM is distributed in the hope that it will be useful,
34 but WITHOUT ANY WARRANTY; without even the implied warranty of
35 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
36 GNU General Public License for more details.
37
38 You should have received a copy of the GNU General Public License
39 along with OPM. If not, see <http://www.gnu.org/licenses/>.
40*/
48#ifndef OPM_CPGRIDDATA_HEADER
49#define OPM_CPGRIDDATA_HEADER
50
51
52#include <dune/common/parallel/mpihelper.hh>
53#ifdef HAVE_DUNE_ISTL
54#include <dune/istl/owneroverlapcopy.hh>
55#endif
56
57#include <dune/common/parallel/communication.hh>
58#include <dune/common/parallel/variablesizecommunicator.hh>
59#include <dune/grid/common/gridenums.hh>
60
61#if HAVE_ECL_INPUT
62#include <opm/input/eclipse/EclipseState/Grid/EclipseGrid.hpp>
63#include <opm/input/eclipse/EclipseState/Grid/NNC.hpp>
64#endif
65
67
69#include "CpGridDataTraits.hpp"
70//#include "DataHandleWrappers.hpp"
71//#include "GlobalIdMapping.hpp"
72#include "Geometry.hpp"
73
74#include <array>
75#include <initializer_list>
76#include <set>
77#include <vector>
78
79namespace Opm
80{
81class EclipseState;
82}
83namespace Dune
84{
85class CpGrid;
86
87namespace cpgrid
88{
89
90class IndexSet;
91class IdSet;
92class LevelGlobalIdSet;
93class PartitionTypeIndicator;
94template<int,int> class Geometry;
95template<int> class Entity;
96template<int> class EntityRep;
97}
98}
99
101 const std::array<int, 3>&,
102 bool);
103
104namespace Dune
105{
106namespace cpgrid
107{
108namespace mover
109{
110template<class T, int i> struct Mover;
111}
112
118{
119 template<class T, int i> friend struct mover::Mover;
120 friend class GlobalIdSet;
121 friend class HierarchicIterator;
125
126 friend
128 const std::array<int, 3>&,
129 bool);
130
131private:
132 CpGridData(const CpGridData& g);
133
134public:
135 enum{
136#ifndef MAX_DATA_COMMUNICATED_PER_ENTITY
144#else
149 MAX_DATA_PER_CELL = MAX_DATA_COMMUNICATED_PER_ENTITY
150#endif
151 };
152
153 CpGridData() = delete;
154
159 explicit CpGridData(MPIHelper::MPICommunicator comm, std::vector<std::shared_ptr<CpGridData>>& data);
160
161
162
164 explicit CpGridData(std::vector<std::shared_ptr<CpGridData>>& data);
167
168
169
170
172 int size(int codim) const;
173
175 int size (GeometryType type) const
176 {
177 if (type.isCube()) {
178 return size(3 - type.dim());
179 } else {
180 return 0;
181 }
182 }
183
199 void readEclipseFormat(const std::string& filename,
200 bool periodic_extension,
201 bool turn_normals = false,
202 bool edge_conformal = false);
203
204#if HAVE_ECL_INPUT
226 void processEclipseFormat(const Opm::Deck& deck,
227 bool periodic_extension,
228 bool turn_normals = false,
229 bool clip_z = false,
230 const std::vector<double>& poreVolume = std::vector<double>{},
231 bool edge_conformal = false);
232
267 std::vector<std::size_t>
268 processEclipseFormat(const Opm::EclipseGrid* ecl_grid,
269 Opm::EclipseState* ecl_state,
270 bool periodic_extension,
271 bool turn_normals = false,
272 bool clip_z = false,
273 bool pinchActive = true,
274 bool edge_conformal = false);
275#endif
276
303 void processEclipseFormat(const grdecl& input_data,
304#if HAVE_ECL_INPUT
305 Opm::EclipseState* ecl_state,
306#endif
307 std::array<std::set<std::pair<int, int>>, 2>& nnc,
308 bool remove_ij_boundary,
309 bool turn_normals,
310 bool pinchActive,
311 double tolerance_unique_points,
312 bool edge_conformal);
313
321 void getIJK(int c, std::array<int,3>& ijk) const;
322
323 int cellFace(int cell, int local_index) const
324 {
325 return cell_to_face_[cpgrid::EntityRep<0>(cell, true)][local_index].index();
326 }
327
328 auto cellToFace(int cellIdx) const
329 {
330 return cell_to_face_[cpgrid::EntityRep<0>(cellIdx, true)];
331 }
332
333 const auto& cellToPoint() const
334 {
335 return cell_to_point_;
336 }
337
338 const auto& cellToPoint(int cellIdx) const
339 {
340 return cell_to_point_[cellIdx];
341 }
342
343 int faceToCellSize(int face) const {
344 Dune::cpgrid::EntityRep<1> faceRep(face, true);
345 return face_to_cell_[faceRep].size();
346 }
347
348 auto faceTag(int faceIdx) const
349 {
350 Dune::cpgrid::EntityRep<1> faceRep(faceIdx, true);
351 return face_tag_[faceRep];
352 }
353
354 auto faceNormals(int faceIdx) const
355 {
356 Dune::cpgrid::EntityRep<1> faceRep(faceIdx, true);
357 return face_normals_[faceRep];
358 }
359
360 auto faceToPoint(int faceIdx) const
361 {
362 return face_to_point_[faceIdx];
363 }
364
365 int numFaces() const
366 {
367 return face_to_cell_.size();
368 }
369
370 auto cornerHistorySize() const
371 {
372 return corner_history_.size();
373 }
374
375 const auto& getCornerHistory(int cornerIdx) const
376 {
377 if(cornerHistorySize()) {
378 return corner_history_[cornerIdx];
379 }
380 else {
381 OPM_THROW(std::logic_error, "Vertex has no history record.\n");
382 }
383 }
384
391 const std::vector<int>& globalCell() const
392 {
393 return global_cell_;
394 }
395
398 bool hasNNCs(const std::vector<int>& cellIndices) const;
399
412 bool mark(int refCount, const cpgrid::Entity<0>& element);
413
417 int getMark(const cpgrid::Entity<0>& element) const;
418
428 bool preAdapt();
429
431 bool adapt();
432
434 void postAdapt();
435
436private:
452 bool compatibleSubdivisions(const std::vector<std::array<int,3>>& cells_per_dim_vec,
453 const std::vector<std::array<int,3>>& startIJK_vec,
454 const std::vector<std::array<int,3>>& endIJK_vec) const;
455
456 std::array<Dune::FieldVector<double,3>,8> getReferenceRefinedCorners(int idx_in_parent_cell, const std::array<int,3>& cells_per_dim) const;
457
458public:
460 int getGridIdx() const {
461 // Not the nicest way of checking if "this" points at the leaf grid view of a mixed grid (with coarse and refined cells).
462 // 1. When the grid has been refined at least onece, level_data_ptr_ ->size() >1. Therefore, there is a chance of "this" pointing at the leaf grid view.
463 // 2. Unfortunately, level_ is default initialized by 0. This implies, in particular, that if someone wants to check the value of
464 // "this->level_" when "this" points at the leaf grid view of a grid that has been refined, this value is - unfortunately - equal to 0.
465 // 3. Due to 2. we need an extra bool value to distinguish between the actual level 0 grid and such a leaf grid view (with incorrect level_ == 0). For this
466 // reason we check if child_to_parent_cells_.empty() [true for actual level 0 grid, false for the leaf grid view].
467 // --- TO BE IMPROVED ---
468 if ((level_data_ptr_ ->size() >1) && (level_ == 0) && (!child_to_parent_cells_.empty())) {
469 return level_data_ptr_->size() -1;
470 }
471 return level_;
472 }
474 const std::vector<std::shared_ptr<Dune::cpgrid::CpGridData>>& levelData() const
475 {
476 if (level_data_ptr_->empty()) {
477 OPM_THROW(std::logic_error, "Level data has not been initialized\n");
478 }
479 return *level_data_ptr_;
480 }
481
489 const std::tuple<int,std::vector<int>>& getChildrenLevelAndIndexList(int elemIdx) const {
490 return parent_to_children_cells_[elemIdx];
491 }
492
493 const std::vector<std::tuple<int,std::vector<int>>>& getParentToChildren() const {
494 return parent_to_children_cells_;
495 }
496
498 {
499 return geometry_;
500 }
501
502 int getLeafIdxFromLevelIdx(int level_cell_idx) const
503 {
504 if (level_to_leaf_cells_.empty()) {
505 OPM_THROW(std::logic_error, "Grid has no LGRs. No mapping to the leaf.\n");
506 }
507 return level_to_leaf_cells_[level_cell_idx];
508 }
509
531 std::tuple< const std::shared_ptr<CpGridData>,
532 const std::vector<std::array<int,2>>, // parent_to_refined_corners(~boundary_old_to_new_corners)
533 const std::vector<std::tuple<int,std::vector<int>>>, // parent_to_children_faces (~boundary_old_to_new_faces)
534 const std::tuple<int, std::vector<int>>, // parent_to_children_cells
535 const std::vector<std::array<int,2>>, // child_to_parent_faces
536 const std::vector<std::array<int,2>>> // child_to_parent_cells
537 refineSingleCell(const std::array<int,3>& cells_per_dim, const int& parent_idx) const;
538
539 // @brief Compute center of an entity/element/cell in the Eclipse way:
540 // - Average of the 4 corners of the bottom face.
541 // - Average of the 4 corners of the top face.
542 // Return average of the previous computations.
543 // @param [in] int Index of a cell.
544 // @return 'eclipse centroid'
545 std::array<double,3> computeEclCentroid(const int idx) const;
546
547 // @brief Compute center of an entity/element/cell in the Eclipse way:
548 // - Average of the 4 corners of the bottom face.
549 // - Average of the 4 corners of the top face.
550 // Return average of the previous computations.
551 // @param [in] Entity<0> Entity
552 // @return 'eclipse centroid'
553 std::array<double,3> computeEclCentroid(const Entity<0>& elem) const;
554
555 // Make unique boundary ids for all intersections.
557
561 bool uniqueBoundaryIds() const
562 {
563 return use_unique_boundary_ids_;
564 }
565
// Enable or disable the use of unique boundary ids. When enabling for the
// first time (ids not yet computed) they are computed here.
// NOTE(review): the statement inside the if-block (doxygen line 572,
// presumably the call that fills unique_boundary_ids_) was elided by the
// documentation generator — confirm against the real header.
568 void setUniqueBoundaryIds(bool uids)
569 {
570 use_unique_boundary_ids_ = uids;
571 if (use_unique_boundary_ids_ && unique_boundary_ids_.empty()) {
573 }
574 }
575
579 const std::vector<double>& zcornData() const {
580 return zcorn;
581 }
582
583
586 const IndexSet& indexSet() const
587 {
588 return *index_set_;
589 }
590
593 {
594 return *local_id_set_;
595 }
596
599 {
600 return *global_id_set_;
601 }
602
606 const std::array<int, 3>& logicalCartesianSize() const
607 {
608 return logical_cartesian_size_;
609 }
610
615 const CpGridData& view_data,
616 const std::vector<int>& cell_part);
617
623 template<class DataHandle>
624 void communicate(DataHandle& data, InterfaceType iftype, CommunicationDirection dir);
625
627
629
630 void computeCommunicationInterfaces(int noexistingPoints);
631
637
640#if HAVE_MPI
643
646
649
652
655
660 {
661 return cell_comm_;
662 }
663
668 {
669 return cell_comm_;
670 }
671
673 {
674 return cellCommunication().indexSet();
675 }
676
678 {
679 return cellCommunication().indexSet();
680 }
681
683 {
684 return cellCommunication().remoteIndices();
685 }
686
688 {
689 return cellCommunication().remoteIndices();
690 }
691#endif
692
694 const std::vector<int>& sortedNumAquiferCells() const
695 {
696 return aquifer_cells_;
697 }
698
699private:
700
702 void populateGlobalCellIndexSet();
703
704#if HAVE_MPI
705
711 template<class DataHandle>
712 void gatherData(DataHandle& data, CpGridData* global_view,
713 CpGridData* distributed_view);
714
715
722 template<int codim, class DataHandle>
723 void gatherCodimData(DataHandle& data, CpGridData* global_data,
724 CpGridData* distributed_data);
725
732 template<class DataHandle>
733 void scatterData(DataHandle& data, const CpGridData* global_data,
734 const CpGridData* distributed_data, const InterfaceMap& cell_inf,
735 const InterfaceMap& point_inf);
736
744 template<int codim, class DataHandle>
745 void scatterCodimData(DataHandle& data, CpGridData* global_data,
746 CpGridData* distributed_data);
747
756 template<int codim, class DataHandle>
757 void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
758 const Interface& interface);
759
768 template<int codim, class DataHandle>
769 void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
770 const InterfaceMap& interface);
771
772#endif
773
774 void computeGeometry(const CpGrid& grid,
775 const DefaultGeometryPolicy& globalGeometry,
776 const std::vector<int>& globalAquiferCells,
777 const OrientedEntityTable<0, 1>& globalCell2Faces,
778 DefaultGeometryPolicy& geometry,
779 std::vector<int>& aquiferCells,
781 const std::vector< std::array<int,8> >& cell2Points);
782
783 // Representing the topology
795 Opm::SparseTable<int> face_to_point_;
797 std::vector< std::array<int,8> > cell_to_point_;
804 std::array<int, 3> logical_cartesian_size_{};
811 std::vector<int> global_cell_;
817 typedef FieldVector<double, 3> PointType;
821 cpgrid::EntityVariable<int, 1> unique_boundary_ids_;
823 std::unique_ptr<cpgrid::IndexSet> index_set_;
825 std::shared_ptr<const cpgrid::IdSet> local_id_set_;
827 std::shared_ptr<LevelGlobalIdSet> global_id_set_;
829 std::shared_ptr<PartitionTypeIndicator> partition_type_indicator_;
831 std::vector<int> mark_;
833 int level_{0};
835 std::vector<std::shared_ptr<CpGridData>>* level_data_ptr_;
836 // SUITABLE FOR ALL LEVELS EXCEPT FOR LEAFVIEW
838 std::vector<int> level_to_leaf_cells_; // In entry 'level cell index', we store 'leafview cell index' // {level LGR, {child0, child1, ...}}
840 std::vector<std::tuple<int,std::vector<int>>> parent_to_children_cells_; // {# children in x-direction, ... y-, ... z-}
842 std::array<int,3> cells_per_dim_;
843 // SUITABLE ONLY FOR LEAFVIEW // {level, cell index in that level}
845 std::vector<std::array<int,2>> leaf_to_level_cells_;
847 std::vector<std::array<int,2>> corner_history_;
848 // SUITABLE FOR ALL LEVELS INCLUDING LEAFVIEW // {level parent cell, parent cell index}
850 std::vector<std::array<int,2>> child_to_parent_cells_;
853 std::vector<int> cell_to_idxInParentCell_;
855 int refinement_max_level_{0};
856
857
859 Communication ccobj_;
860
861 // Boundary information (optional).
862 bool use_unique_boundary_ids_;
863
869 std::vector<double> zcorn;
870
872 std::vector<int> aquifer_cells_;
873
874#if HAVE_MPI
875
877 CommunicationType cell_comm_;
878
880 std::tuple<Interface,Interface,Interface,Interface,Interface> cell_interfaces_;
881 /*
882 // code deactivated, because users cannot access face indices and therefore
883 // communication on faces makes no sense!
885 std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
886 face_interfaces_;
887 */
889 std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
890 point_interfaces_;
891
892#endif
893
894 // Return the geometry vector corresponding to the given codim.
// By the type Geometry<3 - codim, 3>: codim 0 yields 3d (cell) geometries,
// codim 1 face geometries, codim 3 point geometries.
895 template <int codim>
896 const EntityVariable<Geometry<3 - codim, 3>, codim>& geomVector() const
897 {
898 return geometry_.geomVector<codim>();
899 }
900
901 friend class Dune::CpGrid;
902 template<int> friend class Entity;
903 template<int> friend class EntityRep;
904 friend class Intersection;
906};
907
908
909
910#if HAVE_MPI
911
912namespace
913{
// Select, from the five stored interfaces, the one matching a
// Dune::InterfaceType. The numeric case labels follow the enumerator values
// declared in dune/grid/common/gridenums.hh
// (0 = InteriorBorder_InteriorBorder_Interface ... 4 = All_All_Interface).
// NOTE(review): this helper lives in an unnamed namespace inside a header,
// which gives it internal linkage per translation unit — an anti-pattern,
// though harmless for a pure function template like this.
918template<class T>
919T& getInterface(InterfaceType iftype,
920 std::tuple<T,T,T,T,T>& interfaces)
921{
922 switch(iftype)
923 {
924 case 0:
925 return std::get<0>(interfaces);
926 case 1:
927 return std::get<1>(interfaces);
928 case 2:
929 return std::get<2>(interfaces);
930 case 3:
931 return std::get<3>(interfaces);
932 case 4:
933 return std::get<4>(interfaces);
934 }
// Reached only for a value outside the enum range.
935 OPM_THROW(std::runtime_error, "Invalid Interface type was used during communication");
936}
937
938} // end unnamed namespace
939
// Communicate data of one codimension; thin adapter that forwards to the
// InterfaceMap overload using the maps wrapped by the Interface object.
940template<int codim, class DataHandle>
941void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
942 const Interface& interface)
943{
944 this->template communicateCodim<codim>(data, dir, interface.interfaces());
945}
946
947template<int codim, class DataHandle>
948void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data_wrapper, CommunicationDirection dir,
949 const InterfaceMap& interface)
950{
951 Communicator comm(ccobj_, interface);
952
953 if(dir==ForwardCommunication)
954 comm.forward(data_wrapper);
955 else
956 comm.backward(data_wrapper);
957}
958#endif
959
960template<class DataHandle>
961void CpGridData::communicate(DataHandle& data, InterfaceType iftype,
962 CommunicationDirection dir)
963{
964#if HAVE_MPI
965 if(data.contains(3,0))
966 {
967 Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*this, data);
968 communicateCodim<0>(data_wrapper, dir, getInterface(iftype, cell_interfaces_));
969 }
970 if(data.contains(3,3))
971 {
972 Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*this, data);
973 communicateCodim<3>(data_wrapper, dir, getInterface(iftype, point_interfaces_));
974 }
975#else
976 // Suppress warnings for unused arguments.
977 (void) data;
978 (void) iftype;
979 (void) dir;
980#endif
981}
982}}
983
984#if HAVE_MPI
987
988namespace Dune {
989namespace cpgrid {
990
991namespace mover
992{
// Sequential read/write buffer for entity data used by the movers below and
// by gatherCodimData. NOTE(review): the class head (doxygen line 994,
// "class MoveBuffer") and a friend declaration were elided by the
// documentation generator; the lines below are the visible remainder.
993template<class T>
995{
997public:
// Read the next value and advance the cursor.
998 void read(T& data)
999 {
1000 data=buffer_[index_++];
1001 }
// Write a value at the cursor position and advance it.
1002 void write(const T& data)
1003 {
1004 buffer_[index_++]=data;
1005 }
// Rewind the cursor to the beginning; contents are kept.
1006 void reset()
1007 {
1008 index_=0;
1009 }
// Resize the storage and rewind the cursor.
1010 void resize(std::size_t size)
1011 {
1012 buffer_.resize(size);
1013 index_=0;
1014 }
1015private:
1016 std::vector<T> buffer_;
// Cursor: next position to read from / write to.
1017 typename std::vector<T>::size_type index_;
1018};
// Primary template, intentionally empty: only the specializations below
// (whose heads were elided in this dump) carry an implementation.
1019template<class DataHandle,int codim>
1020struct Mover
1021{
1022};
1023
// Common machinery for the Mover specializations: gathers the data attached
// to one entity into a MoveBuffer and scatters it onto the target entity.
// NOTE(review): the struct head (doxygen line 1025) and the MoveBuffer member
// declaration (line 1040, "buffer") were elided by the documentation generator.
1024template<class DataHandle>
1026{
1027 explicit BaseMover(DataHandle& data)
1028 : data_(data)
1029 {}
1030 template<class E>
// Move the data attached to entity `from` over to entity `to`.
1031 void moveData(const E& from, const E& to)
1032 {
1033 std::size_t size=data_.size(from);
1034 buffer.resize(size);
1035 data_.gather(buffer, from);
// Rewind so scattering reads from the first gathered item.
1036 buffer.reset();
1037 data_.scatter(buffer, to, size);
1038 }
1039 DataHandle& data_;
1041};
1042
1043
// Mover specialization for data attached to cells (codim 0).
// NOTE(review): the specialization head (doxygen line 1045) and the
// gatherView_/scatterView_ member declarations were elided in this dump.
1044template<class DataHandle>
1046{
1047 Mover(DataHandle& data, CpGridData* gatherView,
1048 CpGridData* scatterView)
1049 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1050 {}
1051
// Move the data of one cell from the gather view to the scatter view.
1052 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1053 {
1054 Entity<0> from_entity=Entity<0>(*gatherView_, from_cell_index, true);
1055 Entity<0> to_entity=Entity<0>(*scatterView_, to_cell_index, true);
1056 this->moveData(from_entity, to_entity);
1057 }
1060};
1061
// Mover specialization for data attached to the faces of a cell.
// NOTE(review): the specialization head (doxygen line 1063) and the
// gatherView_/scatterView_ member declarations were elided in this dump.
1062template<class DataHandle>
1064{
1065 Mover(DataHandle& data, CpGridData* gatherView,
1066 CpGridData* scatterView)
1067 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1068 {}
1069
// Move the data of every face of one cell; relies on source and target cells
// storing their faces in the same order.
1070 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1071 {
1072 typedef typename OrientedEntityTable<0,1>::row_type row_type;
1073 EntityRep<0> from_cell=EntityRep<0>(from_cell_index, true);
1074 EntityRep<0> to_cell=EntityRep<0>(to_cell_index, true);
1075 const OrientedEntityTable<0,1>& table = gatherView_->cell_to_face_;
1076 row_type from_faces=table.operator[](from_cell);
1077 row_type to_faces=scatterView_->cell_to_face_[to_cell];
1078
// NOTE(review): signed `i` compared against row size; fine for realistic
// face counts but mixes signedness.
1079 for(int i=0; i<from_faces.size(); ++i)
1080 this->moveData(from_faces[i], to_faces[i]);
1081 }
1084};
1085
// Mover specialization for data attached to the 8 corner points of a cell.
// NOTE(review): the specialization head (doxygen line 1087) and the
// gatherView_/scatterView_ member declarations were elided in this dump.
1086template<class DataHandle>
1088{
1089 Mover(DataHandle& data, CpGridData* gatherView,
1090 CpGridData* scatterView)
1091 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1092 {}
// Move the data of each of the 8 corners of one cell.
1093 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1094 {
1095 const std::array<int,8>& from_cell_points=
1096 gatherView_->cell_to_point_[from_cell_index];
1097 const std::array<int,8>& to_cell_points=
1098 scatterView_->cell_to_point_[to_cell_index];
1099 for(std::size_t i=0; i<8; ++i)
1100 {
1101 this->moveData(Entity<3>(*gatherView_, from_cell_points[i], true),
1102 Entity<3>(*scatterView_, to_cell_points[i], true));
1103 }
1104 }
1107};
1108
1109} // end mover namespace
1110
// Scatter data from the global view to the distributed view for cells and
// points, using the precomputed cell and point communication interfaces.
1111template<class DataHandle>
1112void CpGridData::scatterData(DataHandle& data, const CpGridData* global_data,
1113 const CpGridData* distributed_data, const InterfaceMap& cell_inf,
1114 const InterfaceMap& point_inf)
1115{
1116#if HAVE_MPI
// Cell data (codim 0 of the 3d grid).
1117 if(data.contains(3,0))
1118 {
1119 Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*global_data, *distributed_data, data);
1120 communicateCodim<0>(data_wrapper, ForwardCommunication, cell_inf);
1121 }
// Point data (codim 3).
1122 if(data.contains(3,3))
1123 {
1124 Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*global_data, *distributed_data, data);
1125 communicateCodim<3>(data_wrapper, ForwardCommunication, point_inf);
1126 }
1127#endif
1128}
1129
1130template<int codim, class DataHandle>
1131void CpGridData::scatterCodimData(DataHandle& data, CpGridData* global_data,
1132 CpGridData* distributed_data)
1133{
1134 CpGridData *gather_view, *scatter_view;
1135 gather_view=global_data;
1136 scatter_view=distributed_data;
1137
1138 mover::Mover<DataHandle,codim> mover(data, gather_view, scatter_view);
1139
1140
1141 for(auto index=distributed_data->cellIndexSet().begin(),
1142 end = distributed_data->cellIndexSet().end();
1143 index!=end; ++index)
1144 {
1145 std::size_t from=index->global();
1146 std::size_t to=index->local();
1147 mover(from,to);
1148 }
1149}
1150
1151namespace
1152{
1153
1154template<int codim, class T, class F>
1155void visitInterior(CpGridData& distributed_data, T begin, T endit, F& func)
1156{
1157 for(T it=begin; it!=endit; ++it)
1158 {
1159 Entity<codim> entity(distributed_data, it-begin, true);
1160 PartitionType pt = entity.partitionType();
1161 if(pt==Dune::InteriorEntity)
1162 {
1163 func(*it, entity);
1164 }
1165 }
1166}
1167
// Functor for visitInterior: records, for each visited owned entity, its
// global index and the number of data items the data handle attaches to it.
1168template<class DataHandle>
1169struct GlobalIndexSizeGatherer
1170{
1171 GlobalIndexSizeGatherer(DataHandle& data_,
1172 std::vector<int>& ownedGlobalIndices_,
1173 std::vector<int>& ownedSizes_)
1174 : data(data_), ownedGlobalIndices(ownedGlobalIndices_), ownedSizes(ownedSizes_)
1175 {}
1176
1177 template<class T, class E>
1178 void operator()(T& i, E& entity)
1179 {
1180 ownedGlobalIndices.push_back(i);
1181 ownedSizes.push_back(data.size(entity));
1182 }
1183 DataHandle& data;
1184 std::vector<int>& ownedGlobalIndices;
1185 std::vector<int>& ownedSizes;
1186};
1187
// Functor for visitInterior: gathers the data of each visited entity into
// the provided MoveBuffer (the index value is ignored).
1188template<class DataHandle>
1189struct DataGatherer
1190{
1191 DataGatherer(mover::MoveBuffer<typename DataHandle::DataType>& buffer_,
1192 DataHandle& data_)
1193 : buffer(buffer_), data(data_)
1194 {}
1195
1196 template<class T, class E>
1197 void operator()(T& /* it */, E& entity)
1198 {
1199 data.gather(buffer, entity);
1200 }
1201 mover::MoveBuffer<typename DataHandle::DataType>& buffer;
1202 DataHandle& data;
1203};
1204
1205}
1206
1207template<class DataHandle>
1208void CpGridData::gatherData(DataHandle& data, CpGridData* global_data,
1209 CpGridData* distributed_data)
1210{
1211#if HAVE_MPI
1212 if(data.contains(3,0))
1213 gatherCodimData<0>(data, global_data, distributed_data);
1214 if(data.contains(3,3))
1215 gatherCodimData<3>(data, global_data, distributed_data);
1216#endif
1217}
1218
// Gather codim data from all processes onto the global view: every rank
// sends the global ids, item counts and payload of its interior entities via
// MPI_Allgatherv, then scatters the combined payload into the global grid's
// data handle.
1219template<int codim, class DataHandle>
1220void CpGridData::gatherCodimData(DataHandle& data, CpGridData* global_data,
1221 CpGridData* distributed_data)
1222{
1223#if HAVE_MPI
1224 // Get the mapping to global index from the global id set
1225 const std::vector<int>& mapping =
1226 distributed_data->global_id_set_->getMapping<codim>();
1227
1228 // Get the global indices and data size for the entities whose data is
1229 // to be sent, i.e. the ones that we own.
1230 std::vector<int> owned_global_indices;
1231 std::vector<int> owned_sizes;
1232 owned_global_indices.reserve(mapping.size());
1233 owned_sizes.reserve(mapping.size());
1234
1235 GlobalIndexSizeGatherer<DataHandle> gisg(data, owned_global_indices, owned_sizes);
1236 visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gisg);
1237
1238 // communicate the number of indices that each processor sends
1239 int no_indices=owned_sizes.size();
1240 // We will take the address of the first element for MPI_Allgather below.
1241 // Make sure the containers have such an element.
1242 if ( owned_global_indices.empty() )
1243 owned_global_indices.resize(1);
1244 if ( owned_sizes.empty() )
1245 owned_sizes.resize(1);
1246 std::vector<int> no_indices_to_recv(distributed_data->ccobj_.size());
1247 distributed_data->ccobj_.allgather(&no_indices, 1, &(no_indices_to_recv[0]));
1248 // compute size of the vector capable for receiving all indices
1249 // and allgather the global indices and the sizes.
1250 // calculate displacements
// NOTE(review): this transform computes an in-place prefix sum by writing one
// element ahead of the read position; it works with a sequential std::transform
// but technically relies on unspecified overlapping-range behavior.
1251 std::vector<int> displ(distributed_data->ccobj_.size()+1, 0);
1252 std::transform(displ.begin(), displ.end()-1, no_indices_to_recv.begin(), displ.begin()+1,
1253 std::plus<int>());
1254 int global_size=displ[displ.size()-1];//+no_indices_to_recv[displ.size()-1];
1255 std::vector<int> global_indices(global_size);
1256 std::vector<int> global_sizes(global_size);
1257 MPI_Allgatherv(&(owned_global_indices[0]), no_indices, MPITraits<int>::getType(),
1258 &(global_indices[0]), &(no_indices_to_recv[0]), &(displ[0]),
1259 MPITraits<int>::getType(),
1260 distributed_data->ccobj_);
1261 MPI_Allgatherv(&(owned_sizes[0]), no_indices, MPITraits<int>::getType(),
1262 &(global_sizes[0]), &(no_indices_to_recv[0]), &(displ[0]),
1263 MPITraits<int>::getType(),
1264 distributed_data->ccobj_);
1265 std::vector<int>().swap(owned_global_indices); // free data for reuse.
1266 // Compute the number of data items to send
// Per-rank payload size = sum of the item counts of that rank's entities.
1267 std::vector<int> no_data_send(distributed_data->ccobj_.size());
1268 for(typename std::vector<int>::iterator begin=no_data_send.begin(),
1269 i=begin, end=no_data_send.end(); i!=end; ++i)
1270 *i = std::accumulate(global_sizes.begin()+displ[i-begin],
1271 global_sizes.begin()+displ[i-begin+1], std::size_t());
1272 // free at least some memory that can be reused.
1273 std::vector<int>().swap(owned_sizes);
1274 // compute the displacements for receiving with allgatherv
1275 displ[0]=0;
1276 std::transform(displ.begin(), displ.end()-1, no_data_send.begin(), displ.begin()+1,
1277 std::plus<std::size_t>());
1278 // Compute the number of data items we will receive
1279 int no_data_recv = displ[displ.size()-1];//+global_sizes[displ.size()-1];
1280
1281 // Collect the data to send, gather it
// As above, keep at least one element so taking &buffer_[0] is valid.
1282 mover::MoveBuffer<typename DataHandle::DataType> local_data_buffer, global_data_buffer;
1283 if ( no_data_send[distributed_data->ccobj_.rank()] )
1284 {
1285 local_data_buffer.resize(no_data_send[distributed_data->ccobj_.rank()]);
1286 }
1287 else
1288 {
1289 local_data_buffer.resize(1);
1290 }
1291 global_data_buffer.resize(no_data_recv);
1292
1293 DataGatherer<DataHandle> gatherer(local_data_buffer, data);
1294 visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gatherer);
1295 MPI_Allgatherv(&(local_data_buffer.buffer_[0]), no_data_send[distributed_data->ccobj_.rank()],
1296 MPITraits<typename DataHandle::DataType>::getType(),
1297 &(global_data_buffer.buffer_[0]), &(no_data_send[0]), &(displ[0]),
1298 MPITraits<typename DataHandle::DataType>::getType(),
1299 distributed_data->ccobj_);
1300 Entity2IndexDataHandle<DataHandle, codim> edata(*global_data, data);
// Subtract the sizes of all lower codims to turn a global entity index into
// an index local to this codimension.
1301 int offset=0;
1302 for(int i=0; i< codim; ++i)
1303 offset+=global_data->size(i);
1304
// Walk the received indices and sizes in lockstep, scattering each entity's
// chunk of the combined payload into the global data handle.
1305 typename std::vector<int>::const_iterator s=global_sizes.begin();
1306 for(typename std::vector<int>::const_iterator i=global_indices.begin(),
1307 end=global_indices.end();
1308 i!=end; ++s, ++i)
1309 {
1310 edata.scatter(global_data_buffer, *i-offset, *s);
1311 }
1312#endif
1313}
1314
1315} // end namespace cpgrid
1316} // end namespace Dune
1317
1318#endif
1319
1320#endif
DataHandle & data
Definition: CpGridData.hpp:1183
mover::MoveBuffer< typename DataHandle::DataType > & buffer
Definition: CpGridData.hpp:1201
std::vector< int > & ownedGlobalIndices
Definition: CpGridData.hpp:1184
void refine_and_check(const Dune::cpgrid::Geometry< 3, 3 > &, const std::array< int, 3 > &, bool)
std::vector< int > & ownedSizes
Definition: CpGridData.hpp:1185
[ provides Dune::Grid ]
Definition: CpGrid.hpp:203
Struct that holds all the data needed to represent a CpGrid.
Definition: CpGridData.hpp:118
auto faceToPoint(int faceIdx) const
Definition: CpGridData.hpp:360
void postAdapt()
Clean up refinement/coarsening markers - set every element to the mark 0 which represents 'doing noth...
const cpgrid::LevelGlobalIdSet & globalIdSet() const
Get the global index set.
Definition: CpGridData.hpp:598
CpGridDataTraits::CommunicationType CommunicationType
type of OwnerOverlap communication for cells
Definition: CpGridData.hpp:648
@ MAX_DATA_PER_CELL
The maximum data items allowed per cell (DUNE < 2.5.2)
Definition: CpGridData.hpp:143
CpGridDataTraits::ParallelIndexSet ParallelIndexSet
The type of the parallel index set.
Definition: CpGridData.hpp:651
int size(GeometryType type) const
number of leaf entities per geometry type in this process
Definition: CpGridData.hpp:175
const std::array< int, 3 > & logicalCartesianSize() const
Definition: CpGridData.hpp:606
void communicate(DataHandle &data, InterfaceType iftype, CommunicationDirection dir)
communicate objects for all codims on a given level
Definition: CpGridData.hpp:961
auto faceTag(int faceIdx) const
Definition: CpGridData.hpp:348
bool uniqueBoundaryIds() const
Definition: CpGridData.hpp:561
const std::tuple< int, std::vector< int > > & getChildrenLevelAndIndexList(int elemIdx) const
Retrieves the level and child indices of a given parent cell.
Definition: CpGridData.hpp:489
std::array< double, 3 > computeEclCentroid(const Entity< 0 > &elem) const
void computeCommunicationInterfaces(int noexistingPoints)
int getLeafIdxFromLevelIdx(int level_cell_idx) const
Definition: CpGridData.hpp:502
const auto & getCornerHistory(int cornerIdx) const
Definition: CpGridData.hpp:375
RemoteIndices & cellRemoteIndices()
Definition: CpGridData.hpp:682
int size(int codim) const
number of leaf entities per codim in this process
void readEclipseFormat(const std::string &filename, bool periodic_extension, bool turn_normals=false, bool edge_conformal=false)
CpGridDataTraits::CollectiveCommunication CollectiveCommunication
Definition: CpGridData.hpp:636
const std::vector< int > & globalCell() const
Definition: CpGridData.hpp:391
CpGridDataTraits::InterfaceMap InterfaceMap
The type of the map describing communication interfaces.
Definition: CpGridData.hpp:645
CpGridDataTraits::Communication Communication
The type of the collective communication.
Definition: CpGridData.hpp:635
int numFaces() const
Definition: CpGridData.hpp:365
const std::vector< std::tuple< int, std::vector< int > > > & getParentToChildren() const
Definition: CpGridData.hpp:493
void getIJK(int c, std::array< int, 3 > &ijk) const
Extract Cartesian index triplet (i,j,k) of an active cell.
const IndexSet & indexSet() const
Definition: CpGridData.hpp:586
CommunicationType & cellCommunication()
Get the owner-overlap-copy communication for cells.
Definition: CpGridData.hpp:659
auto cellToFace(int cellIdx) const
Definition: CpGridData.hpp:328
auto faceNormals(int faceIdx) const
Definition: CpGridData.hpp:354
CpGridDataTraits::RemoteIndices RemoteIndices
The type of the remote indices information.
Definition: CpGridData.hpp:654
auto cornerHistorySize() const
Definition: CpGridData.hpp:370
ParallelIndexSet & cellIndexSet()
Definition: CpGridData.hpp:672
const auto & cellToPoint(int cellIdx) const
Definition: CpGridData.hpp:338
int getMark(const cpgrid::Entity< 0 > &element) const
Return refinement mark for entity.
const ParallelIndexSet & cellIndexSet() const
Definition: CpGridData.hpp:677
CpGridDataTraits::MPICommunicator MPICommunicator
The type of the mpi communicator.
Definition: CpGridData.hpp:633
CpGridData(std::vector< std::shared_ptr< CpGridData > > &data)
Constructor.
std::tuple< const std::shared_ptr< CpGridData >, const std::vector< std::array< int, 2 > >, const std::vector< std::tuple< int, std::vector< int > > >, const std::tuple< int, std::vector< int > >, const std::vector< std::array< int, 2 > >, const std::vector< std::array< int, 2 > > > refineSingleCell(const std::array< int, 3 > &cells_per_dim, const int &parent_idx) const
Refine a single cell and return a shared pointer of CpGridData type.
void distributeGlobalGrid(CpGrid &grid, const CpGridData &view_data, const std::vector< int > &cell_part)
Redistribute a global grid.
const std::vector< std::shared_ptr< Dune::cpgrid::CpGridData > > & levelData() const
Add doc/or remove method and replace it with better approach.
Definition: CpGridData.hpp:474
const auto & cellToPoint() const
Definition: CpGridData.hpp:333
const std::vector< double > & zcornData() const
Definition: CpGridData.hpp:579
void setUniqueBoundaryIds(bool uids)
Definition: CpGridData.hpp:568
bool preAdapt()
Set mightVanish flags for elements that will be refined in the next adapt() call Need to be called af...
int getGridIdx() const
Add doc/or remove method and replace it with better approach.
Definition: CpGridData.hpp:460
void processEclipseFormat(const grdecl &input_data, std::array< std::set< std::pair< int, int > >, 2 > &nnc, bool remove_ij_boundary, bool turn_normals, bool pinchActive, double tolerance_unique_points, bool edge_conformal)
bool hasNNCs(const std::vector< int > &cellIndices) const
Check all cells selected for refinement have no NNCs (no neighbor connections). Assumption: all grid ...
bool mark(int refCount, const cpgrid::Entity< 0 > &element)
Mark entity for refinement or coarsening.
const CommunicationType & cellCommunication() const
Get the owner-overlap-copy communication for cells.
Definition: CpGridData.hpp:667
int cellFace(int cell, int local_index) const
Definition: CpGridData.hpp:323
const cpgrid::IdSet & localIdSet() const
Get the local index set.
Definition: CpGridData.hpp:592
const cpgrid::DefaultGeometryPolicy getGeometry() const
Definition: CpGridData.hpp:497
bool adapt()
TO DO: Documentation. Triggers the grid refinement process - Currently, returns preAdapt()
int faceToCellSize(int face) const
Definition: CpGridData.hpp:343
CpGridDataTraits::Communicator Communicator
The type of the Communicator.
Definition: CpGridData.hpp:642
std::array< double, 3 > computeEclCentroid(const int idx) const
const std::vector< int > & sortedNumAquiferCells() const
Get sorted active cell indices of numerical aquifer.
Definition: CpGridData.hpp:694
const RemoteIndices & cellRemoteIndices() const
Definition: CpGridData.hpp:687
CpGridData(MPIHelper::MPICommunicator comm, std::vector< std::shared_ptr< CpGridData > > &data)
Definition: DefaultGeometryPolicy.hpp:53
const EntityVariable< cpgrid::Geometry< 3 - codim, 3 >, codim > & geomVector() const
Definition: DefaultGeometryPolicy.hpp:86
Wrapper that turns a data handle suitable for dune-grid into one based on integers instead of entitie...
Definition: Entity2IndexDataHandle.hpp:56
Represents an entity of a given codim, with positive or negative orientation.
Definition: EntityRep.hpp:99
The global id set for Dune.
Definition: Indexsets.hpp:487
Only needs to provide interface for doing nothing.
Definition: Iterators.hpp:118
Definition: Indexsets.hpp:201
Definition: Indexsets.hpp:56
Definition: Intersection.hpp:66
Definition: Indexsets.hpp:371
int size() const
Returns the number of rows in the table.
Definition: SparseTable.hpp:121
Definition: PartitionTypeIndicator.hpp:50
Definition: CpGridData.hpp:995
void write(const T &data)
Definition: CpGridData.hpp:1002
void reset()
Definition: CpGridData.hpp:1006
void read(T &data)
Definition: CpGridData.hpp:998
void resize(std::size_t size)
Definition: CpGridData.hpp:1010
The namespace Dune is the main namespace for all Dune code.
Definition: common/CartesianIndexMapper.hpp:10
Dune::cpgrid::Cell2FacesContainer cell2Faces(const Dune::CpGrid &grid)
Get the cell to faces mapping of a grid.
Holds the implementation of the CpGrid as a pimple.
Definition: CellQuadrature.hpp:26
MPIHelper::MPICommunicator MPICommunicator
The type of the collective communication.
Definition: CpGridDataTraits.hpp:56
Dune::VariableSizeCommunicator<> Communicator
The type of the Communicator.
Definition: CpGridDataTraits.hpp:71
Dune::RemoteIndices< ParallelIndexSet > RemoteIndices
The type of the remote indices information.
Definition: CpGridDataTraits.hpp:83
typename CommunicationType::ParallelIndexSet ParallelIndexSet
The type of the parallel index set.
Definition: CpGridDataTraits.hpp:80
Dune::Communication< MPICommunicator > CollectiveCommunication
Definition: CpGridDataTraits.hpp:59
Dune::OwnerOverlapCopyCommunication< int, int > CommunicationType
type of OwnerOverlap communication for cells
Definition: CpGridDataTraits.hpp:77
Dune::Communication< MPICommunicator > Communication
Definition: CpGridDataTraits.hpp:58
AttributeSet
The type of the set of the attributes.
Definition: CpGridDataTraits.hpp:66
Communicator::InterfaceMap InterfaceMap
The type of the map describing communication interfaces.
Definition: CpGridDataTraits.hpp:74
Definition: CpGridData.hpp:1026
BaseMover(DataHandle &data)
Definition: CpGridData.hpp:1027
void moveData(const E &from, const E &to)
Definition: CpGridData.hpp:1031
MoveBuffer< typename DataHandle::DataType > buffer
Definition: CpGridData.hpp:1040
DataHandle & data_
Definition: CpGridData.hpp:1039
Definition: CpGridData.hpp:1046
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1052
CpGridData * scatterView_
Definition: CpGridData.hpp:1059
CpGridData * gatherView_
Definition: CpGridData.hpp:1058
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1047
Definition: CpGridData.hpp:1064
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1065
CpGridData * gatherView_
Definition: CpGridData.hpp:1082
CpGridData * scatterView_
Definition: CpGridData.hpp:1083
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1070
Definition: CpGridData.hpp:1088
CpGridData * scatterView_
Definition: CpGridData.hpp:1106
CpGridData * gatherView_
Definition: CpGridData.hpp:1105
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1089
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1093
Definition: CpGridData.hpp:1021
Definition: preprocess.h:56