CpGridData.hpp
Go to the documentation of this file.
1//===========================================================================
2//
3// File: CpGridData.hpp
4//
5// Created: Sep 17 21:11:41 2013
6//
7// Author(s): Atgeirr F Rasmussen <atgeirr@sintef.no>
8// Bård Skaflestad <bard.skaflestad@sintef.no>
9// Markus Blatt <markus@dr-blatt.de>
10// Antonella Ritorto <antonella.ritorto@opm-op.com>
11//
12// Comment: Major parts of this file originated in dune/grid/CpGrid.hpp
13// and got transfered here during refactoring for the parallelization.
14//
15// $Date$
16//
17// $Revision$
18//
19//===========================================================================
20
21/*
22 Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
23 Copyright 2009, 2010, 2013, 2022-2023 Equinor ASA.
24 Copyright 2013 Dr. Blatt - HPC-Simulation-Software & Services
25
26 This file is part of The Open Porous Media project (OPM).
27
28 OPM is free software: you can redistribute it and/or modify
29 it under the terms of the GNU General Public License as published by
30 the Free Software Foundation, either version 3 of the License, or
31 (at your option) any later version.
32
33 OPM is distributed in the hope that it will be useful,
34 but WITHOUT ANY WARRANTY; without even the implied warranty of
35 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
36 GNU General Public License for more details.
37
38 You should have received a copy of the GNU General Public License
39 along with OPM. If not, see <http://www.gnu.org/licenses/>.
40*/
48#ifndef OPM_CPGRIDDATA_HEADER
49#define OPM_CPGRIDDATA_HEADER
50
51
52#include <dune/common/parallel/mpihelper.hh>
53#ifdef HAVE_DUNE_ISTL
54#include <dune/istl/owneroverlapcopy.hh>
55#endif
56
57#include <dune/common/parallel/communication.hh>
58#include <dune/common/parallel/variablesizecommunicator.hh>
59#include <dune/grid/common/gridenums.hh>
60
61#if HAVE_ECL_INPUT
62#include <opm/input/eclipse/EclipseState/Grid/EclipseGrid.hpp>
63#include <opm/input/eclipse/EclipseState/Grid/NNC.hpp>
64#endif
65
67
69#include "CpGridDataTraits.hpp"
70//#include "DataHandleWrappers.hpp"
71//#include "GlobalIdMapping.hpp"
72#include "Geometry.hpp"
73
74#include <array>
75#include <initializer_list>
76#include <set>
77#include <vector>
78
79namespace Opm
80{
81class EclipseState;
82}
83namespace Dune
84{
85class CpGrid;
86
87namespace cpgrid
88{
89
90class IndexSet;
91class IdSet;
92class LevelGlobalIdSet;
93class PartitionTypeIndicator;
94template<int,int> class Geometry;
95template<int> class Entity;
96template<int> class EntityRep;
97}
98}
99
101 const std::array<int, 3>&,
102 bool);
103
104namespace Dune
105{
106namespace cpgrid
107{
108namespace mover
109{
110template<class T, int i> struct Mover;
111}
112
118{
119 template<class T, int i> friend struct mover::Mover;
120 friend class GlobalIdSet;
121 friend class HierarchicIterator;
125
126 friend
128 const std::array<int, 3>&,
129 bool);
130
131private:
132 CpGridData(const CpGridData& g);
133
134public:
135 enum{
136#ifndef MAX_DATA_COMMUNICATED_PER_ENTITY
144#else
149 MAX_DATA_PER_CELL = MAX_DATA_COMMUNICATED_PER_ENTITY
150#endif
151 };
152
153 CpGridData() = delete;
154
159 explicit CpGridData(MPIHelper::MPICommunicator comm, std::vector<std::shared_ptr<CpGridData>>& data);
160
161
162
164 explicit CpGridData(std::vector<std::shared_ptr<CpGridData>>& data);
167
168
169
170
172 int size(int codim) const;
173
175 int size (GeometryType type) const
176 {
177 if (type.isCube()) {
178 return size(3 - type.dim());
179 } else {
180 return 0;
181 }
182 }
183
199 void readEclipseFormat(const std::string& filename,
200 bool periodic_extension,
201 bool turn_normals = false,
202 bool edge_conformal = false);
203
204#if HAVE_ECL_INPUT
226 void processEclipseFormat(const Opm::Deck& deck,
227 bool periodic_extension,
228 bool turn_normals = false,
229 bool clip_z = false,
230 const std::vector<double>& poreVolume = std::vector<double>{},
231 bool edge_conformal = false);
232
267 std::vector<std::size_t>
268 processEclipseFormat(const Opm::EclipseGrid* ecl_grid,
269 Opm::EclipseState* ecl_state,
270 bool periodic_extension,
271 bool turn_normals = false,
272 bool clip_z = false,
273 bool pinchActive = true,
274 bool edge_conformal = false);
275#endif
276
305 void processEclipseFormat(const grdecl& input_data,
306#if HAVE_ECL_INPUT
307 Opm::EclipseState* ecl_state,
308#endif
309 std::array<std::set<std::pair<int, int>>, 2>& nnc,
310 bool remove_ij_boundary,
311 bool turn_normals,
312 bool pinchActive,
313 double tolerance_unique_points,
314 bool edge_conformal);
315
323 void getIJK(int c, std::array<int,3>& ijk) const;
324
325 int cellFace(int cell, int local_index) const
326 {
327 return cell_to_face_[cpgrid::EntityRep<0>(cell, true)][local_index].index();
328 }
329
330 auto cellToFace(int cellIdx) const
331 {
332 return cell_to_face_[cpgrid::EntityRep<0>(cellIdx, true)];
333 }
334
335 const auto& cellToPoint() const
336 {
337 return cell_to_point_;
338 }
339
340 const auto& cellToPoint(int cellIdx) const
341 {
342 return cell_to_point_[cellIdx];
343 }
344
345 int faceToCellSize(int face) const {
346 Dune::cpgrid::EntityRep<1> faceRep(face, true);
347 return face_to_cell_[faceRep].size();
348 }
349
350 auto faceTag(int faceIdx) const
351 {
352 Dune::cpgrid::EntityRep<1> faceRep(faceIdx, true);
353 return face_tag_[faceRep];
354 }
355
356 auto faceNormals(int faceIdx) const
357 {
358 Dune::cpgrid::EntityRep<1> faceRep(faceIdx, true);
359 return face_normals_[faceRep];
360 }
361
362 auto faceToPoint(int faceIdx) const
363 {
364 return face_to_point_[faceIdx];
365 }
366
367 int numFaces() const
368 {
369 return face_to_cell_.size();
370 }
371
372 auto cornerHistorySize() const
373 {
374 return corner_history_.size();
375 }
376
377 const auto& getCornerHistory(int cornerIdx) const
378 {
379 if(cornerHistorySize()) {
380 return corner_history_[cornerIdx];
381 }
382 else {
383 OPM_THROW(std::logic_error, "Vertex has no history record.\n");
384 }
385 }
386
393 const std::vector<int>& globalCell() const
394 {
395 return global_cell_;
396 }
397
400 bool hasNNCs(const std::vector<int>& cellIndices) const;
401
414 bool mark(int refCount, const cpgrid::Entity<0>& element);
415
419 int getMark(const cpgrid::Entity<0>& element) const;
420
430 bool preAdapt();
431
433 bool adapt();
434
436 void postAdapt();
437
438private:
439 std::array<Dune::FieldVector<double,3>,8> getReferenceRefinedCorners(int idx_in_parent_cell, const std::array<int,3>& cells_per_dim) const;
440
441public:
443 int getGridIdx() const {
444 // Not the nicest way of checking if "this" points at the leaf grid view of a mixed grid (with coarse and refined cells).
445 // 1. When the grid has been refined at least onece, level_data_ptr_ ->size() >1. Therefore, there is a chance of "this" pointing at the leaf grid view.
446 // 2. Unfortunately, level_ is default initialized by 0. This implies, in particular, that if someone wants to check the value of
447 // "this->level_" when "this" points at the leaf grid view of a grid that has been refined, this value is - unfortunately - equal to 0.
448 // 3. Due to 2. we need an extra bool value to distinguish between the actual level 0 grid and such a leaf grid view (with incorrect level_ == 0). For this
449 // reason we check if child_to_parent_cells_.empty() [true for actual level 0 grid, false for the leaf grid view].
450 // --- TO BE IMPROVED ---
451 if ((level_data_ptr_ ->size() >1) && (level_ == 0) && (!child_to_parent_cells_.empty())) {
452 return level_data_ptr_->size() -1;
453 }
454 return level_;
455 }
457 const std::vector<std::shared_ptr<Dune::cpgrid::CpGridData>>& levelData() const
458 {
459 if (level_data_ptr_->empty()) {
460 OPM_THROW(std::logic_error, "Level data has not been initialized\n");
461 }
462 return *level_data_ptr_;
463 }
464
472 const std::tuple<int,std::vector<int>>& getChildrenLevelAndIndexList(int elemIdx) const {
473 return parent_to_children_cells_[elemIdx];
474 }
475
476 const std::vector<std::tuple<int,std::vector<int>>>& getParentToChildren() const {
477 return parent_to_children_cells_;
478 }
479
481 {
482 return geometry_;
483 }
484
485 int getLeafIdxFromLevelIdx(int level_cell_idx) const
486 {
487 if (level_to_leaf_cells_.empty()) {
488 OPM_THROW(std::logic_error, "Grid has no LGRs. No mapping to the leaf.\n");
489 }
490 return level_to_leaf_cells_[level_cell_idx];
491 }
492
514 std::tuple< const std::shared_ptr<CpGridData>,
515 const std::vector<std::array<int,2>>, // parent_to_refined_corners(~boundary_old_to_new_corners)
516 const std::vector<std::tuple<int,std::vector<int>>>, // parent_to_children_faces (~boundary_old_to_new_faces)
517 const std::tuple<int, std::vector<int>>, // parent_to_children_cells
518 const std::vector<std::array<int,2>>, // child_to_parent_faces
519 const std::vector<std::array<int,2>>> // child_to_parent_cells
520 refineSingleCell(const std::array<int,3>& cells_per_dim, const int& parent_idx) const;
521
 522 // @brief Compute center of an entity/element/cell in the Eclipse way:
523 // - Average of the 4 corners of the bottom face.
524 // - Average of the 4 corners of the top face.
525 // Return average of the previous computations.
526 // @param [in] int Index of a cell.
527 // @return 'eclipse centroid'
528 std::array<double,3> computeEclCentroid(const int idx) const;
529
 530 // @brief Compute center of an entity/element/cell in the Eclipse way:
531 // - Average of the 4 corners of the bottom face.
532 // - Average of the 4 corners of the top face.
533 // Return average of the previous computations.
534 // @param [in] Entity<0> Entity
535 // @return 'eclipse centroid'
536 std::array<double,3> computeEclCentroid(const Entity<0>& elem) const;
537
538 // Make unique boundary ids for all intersections.
540
544 bool uniqueBoundaryIds() const
545 {
546 return use_unique_boundary_ids_;
547 }
548
551 void setUniqueBoundaryIds(bool uids)
552 {
553 use_unique_boundary_ids_ = uids;
554 if (use_unique_boundary_ids_ && unique_boundary_ids_.empty()) {
556 }
557 }
558
562 const std::vector<double>& zcornData() const {
563 return zcorn;
564 }
565
566
569 const IndexSet& indexSet() const
570 {
571 return *index_set_;
572 }
573
576 {
577 return *local_id_set_;
578 }
579
582 {
583 return *global_id_set_;
584 }
585
589 const std::array<int, 3>& logicalCartesianSize() const
590 {
591 return logical_cartesian_size_;
592 }
593
598 const CpGridData& view_data,
599 const std::vector<int>& cell_part);
600
606 template<class DataHandle>
607 void communicate(DataHandle& data, InterfaceType iftype, CommunicationDirection dir);
608
610
612
613 void computeCommunicationInterfaces(int noexistingPoints);
614
620
623#if HAVE_MPI
626
629
632
635
638
643 {
644 return cell_comm_;
645 }
646
651 {
652 return cell_comm_;
653 }
654
656 {
657 return cellCommunication().indexSet();
658 }
659
661 {
662 return cellCommunication().indexSet();
663 }
664
666 {
667 return cellCommunication().remoteIndices();
668 }
669
671 {
672 return cellCommunication().remoteIndices();
673 }
674#endif
675
677 const std::vector<int>& sortedNumAquiferCells() const
678 {
679 return aquifer_cells_;
680 }
681
682private:
683
685 void populateGlobalCellIndexSet();
686
687#if HAVE_MPI
688
694 template<class DataHandle>
695 void gatherData(DataHandle& data, CpGridData* global_view,
696 CpGridData* distributed_view);
697
698
705 template<int codim, class DataHandle>
706 void gatherCodimData(DataHandle& data, CpGridData* global_data,
707 CpGridData* distributed_data);
708
715 template<class DataHandle>
716 void scatterData(DataHandle& data, const CpGridData* global_data,
717 const CpGridData* distributed_data, const InterfaceMap& cell_inf,
718 const InterfaceMap& point_inf);
719
727 template<int codim, class DataHandle>
728 void scatterCodimData(DataHandle& data, CpGridData* global_data,
729 CpGridData* distributed_data);
730
739 template<int codim, class DataHandle>
740 void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
741 const Interface& interface);
742
751 template<int codim, class DataHandle>
752 void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
753 const InterfaceMap& interface);
754
755#endif
756
757 void computeGeometry(const CpGrid& grid,
758 const DefaultGeometryPolicy& globalGeometry,
759 const std::vector<int>& globalAquiferCells,
760 const OrientedEntityTable<0, 1>& globalCell2Faces,
761 DefaultGeometryPolicy& geometry,
762 std::vector<int>& aquiferCells,
764 const std::vector< std::array<int,8> >& cell2Points);
765
766 // Representing the topology
778 Opm::SparseTable<int> face_to_point_;
780 std::vector< std::array<int,8> > cell_to_point_;
787 std::array<int, 3> logical_cartesian_size_{};
794 std::vector<int> global_cell_;
800 typedef FieldVector<double, 3> PointType;
804 cpgrid::EntityVariable<int, 1> unique_boundary_ids_;
806 std::unique_ptr<cpgrid::IndexSet> index_set_;
808 std::shared_ptr<const cpgrid::IdSet> local_id_set_;
810 std::shared_ptr<LevelGlobalIdSet> global_id_set_;
812 std::shared_ptr<PartitionTypeIndicator> partition_type_indicator_;
814 std::vector<int> mark_;
816 int level_{0};
818 std::vector<std::shared_ptr<CpGridData>>* level_data_ptr_;
819 // SUITABLE FOR ALL LEVELS EXCEPT FOR LEAFVIEW
821 std::vector<int> level_to_leaf_cells_; // In entry 'level cell index', we store 'leafview cell index' // {level LGR, {child0, child1, ...}}
823 std::vector<std::tuple<int,std::vector<int>>> parent_to_children_cells_; // {# children in x-direction, ... y-, ... z-}
825 std::array<int,3> cells_per_dim_;
826 // SUITABLE ONLY FOR LEAFVIEW // {level, cell index in that level}
828 std::vector<std::array<int,2>> leaf_to_level_cells_;
830 std::vector<std::array<int,2>> corner_history_;
831 // SUITABLE FOR ALL LEVELS INCLUDING LEAFVIEW // {level parent cell, parent cell index}
833 std::vector<std::array<int,2>> child_to_parent_cells_;
836 std::vector<int> cell_to_idxInParentCell_;
838 int refinement_max_level_{0};
839
841 Communication ccobj_;
842
843 // Boundary information (optional).
844 bool use_unique_boundary_ids_;
845
851 std::vector<double> zcorn;
852
854 std::vector<int> aquifer_cells_;
855
856#if HAVE_MPI
857
859 CommunicationType cell_comm_;
860
862 std::tuple<Interface,Interface,Interface,Interface,Interface> cell_interfaces_;
863 /*
864 // code deactivated, because users cannot access face indices and therefore
865 // communication on faces makes no sense!
867 std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
868 face_interfaces_;
869 */
871 std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
872 point_interfaces_;
873
874#endif
875
876 // Return the geometry vector corresponding to the given codim.
877 template <int codim>
878 const EntityVariable<Geometry<3 - codim, 3>, codim>& geomVector() const
879 {
880 return geometry_.geomVector<codim>();
881 }
882
883 friend class Dune::CpGrid;
884 template<int> friend class Entity;
885 template<int> friend class EntityRep;
886 friend class Intersection;
888};
889
890
891
892#if HAVE_MPI
893
894namespace
895{
900template<class T>
901T& getInterface(InterfaceType iftype,
902 std::tuple<T,T,T,T,T>& interfaces)
903{
904 switch(iftype)
905 {
906 case 0:
907 return std::get<0>(interfaces);
908 case 1:
909 return std::get<1>(interfaces);
910 case 2:
911 return std::get<2>(interfaces);
912 case 3:
913 return std::get<3>(interfaces);
914 case 4:
915 return std::get<4>(interfaces);
916 }
917 OPM_THROW(std::runtime_error, "Invalid Interface type was used during communication");
918}
919
920} // end unnamed namespace
921
922template<int codim, class DataHandle>
923void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
924 const Interface& interface)
925{
926 this->template communicateCodim<codim>(data, dir, interface.interfaces());
927}
928
929template<int codim, class DataHandle>
930void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data_wrapper, CommunicationDirection dir,
931 const InterfaceMap& interface)
932{
933 Communicator comm(ccobj_, interface);
934
935 if(dir==ForwardCommunication)
936 comm.forward(data_wrapper);
937 else
938 comm.backward(data_wrapper);
939}
940#endif
941
942template<class DataHandle>
943void CpGridData::communicate(DataHandle& data, InterfaceType iftype,
944 CommunicationDirection dir)
945{
946#if HAVE_MPI
947 if(data.contains(3,0))
948 {
949 Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*this, data);
950 communicateCodim<0>(data_wrapper, dir, getInterface(iftype, cell_interfaces_));
951 }
952 if(data.contains(3,3))
953 {
954 Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*this, data);
955 communicateCodim<3>(data_wrapper, dir, getInterface(iftype, point_interfaces_));
956 }
957#else
958 // Suppress warnings for unused arguments.
959 (void) data;
960 (void) iftype;
961 (void) dir;
962#endif
963}
964}}
965
966#if HAVE_MPI
969
970namespace Dune {
971namespace cpgrid {
972
973namespace mover
974{
// Sequential cursor buffer used when moving entity data between grid views:
// write() appends items; after reset() read() consumes them in the same order.
// NOTE(review): the class-head line was lost in extraction; usage elsewhere
// (mover::MoveBuffer<...>) indicates this is the MoveBuffer class.
975template<class T>
977{
979public:
    // Read the next item at the cursor into data and advance the cursor.
980 void read(T& data)
981 {
982 data=buffer_[index_++];
983 }
    // Store an item at the cursor position and advance the cursor.
984 void write(const T& data)
985 {
986 buffer_[index_++]=data;
987 }
    // Rewind the cursor to the start (e.g. switch from writing to reading).
988 void reset()
989 {
990 index_=0;
991 }
    // Resize the underlying storage and rewind the cursor.
992 void resize(std::size_t size)
993 {
994 buffer_.resize(size);
995 index_=0;
996 }
997private:
    // Backing storage and current cursor position.
998 std::vector<T> buffer_;
999 typename std::vector<T>::size_type index_;
1000};
// Primary template for data movers; intentionally empty. Only the
// specializations defined below are usable.
1001template<class DataHandle,int codim>
1002struct Mover
1003{
1004};
1005
// Common base for the Mover specializations: holds the data handle and
// moves one entity's data by gathering into a buffer and scattering it out.
// NOTE(review): the struct-head line and the buffer member declaration were
// lost in extraction; moveData() uses a member named `buffer`
// (presumably a MoveBuffer<typename DataHandle::DataType> — confirm).
1006template<class DataHandle>
1008{
1009 explicit BaseMover(DataHandle& data)
1010 : data_(data)
1011 {}
    // Copy the data attached to entity `from` onto entity `to`:
    // gather into the buffer, rewind, then scatter the same number of items.
1012 template<class E>
1013 void moveData(const E& from, const E& to)
1014 {
1015 std::size_t size=data_.size(from);
1016 buffer.resize(size);
1017 data_.gather(buffer, from);
1018 buffer.reset();
1019 data_.scatter(buffer, to, size);
1020 }
1021 DataHandle& data_;
1023};
1024
1025
// Mover for cell (codim 0) data: moves the data of one cell from the
// gather view to the corresponding cell of the scatter view.
// NOTE(review): the struct-head line (the Mover specialization for codim 0)
// was lost in extraction.
1026template<class DataHandle>
1028{
1029 Mover(DataHandle& data, CpGridData* gatherView,
1030 CpGridData* scatterView)
1031 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1032 {}
1033
    // Move the data attached to the cell with index from_cell_index in the
    // gather view onto the cell with index to_cell_index in the scatter view.
1034 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1035 {
1036 Entity<0> from_entity=Entity<0>(*gatherView_, from_cell_index, true);
1037 Entity<0> to_entity=Entity<0>(*scatterView_, to_cell_index, true);
1038 this->moveData(from_entity, to_entity);
1039 }
1042};
1043
// Mover for face (codim 1) data: for a pair of cells, moves the data of each
// face in the source cell's face row onto the face at the same local position
// of the target cell. Assumes both cells have face rows of equal length.
// NOTE(review): the struct-head line (the Mover specialization for codim 1)
// was lost in extraction.
1044template<class DataHandle>
1046{
1047 Mover(DataHandle& data, CpGridData* gatherView,
1048 CpGridData* scatterView)
1049 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1050 {}
1051
1052 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1053 {
1054 typedef typename OrientedEntityTable<0,1>::row_type row_type;
    // Face rows of the source cell (gather view) and target cell (scatter view).
1055 EntityRep<0> from_cell=EntityRep<0>(from_cell_index, true);
1056 EntityRep<0> to_cell=EntityRep<0>(to_cell_index, true);
1057 const OrientedEntityTable<0,1>& table = gatherView_->cell_to_face_;
1058 row_type from_faces=table.operator[](from_cell);
1059 row_type to_faces=scatterView_->cell_to_face_[to_cell];
1060
    // Move face data position-by-position.
1061 for(int i=0; i<from_faces.size(); ++i)
1062 this->moveData(from_faces[i], to_faces[i]);
1063 }
1066};
1067
// Mover for point (codim 3) data: moves the data of each of the 8 corner
// points of the source cell onto the corresponding corner point of the
// target cell.
// NOTE(review): the struct-head line (the Mover specialization for codim 3)
// was lost in extraction.
1068template<class DataHandle>
1070{
1071 Mover(DataHandle& data, CpGridData* gatherView,
1072 CpGridData* scatterView)
1073 : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
1074 {}
1075 void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
1076 {
    // The 8 corner-point indices of the source and target cells.
1077 const std::array<int,8>& from_cell_points=
1078 gatherView_->cell_to_point_[from_cell_index];
1079 const std::array<int,8>& to_cell_points=
1080 scatterView_->cell_to_point_[to_cell_index];
1081 for(std::size_t i=0; i<8; ++i)
1082 {
1083 this->moveData(Entity<3>(*gatherView_, from_cell_points[i], true),
1084 Entity<3>(*scatterView_, to_cell_points[i], true));
1085 }
1086 }
1089};
1090
1091} // end mover namespace
1092
1093template<class DataHandle>
1094void CpGridData::scatterData(DataHandle& data, const CpGridData* global_data,
1095 const CpGridData* distributed_data, const InterfaceMap& cell_inf,
1096 const InterfaceMap& point_inf)
1097{
1098#if HAVE_MPI
1099 if(data.contains(3,0))
1100 {
1101 Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*global_data, *distributed_data, data);
1102 communicateCodim<0>(data_wrapper, ForwardCommunication, cell_inf);
1103 }
1104 if(data.contains(3,3))
1105 {
1106 Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*global_data, *distributed_data, data);
1107 communicateCodim<3>(data_wrapper, ForwardCommunication, point_inf);
1108 }
1109#endif
1110}
1111
1112template<int codim, class DataHandle>
1113void CpGridData::scatterCodimData(DataHandle& data, CpGridData* global_data,
1114 CpGridData* distributed_data)
1115{
1116 CpGridData *gather_view, *scatter_view;
1117 gather_view=global_data;
1118 scatter_view=distributed_data;
1119
1120 mover::Mover<DataHandle,codim> mover(data, gather_view, scatter_view);
1121
1122
1123 for(auto index=distributed_data->cellIndexSet().begin(),
1124 end = distributed_data->cellIndexSet().end();
1125 index!=end; ++index)
1126 {
1127 std::size_t from=index->global();
1128 std::size_t to=index->local();
1129 mover(from,to);
1130 }
1131}
1132
1133namespace
1134{
1135
1136template<int codim, class T, class F>
1137void visitInterior(CpGridData& distributed_data, T begin, T endit, F& func)
1138{
1139 for(T it=begin; it!=endit; ++it)
1140 {
1141 Entity<codim> entity(distributed_data, it-begin, true);
1142 PartitionType pt = entity.partitionType();
1143 if(pt==Dune::InteriorEntity)
1144 {
1145 func(*it, entity);
1146 }
1147 }
1148}
1149
// Functor for visitInterior: records, for every visited owned entity, its
// global index and the number of data items the handle attaches to it.
template<class DataHandle>
struct GlobalIndexSizeGatherer
{
    GlobalIndexSizeGatherer(DataHandle& data_,
                            std::vector<int>& ownedGlobalIndices_,
                            std::vector<int>& ownedSizes_)
        : handle(data_), indices(ownedGlobalIndices_), sizes(ownedSizes_)
    {}

    template<class T, class E>
    void operator()(T& gid, E& entity)
    {
        indices.push_back(gid);
        sizes.push_back(handle.size(entity));
    }

    DataHandle& handle;          // the user's data handle
    std::vector<int>& indices;   // collected global indices of owned entities
    std::vector<int>& sizes;     // collected per-entity data sizes
};
1169
1170template<class DataHandle>
1171struct DataGatherer
1172{
1173 DataGatherer(mover::MoveBuffer<typename DataHandle::DataType>& buffer_,
1174 DataHandle& data_)
1175 : buffer(buffer_), data(data_)
1176 {}
1177
1178 template<class T, class E>
1179 void operator()(T& /* it */, E& entity)
1180 {
1181 data.gather(buffer, entity);
1182 }
1183 mover::MoveBuffer<typename DataHandle::DataType>& buffer;
1184 DataHandle& data;
1185};
1186
1187}
1188
1189template<class DataHandle>
1190void CpGridData::gatherData(DataHandle& data, CpGridData* global_data,
1191 CpGridData* distributed_data)
1192{
1193#if HAVE_MPI
1194 if(data.contains(3,0))
1195 gatherCodimData<0>(data, global_data, distributed_data);
1196 if(data.contains(3,3))
1197 gatherCodimData<3>(data, global_data, distributed_data);
1198#endif
1199}
1200
1201template<int codim, class DataHandle>
1202void CpGridData::gatherCodimData(DataHandle& data, CpGridData* global_data,
1203 CpGridData* distributed_data)
1204{
1205#if HAVE_MPI
1206 // Get the mapping to global index from the global id set
1207 const std::vector<int>& mapping =
1208 distributed_data->global_id_set_->getMapping<codim>();
1209
1210 // Get the global indices and data size for the entities whose data is
1211 // to be sent, i.e. the ones that we own.
1212 std::vector<int> owned_global_indices;
1213 std::vector<int> owned_sizes;
1214 owned_global_indices.reserve(mapping.size());
1215 owned_sizes.reserve(mapping.size());
1216
1217 GlobalIndexSizeGatherer<DataHandle> gisg(data, owned_global_indices, owned_sizes);
1218 visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gisg);
1219
1220 // communicate the number of indices that each processor sends
1221 int no_indices=owned_sizes.size();
1222 // We will take the address of the first elemet for MPI_Allgather below.
1223 // Make sure the containers have such an element.
1224 if ( owned_global_indices.empty() )
1225 owned_global_indices.resize(1);
1226 if ( owned_sizes.empty() )
1227 owned_sizes.resize(1);
1228 std::vector<int> no_indices_to_recv(distributed_data->ccobj_.size());
1229 distributed_data->ccobj_.allgather(&no_indices, 1, &(no_indices_to_recv[0]));
1230 // compute size of the vector capable for receiving all indices
1231 // and allgather the global indices and the sizes.
1232 // calculate displacements
1233 std::vector<int> displ(distributed_data->ccobj_.size()+1, 0);
1234 std::transform(displ.begin(), displ.end()-1, no_indices_to_recv.begin(), displ.begin()+1,
1235 std::plus<int>());
1236 int global_size=displ[displ.size()-1];//+no_indices_to_recv[displ.size()-1];
1237 std::vector<int> global_indices(global_size);
1238 std::vector<int> global_sizes(global_size);
1239 MPI_Allgatherv(&(owned_global_indices[0]), no_indices, MPITraits<int>::getType(),
1240 &(global_indices[0]), &(no_indices_to_recv[0]), &(displ[0]),
1241 MPITraits<int>::getType(),
1242 distributed_data->ccobj_);
1243 MPI_Allgatherv(&(owned_sizes[0]), no_indices, MPITraits<int>::getType(),
1244 &(global_sizes[0]), &(no_indices_to_recv[0]), &(displ[0]),
1245 MPITraits<int>::getType(),
1246 distributed_data->ccobj_);
1247 std::vector<int>().swap(owned_global_indices); // free data for reuse.
1248 // Compute the number of data items to send
1249 std::vector<int> no_data_send(distributed_data->ccobj_.size());
1250 for(typename std::vector<int>::iterator begin=no_data_send.begin(),
1251 i=begin, end=no_data_send.end(); i!=end; ++i)
1252 *i = std::accumulate(global_sizes.begin()+displ[i-begin],
1253 global_sizes.begin()+displ[i-begin+1], std::size_t());
1254 // free at least some memory that can be reused.
1255 std::vector<int>().swap(owned_sizes);
1256 // compute the displacements for receiving with allgatherv
1257 displ[0]=0;
1258 std::transform(displ.begin(), displ.end()-1, no_data_send.begin(), displ.begin()+1,
1259 std::plus<std::size_t>());
1260 // Compute the number of data items we will receive
1261 int no_data_recv = displ[displ.size()-1];//+global_sizes[displ.size()-1];
1262
1263 // Collect the data to send, gather it
1264 mover::MoveBuffer<typename DataHandle::DataType> local_data_buffer, global_data_buffer;
1265 if ( no_data_send[distributed_data->ccobj_.rank()] )
1266 {
1267 local_data_buffer.resize(no_data_send[distributed_data->ccobj_.rank()]);
1268 }
1269 else
1270 {
1271 local_data_buffer.resize(1);
1272 }
1273 global_data_buffer.resize(no_data_recv);
1274
1275 DataGatherer<DataHandle> gatherer(local_data_buffer, data);
1276 visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gatherer);
1277 MPI_Allgatherv(&(local_data_buffer.buffer_[0]), no_data_send[distributed_data->ccobj_.rank()],
1278 MPITraits<typename DataHandle::DataType>::getType(),
1279 &(global_data_buffer.buffer_[0]), &(no_data_send[0]), &(displ[0]),
1280 MPITraits<typename DataHandle::DataType>::getType(),
1281 distributed_data->ccobj_);
1282 Entity2IndexDataHandle<DataHandle, codim> edata(*global_data, data);
1283 int offset=0;
1284 for(int i=0; i< codim; ++i)
1285 offset+=global_data->size(i);
1286
1287 typename std::vector<int>::const_iterator s=global_sizes.begin();
1288 for(typename std::vector<int>::const_iterator i=global_indices.begin(),
1289 end=global_indices.end();
1290 i!=end; ++s, ++i)
1291 {
1292 edata.scatter(global_data_buffer, *i-offset, *s);
1293 }
1294#endif
1295}
1296
1297} // end namespace cpgrid
1298} // end namespace Dune
1299
1300#endif
1301
1302#endif
DataHandle & data
Definition: CpGridData.hpp:1165
mover::MoveBuffer< typename DataHandle::DataType > & buffer
Definition: CpGridData.hpp:1183
std::vector< int > & ownedGlobalIndices
Definition: CpGridData.hpp:1166
void refine_and_check(const Dune::cpgrid::Geometry< 3, 3 > &, const std::array< int, 3 > &, bool)
std::vector< int > & ownedSizes
Definition: CpGridData.hpp:1167
[ provides Dune::Grid ]
Definition: CpGrid.hpp:203
Struct that holds all the data needed to represent a CpGrid.
Definition: CpGridData.hpp:118
auto faceToPoint(int faceIdx) const
Definition: CpGridData.hpp:362
void postAdapt()
Clean up refinement/coarsening markers - set every element to the mark 0 which represents 'doing noth...
const cpgrid::LevelGlobalIdSet & globalIdSet() const
Get the global index set.
Definition: CpGridData.hpp:581
CpGridDataTraits::CommunicationType CommunicationType
type of OwnerOverlap communication for cells
Definition: CpGridData.hpp:631
@ MAX_DATA_PER_CELL
The maximum data items allowed per cell (DUNE < 2.5.2)
Definition: CpGridData.hpp:143
CpGridDataTraits::ParallelIndexSet ParallelIndexSet
The type of the parallel index set.
Definition: CpGridData.hpp:634
int size(GeometryType type) const
number of leaf entities per geometry type in this process
Definition: CpGridData.hpp:175
const std::array< int, 3 > & logicalCartesianSize() const
Definition: CpGridData.hpp:589
void communicate(DataHandle &data, InterfaceType iftype, CommunicationDirection dir)
communicate objects for all codims on a given level
Definition: CpGridData.hpp:943
auto faceTag(int faceIdx) const
Definition: CpGridData.hpp:350
bool uniqueBoundaryIds() const
Definition: CpGridData.hpp:544
const std::tuple< int, std::vector< int > > & getChildrenLevelAndIndexList(int elemIdx) const
Retrieves the level and child indices of a given parent cell.
Definition: CpGridData.hpp:472
std::array< double, 3 > computeEclCentroid(const Entity< 0 > &elem) const
void computeCommunicationInterfaces(int noexistingPoints)
int getLeafIdxFromLevelIdx(int level_cell_idx) const
Definition: CpGridData.hpp:485
const auto & getCornerHistory(int cornerIdx) const
Definition: CpGridData.hpp:377
RemoteIndices & cellRemoteIndices()
Definition: CpGridData.hpp:665
int size(int codim) const
number of leaf entities per codim in this process
void readEclipseFormat(const std::string &filename, bool periodic_extension, bool turn_normals=false, bool edge_conformal=false)
CpGridDataTraits::CollectiveCommunication CollectiveCommunication
Definition: CpGridData.hpp:619
const std::vector< int > & globalCell() const
Definition: CpGridData.hpp:393
CpGridDataTraits::InterfaceMap InterfaceMap
The type of the map describing communication interfaces.
Definition: CpGridData.hpp:628
CpGridDataTraits::Communication Communication
The type of the collective communication.
Definition: CpGridData.hpp:618
int numFaces() const
Definition: CpGridData.hpp:367
const std::vector< std::tuple< int, std::vector< int > > > & getParentToChildren() const
Definition: CpGridData.hpp:476
void getIJK(int c, std::array< int, 3 > &ijk) const
Extract Cartesian index triplet (i,j,k) of an active cell.
const IndexSet & indexSet() const
Definition: CpGridData.hpp:569
CommunicationType & cellCommunication()
Get the owner-overlap-copy communication for cells.
Definition: CpGridData.hpp:642
auto cellToFace(int cellIdx) const
Definition: CpGridData.hpp:330
auto faceNormals(int faceIdx) const
Definition: CpGridData.hpp:356
CpGridDataTraits::RemoteIndices RemoteIndices
The type of the remote indices information.
Definition: CpGridData.hpp:637
auto cornerHistorySize() const
Definition: CpGridData.hpp:372
ParallelIndexSet & cellIndexSet()
Definition: CpGridData.hpp:655
const auto & cellToPoint(int cellIdx) const
Definition: CpGridData.hpp:340
int getMark(const cpgrid::Entity< 0 > &element) const
Return refinement mark for entity.
const ParallelIndexSet & cellIndexSet() const
Definition: CpGridData.hpp:660
CpGridDataTraits::MPICommunicator MPICommunicator
The type of the mpi communicator.
Definition: CpGridData.hpp:616
CpGridData(std::vector< std::shared_ptr< CpGridData > > &data)
Constructor.
std::tuple< const std::shared_ptr< CpGridData >, const std::vector< std::array< int, 2 > >, const std::vector< std::tuple< int, std::vector< int > > >, const std::tuple< int, std::vector< int > >, const std::vector< std::array< int, 2 > >, const std::vector< std::array< int, 2 > > > refineSingleCell(const std::array< int, 3 > &cells_per_dim, const int &parent_idx) const
Refine a single cell and return a shared pointer of CpGridData type.
void distributeGlobalGrid(CpGrid &grid, const CpGridData &view_data, const std::vector< int > &cell_part)
Redistribute a global grid.
const std::vector< std::shared_ptr< Dune::cpgrid::CpGridData > > & levelData() const
Add doc/or remove method and replace it with better approach.
Definition: CpGridData.hpp:457
const auto & cellToPoint() const
Definition: CpGridData.hpp:335
const std::vector< double > & zcornData() const
Definition: CpGridData.hpp:562
void setUniqueBoundaryIds(bool uids)
Definition: CpGridData.hpp:551
bool preAdapt()
Set mightVanish flags for elements that will be refined in the next adapt() call Need to be called af...
int getGridIdx() const
Add doc/or remove method and replace it with better approach.
Definition: CpGridData.hpp:443
void processEclipseFormat(const grdecl &input_data, std::array< std::set< std::pair< int, int > >, 2 > &nnc, bool remove_ij_boundary, bool turn_normals, bool pinchActive, double tolerance_unique_points, bool edge_conformal)
bool hasNNCs(const std::vector< int > &cellIndices) const
Check all cells selected for refinement have no NNCs (no neighbor connections). Assumption: all grid ...
bool mark(int refCount, const cpgrid::Entity< 0 > &element)
Mark entity for refinement or coarsening.
const CommunicationType & cellCommunication() const
Get the owner-overlap-copy communication for cells.
Definition: CpGridData.hpp:650
int cellFace(int cell, int local_index) const
Definition: CpGridData.hpp:325
const cpgrid::IdSet & localIdSet() const
Get the local index set.
Definition: CpGridData.hpp:575
const cpgrid::DefaultGeometryPolicy getGeometry() const
Definition: CpGridData.hpp:480
bool adapt()
TO DO: Documentation. Triggers the grid refinement process - Currently, returns preAdapt()
int faceToCellSize(int face) const
Definition: CpGridData.hpp:345
CpGridDataTraits::Communicator Communicator
The type of the Communicator.
Definition: CpGridData.hpp:625
std::array< double, 3 > computeEclCentroid(const int idx) const
const std::vector< int > & sortedNumAquiferCells() const
Get sorted active cell indices of numerical aquifer.
Definition: CpGridData.hpp:677
const RemoteIndices & cellRemoteIndices() const
Definition: CpGridData.hpp:670
CpGridData(MPIHelper::MPICommunicator comm, std::vector< std::shared_ptr< CpGridData > > &data)
Definition: DefaultGeometryPolicy.hpp:53
const EntityVariable< cpgrid::Geometry< 3 - codim, 3 >, codim > & geomVector() const
Definition: DefaultGeometryPolicy.hpp:86
Wrapper that turns a data handle suitable for dune-grid into one based on integers instead of entitie...
Definition: Entity2IndexDataHandle.hpp:56
Represents an entity of a given codim, with positive or negative orientation.
Definition: EntityRep.hpp:99
The global id set for Dune.
Definition: Indexsets.hpp:487
Only needs to provide interface for doing nothing.
Definition: Iterators.hpp:118
Definition: Indexsets.hpp:201
Definition: Indexsets.hpp:56
Definition: Intersection.hpp:66
Definition: Indexsets.hpp:371
int size() const
Returns the number of rows in the table.
Definition: SparseTable.hpp:121
Definition: PartitionTypeIndicator.hpp:50
Definition: CpGridData.hpp:977
void write(const T &data)
Definition: CpGridData.hpp:984
void reset()
Definition: CpGridData.hpp:988
void read(T &data)
Definition: CpGridData.hpp:980
void resize(std::size_t size)
Definition: CpGridData.hpp:992
The namespace Dune is the main namespace for all Dune code.
Definition: common/CartesianIndexMapper.hpp:10
Dune::cpgrid::Cell2FacesContainer cell2Faces(const Dune::CpGrid &grid)
Get the cell to faces mapping of a grid.
Holds the implementation of the CpGrid as a pimple.
Definition: CellQuadrature.hpp:26
MPIHelper::MPICommunicator MPICommunicator
The type of the collective communication.
Definition: CpGridDataTraits.hpp:56
Dune::VariableSizeCommunicator<> Communicator
The type of the Communicator.
Definition: CpGridDataTraits.hpp:71
Dune::RemoteIndices< ParallelIndexSet > RemoteIndices
The type of the remote indices information.
Definition: CpGridDataTraits.hpp:83
typename CommunicationType::ParallelIndexSet ParallelIndexSet
The type of the parallel index set.
Definition: CpGridDataTraits.hpp:80
Dune::Communication< MPICommunicator > CollectiveCommunication
Definition: CpGridDataTraits.hpp:59
Dune::OwnerOverlapCopyCommunication< int, int > CommunicationType
type of OwnerOverlap communication for cells
Definition: CpGridDataTraits.hpp:77
Dune::Communication< MPICommunicator > Communication
Definition: CpGridDataTraits.hpp:58
AttributeSet
The type of the set of the attributes.
Definition: CpGridDataTraits.hpp:66
Communicator::InterfaceMap InterfaceMap
The type of the map describing communication interfaces.
Definition: CpGridDataTraits.hpp:74
Definition: CpGridData.hpp:1008
BaseMover(DataHandle &data)
Definition: CpGridData.hpp:1009
void moveData(const E &from, const E &to)
Definition: CpGridData.hpp:1013
MoveBuffer< typename DataHandle::DataType > buffer
Definition: CpGridData.hpp:1022
DataHandle & data_
Definition: CpGridData.hpp:1021
Definition: CpGridData.hpp:1028
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1034
CpGridData * scatterView_
Definition: CpGridData.hpp:1041
CpGridData * gatherView_
Definition: CpGridData.hpp:1040
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1029
Definition: CpGridData.hpp:1046
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1047
CpGridData * gatherView_
Definition: CpGridData.hpp:1064
CpGridData * scatterView_
Definition: CpGridData.hpp:1065
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1052
Definition: CpGridData.hpp:1070
CpGridData * scatterView_
Definition: CpGridData.hpp:1088
CpGridData * gatherView_
Definition: CpGridData.hpp:1087
Mover(DataHandle &data, CpGridData *gatherView, CpGridData *scatterView)
Definition: CpGridData.hpp:1071
void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
Definition: CpGridData.hpp:1075
Definition: CpGridData.hpp:1003
Definition: preprocess.h:56