BlackoilWellModelGasLift_impl.hpp
/*
  Copyright 2016 - 2019 SINTEF Digital, Mathematics & Cybernetics.
  Copyright 2016 - 2018 Equinor ASA.
  Copyright 2017 Dr. Blatt - HPC-Simulation-Software & Services
  Copyright 2016 - 2018 Norce AS

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_BLACKOILWELLMODEL_GASLIFT_IMPL_HEADER_INCLUDED
#define OPM_BLACKOILWELLMODEL_GASLIFT_IMPL_HEADER_INCLUDED

// Improve IDE experience
#ifndef OPM_BLACKOILWELLMODEL_GASLIFT_HEADER_INCLUDED
#include <config.h>
#include <opm/simulators/wells/BlackoilWellModelGasLift.hpp>
#endif
#include <opm/common/TimingMacros.hpp>

#if HAVE_MPI
#include <opm/simulators/utils/MPISerializer.hpp>
#endif

namespace Opm {

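// Typical use (a sketch only, following how BlackoilWellModel drives gas lift):
// the well model calls maybeDoGasLiftOptimize() once per Newton iteration, e.g.
//
//   const bool alq_changed =
//       gaslift_.maybeDoGasLiftOptimize(simulator, well_container, node_pressures,
//                                       /*updatePotentials=*/network_changed,
//                                       this->wellState(), this->groupState(),
//                                       local_deferredLogger);
//
// Here gaslift_, network_changed and local_deferredLogger are hypothetical names
// used only for illustration.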
template<typename TypeTag>
bool
BlackoilWellModelGasLift<TypeTag>::
maybeDoGasLiftOptimize(const Simulator& simulator,
                       const std::vector<WellInterfacePtr>& well_container,
                       const std::map<std::string, Scalar>& node_pressures,
                       const bool updatePotentials,
                       WellStateType& wellState,
                       GroupState<Scalar>& groupState,
                       DeferredLogger& deferred_logger)
{
    OPM_TIMEFUNCTION();
    const auto& glo = simulator.vanguard().schedule().glo(simulator.episodeIndex());
    if (!glo.active()) {
        return false;
    }
    bool do_glift_optimization = false;
    int num_wells_changed = 0;
    const double simulation_time = simulator.time();
    const Scalar min_wait = glo.min_wait();
    // We only optimize if the min_wait time has passed.
    // If all_newton is true we still want to optimize several times per time step,
    // i.e. we also optimize when simulation_time == last_glift_opt_time_,
    // that is, when last_glift_opt_time_ has already been updated with the current time step.
    if (simulation_time == this->last_glift_opt_time_ ||
        simulation_time >= (this->last_glift_opt_time_ + min_wait))
    {
        do_glift_optimization = true;
        this->last_glift_opt_time_ = simulation_time;
    }

    // Update the potentials if they were changed by the network
    // (they are needed by the gas lift algorithm).
    if (updatePotentials) {
        updateWellPotentials(simulator, well_container, node_pressures, wellState, deferred_logger);
    }

    if (do_glift_optimization) {
        GLiftOptWells glift_wells;
        GLiftProdWells prod_wells;
        GLiftWellStateMap state_map;
        // NOTE: To make GasLiftGroupInfo (see below) independent of the TypeTag
        // associated with *this (i.e. BlackoilWellModel<TypeTag>) we observe
        // that GasLiftGroupInfo's only dependence on *this is that it needs to
        // access the eclipse Wells in the well container (the eclipse Wells
        // themselves are independent of the TypeTag).
        // Hence, we extract them from the well container such that we can pass
        // them to the GasLiftGroupInfo constructor.
        GLiftEclWells ecl_well_map;
        initGliftEclWellMap(well_container, ecl_well_map);
        GasLiftGroupInfo group_info {
            ecl_well_map,
            simulator.vanguard().schedule(),
            simulator.vanguard().summaryState(),
            simulator.episodeIndex(),
            simulator.model().newtonMethod().numIterations(),
            deferred_logger,
            wellState,
            groupState,
            simulator.vanguard().grid().comm(),
            this->glift_debug
        };
        group_info.initialize();

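        // Stage 1: optimize the lift gas (ALQ) of each well individually while
        // also checking the group limits.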
        gasLiftOptimizationStage1(simulator,
                                  well_container,
                                  wellState,
                                  groupState,
                                  prod_wells,
                                  glift_wells,
                                  group_info,
                                  state_map,
                                  deferred_logger);

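        // Stage 2: group-level optimization that redistributes lift gas and
        // removes any surplus ALQ allocated in stage 1.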
        this->gasLiftOptimizationStage2(simulator.vanguard().gridView().comm(),
                                        simulator.vanguard().schedule(),
                                        simulator.vanguard().summaryState(),
                                        wellState,
                                        groupState,
                                        prod_wells,
                                        glift_wells,
                                        group_info,
                                        state_map,
                                        simulator.episodeIndex(),
                                        deferred_logger);

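        // In debug mode, report the current ALQ of every well through the
        // generic (TypeTag-independent) well interfaces.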
        if constexpr (glift_debug) {
            std::vector<WellInterfaceGeneric<Scalar, IndexTraits>*> wc;
            wc.reserve(well_container.size());
            std::transform(well_container.begin(), well_container.end(),
                           std::back_inserter(wc),
                           [](const auto& w)
                           { return static_cast<WellInterfaceGeneric<Scalar, IndexTraits>*>(w.get()); });
            this->gliftDebugShowALQ(wc,
                                    wellState,
                                    deferred_logger);
        }
        num_wells_changed = glift_wells.size();
    }
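    // Sum over all ranks so that every process agrees on whether any well
    // changed its lift gas allocation.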
    num_wells_changed = simulator.vanguard().gridView().comm().sum(num_wells_changed);
    return num_wells_changed > 0;
}

template<typename TypeTag>
void
BlackoilWellModelGasLift<TypeTag>::
gasLiftOptimizationStage1(const Simulator& simulator,
                          const std::vector<WellInterfacePtr>& well_container,
                          WellStateType& wellState,
                          GroupState<Scalar>& groupState,
                          GLiftProdWells& prod_wells,
                          GLiftOptWells& glift_wells,
                          GasLiftGroupInfo<Scalar, IndexTraits>& group_info,
                          GLiftWellStateMap& state_map,
                          DeferredLogger& deferred_logger)
{
    OPM_TIMEFUNCTION();
    auto comm = simulator.vanguard().grid().comm();
    int num_procs = comm.size();
    // NOTE: Gas lift optimization stage 1 seems to be difficult
    // to do in parallel since the wells are optimized on different
    // processes and each process needs to know the current ALQ allocated
    // to each group it is a member of in order to check group limits and avoid
    // allocating more ALQ than necessary. (Surplus ALQ is removed in
    // stage 2). In stage 1, as each well is adding ALQ, the current group ALQ needs
    // to be communicated to the other processes. But there is no common
    // synchronization point that all processes will reach in
    // runOptimizeLoop_() in GasLiftSingleWell.cpp.
    //
    // TODO: Maybe a better solution could be invented by distributing
    // wells according to certain parent groups. Then updated group rates
    // might not have to be communicated to the other processors.
    //
    // Currently, the best option seems to be to run this part sequentially
    // (not in parallel).
    //
    // TODO: The simplest approach seems to be if a) one process could take
    // ownership of all the wells (the union of all the wells in the
    // well_container_ of each process), then this process could do the
    // optimization while the other processes wait for it to
    // finish (e.g. comm.barrier()), or alternatively, b) all
    // processes could take ownership of all the wells. Then there
    // would be no need for synchronization here.
    //
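    // Round-robin over the ranks: rank i optimizes its local wells while the
    // other ranks wait, and the updated group rates are then broadcast from rank i.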
    for (int i = 0; i < num_procs; i++) {
        int num_rates_to_sync = 0; // communication variable
        GLiftSyncGroups groups_to_sync;
        if (comm.rank() == i) {
            // Run stage 1: optimize single wells while also checking group limits
            for (const auto& well : well_container) {
                // NOTE: Only the wells in "group_info" need to be optimized
                if (group_info.hasWell(well->name())) {
                    gasLiftOptimizationStage1SingleWell(well.get(),
                                                        simulator,
                                                        wellState,
                                                        groupState,
                                                        prod_wells,
                                                        glift_wells,
                                                        group_info,
                                                        state_map,
                                                        groups_to_sync,
                                                        deferred_logger);
                }
            }
            num_rates_to_sync = groups_to_sync.size();
        }
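        // Make the number of group rates that rank i needs to synchronize
        // known to all ranks.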
        {
            OPM_TIMEBLOCK(WaitForGasLiftSyncGroups);
            num_rates_to_sync = comm.sum(num_rates_to_sync);
        }
        if (num_rates_to_sync > 0) {
            OPM_TIMEBLOCK(GasLiftSyncGroups);
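            // Pack the rates of the groups touched by rank i; the other ranks
            // allocate matching receive buffers.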
            std::vector<int> group_indexes;
            group_indexes.reserve(num_rates_to_sync);
            std::vector<Scalar> group_alq_rates;
            group_alq_rates.reserve(num_rates_to_sync);
            std::vector<Scalar> group_oil_rates;
            group_oil_rates.reserve(num_rates_to_sync);
            std::vector<Scalar> group_gas_rates;
            group_gas_rates.reserve(num_rates_to_sync);
            std::vector<Scalar> group_water_rates;
            group_water_rates.reserve(num_rates_to_sync);
            if (comm.rank() == i) {
                for (auto idx : groups_to_sync) {
                    auto [oil_rate, gas_rate, water_rate, alq] = group_info.getRates(idx);
                    group_indexes.push_back(idx);
                    group_oil_rates.push_back(oil_rate);
                    group_gas_rates.push_back(gas_rate);
                    group_water_rates.push_back(water_rate);
                    group_alq_rates.push_back(alq);
                }
            } else {
                group_indexes.resize(num_rates_to_sync);
                group_oil_rates.resize(num_rates_to_sync);
                group_gas_rates.resize(num_rates_to_sync);
                group_water_rates.resize(num_rates_to_sync);
                group_alq_rates.resize(num_rates_to_sync);
            }
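            // Broadcast the group rates from rank i to all other ranks.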
#if HAVE_MPI
            Parallel::MpiSerializer ser(comm);
            ser.broadcast(Parallel::RootRank{i}, group_indexes, group_oil_rates,
                          group_gas_rates, group_water_rates, group_alq_rates);
#endif
            if (comm.rank() != i) {
                for (int j = 0; j < num_rates_to_sync; ++j) {
                    group_info.updateRate(group_indexes[j],
                                          group_oil_rates[j],
                                          group_gas_rates[j],
                                          group_water_rates[j],
                                          group_alq_rates[j]);
                }
            }
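            // Keep the gas lift debug counter consistent across ranks.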
            if constexpr (glift_debug) {
                int counter = 0;
                if (comm.rank() == i) {
                    counter = wellState.gliftGetDebugCounter();
                }
                counter = comm.sum(counter);
                if (comm.rank() != i) {
                    wellState.gliftSetDebugCounter(counter);
                }
            }
        }
    }
}

// NOTE: this method cannot be const since it passes this->wellState()
// (see below) to the GasLiftSingleWell constructor, which accepts WellState
// as a non-const reference.
template<typename TypeTag>
void
BlackoilWellModelGasLift<TypeTag>::
gasLiftOptimizationStage1SingleWell(WellInterface<TypeTag>* well,
                                    const Simulator& simulator,
                                    WellStateType& wellState,
                                    GroupState<Scalar>& groupState,
                                    GLiftProdWells& prod_wells,
                                    GLiftOptWells& glift_wells,
                                    GasLiftGroupInfo<Scalar, IndexTraits>& group_info,
                                    GLiftWellStateMap& state_map,
                                    GLiftSyncGroups& sync_groups,
                                    DeferredLogger& deferred_logger)
{
    OPM_TIMEFUNCTION();
    const auto& summary_state = simulator.vanguard().summaryState();
    auto glift = std::make_unique<GasLiftSingleWell<TypeTag>>(*well,
                                                              simulator,
                                                              summary_state,
                                                              deferred_logger,
                                                              wellState,
                                                              groupState,
                                                              group_info,
                                                              sync_groups,
                                                              simulator.vanguard().gridView().comm(),
                                                              this->glift_debug);
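    // If runOptimize() produced a new gas lift state, keep the optimizer and its
    // state; otherwise register the well as an ordinary production well.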
    auto state = glift->runOptimize(simulator.model().newtonMethod().numIterations());
    if (state) {
        state_map.emplace(well->name(), std::move(state));
        glift_wells.emplace(well->name(), std::move(glift));
        return;
    }
    prod_wells.insert({well->name(), well});
}

template<typename TypeTag>
void
BlackoilWellModelGasLift<TypeTag>::
initGliftEclWellMap(const std::vector<WellInterfacePtr>& well_container,
                    GLiftEclWells& ecl_well_map)
{
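    // Map each well name to its eclipse Well object and well index,
    // as expected by GasLiftGroupInfo.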
    for (const auto& well : well_container) {
        ecl_well_map.try_emplace(well->name(), &well->wellEcl(), well->indexOfWell());
    }
}

template<typename TypeTag>
void
BlackoilWellModelGasLift<TypeTag>::
updateWellPotentials(const Simulator& simulator,
                     const std::vector<WellInterfacePtr>& well_container,
                     const std::map<std::string, Scalar>& node_pressures,
                     WellStateType& wellState,
                     DeferredLogger& deferred_logger)
{
    auto well_state_copy = wellState;
    const int np = wellState.numPhases();
    std::string exc_msg;
    auto exc_type = ExceptionType::NONE;
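    // Only producers under prediction mode whose group has a network node
    // pressure get their potentials recomputed.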
    for (const auto& well : well_container) {
        if (well->isInjector() || !well->wellEcl().predictionMode())
            continue;

        const auto it = node_pressures.find(well->wellEcl().groupName());
        if (it != node_pressures.end()) {
            std::string cur_exc_msg;
            auto cur_exc_type = ExceptionType::NONE;
            try {
                std::vector<Scalar> potentials;
                well->computeWellPotentials(simulator, well_state_copy, potentials, deferred_logger);
                auto& ws = wellState.well(well->indexOfWell());
                for (int p = 0; p < np; ++p) {
                    // make sure the potentials are positive
                    ws.well_potentials[p] = std::max(Scalar{0.0}, potentials[p]);
                }
            }
            // Catch all possible exceptions and store their type and message.
            OPM_PARALLEL_CATCH_CLAUSE(cur_exc_type, cur_exc_msg);
            if (cur_exc_type != ExceptionType::NONE) {
                exc_msg += fmt::format("\nFor well {}: {}", well->name(), cur_exc_msg);
            }
            exc_type = std::max(exc_type, cur_exc_type);
        }
    }
    if (exc_type != ExceptionType::NONE) {
        const std::string msg = "Updating well potentials after network balancing failed. Continue with current potentials";
        deferred_logger.warning("WELL_POT_SOLVE_AFTER_NETWORK_FAILED", msg + exc_msg);
    }
}

} // namespace Opm