BlackoilModelNldd.hpp
Go to the documentation of this file.
1/*
2 Copyright 2013, 2015 SINTEF ICT, Applied Mathematics.
3 Copyright 2014, 2015 Dr. Blatt - HPC-Simulation-Software & Services
4 Copyright 2014, 2015 Statoil ASA.
5 Copyright 2015 NTNU
6 Copyright 2015, 2016, 2017 IRIS AS
7
8 This file is part of the Open Porous Media project (OPM).
9
10 OPM is free software: you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation, either version 3 of the License, or
13 (at your option) any later version.
14
15 OPM is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with OPM. If not, see <http://www.gnu.org/licenses/>.
22*/
23
24#ifndef OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
25#define OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
26
27#include <dune/common/timer.hh>
28#include <dune/istl/istlexception.hh>
29
30#include <opm/common/Exceptions.hpp>
31
32#include <opm/grid/common/SubGridPart.hpp>
33
35
42
44
45#if COMPILE_GPU_BRIDGE
47#else
49#endif
51
55
58
60
61#include <fmt/format.h>
62
63#include <algorithm>
64#include <array>
65#include <cassert>
66#include <cmath>
67#include <cstddef>
68#include <filesystem>
69#include <memory>
70#include <numeric>
71#include <set>
72#include <sstream>
73#include <string>
74#include <type_traits>
75#include <utility>
76#include <vector>
77
78namespace Opm {
79
80template<class TypeTag> class BlackoilModel;
81
83template <class TypeTag>
85{
86public:
94
99
100 static constexpr int numEq = Indices::numEq;
101
// Constructor: partitions the grid into NLDD subdomains, builds per-domain
// data (entity seeds, cell lists, interior masks, SubGridPart views), wires
// the NLDD well-model adapter into the main well model, and sets up one
// local linear solver and report slot per domain.
// NOTE(review): the constructor's declaration line(s) (doc lines 102-106)
// are missing from this extraction; presumably it takes a
// BlackoilModel<TypeTag>& that is stored in model_ below — confirm upstream.
107 : model_(model)
108 , wellModel_(model.wellModel())
109 , rank_(model_.simulator().vanguard().grid().comm().rank())
110 {
111 // Create partitions.
112 const auto& [partition_vector_initial, num_domains_initial] = this->partitionCells();
113
114 int num_domains = num_domains_initial;
115 std::vector<int> partition_vector = partition_vector_initial;
116
117 // Fix-up for an extreme case: Interior cells who do not have any on-rank
118 // neighbours. Move all such cells into a single domain on this rank,
119 // and mark the domain for skipping. For what it's worth, we've seen this
120 // case occur in practice when testing on field cases.
121 bool isolated_cells = false;
122 for (auto& domainId : partition_vector) {
123 if (domainId < 0) {
124 domainId = num_domains;
125 isolated_cells = true;
126 }
127 }
128 if (isolated_cells) {
129 num_domains++;
130 }
131
132 // Set nldd handler in main well model
133 model.wellModel().setNlddAdapter(&wellModel_);
134
135 // Scan through partitioning to get correct size for each.
136 std::vector<int> sizes(num_domains, 0);
137 for (const auto& p : partition_vector) {
138 ++sizes[p];
139 }
140
141 // Set up correctly sized vectors of entity seeds and of indices for each partition.
142 using EntitySeed = typename Grid::template Codim<0>::EntitySeed;
143 std::vector<std::vector<EntitySeed>> seeds(num_domains);
144 std::vector<std::vector<int>> partitions(num_domains);
145 for (int domain = 0; domain < num_domains; ++domain) {
146 seeds[domain].resize(sizes[domain]);
147 partitions[domain].resize(sizes[domain]);
148 }
149
150 // Iterate through grid once, setting the seeds of all partitions.
151 // Note: owned cells only!
152 const auto& grid = model_.simulator().vanguard().grid();
153
154 std::vector<int> count(num_domains, 0);
155 const auto& gridView = grid.leafGridView();
156 const auto beg = gridView.template begin<0, Dune::Interior_Partition>();
157 const auto end = gridView.template end<0, Dune::Interior_Partition>();
158 int cell = 0;
159 for (auto it = beg; it != end; ++it, ++cell) {
160 const int p = partition_vector[cell];
161 seeds[p][count[p]] = it->seed();
162 partitions[p][count[p]] = cell;
163 ++count[p];
164 }
165 assert(count == sizes);
166
167 // Create the domains.
168 for (int index = 0; index < num_domains; ++index) {
169 std::vector<bool> interior(partition_vector.size(), false);
170 for (int ix : partitions[index]) {
171 interior[ix] = true;
172 }
173
174 Dune::SubGridPart<Grid> view{grid, std::move(seeds[index])};
175
176 // Mark the last domain for skipping if it contains isolated cells
177 const bool skip = isolated_cells && (index == num_domains - 1);
178 this->domains_.emplace_back(index,
179 std::move(partitions[index]),
180 std::move(interior),
181 std::move(view),
182 skip);
183 }
184
185 // Initialize storage for previous mobilities in a single flat vector
186 const auto numCells = grid.size(0);
187 previousMobilities_.resize(numCells * FluidSystem::numActivePhases(), 0.0);
188 for (const auto& domain : domains_) {
189 updateMobilities(domain);
190 }
191
192 // Initialize domain_needs_solving_ to true for all domains
193 domain_needs_solving_.resize(num_domains, true);
194
195 // Set up container for the local system matrices.
196 domain_matrices_.resize(num_domains);
197
198 // Set up container for the local linear solvers.
199 for (int index = 0; index < num_domains; ++index) {
200 // TODO: The ISTLSolver constructor will make
201 // parallel structures appropriate for the full grid
202 // only. This must be addressed before going parallel.
203 const auto& eclState = model_.simulator().vanguard().eclState();
// NOTE(review): doc line 204 is missing from this extraction; it presumably
// declares loc_param (a local copy of the linear-solver parameters) —
// confirm against the original header.
205 loc_param.is_nldd_local_solver_ = true;
206 loc_param.init(eclState.getSimulationConfig().useCPR());
207 // Override solver type with umfpack if small domain.
208 if (domains_[index].cells.size() < 200) {
209 loc_param.linsolver_ = "umfpack";
210 }
211 loc_param.linear_solver_print_json_definition_ = false;
212 const bool force_serial = true;
213 domain_linsolvers_.emplace_back(model_.simulator(), loc_param, force_serial);
214 domain_linsolvers_.back().setDomainIndex(index);
215 }
216
217 assert(int(domains_.size()) == num_domains);
218
219 domain_reports_accumulated_.resize(num_domains);
220
221 // Print domain distribution summary
// NOTE(review): doc line 222 — the call to the helper that prints the
// summary — is missing from this extraction; only its argument list
// survives below.
223 partition_vector,
224 domains_,
225 local_reports_accumulated_,
226 domain_reports_accumulated_,
227 grid,
228 wellModel_.numLocalWellsEnd());
229 }
230
// Forwards the domain list to the NLDD well-model adapter so it can
// (re)build its domain -> well mapping.
// NOTE(review): the declaration line(s) for this member function (doc lines
// 231-232) are missing from this extraction — confirm its name and
// signature against the original header.
233 {
234 // Setup domain->well mapping.
235 wellModel_.setupDomains(domains_);
236 }
237
// Perform one NLDD outer iteration: optionally delegate to plain global
// Newton for the first few iterations, do an initial global linearization
// and convergence check, then solve each subdomain independently (Jacobi
// or Gauss-Seidel ordering), synchronize overlap cells across MPI ranks,
// and finish with one global Newton step.
// NOTE(review): doc line 240 — the function's declaration line, presumably
// taking a SimulatorTimerInterface and returning a report — and doc line
// 244 — presumably declaring 'report' — are missing from this extraction.
239 template <class NonlinearSolverType>
241 NonlinearSolverType& nonlinear_solver)
242 {
243 // ----------- Set up reports and timer -----------
245 // When nldd_num_initial_newton_iter_ == 0, NLDD starts immediately but wells are still
246 // properly initialized via NewtonIterationContext::needsTimestepInit() which
247 // ensures prepareTimeStep() is called during the initial global linearization.
248 // If nldd_num_initial_newton_iter_ > 0, run global Newton iterations first.
249 // TODO: Investigate whether wells and other subsystems are correctly initialized
250 // for the following NLDD iteration.
251 if (model_.simulator().problem().iterationContext().iteration() < model_.param().nldd_num_initial_newton_iter_) {
252 report = model_.nonlinearIterationNewton(timer, nonlinear_solver);
253 return report;
254 }
255
256 model_.initialLinearization(report, model_.param().newton_min_iter_,
257 model_.param().newton_max_iter_, timer);
258
259 if (report.converged) {
260 return report;
261 }
262
263 // Remove the initial linearization entry; the final Newton step will
264 // record its own, keeping one entry per outer iteration.
265 model_.popLastConvergenceReport();
266
267 // ----------- If not converged, do an NLDD iteration -----------
268 Dune::Timer localSolveTimer;
269 Dune::Timer detailTimer;
270 localSolveTimer.start();
271 detailTimer.start();
272 auto& solution = model_.simulator().model().solution(0);
273 auto initial_solution = solution;
274 auto locally_solved = initial_solution;
275
276 // ----------- Decide on an ordering for the domains -----------
277 const auto domain_order = this->getSubdomainOrder();
278 local_reports_accumulated_.success.pre_post_time += detailTimer.stop();
279
280 // ----------- Solve each domain separately -----------
281 DeferredLogger logger;
282 std::vector<SimulatorReportSingle> domain_reports(domains_.size());
283
// NOTE(review): doc line 284 is missing from this extraction; it presumably
// opens the parallel try/catch (OPM_BEGIN_PARALLEL_TRY_CATCH) matched by
// the OPM_END_PARALLEL_TRY_CATCH below — confirm upstream.
285 for (const int domain_index : domain_order) {
286 const auto& domain = domains_[domain_index];
287 SimulatorReportSingle local_report;
288 detailTimer.reset();
289 detailTimer.start();
290
291 domain_needs_solving_[domain_index] = checkIfSubdomainNeedsSolving(domain);
292
293 updateMobilities(domain);
294
295 if (domain.skip || !domain_needs_solving_[domain_index]) {
296 local_report.skipped_domains = true;
297 local_report.converged = true;
298 domain_reports[domain.index] = local_report;
299 continue;
300 }
301 switch (model_.param().local_solve_approach_) {
// NOTE(review): doc line 302 — presumably 'case DomainSolveApproach::Jacobi:'
// — is missing from this extraction.
303 solveDomainJacobi(solution, locally_solved, local_report, logger,
304 timer, domain);
305 break;
306 default:
// NOTE(review): doc line 307 is missing from this extraction (presumably the
// Gauss-Seidel case label sharing this default branch).
308 solveDomainGaussSeidel(solution, locally_solved, local_report, logger,
309 timer, domain);
310 break;
311 }
312 // This should have updated the global matrix to be
313 // dR_i/du_j evaluated at new local solutions for
314 // i == j, at old solution for i != j.
315 if (!local_report.converged) {
316 // TODO: more proper treatment, including in parallel.
317 logger.debug(fmt::format("Convergence failure in domain {} on rank {}." , domain.index, rank_));
318 }
319 local_report.solver_time += detailTimer.stop();
320 domain_reports[domain.index] = local_report;
321 }
322 OPM_END_PARALLEL_TRY_CATCH("Unexpected exception in local domain solve: ", model_.simulator().vanguard().grid().comm());
323
324 detailTimer.reset();
325 detailTimer.start();
326 // Communicate and log all messages.
327 auto global_logger = gatherDeferredLogger(logger, model_.simulator().vanguard().grid().comm());
328 global_logger.logMessages();
329
330 // Accumulate local solve data.
331 // Putting the counts in a single array to avoid multiple
332 // comm.sum() calls. Keeping the named vars for readability.
333 std::array<int, 5> counts{ 0, 0, 0, static_cast<int>(domain_reports.size()), 0 };
334 int& num_converged = counts[0];
335 int& num_converged_already = counts[1];
336 int& num_local_newtons = counts[2];
337 int& num_domains = counts[3];
338 int& num_skipped = counts[4];
339 {
340 auto step_newtons = 0;
341 const auto dr_size = domain_reports.size();
342 for (auto i = 0*dr_size; i < dr_size; ++i) {
343 // Reset the needsSolving flag for the next iteration
344 domain_needs_solving_[i] = false;
345 const auto& dr = domain_reports[i];
346 if (dr.converged) {
347 ++num_converged;
348 if (dr.total_newton_iterations == 0) {
349 ++num_converged_already;
350 }
351 else {
352 // If we needed to solve the domain, we also solve in next iteration
353 domain_needs_solving_[i] = true;
354 }
355 }
356 if (dr.skipped_domains) {
357 ++num_skipped;
358 }
359 step_newtons += dr.total_newton_iterations;
360 // Accumulate local reports per domain
361 domain_reports_accumulated_[i] += dr;
362 // Accumulate local reports per rank
363 local_reports_accumulated_ += dr;
364 }
365 num_local_newtons = step_newtons;
366 }
367
368 if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
369 solution = locally_solved;
370 model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0);
371 }
372
373#if HAVE_MPI
374 // Communicate solutions:
375 // With multiple processes, this process' overlap (i.e. not
376 // owned) cells' solution values have been modified by local
377 // solves in the owning processes, and remain unchanged
378 // here. We must therefore receive the updated solution on the
379 // overlap cells and update their intensive quantities before
380 // we move on.
381 const auto& comm = model_.simulator().vanguard().grid().comm();
382 if (comm.size() > 1) {
383 const auto* ccomm = model_.simulator().model().newtonMethod().linearSolver().comm();
384
385 // Copy numerical values from primary vars.
386 ccomm->copyOwnerToAll(solution, solution);
387
388 // Copy flags from primary vars.
389 const std::size_t num = solution.size();
390 Dune::BlockVector<std::size_t> allmeanings(num);
391 for (std::size_t ii = 0; ii < num; ++ii) {
392 allmeanings[ii] = PVUtil::pack(solution[ii]);
393 }
394 ccomm->copyOwnerToAll(allmeanings, allmeanings);
395 for (std::size_t ii = 0; ii < num; ++ii) {
396 PVUtil::unPack(solution[ii], allmeanings[ii]);
397 }
398
399 // Update intensive quantities for our overlap values.
400 model_.simulator().model().invalidateAndUpdateIntensiveQuantitiesOverlap(/*timeIdx=*/0);
401
402 // Make total counts of domains converged.
403 comm.sum(counts.data(), counts.size());
404 }
405#endif // HAVE_MPI
406
407 const bool is_iorank = this->rank_ == 0;
408 if (is_iorank) {
409 OpmLog::debug(fmt::format(fmt::runtime("Local solves finished. Converged for {}/{} domains. {} domains were skipped. {} domains did no work. {} total local Newton iterations.\n"),
410 num_converged, num_domains, num_skipped, num_converged_already, num_local_newtons));
411 }
412 auto total_local_solve_time = localSolveTimer.stop();
413 report.local_solve_time += total_local_solve_time;
414 local_reports_accumulated_.success.total_time += total_local_solve_time;
415 local_reports_accumulated_.success.pre_post_time += detailTimer.stop();
416
417 // Finish with a global Newton step.
418 auto rep = model_.nonlinearIterationNewton(timer, nonlinear_solver);
419 report += rep;
420 if (rep.converged) {
421 report.converged = true;
422 }
423 return report;
424 }
425
// Accessor for the per-rank accumulated local-solve report.
// NOTE(review): the declaration line(s) (doc lines 426-427) are missing
// from this extraction; presumably this returns local_reports_accumulated_
// by const reference — confirm against the original header.
428 {
429 return local_reports_accumulated_;
430 }
431
433 const std::vector<SimulatorReport>& domainAccumulatedReports() const
434 {
435 // Update the number of wells for each domain that has been added to the well model at this point
436 // this is a mutable operation that updates the well counts in the domain reports.
437 const auto dr_size = domain_reports_accumulated_.size();
438 // Reset well counts before updating
439 for (auto i = 0*dr_size; i < dr_size; ++i) {
440 domain_reports_accumulated_[i].success.num_wells = 0;
441 }
442 // Update the number of wells for each domain
443 for (const auto& [wname, domain] : wellModel_.well_domain()) {
444 domain_reports_accumulated_[domain].success.num_wells++;
445 }
446 return domain_reports_accumulated_;
447 }
448
// Write this rank's cell-to-domain partition assignment to files under
// 'odir' so the NLDD decomposition can be inspected after the run.
451 void writePartitions(const std::filesystem::path& odir) const
452 {
453 const auto& grid = this->model_.simulator().vanguard().grid();
454 const auto& elementMapper = this->model_.simulator().model().elementMapper();
455 const auto& cartMapper = this->model_.simulator().vanguard().cartesianIndexMapper();
456
// NOTE(review): doc line 457 — the call to the free helper that actually
// writes the partitions — is missing from this extraction; only its
// argument list is visible below. Confirm against the original header.
458 odir,
459 domains_,
460 grid,
461 elementMapper,
462 cartMapper);
463 }
464
// Write the accumulated nonlinear (Newton) iteration counts per cell to
// files under 'odir' for post-run analysis.
466 void writeNonlinearIterationsPerCell(const std::filesystem::path& odir) const
467 {
468 const auto& grid = this->model_.simulator().vanguard().grid();
469 const auto& elementMapper = this->model_.simulator().model().elementMapper();
470 const auto& cartMapper = this->model_.simulator().vanguard().cartesianIndexMapper();
471
// NOTE(review): doc line 472 — the call to the free helper that writes the
// per-cell iteration counts — is missing from this extraction; only its
// argument list is visible below.
473 odir,
474 domains_,
475 domain_reports_accumulated_,
476 grid,
477 elementMapper,
478 cartMapper);
479 }
480
481private:
// Solve one subdomain's nonlinear system with a local Newton loop:
// (optionally) assemble wells + reservoir, check local convergence, then
// iterate linear solve -> (damped) update -> re-assembly -> convergence
// check, applying a damping factor when residual oscillation is detected.
// Timing is accumulated into local_report throughout.
// NOTE(review): doc lines 482-483 — the declaration start / return type,
// presumably 'ConvergenceReport' — are missing from this extraction.
484 solveDomain(const Domain& domain,
485 const SimulatorTimerInterface& timer,
486 SimulatorReportSingle& local_report,
487 DeferredLogger& logger,
488 const bool initial_assembly_required)
489 {
490 auto& modelSimulator = model_.simulator();
491 Dune::Timer detailTimer;
492
493 // RAII guard: local iteration context for this domain, restored on scope exit.
494 LocalContextGuard localCtxGuard(modelSimulator.problem());
495 auto& localCtx = localCtxGuard.context();
496
497 // When called, if assembly has already been performed
498 // with the initial values, we only need to check
499 // for local convergence. Otherwise, we must do a local
500 // assembly.
501 if (initial_assembly_required) {
502 detailTimer.start();
503 // TODO: we should have a beginIterationLocal function()
504 // only handling the well model for now
505 wellModel_.assemble(modelSimulator.timeStepSize(),
506 domain);
507 const double tt0 = detailTimer.stop();
508 local_report.assemble_time += tt0;
509 local_report.assemble_time_well += tt0;
510 detailTimer.reset();
511 detailTimer.start();
512 // Assemble reservoir locally.
513 this->assembleReservoirDomain(domain);
514 local_report.assemble_time += detailTimer.stop();
515 local_report.total_linearizations += 1;
516 }
517 detailTimer.reset();
518 detailTimer.start();
519 std::vector<Scalar> resnorms;
520 auto convreport = this->getDomainConvergence(domain, timer, logger, resnorms);
521 local_report.update_time += detailTimer.stop();
522 if (convreport.converged()) {
523 // TODO: set more info, timing etc.
524 local_report.converged = true;
525 return convreport;
526 }
527
528 // We have already assembled for the first iteration,
529 // but not done the Schur complement for the wells yet.
530 detailTimer.reset();
531 detailTimer.start();
532 model_.wellModel().linearizeDomain(domain,
533 modelSimulator.model().linearizer().jacobian(),
534 modelSimulator.model().linearizer().residual());
535 const double tt1 = detailTimer.stop();
536 local_report.assemble_time += tt1;
537 local_report.assemble_time_well += tt1;
538
539 // Local Newton loop.
540 const int max_iter = model_.param().max_local_solve_iterations_;
541 const auto& grid = modelSimulator.vanguard().grid();
542 double damping_factor = 1.0;
543 std::vector<std::vector<Scalar>> convergence_history;
544 convergence_history.reserve(20);
545 convergence_history.push_back(resnorms);
546 do {
547 // Solve local linear system.
548 // Note that x has full size, we expect it to be nonzero only for in-domain cells.
549 const int nc = grid.size(0);
550 BVector x(nc);
551 detailTimer.reset();
552 detailTimer.start();
553 double setup_time = 0.0;
554 try {
555 this->solveJacobianSystemDomain(domain, x, setup_time);
556 }
557 catch (const NumericalProblem& e) {
558 // Local linear solve failed - treat as domain-level failure, not global timestep cut.
559 logger.debug(fmt::format(
560 "Local linear solver failed in domain {} on rank {}: {}",
561 domain.index, rank_, e.what()));
562 // record statistics and return early
563 local_report.linear_solve_time += detailTimer.stop();
564 local_report.linear_solve_setup_time += setup_time;
565 local_report.total_linear_iterations = domain_linsolvers_[domain.index].iterations();
566 modelSimulator.problem().endIteration();
567 local_report.converged = false;
568 local_report.total_newton_iterations = localCtx.iteration();
569 local_report.total_linearizations += localCtx.iteration();
570 return convreport;
571 }
572 model_.wellModel().postSolveDomain(x, domain);
573 if (damping_factor != 1.0) {
574 x *= damping_factor;
575 }
576 local_report.linear_solve_time += detailTimer.stop();
577 local_report.linear_solve_setup_time += setup_time;
578 local_report.total_linear_iterations = domain_linsolvers_[domain.index].iterations();
579
580 // Update local solution. // TODO: x is still full size, should we optimize it?
581 detailTimer.reset();
582 detailTimer.start();
583 this->updateDomainSolution(domain, x);
584 local_report.update_time += detailTimer.stop();
585
586 // Assemble well and reservoir.
587 detailTimer.reset();
588 detailTimer.start();
589 localCtx.advanceIteration();
590 // TODO: we should have a beginIterationLocal function()
591 // only handling the well model for now
592 // Assemble reservoir locally.
593 wellModel_.assemble(modelSimulator.timeStepSize(),
594 domain);
595 const double tt3 = detailTimer.stop();
596 local_report.assemble_time += tt3;
597 local_report.assemble_time_well += tt3;
598 detailTimer.reset();
599 detailTimer.start();
600 this->assembleReservoirDomain(domain);
601 local_report.assemble_time += detailTimer.stop();
602
603 // Check for local convergence.
604 detailTimer.reset();
605 detailTimer.start();
606 resnorms.clear();
607 convreport = this->getDomainConvergence(domain, timer, logger, resnorms);
608 convergence_history.push_back(resnorms);
609 local_report.update_time += detailTimer.stop();
610
611 // apply the Schur complement of the well model to the
612 // reservoir linearized equations
613 detailTimer.reset();
614 detailTimer.start();
615 model_.wellModel().linearizeDomain(domain,
616 modelSimulator.model().linearizer().jacobian(),
617 modelSimulator.model().linearizer().residual());
618 const double tt2 = detailTimer.stop();
619 local_report.assemble_time += tt2;
620 local_report.assemble_time_well += tt2;
621
622 // Check if we should dampen. Only do so if wells are converged.
623 if (!convreport.converged() && !convreport.wellFailed()) {
624 bool oscillate = false;
625 bool stagnate = false;
626 const auto num_residuals = convergence_history.front().size();
627 detail::detectOscillations(convergence_history, localCtx.iteration(), num_residuals,
628 Scalar{0.2}, 1, oscillate, stagnate);
629 if (oscillate) {
630 damping_factor *= 0.85;
631 logger.debug(fmt::format(fmt::runtime("| Damping factor is now {}"), damping_factor));
632 }
633 }
634 } while (!convreport.converged() && localCtx.iteration() <= max_iter);
635
636 modelSimulator.problem().endIteration();
637
638 local_report.converged = convreport.converged();
639 local_report.total_newton_iterations = localCtx.iteration();
640 local_report.total_linearizations += localCtx.iteration();
641 // TODO: set more info, timing etc.
642 return convreport;
643 }
644
646 void assembleReservoirDomain(const Domain& domain)
647 {
648 OPM_TIMEBLOCK(assembleReservoirDomain);
649 // -------- Mass balance equations --------
650 model_.simulator().model().linearizer().linearizeDomain(domain);
651 }
652
654 void solveJacobianSystemDomain(const Domain& domain, BVector& global_x, double& setup_time)
655 {
656 const auto& modelSimulator = model_.simulator();
657
658 Dune::Timer perfTimer;
659 perfTimer.start();
660
661 const Mat& main_matrix = modelSimulator.model().linearizer().jacobian().istlMatrix();
662 if (domain_matrices_[domain.index]) {
663 Details::copySubMatrix(main_matrix, domain.cells, *domain_matrices_[domain.index]);
664 } else {
665 domain_matrices_[domain.index] = std::make_unique<Mat>(Details::extractMatrix(main_matrix, domain.cells));
666 }
667 auto& jac = *domain_matrices_[domain.index];
668 auto res = Details::extractVector(modelSimulator.model().linearizer().residual(),
669 domain.cells);
670 auto x = res;
671
672 // set initial guess
673 global_x = 0.0;
674 x = 0.0;
675
676 auto& linsolver = domain_linsolvers_[domain.index];
677
678 linsolver.prepare(jac, res);
679 setup_time = perfTimer.stop();
680 linsolver.setResidual(res);
681 linsolver.solve(x);
682
683 Details::setGlobal(x, domain.cells, global_x);
684 }
685
687 void updateDomainSolution(const Domain& domain, const BVector& dx)
688 {
689 OPM_TIMEBLOCK(updateDomainSolution);
690 auto& simulator = model_.simulator();
691 auto& newtonMethod = simulator.model().newtonMethod();
692 SolutionVector& solution = simulator.model().solution(/*timeIdx=*/0);
693
694 newtonMethod.update_(/*nextSolution=*/solution,
695 /*curSolution=*/solution,
696 /*update=*/dx,
697 /*resid=*/dx,
698 domain.cells); // the update routines of the black
699 // oil model do not care about the
700 // residual
701
702 // if the solution is updated, the intensive quantities need to be recalculated
703 simulator.model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
704 }
705
707 std::pair<Scalar, Scalar> localDomainConvergenceData(const Domain& domain,
708 std::vector<Scalar>& R_sum,
709 std::vector<Scalar>& maxCoeff,
710 std::vector<Scalar>& B_avg,
711 std::vector<int>& maxCoeffCell)
712 {
713 const auto& modelSimulator = model_.simulator();
714
715 Scalar pvSumLocal = 0.0;
716 Scalar numAquiferPvSumLocal = 0.0;
717 const auto& model = modelSimulator.model();
718 const auto& problem = modelSimulator.problem();
719
720 const auto& modelResid = modelSimulator.model().linearizer().residual();
721
722 ElementContext elemCtx(modelSimulator);
723 const auto& gridView = domain.view;
724 const auto& elemEndIt = gridView.template end</*codim=*/0>();
725 IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());
726
727 for (auto elemIt = gridView.template begin</*codim=*/0>();
728 elemIt != elemEndIt;
729 ++elemIt)
730 {
731 if (elemIt->partitionType() != Dune::InteriorEntity) {
732 continue;
733 }
734 const auto& elem = *elemIt;
735 elemCtx.updatePrimaryStencil(elem);
736 elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
737
738 const unsigned cell_idx = elemCtx.globalSpaceIndex(/*spaceIdx=*/0, /*timeIdx=*/0);
739 const auto& intQuants = elemCtx.intensiveQuantities(/*spaceIdx=*/0, /*timeIdx=*/0);
740 const auto& fs = intQuants.fluidState();
741
742 const auto pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
743 model.dofTotalVolume(cell_idx);
744 pvSumLocal += pvValue;
745
746 if (isNumericalAquiferCell(elem))
747 {
748 numAquiferPvSumLocal += pvValue;
749 }
750
751 model_.getMaxCoeff(cell_idx, intQuants, fs, modelResid, pvValue,
752 B_avg, R_sum, maxCoeff, maxCoeffCell);
753 }
754
755 // compute local average in terms of global number of elements
756 const int bSize = B_avg.size();
757 for ( int i = 0; i<bSize; ++i )
758 {
759 B_avg[ i ] /= Scalar(domain.cells.size());
760 }
761
762 return {pvSumLocal, numAquiferPvSumLocal};
763 }
764
// Build the reservoir-side convergence report for one domain: compute MB
// (mass balance) and CNV residual measures per component, apply possibly
// relaxed tolerances (relaxation on last local iteration, after a
// configured iteration count, or for small failing pore-volume fraction),
// and log a residual table to the deferred logger.
765 ConvergenceReport getDomainReservoirConvergence(const double reportTime,
766 const double dt,
767 const Domain& domain,
768 DeferredLogger& logger,
769 std::vector<Scalar>& B_avg,
770 std::vector<Scalar>& residual_norms)
771 {
772 using Vector = std::vector<Scalar>;
773
774 const auto& iterCtx = model_.simulator().problem().iterationContext();
775
776 const int numComp = numEq;
777 Vector R_sum(numComp, 0.0 );
778 Vector maxCoeff(numComp, std::numeric_limits<Scalar>::lowest() );
779 std::vector<int> maxCoeffCell(numComp, -1);
780 const auto [ pvSum, numAquiferPvSum]
781 = this->localDomainConvergenceData(domain, R_sum, maxCoeff, B_avg, maxCoeffCell);
782
// Fraction of (non-aquifer) pore volume in cells violating the CNV limit.
783 auto cnvErrorPvFraction = computeCnvErrorPvLocal(domain, B_avg, dt);
784 cnvErrorPvFraction /= (pvSum - numAquiferPvSum);
785
786 // For each iteration, we need to determine whether to use the relaxed tolerances.
787 // To disable the usage of relaxed tolerances, you can set the relaxed tolerances
788 // equal to the strict tolerances.
789 //
790 // If min_strict_mb_iter_ = -1 (default) we use a relaxed tolerance for the mass
791 // balance only on the last local iteration. For non-negative values we use the
792 // relaxed tolerance after the given number of iterations.
793 const int maxLocalIter = model_.param().max_local_solve_iterations_;
794
795 const bool relax_final_iteration_mb =
796 model_.param().min_strict_mb_iter_ < 0 && iterCtx.iteration() == maxLocalIter;
797
798 const bool relax_iter_mb =
799 model_.param().min_strict_mb_iter_ >= 0 &&
800 iterCtx.shouldRelax(model_.param().min_strict_mb_iter_);
801
802 const bool use_relaxed_mb = relax_final_iteration_mb || relax_iter_mb;
803
804 const Scalar tol_mb = model_.param().local_tolerance_scaling_mb_
805 * (use_relaxed_mb ? model_.param().tolerance_mb_relaxed_ : model_.param().tolerance_mb_);
806
807 // If min_strict_cnv_iter_ = -1 (default) we use a relaxed tolerance for the CNV
808 // only on the last local iteration. For non-negative values we use the relaxed
809 // tolerance after the given number of iterations. We also use relaxed tolerances
810 // for cells with total pore-volume less than relaxed_max_pv_fraction_.
811 const bool relax_final_iteration_cnv =
812 model_.param().min_strict_cnv_iter_ < 0 && iterCtx.iteration() == maxLocalIter;
813
814 const bool relax_iter_cnv =
815 model_.param().min_strict_cnv_iter_ >= 0 &&
816 iterCtx.shouldRelax(model_.param().min_strict_cnv_iter_);
817
818 const bool relax_pv_fraction_cnv =
819 cnvErrorPvFraction < model_.param().relaxed_max_pv_fraction_;
820
821 const bool use_relaxed_cnv = relax_final_iteration_cnv ||
822 relax_iter_cnv ||
823 relax_pv_fraction_cnv;
824
825 // Tighter bound for local convergence should increase the
826 // likelyhood of: local convergence => global convergence
827 const Scalar tol_cnv = model_.param().local_tolerance_scaling_cnv_
828 * (use_relaxed_cnv ? model_.param().tolerance_cnv_relaxed_
829 : model_.param().tolerance_cnv_);
830
831 // TODO: Energy-specific tolerances are not currently applied. A closer analysis
832 // is needed to determine if everything in NLDD is compatible with thermal simulations.
833
834 // Finish computation
835 std::vector<Scalar> CNV(numComp);
836 std::vector<Scalar> mass_balance_residual(numComp);
837 for (int compIdx = 0; compIdx < numComp; ++compIdx )
838 {
839 CNV[compIdx] = B_avg[compIdx] * dt * maxCoeff[compIdx];
840 mass_balance_residual[compIdx] = std::abs(B_avg[compIdx]*R_sum[compIdx]) * dt / pvSum;
841 residual_norms.push_back(CNV[compIdx]);
842 }
843
844 // Create convergence report.
845 ConvergenceReport report{reportTime};
846 using CR = ConvergenceReport;
847 for (int compIdx = 0; compIdx < numComp; ++compIdx) {
// Classify each residual pair (MB, CNV) by severity before comparing
// against the (possibly relaxed) tolerances.
848 Scalar res[2] = { mass_balance_residual[compIdx], CNV[compIdx] };
849 CR::ReservoirFailure::Type types[2] = { CR::ReservoirFailure::Type::MassBalance,
850 CR::ReservoirFailure::Type::Cnv };
851 Scalar tol[2] = { tol_mb, tol_cnv };
852 for (int ii : {0, 1}) {
853 if (std::isnan(res[ii])) {
854 report.setReservoirFailed({types[ii], CR::Severity::NotANumber, compIdx});
855 logger.debug("NaN residual for " + model_.compNames().name(compIdx) + " equation.");
856 } else if (res[ii] > model_.param().max_residual_allowed_) {
857 report.setReservoirFailed({types[ii], CR::Severity::TooLarge, compIdx});
858 logger.debug("Too large residual for " + model_.compNames().name(compIdx) + " equation.");
859 } else if (res[ii] < 0.0) {
860 report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
861 logger.debug("Negative residual for " + model_.compNames().name(compIdx) + " equation.");
862 } else if (res[ii] > tol[ii]) {
863 report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
864 }
865
866 report.setReservoirConvergenceMetric(types[ii], compIdx, res[ii], tol[ii]);
867 }
868 }
869
870 // Output of residuals. If converged at initial state, log nothing.
871 const bool converged_at_initial_state = (report.converged() && iterCtx.iteration() == 0);
872 if (!converged_at_initial_state) {
873 if (iterCtx.iteration() == 0) {
874 // Log header.
875 std::string msg = fmt::format("Domain {} on rank {}, size {}, containing cell {}\n| Iter",
876 domain.index, this->rank_, domain.cells.size(), domain.cells[0]);
877 for (int compIdx = 0; compIdx < numComp; ++compIdx) {
878 msg += " MB(";
879 msg += model_.compNames().name(compIdx)[0];
880 msg += ") ";
881 }
882 for (int compIdx = 0; compIdx < numComp; ++compIdx) {
883 msg += " CNV(";
884 msg += model_.compNames().name(compIdx)[0];
885 msg += ") ";
886 }
887 logger.debug(msg);
888 }
889 // Log convergence data.
890 std::ostringstream ss;
891 ss << "| ";
892 const std::streamsize oprec = ss.precision(3);
893 const std::ios::fmtflags oflags = ss.setf(std::ios::scientific);
894 ss << std::setw(4) << iterCtx.iteration();
895 for (int compIdx = 0; compIdx < numComp; ++compIdx) {
896 ss << std::setw(11) << mass_balance_residual[compIdx];
897 }
898 for (int compIdx = 0; compIdx < numComp; ++compIdx) {
899 ss << std::setw(11) << CNV[compIdx];
900 }
901 ss.precision(oprec);
902 ss.flags(oflags);
903 logger.debug(ss.str());
904 }
905
906 return report;
907 }
908
909 ConvergenceReport getDomainConvergence(const Domain& domain,
910 const SimulatorTimerInterface& timer,
911 DeferredLogger& logger,
912 std::vector<Scalar>& residual_norms)
913 {
914 OPM_TIMEBLOCK(getDomainConvergence);
915 std::vector<Scalar> B_avg(numEq, 0.0);
916 auto report = this->getDomainReservoirConvergence(timer.simulationTimeElapsed(),
917 timer.currentStepLength(),
918 domain,
919 logger,
920 B_avg,
921 residual_norms);
922 report += wellModel_.getWellConvergence(domain, B_avg, logger);
923 return report;
924 }
925
927 std::vector<int> getSubdomainOrder()
928 {
929 const auto& modelSimulator = model_.simulator();
930 const auto& solution = modelSimulator.model().solution(0);
931
932 std::vector<int> domain_order(domains_.size());
933 std::iota(domain_order.begin(), domain_order.end(), 0);
934
935 if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
936 // Do nothing, 0..n-1 order is fine.
937 return domain_order;
938 } else if (model_.param().local_solve_approach_ == DomainSolveApproach::GaussSeidel) {
939 // Calculate the measure used to order the domains.
940 std::vector<Scalar> measure_per_domain(domains_.size());
941 switch (model_.param().local_domains_ordering_) {
943 // Use average pressures to order domains.
944 for (const auto& domain : domains_) {
945 const Scalar press_sum =
946 std::accumulate(domain.cells.begin(), domain.cells.end(), Scalar{0},
947 [&solution](const auto acc, const auto c)
948 { return acc + solution[c][Indices::pressureSwitchIdx]; });
949 const Scalar avgpress = press_sum / domain.cells.size();
950 measure_per_domain[domain.index] = avgpress;
951 }
952 break;
953 }
955 // Use max pressures to order domains.
956 for (const auto& domain : domains_) {
957 measure_per_domain[domain.index] =
958 std::accumulate(domain.cells.begin(), domain.cells.end(), Scalar{0},
959 [&solution](const auto acc, const auto c)
960 { return std::max(acc, solution[c][Indices::pressureSwitchIdx]); });
961 }
962 break;
963 }
965 // Use maximum residual to order domains.
966 const auto& residual = modelSimulator.model().linearizer().residual();
967 const int num_vars = residual[0].size();
968 for (const auto& domain : domains_) {
969 Scalar maxres = 0.0;
970 for (const int c : domain.cells) {
971 for (int ii = 0; ii < num_vars; ++ii) {
972 maxres = std::max(maxres, std::fabs(residual[c][ii]));
973 }
974 }
975 measure_per_domain[domain.index] = maxres;
976 }
977 break;
978 }
979 } // end of switch (model_.param().local_domains_ordering_)
980
981 // Sort by largest measure, keeping index order if equal.
982 const auto& m = measure_per_domain;
983 std::stable_sort(domain_order.begin(), domain_order.end(),
984 [&m](const int i1, const int i2){ return m[i1] > m[i2]; });
985 return domain_order;
986 } else {
987 throw std::logic_error("Domain solve approach must be Jacobi or Gauss-Seidel");
988 }
989 }
990
991 template<class GlobalEqVector>
992 void solveDomainJacobi(GlobalEqVector& solution,
993 GlobalEqVector& locally_solved,
994 SimulatorReportSingle& local_report,
995 DeferredLogger& logger,
996 const SimulatorTimerInterface& timer,
997 const Domain& domain)
998 {
999 auto initial_local_well_primary_vars = wellModel_.getPrimaryVarsDomain(domain.index);
1000 auto initial_local_solution = Details::extractVector(solution, domain.cells);
1001 auto convrep = solveDomain(domain, timer, local_report, logger, false);
1002 if (local_report.converged) {
1003 auto local_solution = Details::extractVector(solution, domain.cells);
1004 Details::setGlobal(local_solution, domain.cells, locally_solved);
1005 Details::setGlobal(initial_local_solution, domain.cells, solution);
1006 model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
1007 } else {
1008 wellModel_.setPrimaryVarsDomain(domain.index, initial_local_well_primary_vars);
1009 Details::setGlobal(initial_local_solution, domain.cells, solution);
1010 model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
1011 }
1012 }
1013
1014 template<class GlobalEqVector>
1015 void solveDomainGaussSeidel(GlobalEqVector& solution,
1016 GlobalEqVector& locally_solved,
1017 SimulatorReportSingle& local_report,
1018 DeferredLogger& logger,
1019 const SimulatorTimerInterface& timer,
1020 const Domain& domain)
1021 {
1022 auto initial_local_well_primary_vars = wellModel_.getPrimaryVarsDomain(domain.index);
1023 auto initial_local_solution = Details::extractVector(solution, domain.cells);
1024 auto convrep = solveDomain(domain, timer, local_report, logger, true);
1025 if (!local_report.converged) {
1026 // We look at the detailed convergence report to evaluate
1027 // if we should accept the unconverged solution.
1028 // We do not accept a solution if the wells are unconverged.
1029 if (!convrep.wellFailed()) {
1030 // Calculare the sums of the mb and cnv failures.
1031 Scalar mb_sum = 0.0;
1032 Scalar cnv_sum = 0.0;
1033 for (const auto& rc : convrep.reservoirConvergence()) {
1035 mb_sum += rc.value();
1036 } else if (rc.type() == ConvergenceReport::ReservoirFailure::Type::Cnv) {
1037 cnv_sum += rc.value();
1038 }
1039 }
1040 // If not too high, we overrule the convergence failure.
1041 const Scalar acceptable_local_mb_sum = 1e-3;
1042 const Scalar acceptable_local_cnv_sum = 1.0;
1043 if (mb_sum < acceptable_local_mb_sum && cnv_sum < acceptable_local_cnv_sum) {
1044 local_report.converged = true;
1045 local_report.accepted_unconverged_domains += 1;
1046 logger.debug(fmt::format("Accepting solution in unconverged domain {} on rank {}.", domain.index, rank_));
1047 logger.debug(fmt::format("Value of mb_sum: {} cnv_sum: {}", mb_sum, cnv_sum));
1048 } else {
1049 logger.debug("Unconverged local solution.");
1050 }
1051 } else {
1052 logger.debug("Unconverged local solution with well convergence failures:");
1053 for (const auto& wf : convrep.wellFailures()) {
1054 logger.debug(to_string(wf));
1055 }
1056 }
1057 }
1058 if (local_report.converged) {
1059 local_report.converged_domains += 1;
1060 auto local_solution = Details::extractVector(solution, domain.cells);
1061 Details::setGlobal(local_solution, domain.cells, locally_solved);
1062 } else {
1063 local_report.unconverged_domains += 1;
1064 wellModel_.setPrimaryVarsDomain(domain.index, initial_local_well_primary_vars);
1065 Details::setGlobal(initial_local_solution, domain.cells, solution);
1066 model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
1067 }
1068 }
1069
1070 Scalar computeCnvErrorPvLocal(const Domain& domain,
1071 const std::vector<Scalar>& B_avg, double dt) const
1072 {
1073 Scalar errorPV{};
1074 const auto& simulator = model_.simulator();
1075 const auto& model = simulator.model();
1076 const auto& problem = simulator.problem();
1077 const auto& residual = simulator.model().linearizer().residual();
1078
1079 for (const int cell_idx : domain.cells) {
1080 const Scalar pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
1081 model.dofTotalVolume(cell_idx);
1082 const auto& cellResidual = residual[cell_idx];
1083 bool cnvViolated = false;
1084
1085 for (unsigned eqIdx = 0; eqIdx < cellResidual.size(); ++eqIdx) {
1086 using std::fabs;
1087 Scalar CNV = cellResidual[eqIdx] * dt * B_avg[eqIdx] / pvValue;
1088 cnvViolated = cnvViolated || (fabs(CNV) > model_.param().tolerance_cnv_);
1089 }
1090
1091 if (cnvViolated) {
1092 errorPV += pvValue;
1093 }
1094 }
1095 return errorPV;
1096 }
1097
    /// Partition the local grid cells into subdomains for the local solves.
    ///
    /// \return the result of ::Opm::partitionCells(): a pair of the
    ///         cell-to-domain index vector and the number of domains.
    decltype(auto) partitionCells() const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();

        using GridView = std::remove_cv_t<std::remove_reference_t<
            decltype(grid.leafGridView())>>;

        using Element = std::remove_cv_t<std::remove_reference_t<
            typename GridView::template Codim<0>::Entity>>;

        const auto& param = this->model_.param();

        // Control object for the Zoltan-based partitioning method.
        auto zoltan_ctrl = ZoltanPartitioningControl<Element>{};

        zoltan_ctrl.domain_imbalance = param.local_domains_partition_imbalance_;

        // Callback: grid element -> local (compressed) element index.
        zoltan_ctrl.index =
            [elementMapper = &this->model_.simulator().model().elementMapper()]
            (const Element& element)
        {
            return elementMapper->index(element);
        };

        // Callback: local element index -> global Cartesian cell index.
        zoltan_ctrl.local_to_global =
            [cartMapper = &this->model_.simulator().vanguard().cartesianIndexMapper()]
            (const int elemIdx)
        {
            return cartMapper->cartesianIndex(elemIdx);
        };

        // Forming the list of wells is expensive, so do this only if needed.
        const auto need_wells = param.local_domains_partition_method_ == "zoltan";

        const auto wells = need_wells
            ? this->model_.simulator().vanguard().schedule().getWellsatEnd()
            : std::vector<Well>{};

        // Lifetime of the temporary is extended by the const reference.
        const auto& possibleFutureConnectionSet = need_wells
            ? this->model_.simulator().vanguard().schedule().getPossibleFutureConnections()
            : std::unordered_map<std::string, std::set<int>> {};

        // If defaulted parameter for number of domains, choose a reasonable default.
        constexpr int default_cells_per_domain = 1000;
        const int num_domains = (param.num_local_domains_ > 0)
            ? param.num_local_domains_
            : detail::countGlobalCells(grid) / default_cells_per_domain;
        return ::Opm::partitionCells(param.local_domains_partition_method_,
                                     num_domains, grid.leafGridView(), wells,
                                     possibleFutureConnectionSet, zoltan_ctrl,
                                     param.local_domains_partition_well_neighbor_levels_);
    }
1149
1150 void updateMobilities(const Domain& domain)
1151 {
1152 if (domain.skip || model_.param().nldd_relative_mobility_change_tol_ == 0.0) {
1153 return;
1154 }
1155 const auto numActivePhases = FluidSystem::numActivePhases();
1156 for (const auto globalDofIdx : domain.cells) {
1157 const auto& intQuants = model_.simulator().model().intensiveQuantities(globalDofIdx, /* time_idx = */ 0);
1158
1159 for (unsigned activePhaseIdx = 0; activePhaseIdx < numActivePhases; ++activePhaseIdx) {
1160 const auto phaseIdx = FluidSystem::activeToCanonicalPhaseIdx(activePhaseIdx);
1161 const auto mobIdx = globalDofIdx * numActivePhases + activePhaseIdx;
1162 previousMobilities_[mobIdx] = getValue(intQuants.mobility(phaseIdx));
1163 }
1164 }
1165 }
1166
1167 bool checkIfSubdomainNeedsSolving(const Domain& domain)
1168 {
1169 if (domain.skip) {
1170 return false;
1171 }
1172
1173 // If we domain was marked as needing solving in previous iterations,
1174 // we do not need to check again
1175 if (domain_needs_solving_[domain.index]) {
1176 return true;
1177 }
1178
1179 // If we do not check for mobility changes, we need to solve the domain
1180 if (model_.param().nldd_relative_mobility_change_tol_ == 0.0) {
1181 return true;
1182 }
1183
1184 // Skip mobility check on first global iteration (no previous mobilities to compare)
1185 if (model_.simulator().problem().iterationContext().isFirstGlobalIteration()) {
1186 return true;
1187 }
1188
1189 return checkSubdomainChangeRelative(domain);
1190 }
1191
1192 bool checkSubdomainChangeRelative(const Domain& domain)
1193 {
1194 const auto numActivePhases = FluidSystem::numActivePhases();
1195
1196 // Check mobility changes for all cells in the domain
1197 for (const auto globalDofIdx : domain.cells) {
1198 const auto& intQuants = model_.simulator().model().intensiveQuantities(globalDofIdx, /* time_idx = */ 0);
1199
1200 // Calculate average previous mobility for normalization
1201 Scalar cellMob = 0.0;
1202 for (unsigned activePhaseIdx = 0; activePhaseIdx < numActivePhases; ++activePhaseIdx) {
1203 const auto mobIdx = globalDofIdx * numActivePhases + activePhaseIdx;
1204 cellMob += previousMobilities_[mobIdx] / numActivePhases;
1205 }
1206
1207 // Check relative changes for each phase
1208 for (unsigned activePhaseIdx = 0; activePhaseIdx < numActivePhases; ++activePhaseIdx) {
1209 const auto phaseIdx = FluidSystem::activeToCanonicalPhaseIdx(activePhaseIdx);
1210 const auto mobIdx = globalDofIdx * numActivePhases + activePhaseIdx;
1211 const auto mobility = getValue(intQuants.mobility(phaseIdx));
1212 const auto relDiff = std::abs(mobility - previousMobilities_[mobIdx]) / cellMob;
1213 if (relDiff > model_.param().nldd_relative_mobility_change_tol_) {
1214 return true;
1215 }
1216 }
1217 }
1218 return false;
1219 }
1220
    // The black-oil model this NLDD solver wraps; also used to reach the
    // simulator, parameters, and grid.
    BlackoilModel<TypeTag>& model_;
    // NLDD-specific well model (per-domain well primary variables etc.).
    BlackoilWellModelNldd<TypeTag> wellModel_;
    // The subdomains resulting from partitionCells().
    std::vector<Domain> domains_;
    // Matrices for the per-domain linear systems, one entry per domain.
    std::vector<std::unique_ptr<Mat>> domain_matrices_;
    // Linear solvers for the per-domain systems, one entry per domain.
    std::vector<ISTLSolverType> domain_linsolvers_;
    // Statistics of local solves accumulated for this rank.
    SimulatorReport local_reports_accumulated_;
    // mutable because we need to update the number of wells for each domain in getDomainAccumulatedReports()
    mutable std::vector<SimulatorReport> domain_reports_accumulated_;
    // MPI rank of this process (from the grid communicator).
    int rank_ = 0;
    // Store previous mobilities to check for changes - single flat vector indexed by (globalCellIdx * numActivePhases + activePhaseIdx)
    std::vector<Scalar> previousMobilities_;
    // Flag indicating if this domain should be solved in the next iteration
    std::vector<bool> domain_needs_solving_;
1234};
1235
1236} // namespace Opm
1237
1238#endif // OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
#define OPM_END_PARALLEL_TRY_CATCH(prefix, comm)
Catch exception and throw in a parallel try-catch clause.
Definition: DeferredLoggingErrorHelpers.hpp:192
#define OPM_BEGIN_PARALLEL_TRY_CATCH()
Macro to setup the try of a parallel try-catch.
Definition: DeferredLoggingErrorHelpers.hpp:158
A NLDD implementation for three-phase black oil.
Definition: BlackoilModelNldd.hpp:85
GetPropType< TypeTag, Properties::SolutionVector > SolutionVector
Definition: BlackoilModelNldd.hpp:93
GetPropType< TypeTag, Properties::Indices > Indices
Definition: BlackoilModelNldd.hpp:90
const std::vector< SimulatorReport > & domainAccumulatedReports() const
return the statistics of local solves accumulated for each domain on this rank
Definition: BlackoilModelNldd.hpp:433
void writePartitions(const std::filesystem::path &odir) const
Definition: BlackoilModelNldd.hpp:451
BlackoilModelNldd(BlackoilModel< TypeTag > &model)
The constructor sets up the subdomains.
Definition: BlackoilModelNldd.hpp:106
SimulatorReportSingle nonlinearIterationNldd(const SimulatorTimerInterface &timer, NonlinearSolverType &nonlinear_solver)
Do one non-linear NLDD iteration.
Definition: BlackoilModelNldd.hpp:240
GetPropType< TypeTag, Properties::Scalar > Scalar
Definition: BlackoilModelNldd.hpp:91
typename BlackoilModel< TypeTag >::Mat Mat
Definition: BlackoilModelNldd.hpp:98
const SimulatorReport & localAccumulatedReports() const
return the statistics of local solves accumulated for this rank
Definition: BlackoilModelNldd.hpp:427
SubDomain< Grid > Domain
Definition: BlackoilModelNldd.hpp:96
GetPropType< TypeTag, Properties::ElementContext > ElementContext
Definition: BlackoilModelNldd.hpp:87
GetPropType< TypeTag, Properties::FluidSystem > FluidSystem
Definition: BlackoilModelNldd.hpp:88
void prepareStep()
Called before starting a time step.
Definition: BlackoilModelNldd.hpp:232
void writeNonlinearIterationsPerCell(const std::filesystem::path &odir) const
Write the number of nonlinear iterations per cell to a file in ResInsight compatible format.
Definition: BlackoilModelNldd.hpp:466
static constexpr int numEq
Definition: BlackoilModelNldd.hpp:100
GetPropType< TypeTag, Properties::Grid > Grid
Definition: BlackoilModelNldd.hpp:89
typename BlackoilModel< TypeTag >::BVector BVector
Definition: BlackoilModelNldd.hpp:95
Definition: BlackoilModel.hpp:61
BlackoilWellModel< TypeTag > & wellModel()
return the StandardWells object
Definition: BlackoilModel.hpp:304
typename SparseMatrixAdapter::IstlMatrix Mat
Definition: BlackoilModel.hpp:108
Dune::BlockVector< VectorBlockType > BVector
Definition: BlackoilModel.hpp:109
Definition: ConvergenceReport.hpp:38
Definition: DeferredLogger.hpp:57
void debug(const std::string &tag, const std::string &message)
Definition: ISTLSolver.hpp:149
Definition: NewtonIterationContext.hpp:152
Interface class for SimulatorTimer objects, to be improved.
Definition: SimulatorTimerInterface.hpp:34
Vector extractVector(const Vector &x, const std::vector< int > &indices)
Definition: extractMatrix.hpp:104
void copySubMatrix(const Matrix &A, const std::vector< int > &indices, Matrix &B)
Definition: extractMatrix.hpp:35
void setGlobal(const Vector &x, const std::vector< int > &indices, Vector &global_x)
Definition: extractMatrix.hpp:115
Matrix extractMatrix(const Matrix &m, const std::vector< int > &indices)
Definition: extractMatrix.hpp:47
void unPack(PV &privar, const std::size_t meanings)
Definition: priVarsPacking.hpp:41
std::size_t pack(const PV &privar)
Definition: priVarsPacking.hpp:31
std::size_t countGlobalCells(const Grid &grid)
Get the number of cells of a global grid.
Definition: countGlobalCells.hpp:80
void detectOscillations(const std::vector< std::vector< Scalar > > &residualHistory, const int it, const int numPhases, const Scalar relaxRelTol, const int minimumOscillatingPhases, bool &oscillate, bool &stagnate)
Detect oscillation or stagnation in a given residual history.
Definition: blackoilbioeffectsmodules.hh:45
Opm::DeferredLogger gatherDeferredLogger(const Opm::DeferredLogger &local_deferredlogger, Parallel::Communication communicator)
Create a global log combining local logs.
std::pair< std::vector< int >, int > partitionCells(const std::string &method, const int num_local_domains, const GridView &grid_view, const std::vector< Well > &wells, const std::unordered_map< std::string, std::set< int > > &possibleFutureConnections, const ZoltanPartitioningControl< Element > &zoltan_ctrl, const int num_neighbor_levels)
void printDomainDistributionSummary(const std::vector< int > &partition_vector, const std::vector< Domain > &domains, SimulatorReport &local_reports_accumulated, std::vector< SimulatorReport > &domain_reports_accumulated, const Grid &grid, int num_wells)
Definition: NlddReporting.hpp:219
typename Properties::Detail::GetPropImpl< TypeTag, Property >::type::type GetPropType
get the type alias defined in the property (equivalent to old macro GET_PROP_TYPE(....
Definition: propertysystem.hh:233
void writePartitions(const std::filesystem::path &odir, const std::vector< Domain > &domains, const Grid &grid, const ElementMapper &elementMapper, const CartMapper &cartMapper)
Definition: NlddReporting.hpp:164
std::string to_string(const ConvergenceReport::ReservoirFailure::Type t)
void writeNonlinearIterationsPerCell(const std::filesystem::path &odir, const std::vector< Domain > &domains, const std::vector< SimulatorReport > &domain_reports, const Grid &grid, const ElementMapper &elementMapper, const CartMapper &cartMapper)
Definition: NlddReporting.hpp:104
Solver parameters for the BlackoilModel.
Definition: BlackoilModelParameters.hpp:194
This class carries all parameters for the NewtonIterationBlackoilInterleaved class.
Definition: FlowLinearSolverParameters.hpp:98
bool is_nldd_local_solver_
Definition: FlowLinearSolverParameters.hpp:112
void init(bool cprRequestedInDataFile)
bool linear_solver_print_json_definition_
Definition: FlowLinearSolverParameters.hpp:114
std::string linsolver_
Definition: FlowLinearSolverParameters.hpp:113
Definition: SimulatorReport.hpp:122
SimulatorReportSingle success
Definition: SimulatorReport.hpp:123
A struct for returning timing data from a simulator to its caller.
Definition: SimulatorReport.hpp:34
double linear_solve_time
Definition: SimulatorReport.hpp:43
int skipped_domains
Definition: SimulatorReport.hpp:71
double assemble_time
Definition: SimulatorReport.hpp:39
double assemble_time_well
Definition: SimulatorReport.hpp:41
double solver_time
Definition: SimulatorReport.hpp:38
bool converged
Definition: SimulatorReport.hpp:55
double pre_post_time
Definition: SimulatorReport.hpp:40
double total_time
Definition: SimulatorReport.hpp:37
double linear_solve_setup_time
Definition: SimulatorReport.hpp:42
unsigned int total_newton_iterations
Definition: SimulatorReport.hpp:50
double update_time
Definition: SimulatorReport.hpp:45
double local_solve_time
Definition: SimulatorReport.hpp:44
unsigned int total_linearizations
Definition: SimulatorReport.hpp:49
unsigned int total_linear_iterations
Definition: SimulatorReport.hpp:51
Definition: SubDomain.hpp:85