BlackoilModelNldd.hpp
/*
  Copyright 2013, 2015 SINTEF ICT, Applied Mathematics.
  Copyright 2014, 2015 Dr. Blatt - HPC-Simulation-Software & Services
  Copyright 2014, 2015 Statoil ASA.
  Copyright 2015 NTNU
  Copyright 2015, 2016, 2017 IRIS AS

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
#define OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED

#include <dune/common/timer.hh>

#include <opm/common/OpmLog/OpmLog.hpp>
#include <opm/common/TimingMacros.hpp>

#include <opm/grid/common/SubGridPart.hpp>

#include <opm/simulators/aquifers/AquiferGridUtils.hpp>

#include <opm/simulators/flow/BlackoilModelParameters.hpp>
#include <opm/simulators/flow/countGlobalCells.hpp>
#include <opm/simulators/flow/NlddReporting.hpp>
#include <opm/simulators/flow/NonlinearSolver.hpp>
#include <opm/simulators/flow/partitionCells.hpp>
#include <opm/simulators/flow/priVarsPacking.hpp>
#include <opm/simulators/flow/SubDomain.hpp>

#include <opm/simulators/linalg/extractMatrix.hpp>
#include <opm/simulators/linalg/FlowLinearSolverParameters.hpp>

#if COMPILE_GPU_BRIDGE
#include <opm/simulators/linalg/ISTLSolverGpuBridge.hpp>
#else
#include <opm/simulators/linalg/ISTLSolver.hpp>
#endif

#include <opm/simulators/timestepping/ConvergenceReport.hpp>
#include <opm/simulators/timestepping/SimulatorReport.hpp>
#include <opm/simulators/timestepping/SimulatorTimerInterface.hpp>

#include <opm/simulators/utils/DeferredLogger.hpp>
#include <opm/simulators/utils/gatherDeferredLogger.hpp>

#include <opm/simulators/wells/BlackoilWellModelNldd.hpp>

#include <fmt/format.h>

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <filesystem>
#include <iomanip>
#include <limits>
#include <memory>
#include <numeric>
#include <set>
#include <sstream>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>

namespace Opm {

template<class TypeTag> class BlackoilModel;

/// A NLDD implementation for three-phase black oil.
template <class TypeTag>
class BlackoilModelNldd
{
public:
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
    using FluidSystem = GetPropType<TypeTag, Properties::FluidSystem>;
    using Grid = GetPropType<TypeTag, Properties::Grid>;
    using Indices = GetPropType<TypeTag, Properties::Indices>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using SolutionVector = GetPropType<TypeTag, Properties::SolutionVector>;

    using BVector = typename BlackoilModel<TypeTag>::BVector;
    using Domain = SubDomain<Grid>;
    using ISTLSolverType = ISTLSolver<TypeTag>;
    using Mat = typename BlackoilModel<TypeTag>::Mat;

    static constexpr int numEq = Indices::numEq;

    //! \brief The constructor sets up the subdomains.
    BlackoilModelNldd(BlackoilModel<TypeTag>& model)
        : model_(model)
        , wellModel_(model.wellModel())
        , rank_(model_.simulator().vanguard().grid().comm().rank())
    {
        // Create partitions.
        const auto& [partition_vector_initial, num_domains_initial] = this->partitionCells();

        int num_domains = num_domains_initial;
        std::vector<int> partition_vector = partition_vector_initial;

        // Fix-up for an extreme case: interior cells that do not have any on-rank
        // neighbours. Move all such cells into a single domain on this rank,
        // and mark that domain for skipping. For what it's worth, we have seen
        // this case occur in practice when testing on field cases.
        bool isolated_cells = false;
        for (auto& domainId : partition_vector) {
            if (domainId < 0) {
                domainId = num_domains;
                isolated_cells = true;
            }
        }
        if (isolated_cells) {
            num_domains++;
        }
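
        // Worked example (hypothetical numbers): with num_domains == 2 and
        // partition_vector == {0, 1, -1, 0, -1}, the two isolated cells are
        // reassigned to a fresh domain 2, giving {0, 1, 2, 0, 2} and
        // num_domains == 3; that last domain is constructed with skip == true
        // further down.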

        // Set the NLDD handler in the main well model.
        model.wellModel().setNlddAdapter(&wellModel_);

        // Scan through the partitioning to get the correct size of each domain.
        std::vector<int> sizes(num_domains, 0);
        for (const auto& p : partition_vector) {
            ++sizes[p];
        }

        // Set up correctly sized vectors of entity seeds and of indices for each partition.
        using EntitySeed = typename Grid::template Codim<0>::EntitySeed;
        std::vector<std::vector<EntitySeed>> seeds(num_domains);
        std::vector<std::vector<int>> partitions(num_domains);
        for (int domain = 0; domain < num_domains; ++domain) {
            seeds[domain].resize(sizes[domain]);
            partitions[domain].resize(sizes[domain]);
        }

        // Iterate through the grid once, setting the seeds of all partitions.
        // Note: owned cells only!
        const auto& grid = model_.simulator().vanguard().grid();

        std::vector<int> count(num_domains, 0);
        const auto& gridView = grid.leafGridView();
        const auto beg = gridView.template begin<0, Dune::Interior_Partition>();
        const auto end = gridView.template end<0, Dune::Interior_Partition>();
        int cell = 0;
        for (auto it = beg; it != end; ++it, ++cell) {
            const int p = partition_vector[cell];
            seeds[p][count[p]] = it->seed();
            partitions[p][count[p]] = cell;
            ++count[p];
        }
        assert(count == sizes);

        // Create the domains.
        for (int index = 0; index < num_domains; ++index) {
            std::vector<bool> interior(partition_vector.size(), false);
            for (int ix : partitions[index]) {
                interior[ix] = true;
            }

            Dune::SubGridPart<Grid> view{grid, std::move(seeds[index])};

            // Mark the last domain for skipping if it contains the isolated cells.
            const bool skip = isolated_cells && (index == num_domains - 1);
            this->domains_.emplace_back(index,
                                        std::move(partitions[index]),
                                        std::move(interior),
                                        std::move(view),
                                        skip);
        }

        // Initialize storage for previous mobilities in a single flat vector.
        const auto numCells = grid.size(0);
        previousMobilities_.resize(numCells * FluidSystem::numActivePhases(), 0.0);
        for (const auto& domain : domains_) {
            updateMobilities(domain);
        }

        // Initialize domain_needs_solving_ to true for all domains.
        domain_needs_solving_.resize(num_domains, true);

        // Set up the container for the local system matrices.
        domain_matrices_.resize(num_domains);

        // Set up the container for the local linear solvers.
        for (int index = 0; index < num_domains; ++index) {
            // TODO: The ISTLSolver constructor will make
            // parallel structures appropriate for the full grid
            // only. This must be addressed before going parallel.
            const auto& eclState = model_.simulator().vanguard().eclState();
            FlowLinearSolverParameters loc_param;
            loc_param.is_nldd_local_solver_ = true;
            loc_param.init(eclState.getSimulationConfig().useCPR());
            // Override the solver type with umfpack if the domain is small.
            if (domains_[index].cells.size() < 200) {
                loc_param.linsolver_ = "umfpack";
            }
            loc_param.linear_solver_print_json_definition_ = false;
            const bool force_serial = true;
            domain_linsolvers_.emplace_back(model_.simulator(), loc_param, force_serial);
            domain_linsolvers_.back().setDomainIndex(index);
        }

        assert(int(domains_.size()) == num_domains);

        domain_reports_accumulated_.resize(num_domains);

        // Print a domain distribution summary.
        printDomainDistributionSummary(partition_vector,
                                       domains_,
                                       local_reports_accumulated_,
                                       domain_reports_accumulated_,
                                       grid,
                                       wellModel_.numLocalWellsEnd());
    }

    //! \brief Called before starting a time step.
    void prepareStep()
    {
        // Set up the domain->well mapping.
        wellModel_.setupDomains(domains_);
    }

    //! \brief Do one non-linear NLDD iteration.
    template <class NonlinearSolverType>
    SimulatorReportSingle nonlinearIterationNldd(const int iteration,
                                                 const SimulatorTimerInterface& timer,
                                                 NonlinearSolverType& nonlinear_solver)
    {
        // ----------- Set up reports and timer -----------
        SimulatorReportSingle report;

        if (iteration < model_.param().nldd_num_initial_newton_iter_) {
            report = model_.nonlinearIterationNewton(iteration,
                                                     timer,
                                                     nonlinear_solver);
            return report;
        }

        model_.initialLinearization(report, iteration, model_.param().newton_min_iter_,
                                    model_.param().newton_max_iter_, timer);

        if (report.converged) {
            return report;
        }

        // ----------- If not converged, do an NLDD iteration -----------
        Dune::Timer localSolveTimer;
        Dune::Timer detailTimer;
        localSolveTimer.start();
        detailTimer.start();
        auto& solution = model_.simulator().model().solution(0);
        auto initial_solution = solution;
        auto locally_solved = initial_solution;

        // ----------- Decide on an ordering for the domains -----------
        const auto domain_order = this->getSubdomainOrder();
        local_reports_accumulated_.success.pre_post_time += detailTimer.stop();

        // ----------- Solve each domain separately -----------
        DeferredLogger logger;
        std::vector<SimulatorReportSingle> domain_reports(domains_.size());
        for (const int domain_index : domain_order) {
            const auto& domain = domains_[domain_index];
            SimulatorReportSingle local_report;
            detailTimer.reset();
            detailTimer.start();

            domain_needs_solving_[domain_index] = checkIfSubdomainNeedsSolving(domain, iteration);

            updateMobilities(domain);

            if (domain.skip || !domain_needs_solving_[domain_index]) {
                local_report.skipped_domains = true;
                local_report.converged = true;
                domain_reports[domain.index] = local_report;
                continue;
            }
            try {
                switch (model_.param().local_solve_approach_) {
                case DomainSolveApproach::Jacobi:
                    solveDomainJacobi(solution, locally_solved, local_report, logger,
                                      iteration, timer, domain);
                    break;
                default:
                case DomainSolveApproach::GaussSeidel:
                    solveDomainGaussSeidel(solution, locally_solved, local_report, logger,
                                           iteration, timer, domain);
                    break;
                }
            }
            catch (...) {
                // Something went wrong during the local solves.
                local_report.converged = false;
            }
            // This should have updated the global matrix to be
            // dR_i/du_j evaluated at the new local solutions for
            // i == j, and at the old solution for i != j.
            if (!local_report.converged) {
                // TODO: more proper treatment, including in parallel.
                logger.debug(fmt::format("Convergence failure in domain {} on rank {}.", domain.index, rank_));
            }
            local_report.solver_time += detailTimer.stop();
            domain_reports[domain.index] = local_report;
        }
        detailTimer.reset();
        detailTimer.start();
        // Communicate and log all messages.
        auto global_logger = gatherDeferredLogger(logger, model_.simulator().vanguard().grid().comm());
        global_logger.logMessages();

        // Accumulate local solve data.
        // Putting the counts in a single array to avoid multiple
        // comm.sum() calls. Keeping the named vars for readability.
        std::array<int, 5> counts{ 0, 0, 0, static_cast<int>(domain_reports.size()), 0 };
        int& num_converged = counts[0];
        int& num_converged_already = counts[1];
        int& num_local_newtons = counts[2];
        int& num_domains = counts[3];
        int& num_skipped = counts[4];
        {
            auto step_newtons = 0;
            const auto dr_size = domain_reports.size();
            for (auto i = 0*dr_size; i < dr_size; ++i) {
                // Reset the needs-solving flag for the next iteration.
                domain_needs_solving_[i] = false;
                const auto& dr = domain_reports[i];
                if (dr.converged) {
                    ++num_converged;
                    if (dr.total_newton_iterations == 0) {
                        ++num_converged_already;
                    }
                    else {
                        // If we needed to solve the domain, we also solve it in the next iteration.
                        domain_needs_solving_[i] = true;
                    }
                }
                if (dr.skipped_domains) {
                    ++num_skipped;
                }
                step_newtons += dr.total_newton_iterations;
                // Accumulate local reports per domain.
                domain_reports_accumulated_[i] += dr;
                // Accumulate local reports per rank.
                local_reports_accumulated_ += dr;
            }
            num_local_newtons = step_newtons;
        }

        if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
            solution = locally_solved;
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0);
        }

#if HAVE_MPI
        // Communicate solutions:
        // With multiple processes, this process' overlap (i.e. not
        // owned) cells' solution values have been modified by local
        // solves in the owning processes, and remain unchanged
        // here. We must therefore receive the updated solution on the
        // overlap cells and update their intensive quantities before
        // we move on.
        const auto& comm = model_.simulator().vanguard().grid().comm();
        if (comm.size() > 1) {
            const auto* ccomm = model_.simulator().model().newtonMethod().linearSolver().comm();

            // Copy numerical values from the primary vars.
            ccomm->copyOwnerToAll(solution, solution);

            // Copy flags from the primary vars.
            const std::size_t num = solution.size();
            Dune::BlockVector<std::size_t> allmeanings(num);
            for (std::size_t ii = 0; ii < num; ++ii) {
                allmeanings[ii] = PVUtil::pack(solution[ii]);
            }
            ccomm->copyOwnerToAll(allmeanings, allmeanings);
            for (std::size_t ii = 0; ii < num; ++ii) {
                PVUtil::unPack(solution[ii], allmeanings[ii]);
            }
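
            // The pack/unPack round-trip mirrors the numeric copyOwnerToAll
            // above: copying values alone is not enough, because each cell
            // also carries discrete primary-variable meaning flags (e.g.
            // whether the switching variable currently represents a gas
            // saturation or a dissolved-gas ratio), which are communicated
            // here as packed std::size_t values.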

            // Update the intensive quantities for our overlap values.
            model_.simulator().model().invalidateAndUpdateIntensiveQuantitiesOverlap(/*timeIdx=*/0);

            // Make total counts of domains converged.
            comm.sum(counts.data(), counts.size());
        }
#endif // HAVE_MPI

        const bool is_iorank = this->rank_ == 0;
        if (is_iorank) {
            OpmLog::debug(fmt::format("Local solves finished. Converged for {}/{} domains. {} domains were skipped. {} domains did no work. {} total local Newton iterations.\n",
                                      num_converged, num_domains, num_skipped, num_converged_already, num_local_newtons));
        }
        auto total_local_solve_time = localSolveTimer.stop();
        report.local_solve_time += total_local_solve_time;
        local_reports_accumulated_.success.total_time += total_local_solve_time;
        local_reports_accumulated_.success.pre_post_time += detailTimer.stop();

        // Finish with a Newton step.
        // Note that the "iteration + 100" is a simple way to avoid entering
        // "if (iteration == 0)" and similar blocks, and also makes it a little
        // easier to spot the iteration residuals in the DBG file. A more
        // sophisticated approach can be taken later.
        auto rep = model_.nonlinearIterationNewton(iteration + 100, timer, nonlinear_solver);
        report += rep;
        if (rep.converged) {
            report.converged = true;
        }
        return report;
    }

    //! \brief Return the statistics of local solves accumulated for this rank.
    const SimulatorReport& localAccumulatedReports() const
    {
        return local_reports_accumulated_;
    }

    //! \brief Return the statistics of local solves accumulated for each domain on this rank.
    const std::vector<SimulatorReport>& domainAccumulatedReports() const
    {
        // Update the number of wells for each domain that has been added to
        // the well model at this point. This is a mutable operation that
        // updates the well counts in the domain reports.
        const auto dr_size = domain_reports_accumulated_.size();
        // Reset the well counts before updating.
        for (auto i = 0*dr_size; i < dr_size; ++i) {
            domain_reports_accumulated_[i].success.num_wells = 0;
        }
        // Update the number of wells for each domain.
        for (const auto& [wname, domain] : wellModel_.well_domain()) {
            domain_reports_accumulated_[domain].success.num_wells++;
        }
        return domain_reports_accumulated_;
    }

    //! \brief Write the cell-to-domain partition to a file in ResInsight
    //! compatible format.
    void writePartitions(const std::filesystem::path& odir) const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();
        const auto& elementMapper = this->model_.simulator().model().elementMapper();
        const auto& cartMapper = this->model_.simulator().vanguard().cartesianIndexMapper();

        ::Opm::writePartitions(odir,
                               domains_,
                               grid,
                               elementMapper,
                               cartMapper);
    }

    //! \brief Write the number of nonlinear iterations per cell to a file in
    //! ResInsight compatible format.
    void writeNonlinearIterationsPerCell(const std::filesystem::path& odir) const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();
        const auto& elementMapper = this->model_.simulator().model().elementMapper();
        const auto& cartMapper = this->model_.simulator().vanguard().cartesianIndexMapper();

        ::Opm::writeNonlinearIterationsPerCell(odir,
                                               domains_,
                                               domain_reports_accumulated_,
                                               grid,
                                               elementMapper,
                                               cartMapper);
    }

private:
    //! \brief Solve the equation system for a single domain.
    ConvergenceReport
    solveDomain(const Domain& domain,
                const SimulatorTimerInterface& timer,
                SimulatorReportSingle& local_report,
                DeferredLogger& logger,
                [[maybe_unused]] const int global_iteration,
                const bool initial_assembly_required)
    {
        auto& modelSimulator = model_.simulator();
        Dune::Timer detailTimer;

        modelSimulator.model().newtonMethod().setIterationIndex(0);

        // When called, if assembly has already been performed
        // with the initial values, we only need to check
        // for local convergence. Otherwise, we must do a local
        // assembly.
        int iter = 0;
        if (initial_assembly_required) {
            detailTimer.start();
            modelSimulator.model().newtonMethod().setIterationIndex(iter);
            // TODO: we should have a beginIterationLocal() function;
            // this only handles the well model for now.
            wellModel_.assemble(modelSimulator.model().newtonMethod().numIterations(),
                                modelSimulator.timeStepSize(),
                                domain);
            const double tt0 = detailTimer.stop();
            local_report.assemble_time += tt0;
            local_report.assemble_time_well += tt0;
            detailTimer.reset();
            detailTimer.start();
            // Assemble the reservoir locally.
            this->assembleReservoirDomain(domain);
            local_report.assemble_time += detailTimer.stop();
            local_report.total_linearizations += 1;
        }
        detailTimer.reset();
        detailTimer.start();
        std::vector<Scalar> resnorms;
        auto convreport = this->getDomainConvergence(domain, timer, 0, logger, resnorms);
        local_report.update_time += detailTimer.stop();
        if (convreport.converged()) {
            // TODO: set more info, timing etc.
            local_report.converged = true;
            return convreport;
        }

        // We have already assembled for the first iteration,
        // but not done the Schur complement for the wells yet.
        detailTimer.reset();
        detailTimer.start();
        model_.wellModel().linearizeDomain(domain,
                                           modelSimulator.model().linearizer().jacobian(),
                                           modelSimulator.model().linearizer().residual());
        const double tt1 = detailTimer.stop();
        local_report.assemble_time += tt1;
        local_report.assemble_time_well += tt1;

        // Local Newton loop.
        const int max_iter = model_.param().max_local_solve_iterations_;
        const auto& grid = modelSimulator.vanguard().grid();
        double damping_factor = 1.0;
        std::vector<std::vector<Scalar>> convergence_history;
        convergence_history.reserve(20);
        convergence_history.push_back(resnorms);
        do {
            // Solve the local linear system.
            // Note that x has full size; we expect it to be nonzero only for in-domain cells.
            const int nc = grid.size(0);
            BVector x(nc);
            detailTimer.reset();
            detailTimer.start();
            this->solveJacobianSystemDomain(domain, x);
            model_.wellModel().postSolveDomain(x, domain);
            if (damping_factor != 1.0) {
                x *= damping_factor;
            }
            local_report.linear_solve_time += detailTimer.stop();
            local_report.linear_solve_setup_time += model_.linearSolveSetupTime();
            local_report.total_linear_iterations = model_.linearIterationsLastSolve();

            // Update the local solution. TODO: x is still full size; should we optimize this?
            detailTimer.reset();
            detailTimer.start();
            this->updateDomainSolution(domain, x);
            local_report.update_time += detailTimer.stop();

            // Assemble the well and reservoir equations.
            detailTimer.reset();
            detailTimer.start();
            ++iter;
            modelSimulator.model().newtonMethod().setIterationIndex(iter);
            // TODO: we should have a beginIterationLocal() function;
            // this only handles the well model for now.
            wellModel_.assemble(modelSimulator.model().newtonMethod().numIterations(),
                                modelSimulator.timeStepSize(),
                                domain);
            const double tt3 = detailTimer.stop();
            local_report.assemble_time += tt3;
            local_report.assemble_time_well += tt3;
            detailTimer.reset();
            detailTimer.start();
            // Assemble the reservoir locally.
            this->assembleReservoirDomain(domain);
            local_report.assemble_time += detailTimer.stop();

            // Check for local convergence.
            detailTimer.reset();
            detailTimer.start();
            resnorms.clear();
            convreport = this->getDomainConvergence(domain, timer, iter, logger, resnorms);
            convergence_history.push_back(resnorms);
            local_report.update_time += detailTimer.stop();

            // Apply the Schur complement of the well model to the
            // reservoir linearized equations.
            detailTimer.reset();
            detailTimer.start();
            model_.wellModel().linearizeDomain(domain,
                                               modelSimulator.model().linearizer().jacobian(),
                                               modelSimulator.model().linearizer().residual());
            const double tt2 = detailTimer.stop();
            local_report.assemble_time += tt2;
            local_report.assemble_time_well += tt2;

            // Check if we should dampen. Only do so if the wells are converged.
            if (!convreport.converged() && !convreport.wellFailed()) {
                bool oscillate = false;
                bool stagnate = false;
                const auto num_residuals = convergence_history.front().size();
                detail::detectOscillations(convergence_history, iter, num_residuals,
                                           Scalar{0.2}, 1, oscillate, stagnate);
                if (oscillate) {
                    damping_factor *= 0.85;
                    logger.debug(fmt::format("| Damping factor is now {}", damping_factor));
                }
            }
        } while (!convreport.converged() && iter <= max_iter);
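
        // Damping schedule example (illustrative numbers): if oscillation is
        // detected in three consecutive local iterations, the Newton updates
        // are scaled by 0.85, 0.85^2 ~ 0.72 and 0.85^3 ~ 0.61; the factor is
        // never reset within a single local solve.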

        modelSimulator.problem().endIteration();

        local_report.converged = convreport.converged();
        local_report.total_newton_iterations = iter;
        local_report.total_linearizations += iter;
        // TODO: set more info, timing etc.
        return convreport;
    }

    //! \brief Assemble the residual and Jacobian of the nonlinear system.
    void assembleReservoirDomain(const Domain& domain)
    {
        OPM_TIMEBLOCK(assembleReservoirDomain);
        // -------- Mass balance equations --------
        model_.simulator().model().linearizer().linearizeDomain(domain);
    }

    //! \brief Solve the linearized system for a single domain.
    void solveJacobianSystemDomain(const Domain& domain, BVector& global_x)
    {
        const auto& modelSimulator = model_.simulator();

        Dune::Timer perfTimer;
        perfTimer.start();

        const Mat& main_matrix = modelSimulator.model().linearizer().jacobian().istlMatrix();
        if (domain_matrices_[domain.index]) {
            Details::copySubMatrix(main_matrix, domain.cells, *domain_matrices_[domain.index]);
        } else {
            domain_matrices_[domain.index] = std::make_unique<Mat>(Details::extractMatrix(main_matrix, domain.cells));
        }
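
        // Illustration (hypothetical cells): for domain.cells == {7, 9, 12},
        // the extracted matrix is the 3x3 block matrix of couplings among
        // those cells only; couplings to cells outside the domain are
        // dropped, so the local solve in effect freezes off-domain unknowns
        // at their current values.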
        auto& jac = *domain_matrices_[domain.index];
        auto res = Details::extractVector(modelSimulator.model().linearizer().residual(),
                                          domain.cells);
        auto x = res;

        // Set the initial guess.
        global_x = 0.0;
        x = 0.0;

        auto& linsolver = domain_linsolvers_[domain.index];

        linsolver.prepare(jac, res);
        model_.linearSolveSetupTime() = perfTimer.stop();
        linsolver.setResidual(res);
        linsolver.solve(x);

        Details::setGlobal(x, domain.cells, global_x);
    }

    //! \brief Apply the Newton update to the primary variables of a domain.
    void updateDomainSolution(const Domain& domain, const BVector& dx)
    {
        OPM_TIMEBLOCK(updateDomainSolution);
        auto& simulator = model_.simulator();
        auto& newtonMethod = simulator.model().newtonMethod();
        SolutionVector& solution = simulator.model().solution(/*timeIdx=*/0);

        newtonMethod.update_(/*nextSolution=*/solution,
                             /*curSolution=*/solution,
                             /*update=*/dx,
                             /*resid=*/dx,
                             domain.cells); // the update routines of the black
                                            // oil model do not care about the
                                            // residual

        // If the solution is updated, the intensive quantities need to be recalculated.
        simulator.model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
    }

    //! \brief Gather per-cell convergence data for a single domain.
    std::pair<Scalar, Scalar> localDomainConvergenceData(const Domain& domain,
                                                         std::vector<Scalar>& R_sum,
                                                         std::vector<Scalar>& maxCoeff,
                                                         std::vector<Scalar>& B_avg,
                                                         std::vector<int>& maxCoeffCell)
    {
        const auto& modelSimulator = model_.simulator();

        Scalar pvSumLocal = 0.0;
        Scalar numAquiferPvSumLocal = 0.0;
        const auto& model = modelSimulator.model();
        const auto& problem = modelSimulator.problem();

        const auto& modelResid = modelSimulator.model().linearizer().residual();

        ElementContext elemCtx(modelSimulator);
        const auto& gridView = domain.view;
        const auto& elemEndIt = gridView.template end</*codim=*/0>();
        IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());

        for (auto elemIt = gridView.template begin</*codim=*/0>();
             elemIt != elemEndIt;
             ++elemIt)
        {
            if (elemIt->partitionType() != Dune::InteriorEntity) {
                continue;
            }
            const auto& elem = *elemIt;
            elemCtx.updatePrimaryStencil(elem);
            elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);

            const unsigned cell_idx = elemCtx.globalSpaceIndex(/*spaceIdx=*/0, /*timeIdx=*/0);
            const auto& intQuants = elemCtx.intensiveQuantities(/*spaceIdx=*/0, /*timeIdx=*/0);
            const auto& fs = intQuants.fluidState();

            const auto pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
                                 model.dofTotalVolume(cell_idx);
            pvSumLocal += pvValue;

            if (isNumericalAquiferCell(elem)) {
                numAquiferPvSumLocal += pvValue;
            }

            model_.getMaxCoeff(cell_idx, intQuants, fs, modelResid, pvValue,
                               B_avg, R_sum, maxCoeff, maxCoeffCell);
        }

        // Compute the local average in terms of the global number of elements.
        const int bSize = B_avg.size();
        for (int i = 0; i < bSize; ++i) {
            B_avg[i] /= Scalar(domain.cells.size());
        }

        return {pvSumLocal, numAquiferPvSumLocal};
    }

    ConvergenceReport getDomainReservoirConvergence(const double reportTime,
                                                    const double dt,
                                                    const int iteration,
                                                    const Domain& domain,
                                                    DeferredLogger& logger,
                                                    std::vector<Scalar>& B_avg,
                                                    std::vector<Scalar>& residual_norms)
    {
        using Vector = std::vector<Scalar>;

        const int numComp = numEq;
        Vector R_sum(numComp, 0.0);
        Vector maxCoeff(numComp, std::numeric_limits<Scalar>::lowest());
        std::vector<int> maxCoeffCell(numComp, -1);
        const auto [pvSum, numAquiferPvSum]
            = this->localDomainConvergenceData(domain, R_sum, maxCoeff, B_avg, maxCoeffCell);

        auto cnvErrorPvFraction = computeCnvErrorPvLocal(domain, B_avg, dt);
        cnvErrorPvFraction /= (pvSum - numAquiferPvSum);

        // The default value of relaxed_max_pv_fraction_ is 0.03, and of min_strict_cnv_iter_ it is 0.
        // For each iteration, we need to determine whether to use the relaxed CNV tolerance.
        // To disable the relaxed CNV tolerance, set relaxed_max_pv_fraction_ to 0.
        const bool use_relaxed_cnv = cnvErrorPvFraction < model_.param().relaxed_max_pv_fraction_ &&
                                     iteration >= model_.param().min_strict_cnv_iter_;
        // A tighter bound for local convergence should increase the
        // likelihood that local convergence implies global convergence.
        const Scalar tol_cnv = model_.param().local_tolerance_scaling_cnv_
            * (use_relaxed_cnv ? model_.param().tolerance_cnv_relaxed_
                               : model_.param().tolerance_cnv_);

        const bool use_relaxed_mb = iteration >= model_.param().min_strict_mb_iter_;
        const Scalar tol_mb = model_.param().local_tolerance_scaling_mb_
            * (use_relaxed_mb ? model_.param().tolerance_mb_relaxed_ : model_.param().tolerance_mb_);

        // Finish the computation.
        std::vector<Scalar> CNV(numComp);
        std::vector<Scalar> mass_balance_residual(numComp);
        for (int compIdx = 0; compIdx < numComp; ++compIdx) {
            CNV[compIdx] = B_avg[compIdx] * dt * maxCoeff[compIdx];
            mass_balance_residual[compIdx] = std::abs(B_avg[compIdx]*R_sum[compIdx]) * dt / pvSum;
            residual_norms.push_back(CNV[compIdx]);
        }
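
        // In the loop above, for each component c (assuming getMaxCoeff
        // accumulates |R_c| / pv per cell, as in the global convergence check):
        //   CNV_c = B_avg,c * dt * max over cells of (|R_c| / pv)
        //   MB_c  = |B_avg,c * sum over cells of R_c| * dt / pvSum
        // so CNV is a worst-cell measure while MB is a domain-integral one.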

        // Create a convergence report.
        ConvergenceReport report{reportTime};
        using CR = ConvergenceReport;
        for (int compIdx = 0; compIdx < numComp; ++compIdx) {
            Scalar res[2] = { mass_balance_residual[compIdx], CNV[compIdx] };
            CR::ReservoirFailure::Type types[2] = { CR::ReservoirFailure::Type::MassBalance,
                                                    CR::ReservoirFailure::Type::Cnv };
            Scalar tol[2] = { tol_mb, tol_cnv };
            for (int ii : {0, 1}) {
                if (std::isnan(res[ii])) {
                    report.setReservoirFailed({types[ii], CR::Severity::NotANumber, compIdx});
                    logger.debug("NaN residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] > model_.param().max_residual_allowed_) {
                    report.setReservoirFailed({types[ii], CR::Severity::TooLarge, compIdx});
                    logger.debug("Too large residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] < 0.0) {
                    report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
                    logger.debug("Negative residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] > tol[ii]) {
                    report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
                }

                report.setReservoirConvergenceMetric(types[ii], compIdx, res[ii], tol[ii]);
            }
        }

        // Output of residuals. If converged at the initial state, log nothing.
        const bool converged_at_initial_state = (report.converged() && iteration == 0);
        if (!converged_at_initial_state) {
            if (iteration == 0) {
                // Log the header.
                std::string msg = fmt::format("Domain {} on rank {}, size {}, containing cell {}\n| Iter",
                                              domain.index, this->rank_, domain.cells.size(), domain.cells[0]);
                for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                    msg += " MB(";
                    msg += model_.compNames().name(compIdx)[0];
                    msg += ") ";
                }
                for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                    msg += " CNV(";
                    msg += model_.compNames().name(compIdx)[0];
                    msg += ") ";
                }
                logger.debug(msg);
            }
            // Log the convergence data.
            std::ostringstream ss;
            ss << "| ";
            const std::streamsize oprec = ss.precision(3);
            const std::ios::fmtflags oflags = ss.setf(std::ios::scientific);
            ss << std::setw(4) << iteration;
            for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                ss << std::setw(11) << mass_balance_residual[compIdx];
            }
            for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                ss << std::setw(11) << CNV[compIdx];
            }
            ss.precision(oprec);
            ss.flags(oflags);
            logger.debug(ss.str());
        }

        return report;
    }

    ConvergenceReport getDomainConvergence(const Domain& domain,
                                           const SimulatorTimerInterface& timer,
                                           const int iteration,
                                           DeferredLogger& logger,
                                           std::vector<Scalar>& residual_norms)
    {
        OPM_TIMEBLOCK(getDomainConvergence);
        std::vector<Scalar> B_avg(numEq, 0.0);
        auto report = this->getDomainReservoirConvergence(timer.simulationTimeElapsed(),
                                                          timer.currentStepLength(),
                                                          iteration,
                                                          domain,
                                                          logger,
                                                          B_avg,
                                                          residual_norms);
        report += wellModel_.getWellConvergence(domain, B_avg, logger);
        return report;
    }

    //! \brief Decide on an ordering of the subdomains.
    std::vector<int> getSubdomainOrder()
    {
        const auto& modelSimulator = model_.simulator();
        const auto& solution = modelSimulator.model().solution(0);

        std::vector<int> domain_order(domains_.size());
        std::iota(domain_order.begin(), domain_order.end(), 0);

        if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
            // Do nothing, the 0..n-1 order is fine.
            return domain_order;
        } else if (model_.param().local_solve_approach_ == DomainSolveApproach::GaussSeidel) {
            // Calculate the measure used to order the domains.
            std::vector<Scalar> measure_per_domain(domains_.size());
            switch (model_.param().local_domains_ordering_) {
            case DomainOrderingMeasure::AveragePressure: {
                // Use average pressures to order the domains.
                for (const auto& domain : domains_) {
                    const Scalar press_sum =
                        std::accumulate(domain.cells.begin(), domain.cells.end(), Scalar{0},
                                        [&solution](const auto acc, const auto c)
                                        { return acc + solution[c][Indices::pressureSwitchIdx]; });
                    const Scalar avgpress = press_sum / domain.cells.size();
                    measure_per_domain[domain.index] = avgpress;
                }
                break;
            }
            case DomainOrderingMeasure::MaxPressure: {
                // Use max pressures to order the domains.
                for (const auto& domain : domains_) {
                    measure_per_domain[domain.index] =
                        std::accumulate(domain.cells.begin(), domain.cells.end(), Scalar{0},
                                        [&solution](const auto acc, const auto c)
                                        { return std::max(acc, solution[c][Indices::pressureSwitchIdx]); });
                }
                break;
            }
            case DomainOrderingMeasure::Residual: {
                // Use the maximum residual to order the domains.
                const auto& residual = modelSimulator.model().linearizer().residual();
                const int num_vars = residual[0].size();
                for (const auto& domain : domains_) {
                    Scalar maxres = 0.0;
                    for (const int c : domain.cells) {
                        for (int ii = 0; ii < num_vars; ++ii) {
                            maxres = std::max(maxres, std::fabs(residual[c][ii]));
                        }
                    }
                    measure_per_domain[domain.index] = maxres;
                }
                break;
            }
            } // end of switch (model_.param().local_domains_ordering_)

            // Sort by largest measure, keeping the index order if equal.
            const auto& m = measure_per_domain;
            std::stable_sort(domain_order.begin(), domain_order.end(),
                             [&m](const int i1, const int i2) { return m[i1] > m[i2]; });
            return domain_order;
        } else {
            throw std::logic_error("Domain solve approach must be Jacobi or Gauss-Seidel");
        }
    }

    template<class GlobalEqVector>
    void solveDomainJacobi(GlobalEqVector& solution,
                           GlobalEqVector& locally_solved,
                           SimulatorReportSingle& local_report,
                           DeferredLogger& logger,
                           const int iteration,
                           const SimulatorTimerInterface& timer,
                           const Domain& domain)
    {
        auto initial_local_well_primary_vars = wellModel_.getPrimaryVarsDomain(domain.index);
        auto initial_local_solution = Details::extractVector(solution, domain.cells);
        auto convrep = solveDomain(domain, timer, local_report, logger, iteration, false);
        if (local_report.converged) {
            auto local_solution = Details::extractVector(solution, domain.cells);
            Details::setGlobal(local_solution, domain.cells, locally_solved);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        } else {
            wellModel_.setPrimaryVarsDomain(domain.index, initial_local_well_primary_vars);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        }
    }

    template<class GlobalEqVector>
    void solveDomainGaussSeidel(GlobalEqVector& solution,
                                GlobalEqVector& locally_solved,
                                SimulatorReportSingle& local_report,
                                DeferredLogger& logger,
                                const int iteration,
                                const SimulatorTimerInterface& timer,
                                const Domain& domain)
    {
        auto initial_local_well_primary_vars = wellModel_.getPrimaryVarsDomain(domain.index);
        auto initial_local_solution = Details::extractVector(solution, domain.cells);
        auto convrep = solveDomain(domain, timer, local_report, logger, iteration, true);
        if (!local_report.converged) {
            // We look at the detailed convergence report to evaluate
            // whether we should accept the unconverged solution.
            // We do not accept a solution if the wells are unconverged.
            if (!convrep.wellFailed()) {
                // Calculate the sums of the mb and cnv failures.
                Scalar mb_sum = 0.0;
                Scalar cnv_sum = 0.0;
                for (const auto& rc : convrep.reservoirConvergence()) {
                    if (rc.type() == ConvergenceReport::ReservoirFailure::Type::MassBalance) {
                        mb_sum += rc.value();
                    } else if (rc.type() == ConvergenceReport::ReservoirFailure::Type::Cnv) {
                        cnv_sum += rc.value();
                    }
                }
                // If they are not too high, we overrule the convergence failure.
                const Scalar acceptable_local_mb_sum = 1e-3;
                const Scalar acceptable_local_cnv_sum = 1.0;
                if (mb_sum < acceptable_local_mb_sum && cnv_sum < acceptable_local_cnv_sum) {
                    local_report.converged = true;
                    local_report.accepted_unconverged_domains += 1;
                    logger.debug(fmt::format("Accepting solution in unconverged domain {} on rank {}.", domain.index, rank_));
                    logger.debug(fmt::format("Value of mb_sum: {} cnv_sum: {}", mb_sum, cnv_sum));
                } else {
                    logger.debug("Unconverged local solution.");
                }
            } else {
                logger.debug("Unconverged local solution with well convergence failures:");
                for (const auto& wf : convrep.wellFailures()) {
                    logger.debug(to_string(wf));
                }
            }
        }
        if (local_report.converged) {
            local_report.converged_domains += 1;
            auto local_solution = Details::extractVector(solution, domain.cells);
            Details::setGlobal(local_solution, domain.cells, locally_solved);
        } else {
            local_report.unconverged_domains += 1;
            wellModel_.setPrimaryVarsDomain(domain.index, initial_local_well_primary_vars);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        }
    }

    Scalar computeCnvErrorPvLocal(const Domain& domain,
                                  const std::vector<Scalar>& B_avg, double dt) const
    {
        Scalar errorPV{};
        const auto& simulator = model_.simulator();
        const auto& model = simulator.model();
        const auto& problem = simulator.problem();
        const auto& residual = simulator.model().linearizer().residual();

        for (const int cell_idx : domain.cells) {
            const Scalar pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
                                   model.dofTotalVolume(cell_idx);
            const auto& cellResidual = residual[cell_idx];
            bool cnvViolated = false;

            for (unsigned eqIdx = 0; eqIdx < cellResidual.size(); ++eqIdx) {
                using std::fabs;
                Scalar CNV = cellResidual[eqIdx] * dt * B_avg[eqIdx] / pvValue;
                cnvViolated = cnvViolated || (fabs(CNV) > model_.param().tolerance_cnv_);
            }

            if (cnvViolated) {
                errorPV += pvValue;
            }
        }
        return errorPV;
    }

    decltype(auto) partitionCells() const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();

        using GridView = std::remove_cv_t<std::remove_reference_t<
            decltype(grid.leafGridView())>>;

        using Element = std::remove_cv_t<std::remove_reference_t<
            typename GridView::template Codim<0>::Entity>>;

        const auto& param = this->model_.param();

        auto zoltan_ctrl = ZoltanPartitioningControl<Element>{};

        zoltan_ctrl.domain_imbalance = param.local_domains_partition_imbalance_;

        zoltan_ctrl.index =
            [elementMapper = &this->model_.simulator().model().elementMapper()]
            (const Element& element)
        {
            return elementMapper->index(element);
        };

        zoltan_ctrl.local_to_global =
            [cartMapper = &this->model_.simulator().vanguard().cartesianIndexMapper()]
            (const int elemIdx)
        {
            return cartMapper->cartesianIndex(elemIdx);
        };

        // Forming the list of wells is expensive, so do this only if needed.
        const auto need_wells = param.local_domains_partition_method_ == "zoltan";

        const auto wells = need_wells
            ? this->model_.simulator().vanguard().schedule().getWellsatEnd()
            : std::vector<Well>{};

        const auto& possibleFutureConnectionSet = need_wells
            ? this->model_.simulator().vanguard().schedule().getPossibleFutureConnections()
            : std::unordered_map<std::string, std::set<int>>{};

        // If the number-of-domains parameter is defaulted, choose a reasonable value.
        constexpr int default_cells_per_domain = 1000;
        const int num_domains = (param.num_local_domains_ > 0)
            ? param.num_local_domains_
            : detail::countGlobalCells(grid) / default_cells_per_domain;

        return ::Opm::partitionCells(param.local_domains_partition_method_,
                                     num_domains, grid.leafGridView(), wells,
                                     possibleFutureConnectionSet, zoltan_ctrl,
                                     param.local_domains_partition_well_neighbor_levels_);
    }
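
    // Sizing example (hypothetical numbers): with the default of 1000 cells
    // per domain, a model with 100,000 global cells and a defaulted
    // num_local_domains_ parameter is split into roughly 100 subdomains.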

    void updateMobilities(const Domain& domain)
    {
        if (domain.skip || model_.param().nldd_relative_mobility_change_tol_ == 0.0) {
            return;
        }
        const auto numActivePhases = FluidSystem::numActivePhases();
        for (const auto globalDofIdx : domain.cells) {
            const auto& intQuants = model_.simulator().model().intensiveQuantities(globalDofIdx, /*timeIdx=*/0);

            for (unsigned activePhaseIdx = 0; activePhaseIdx < numActivePhases; ++activePhaseIdx) {
                const auto phaseIdx = FluidSystem::activeToCanonicalPhaseIdx(activePhaseIdx);
                const auto mobIdx = globalDofIdx * numActivePhases + activePhaseIdx;
                previousMobilities_[mobIdx] = getValue(intQuants.mobility(phaseIdx));
            }
        }
    }
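
    // Layout of previousMobilities_: slot (globalCellIdx * numActivePhases +
    // activePhaseIdx). With, e.g., three active phases, the mobilities of
    // cell c occupy slots 3*c, 3*c + 1 and 3*c + 2.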

    bool checkIfSubdomainNeedsSolving(const Domain& domain, const int iteration)
    {
        if (domain.skip) {
            return false;
        }

        // If the domain was marked as needing solving in a previous iteration,
        // we do not need to check again.
        if (domain_needs_solving_[domain.index]) {
            return true;
        }

        // If we do not check for mobility changes, we need to solve the domain.
        if (model_.param().nldd_relative_mobility_change_tol_ == 0.0) {
            return true;
        }

        // Skip the mobility check on the first iteration.
        if (iteration == 0) {
            return true;
        }

        return checkSubdomainChangeRelative(domain);
    }

    bool checkSubdomainChangeRelative(const Domain& domain)
    {
        const auto numActivePhases = FluidSystem::numActivePhases();

        // Check mobility changes for all cells in the domain.
        for (const auto globalDofIdx : domain.cells) {
            const auto& intQuants = model_.simulator().model().intensiveQuantities(globalDofIdx, /*timeIdx=*/0);

            // Calculate the average previous mobility, used for normalization.
            Scalar cellMob = 0.0;
            for (unsigned activePhaseIdx = 0; activePhaseIdx < numActivePhases; ++activePhaseIdx) {
                const auto mobIdx = globalDofIdx * numActivePhases + activePhaseIdx;
                cellMob += previousMobilities_[mobIdx] / numActivePhases;
            }

            // Check the relative change for each phase.
            for (unsigned activePhaseIdx = 0; activePhaseIdx < numActivePhases; ++activePhaseIdx) {
                const auto phaseIdx = FluidSystem::activeToCanonicalPhaseIdx(activePhaseIdx);
                const auto mobIdx = globalDofIdx * numActivePhases + activePhaseIdx;
                const auto mobility = getValue(intQuants.mobility(phaseIdx));
                const auto relDiff = std::abs(mobility - previousMobilities_[mobIdx]) / cellMob;
                if (relDiff > model_.param().nldd_relative_mobility_change_tol_) {
                    return true;
                }
            }
        }
        return false;
    }
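
    // Worked example (hypothetical values): previous cell mobilities
    // (0.5, 0.3, 0.2) give cellMob = 1/3. If one phase mobility then moves
    // from 0.5 to 0.6, relDiff = 0.1 / (1/3) = 0.3, so any tolerance below
    // 0.3 would flag the domain for re-solving.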

    BlackoilModel<TypeTag>& model_;
    BlackoilWellModelNldd<TypeTag> wellModel_;
    std::vector<Domain> domains_;
    std::vector<std::unique_ptr<Mat>> domain_matrices_;
    std::vector<ISTLSolverType> domain_linsolvers_;
    SimulatorReport local_reports_accumulated_;
    // Mutable because we need to update the per-domain well counts in
    // domainAccumulatedReports(), which is const.
    mutable std::vector<SimulatorReport> domain_reports_accumulated_;
    int rank_ = 0;
    // Previous mobilities, stored to check for changes. Single flat vector
    // indexed by (globalCellIdx * numActivePhases + activePhaseIdx).
    std::vector<Scalar> previousMobilities_;
    // Flags indicating whether each domain should be solved in the next iteration.
    std::vector<bool> domain_needs_solving_;
};

} // namespace Opm

#endif // OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
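
How this class is driven, as a minimal sketch (hypothetical caller code: the real call sites live in BlackoilModel's nonlinear iteration, and model, timer, nonlinear_solver and max_nonlinear_iter are assumed to be set up by the surrounding simulator):

    // Construct once; this partitions the grid into subdomains.
    Opm::BlackoilModelNldd<TypeTag> nldd(model);

    // Once per time step: map wells to their domains.
    nldd.prepareStep();

    // Nonlinear iterations: each call does local domain solves,
    // followed by a global Newton step.
    Opm::SimulatorReportSingle report;
    for (int it = 0; it < max_nonlinear_iter; ++it) {
        report = nldd.nonlinearIterationNldd(it, timer, nonlinear_solver);
        if (report.converged) {
            break;
        }
    }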