BlackoilModelNldd.hpp
/*
  Copyright 2013, 2015 SINTEF ICT, Applied Mathematics.
  Copyright 2014, 2015 Dr. Blatt - HPC-Simulation-Software & Services
  Copyright 2014, 2015 Statoil ASA.
  Copyright 2015 NTNU
  Copyright 2015, 2016, 2017 IRIS AS

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
#define OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED

#include <dune/common/timer.hh>
#include <dune/istl/istlexception.hh>

#include <opm/common/Exceptions.hpp>

#include <opm/grid/common/SubGridPart.hpp>

// NOTE: The OPM includes below are reconstructed from the symbols used in
// this file; the exact original list may differ.
#include <opm/models/utils/propertysystem.hh>

#include <opm/simulators/aquifers/AquiferGridUtils.hpp>

#include <opm/simulators/flow/BlackoilModelParameters.hpp>
#include <opm/simulators/flow/countGlobalCells.hpp>
#include <opm/simulators/flow/NlddReporting.hpp>
#include <opm/simulators/flow/partitionCells.hpp>
#include <opm/simulators/flow/priVarsPacking.hpp>
#include <opm/simulators/flow/SubDomain.hpp>

#include <opm/simulators/linalg/extractMatrix.hpp>
#include <opm/simulators/linalg/FlowLinearSolverParameters.hpp>

#if COMPILE_GPU_BRIDGE
#include <opm/simulators/linalg/ISTLSolverGpuBridge.hpp>
#else
#include <opm/simulators/linalg/ISTLSolver.hpp>
#endif

#include <opm/simulators/timestepping/ConvergenceReport.hpp>
#include <opm/simulators/timestepping/SimulatorReport.hpp>
#include <opm/simulators/timestepping/SimulatorTimerInterface.hpp>

#include <opm/simulators/utils/DeferredLogger.hpp>
#include <opm/simulators/utils/DeferredLoggingErrorHelpers.hpp>
#include <opm/simulators/utils/gatherDeferredLogger.hpp>

#include <opm/simulators/wells/BlackoilWellModelNldd.hpp>

#include <fmt/format.h>

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <filesystem>
#include <iomanip>
#include <limits>
#include <memory>
#include <numeric>
#include <set>
#include <sstream>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>

namespace Opm {

template<class TypeTag> class BlackoilModel;

//! \brief A NLDD implementation for three-phase black oil.
template <class TypeTag>
class BlackoilModelNldd
{
public:
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
    using FluidSystem = GetPropType<TypeTag, Properties::FluidSystem>;
    using Grid = GetPropType<TypeTag, Properties::Grid>;
    using Indices = GetPropType<TypeTag, Properties::Indices>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using SolutionVector = GetPropType<TypeTag, Properties::SolutionVector>;

    using BVector = typename BlackoilModel<TypeTag>::BVector;
    using Domain = SubDomain<Grid>;
    using ISTLSolverType = ISTLSolver<TypeTag>;
    using Mat = typename BlackoilModel<TypeTag>::Mat;

    static constexpr int numEq = Indices::numEq;

    //! \brief The constructor sets up the subdomains.
    BlackoilModelNldd(BlackoilModel<TypeTag>& model)
        : model_(model)
        , wellModel_(model.wellModel())
        , rank_(model_.simulator().vanguard().grid().comm().rank())
    {
        // Create partitions.
        const auto& [partition_vector_initial, num_domains_initial] = this->partitionCells();

        int num_domains = num_domains_initial;
        std::vector<int> partition_vector = partition_vector_initial;

        // Fix-up for an extreme case: interior cells that do not have any
        // on-rank neighbours. Move all such cells into a single domain on
        // this rank, and mark that domain for skipping. We have seen this
        // case occur in practice when testing on field cases.
        bool isolated_cells = false;
        for (auto& domainId : partition_vector) {
            if (domainId < 0) {
                domainId = num_domains;
                isolated_cells = true;
            }
        }
        if (isolated_cells) {
            num_domains++;
        }

        // Set nldd handler in main well model.
        model.wellModel().setNlddAdapter(&wellModel_);

        // Scan through partitioning to get correct size for each.
        std::vector<int> sizes(num_domains, 0);
        for (const auto& p : partition_vector) {
            ++sizes[p];
        }

        // Set up correctly sized vectors of entity seeds and of indices for each partition.
        using EntitySeed = typename Grid::template Codim<0>::EntitySeed;
        std::vector<std::vector<EntitySeed>> seeds(num_domains);
        std::vector<std::vector<int>> partitions(num_domains);
        for (int domain = 0; domain < num_domains; ++domain) {
            seeds[domain].resize(sizes[domain]);
            partitions[domain].resize(sizes[domain]);
        }

        // Iterate through grid once, setting the seeds of all partitions.
        // Note: owned cells only!
        const auto& grid = model_.simulator().vanguard().grid();

        std::vector<int> count(num_domains, 0);
        const auto& gridView = grid.leafGridView();
        const auto beg = gridView.template begin<0, Dune::Interior_Partition>();
        const auto end = gridView.template end<0, Dune::Interior_Partition>();
        int cell = 0;
        for (auto it = beg; it != end; ++it, ++cell) {
            const int p = partition_vector[cell];
            seeds[p][count[p]] = it->seed();
            partitions[p][count[p]] = cell;
            ++count[p];
        }
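        // (Note: the counter 'cell' follows the interior-element traversal
        // order of the leaf grid view; partition_vector is assumed to use
        // this same owned-cell indexing.)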
        assert(count == sizes);

        // Create the domains.
        for (int index = 0; index < num_domains; ++index) {
            std::vector<bool> interior(partition_vector.size(), false);
            for (int ix : partitions[index]) {
                interior[ix] = true;
            }

            Dune::SubGridPart<Grid> view{grid, std::move(seeds[index])};

            // Mark the last domain for skipping if it contains isolated cells.
            const bool skip = isolated_cells && (index == num_domains - 1);
            this->domains_.emplace_back(index,
                                        std::move(partitions[index]),
                                        std::move(interior),
                                        std::move(view),
                                        skip);
        }

        // Initialize storage for previous mobilities in a single flat vector.
        const auto numCells = grid.size(0);
        previousMobilities_.resize(numCells * FluidSystem::numActivePhases(), 0.0);
        for (const auto& domain : domains_) {
            updateMobilities(domain);
        }

        // Initialize domain_needs_solving_ to true for all domains.
        domain_needs_solving_.resize(num_domains, true);

        // Set up container for the local system matrices.
        domain_matrices_.resize(num_domains);

        // Set up container for the local linear solvers.
        for (int index = 0; index < num_domains; ++index) {
            // TODO: The ISTLSolver constructor will make
            // parallel structures appropriate for the full grid
            // only. This must be addressed before going parallel.
            const auto& eclState = model_.simulator().vanguard().eclState();
            FlowLinearSolverParameters loc_param;
            loc_param.is_nldd_local_solver_ = true;
            loc_param.init(eclState.getSimulationConfig().useCPR());
            // Override solver type with umfpack if small domain.
            if (domains_[index].cells.size() < 200) {
                loc_param.linsolver_ = "umfpack";
            }
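            // (UMFPACK, a direct sparse solver, is assumed to be cheaper and
            // more robust than an iterative solver for such small systems.)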
            loc_param.linear_solver_print_json_definition_ = false;
            const bool force_serial = true;
            domain_linsolvers_.emplace_back(model_.simulator(), loc_param, force_serial);
            domain_linsolvers_.back().setDomainIndex(index);
        }

        assert(int(domains_.size()) == num_domains);

        domain_reports_accumulated_.resize(num_domains);

        // Print domain distribution summary.
        printDomainDistributionSummary(partition_vector,
                                       domains_,
                                       local_reports_accumulated_,
                                       domain_reports_accumulated_,
                                       grid,
                                       wellModel_.numLocalWellsEnd());
    }

    //! \brief Called before starting a time step.
    void prepareStep()
    {
        // Setup domain->well mapping.
        wellModel_.setupDomains(domains_);
    }
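
    // Illustrative driving sequence (the real call sites live in
    // BlackoilModel/NonlinearSolver; names outside this class are
    // assumptions):
    //
    //   BlackoilModelNldd<TypeTag> nldd(model);
    //   nldd.prepareStep();                  // once per (sub)step
    //   SimulatorReportSingle report;
    //   for (int it = 0; !report.converged; ++it) {
    //       report = nldd.nonlinearIterationNldd(it, timer, nonlinear_solver);
    //   }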

    //! \brief Do one non-linear NLDD iteration.
    template <class NonlinearSolverType>
    SimulatorReportSingle nonlinearIterationNldd(const int iteration,
                                                 const SimulatorTimerInterface& timer,
                                                 NonlinearSolverType& nonlinear_solver)
    {
        // ----------- Set up reports and timer -----------
        SimulatorReportSingle report;

        if (iteration < model_.param().nldd_num_initial_newton_iter_) {
            report = model_.nonlinearIterationNewton(iteration,
                                                     timer,
                                                     nonlinear_solver);
            return report;
        }

        model_.initialLinearization(report, iteration, model_.param().newton_min_iter_,
                                    model_.param().newton_max_iter_, timer);

        if (report.converged) {
            return report;
        }

        // ----------- If not converged, do an NLDD iteration -----------
        Dune::Timer localSolveTimer;
        Dune::Timer detailTimer;
        localSolveTimer.start();
        detailTimer.start();
        auto& solution = model_.simulator().model().solution(0);
        auto initial_solution = solution;
        auto locally_solved = initial_solution;

        // ----------- Decide on an ordering for the domains -----------
        const auto domain_order = this->getSubdomainOrder();
        local_reports_accumulated_.success.pre_post_time += detailTimer.stop();

        // ----------- Solve each domain separately -----------
        DeferredLogger logger;
        std::vector<SimulatorReportSingle> domain_reports(domains_.size());

        OPM_BEGIN_PARALLEL_TRY_CATCH();
        for (const int domain_index : domain_order) {
            const auto& domain = domains_[domain_index];
            SimulatorReportSingle local_report;
            detailTimer.reset();
            detailTimer.start();

            domain_needs_solving_[domain_index] = checkIfSubdomainNeedsSolving(domain, iteration);

            updateMobilities(domain);

            if (domain.skip || !domain_needs_solving_[domain_index]) {
                local_report.skipped_domains = true;
                local_report.converged = true;
                domain_reports[domain.index] = local_report;
                continue;
            }
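
            // Jacobi solves every domain starting from the same initial
            // global solution (collecting updates in 'locally_solved' and
            // applying them afterwards), while Gauss-Seidel lets each domain
            // see the updates already made by previously solved domains.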
            switch (model_.param().local_solve_approach_) {
            case DomainSolveApproach::Jacobi:
                solveDomainJacobi(solution, locally_solved, local_report, logger,
                                  iteration, timer, domain);
                break;
            default:
            case DomainSolveApproach::GaussSeidel:
                solveDomainGaussSeidel(solution, locally_solved, local_report, logger,
                                       iteration, timer, domain);
                break;
            }
            // This should have updated the global matrix to be
            // dR_i/du_j evaluated at the new local solutions for
            // i == j, and at the old solution for i != j.
            if (!local_report.converged) {
                // TODO: more proper treatment, including in parallel.
                logger.debug(fmt::format("Convergence failure in domain {} on rank {}.", domain.index, rank_));
            }
            local_report.solver_time += detailTimer.stop();
            domain_reports[domain.index] = local_report;
        }
        OPM_END_PARALLEL_TRY_CATCH("Unexpected exception in local domain solve: ", model_.simulator().vanguard().grid().comm());

        detailTimer.reset();
        detailTimer.start();
        // Communicate and log all messages.
        auto global_logger = gatherDeferredLogger(logger, model_.simulator().vanguard().grid().comm());
        global_logger.logMessages();

        // Accumulate local solve data.
        // Putting the counts in a single array to avoid multiple
        // comm.sum() calls. Keeping the named vars for readability.
        std::array<int, 5> counts{ 0, 0, 0, static_cast<int>(domain_reports.size()), 0 };
        int& num_converged = counts[0];
        int& num_converged_already = counts[1];
        int& num_local_newtons = counts[2];
        int& num_domains = counts[3];
        int& num_skipped = counts[4];
        {
            auto step_newtons = 0;
            const auto dr_size = domain_reports.size();
            for (auto i = 0*dr_size; i < dr_size; ++i) {
                // Reset the needs-solving flag for the next iteration.
                domain_needs_solving_[i] = false;
                const auto& dr = domain_reports[i];
                if (dr.converged) {
                    ++num_converged;
                    if (dr.total_newton_iterations == 0) {
                        ++num_converged_already;
                    }
                    else {
                        // If we needed to solve the domain now, we also solve it in the next iteration.
                        domain_needs_solving_[i] = true;
                    }
                }
                if (dr.skipped_domains) {
                    ++num_skipped;
                }
                step_newtons += dr.total_newton_iterations;
                // Accumulate local reports per domain.
                domain_reports_accumulated_[i] += dr;
                // Accumulate local reports per rank.
                local_reports_accumulated_ += dr;
            }
            num_local_newtons = step_newtons;
        }

        if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
            solution = locally_solved;
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0);
        }

#if HAVE_MPI
        // Communicate solutions:
        // With multiple processes, this process' overlap (i.e. not
        // owned) cells' solution values have been modified by local
        // solves in the owning processes, and remain unchanged
        // here. We must therefore receive the updated solution on the
        // overlap cells and update their intensive quantities before
        // we move on.
        const auto& comm = model_.simulator().vanguard().grid().comm();
        if (comm.size() > 1) {
            const auto* ccomm = model_.simulator().model().newtonMethod().linearSolver().comm();

            // Copy numerical values from primary vars.
            ccomm->copyOwnerToAll(solution, solution);

            // Copy flags from primary vars.
            const std::size_t num = solution.size();
            Dune::BlockVector<std::size_t> allmeanings(num);
            for (std::size_t ii = 0; ii < num; ++ii) {
                allmeanings[ii] = PVUtil::pack(solution[ii]);
            }
            ccomm->copyOwnerToAll(allmeanings, allmeanings);
            for (std::size_t ii = 0; ii < num; ++ii) {
                PVUtil::unPack(solution[ii], allmeanings[ii]);
            }

            // Update intensive quantities for our overlap values.
            model_.simulator().model().invalidateAndUpdateIntensiveQuantitiesOverlap(/*timeIdx=*/0);

            // Make total counts of domains converged.
            comm.sum(counts.data(), counts.size());
        }
#endif // HAVE_MPI

        const bool is_iorank = this->rank_ == 0;
        if (is_iorank) {
            OpmLog::debug(fmt::format("Local solves finished. Converged for {}/{} domains. {} domains were skipped. {} domains did no work. {} total local Newton iterations.\n",
                                      num_converged, num_domains, num_skipped, num_converged_already, num_local_newtons));
        }
        auto total_local_solve_time = localSolveTimer.stop();
        report.local_solve_time += total_local_solve_time;
        local_reports_accumulated_.success.total_time += total_local_solve_time;
        local_reports_accumulated_.success.pre_post_time += detailTimer.stop();

        // Finish with a Newton step.
        // Note that the "iteration + 100" is a simple way to avoid entering
        // "if (iteration == 0)" and similar blocks, and also makes it a little
        // easier to spot the iteration residuals in the DBG file. A more
        // sophisticated approach can be done later.
        auto rep = model_.nonlinearIterationNewton(iteration + 100, timer, nonlinear_solver);
        report += rep;
        if (rep.converged) {
            report.converged = true;
        }
        return report;
    }

    //! \brief Return the statistics of local solves accumulated for this rank.
    const SimulatorReport& localAccumulatedReports() const
    {
        return local_reports_accumulated_;
    }

    //! \brief Return the statistics of local solves accumulated for each
    //! domain on this rank.
    const std::vector<SimulatorReport>& domainAccumulatedReports() const
    {
        // Update the number of wells for each domain that has been added to
        // the well model at this point. This is a mutable operation that
        // updates the well counts in the domain reports.
        const auto dr_size = domain_reports_accumulated_.size();
        // Reset well counts before updating.
        for (auto i = 0*dr_size; i < dr_size; ++i) {
            domain_reports_accumulated_[i].success.num_wells = 0;
        }
        // Update the number of wells for each domain.
        for (const auto& [wname, domain] : wellModel_.well_domain()) {
            domain_reports_accumulated_[domain].success.num_wells++;
        }
        return domain_reports_accumulated_;
    }

    //! \brief Write the cell-to-domain partition to a file in the given
    //! output directory (ResInsight compatible format).
    void writePartitions(const std::filesystem::path& odir) const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();
        const auto& elementMapper = this->model_.simulator().model().elementMapper();
        const auto& cartMapper = this->model_.simulator().vanguard().cartesianIndexMapper();

        ::Opm::writePartitions(odir,
                               domains_,
                               grid,
                               elementMapper,
                               cartMapper);
    }

    //! \brief Write the number of nonlinear iterations per cell to a file in
    //! ResInsight compatible format.
    void writeNonlinearIterationsPerCell(const std::filesystem::path& odir) const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();
        const auto& elementMapper = this->model_.simulator().model().elementMapper();
        const auto& cartMapper = this->model_.simulator().vanguard().cartesianIndexMapper();

        ::Opm::writeNonlinearIterationsPerCell(odir,
                                               domains_,
                                               domain_reports_accumulated_,
                                               grid,
                                               elementMapper,
                                               cartMapper);
    }

private:
    //! \brief Solve the equation system for a single domain.
    ConvergenceReport
    solveDomain(const Domain& domain,
                const SimulatorTimerInterface& timer,
                SimulatorReportSingle& local_report,
                DeferredLogger& logger,
                [[maybe_unused]] const int global_iteration,
                const bool initial_assembly_required)
    {
        auto& modelSimulator = model_.simulator();
        Dune::Timer detailTimer;

        modelSimulator.model().newtonMethod().setIterationIndex(0);

        // When called, if assembly has already been performed
        // with the initial values, we only need to check
        // for local convergence. Otherwise, we must do a local
        // assembly.
        int iter = 0;
        if (initial_assembly_required) {
            detailTimer.start();
            modelSimulator.model().newtonMethod().setIterationIndex(iter);
            // TODO: we should have a beginIterationLocal function()
            // only handling the well model for now
            wellModel_.assemble(modelSimulator.model().newtonMethod().numIterations(),
                                modelSimulator.timeStepSize(),
                                domain);
            const double tt0 = detailTimer.stop();
            local_report.assemble_time += tt0;
            local_report.assemble_time_well += tt0;
            detailTimer.reset();
            detailTimer.start();
            // Assemble reservoir locally.
            this->assembleReservoirDomain(domain);
            local_report.assemble_time += detailTimer.stop();
            local_report.total_linearizations += 1;
        }
        detailTimer.reset();
        detailTimer.start();
        std::vector<Scalar> resnorms;
        auto convreport = this->getDomainConvergence(domain, timer, 0, logger, resnorms);
        local_report.update_time += detailTimer.stop();
        if (convreport.converged()) {
            // TODO: set more info, timing etc.
            local_report.converged = true;
            return convreport;
        }

        // We have already assembled for the first iteration,
        // but not done the Schur complement for the wells yet.
        detailTimer.reset();
        detailTimer.start();
        model_.wellModel().linearizeDomain(domain,
                                           modelSimulator.model().linearizer().jacobian(),
                                           modelSimulator.model().linearizer().residual());
        const double tt1 = detailTimer.stop();
        local_report.assemble_time += tt1;
        local_report.assemble_time_well += tt1;

        // Local Newton loop.
        const int max_iter = model_.param().max_local_solve_iterations_;
        const auto& grid = modelSimulator.vanguard().grid();
        double damping_factor = 1.0;
        std::vector<std::vector<Scalar>> convergence_history;
        convergence_history.reserve(20);
        convergence_history.push_back(resnorms);
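        // (The residual-norm history collected here feeds
        // detail::detectOscillations() below, which triggers damping of the
        // local Newton updates when the residuals oscillate.)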
        do {
            // Solve local linear system.
            // Note that x has full size; we expect it to be nonzero only for in-domain cells.
            const int nc = grid.size(0);
            BVector x(nc);
            detailTimer.reset();
            detailTimer.start();
            double setup_time = 0.0;
            try {
                this->solveJacobianSystemDomain(domain, x, setup_time);
            }
            catch (const NumericalProblem& e) {
                // Local linear solve failed - treat as domain-level failure, not global timestep cut.
                logger.debug(fmt::format(
                    "Local linear solver failed in domain {} on rank {}: {}",
                    domain.index, rank_, e.what()));
                // Record statistics and return early.
                local_report.linear_solve_time += detailTimer.stop();
                local_report.linear_solve_setup_time += setup_time;
                local_report.total_linear_iterations = domain_linsolvers_[domain.index].iterations();
                modelSimulator.problem().endIteration();
                local_report.converged = false;
                local_report.total_newton_iterations = iter;
                local_report.total_linearizations += iter;
                return convreport;
            }
            model_.wellModel().postSolveDomain(x, domain);
            if (damping_factor != 1.0) {
                x *= damping_factor;
            }
            local_report.linear_solve_time += detailTimer.stop();
            local_report.linear_solve_setup_time += setup_time;
            local_report.total_linear_iterations = domain_linsolvers_[domain.index].iterations();

            // Update local solution.
            // TODO: x is still full size, should we optimize it?
            detailTimer.reset();
            detailTimer.start();
            this->updateDomainSolution(domain, x);
            local_report.update_time += detailTimer.stop();

            // Assemble well and reservoir.
            detailTimer.reset();
            detailTimer.start();
            ++iter;
            modelSimulator.model().newtonMethod().setIterationIndex(iter);
            // TODO: we should have a beginIterationLocal function()
            // only handling the well model for now
            wellModel_.assemble(modelSimulator.model().newtonMethod().numIterations(),
                                modelSimulator.timeStepSize(),
                                domain);
            const double tt3 = detailTimer.stop();
            local_report.assemble_time += tt3;
            local_report.assemble_time_well += tt3;
            detailTimer.reset();
            detailTimer.start();
            // Assemble reservoir locally.
            this->assembleReservoirDomain(domain);
            local_report.assemble_time += detailTimer.stop();

            // Check for local convergence.
            detailTimer.reset();
            detailTimer.start();
            resnorms.clear();
            convreport = this->getDomainConvergence(domain, timer, iter, logger, resnorms);
            convergence_history.push_back(resnorms);
            local_report.update_time += detailTimer.stop();

            // Apply the Schur complement of the well model to the
            // reservoir linearized equations.
            detailTimer.reset();
            detailTimer.start();
            model_.wellModel().linearizeDomain(domain,
                                               modelSimulator.model().linearizer().jacobian(),
                                               modelSimulator.model().linearizer().residual());
            const double tt2 = detailTimer.stop();
            local_report.assemble_time += tt2;
            local_report.assemble_time_well += tt2;

            // Check if we should dampen. Only do so if wells are converged.
            if (!convreport.converged() && !convreport.wellFailed()) {
                bool oscillate = false;
                bool stagnate = false;
                const auto num_residuals = convergence_history.front().size();
                detail::detectOscillations(convergence_history, iter, num_residuals,
                                           Scalar{0.2}, 1, oscillate, stagnate);
                if (oscillate) {
                    damping_factor *= 0.85;
                    logger.debug(fmt::format("| Damping factor is now {}", damping_factor));
                }
            }
        } while (!convreport.converged() && iter <= max_iter);

        modelSimulator.problem().endIteration();

        local_report.converged = convreport.converged();
        local_report.total_newton_iterations = iter;
        local_report.total_linearizations += iter;
        // TODO: set more info, timing etc.
        return convreport;
    }

    //! \brief Assemble the residual and Jacobian of the nonlinear system
    //! for a single domain.
    void assembleReservoirDomain(const Domain& domain)
    {
        OPM_TIMEBLOCK(assembleReservoirDomain);
        // -------- Mass balance equations --------
        model_.simulator().model().linearizer().linearizeDomain(domain, /*isNlddLocalSolve=*/true);
    }

    //! \brief Solve the linearized system for a single domain.
    void solveJacobianSystemDomain(const Domain& domain, BVector& global_x, double& setup_time)
    {
        const auto& modelSimulator = model_.simulator();

        Dune::Timer perfTimer;
        perfTimer.start();

        const Mat& main_matrix = modelSimulator.model().linearizer().jacobian().istlMatrix();
        if (domain_matrices_[domain.index]) {
            Details::copySubMatrix(main_matrix, domain.cells, *domain_matrices_[domain.index]);
        } else {
            domain_matrices_[domain.index] = std::make_unique<Mat>(Details::extractMatrix(main_matrix, domain.cells));
        }
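        // (The domain submatrix is extracted once to establish its sparsity
        // pattern; subsequent calls only copy the numerical values from the
        // global Jacobian, avoiding repeated pattern construction.)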
        auto& jac = *domain_matrices_[domain.index];
        auto res = Details::extractVector(modelSimulator.model().linearizer().residual(),
                                          domain.cells);
        auto x = res;

        // Set initial guess.
        global_x = 0.0;
        x = 0.0;

        auto& linsolver = domain_linsolvers_[domain.index];

        linsolver.prepare(jac, res);
        setup_time = perfTimer.stop();
        linsolver.setResidual(res);
        linsolver.solve(x);

        Details::setGlobal(x, domain.cells, global_x);
    }

    //! \brief Apply an update to the primary variables of a single domain.
    void updateDomainSolution(const Domain& domain, const BVector& dx)
    {
        OPM_TIMEBLOCK(updateDomainSolution);
        auto& simulator = model_.simulator();
        auto& newtonMethod = simulator.model().newtonMethod();
        SolutionVector& solution = simulator.model().solution(/*timeIdx=*/0);

        newtonMethod.update_(/*nextSolution=*/solution,
                             /*curSolution=*/solution,
                             /*update=*/dx,
                             /*resid=*/dx,
                             domain.cells); // the update routines of the black
                                            // oil model do not care about the
                                            // residual

        // If the solution is updated, the intensive quantities need to be recalculated.
        simulator.model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
    }

    //! \brief Compute local convergence data (residual sums, max CNV
    //! coefficients and B averages) for a single domain.
    std::pair<Scalar, Scalar> localDomainConvergenceData(const Domain& domain,
                                                         std::vector<Scalar>& R_sum,
                                                         std::vector<Scalar>& maxCoeff,
                                                         std::vector<Scalar>& B_avg,
                                                         std::vector<int>& maxCoeffCell)
    {
        const auto& modelSimulator = model_.simulator();

        Scalar pvSumLocal = 0.0;
        Scalar numAquiferPvSumLocal = 0.0;
        const auto& model = modelSimulator.model();
        const auto& problem = modelSimulator.problem();

        const auto& modelResid = modelSimulator.model().linearizer().residual();

        ElementContext elemCtx(modelSimulator);
        const auto& gridView = domain.view;
        const auto& elemEndIt = gridView.template end</*codim=*/0>();
        IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());

        for (auto elemIt = gridView.template begin</*codim=*/0>();
             elemIt != elemEndIt;
             ++elemIt)
        {
            if (elemIt->partitionType() != Dune::InteriorEntity) {
                continue;
            }
            const auto& elem = *elemIt;
            elemCtx.updatePrimaryStencil(elem);
            elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);

            const unsigned cell_idx = elemCtx.globalSpaceIndex(/*spaceIdx=*/0, /*timeIdx=*/0);
            const auto& intQuants = elemCtx.intensiveQuantities(/*spaceIdx=*/0, /*timeIdx=*/0);
            const auto& fs = intQuants.fluidState();

            const auto pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
                                 model.dofTotalVolume(cell_idx);
            pvSumLocal += pvValue;

            if (isNumericalAquiferCell(elem))
            {
                numAquiferPvSumLocal += pvValue;
            }

            model_.getMaxCoeff(cell_idx, intQuants, fs, modelResid, pvValue,
                               B_avg, R_sum, maxCoeff, maxCoeffCell);
        }

        // Compute the average of B over the cells of the domain.
        const int bSize = B_avg.size();
        for (int i = 0; i < bSize; ++i)
        {
            B_avg[i] /= Scalar(domain.cells.size());
        }

        return {pvSumLocal, numAquiferPvSumLocal};
    }

    ConvergenceReport getDomainReservoirConvergence(const double reportTime,
                                                    const double dt,
                                                    const int iteration,
                                                    const Domain& domain,
                                                    DeferredLogger& logger,
                                                    std::vector<Scalar>& B_avg,
                                                    std::vector<Scalar>& residual_norms)
    {
        using Vector = std::vector<Scalar>;

        const int numComp = numEq;
        Vector R_sum(numComp, 0.0);
        Vector maxCoeff(numComp, std::numeric_limits<Scalar>::lowest());
        std::vector<int> maxCoeffCell(numComp, -1);
        const auto [pvSum, numAquiferPvSum]
            = this->localDomainConvergenceData(domain, R_sum, maxCoeff, B_avg, maxCoeffCell);

        auto cnvErrorPvFraction = computeCnvErrorPvLocal(domain, B_avg, dt);
        cnvErrorPvFraction /= (pvSum - numAquiferPvSum);

        // The default value of relaxed_max_pv_fraction_ is 0.03, and the
        // default min_strict_cnv_iter_ is 0. At each iteration we determine
        // whether to use the relaxed CNV tolerance; to disable the relaxed
        // tolerance entirely, set relaxed_max_pv_fraction_ to 0.
        const bool use_relaxed_cnv = cnvErrorPvFraction < model_.param().relaxed_max_pv_fraction_ &&
                                     iteration >= model_.param().min_strict_cnv_iter_;
        // A tighter bound for local convergence should increase the
        // likelihood of: local convergence => global convergence.
        const Scalar tol_cnv = model_.param().local_tolerance_scaling_cnv_
            * (use_relaxed_cnv ? model_.param().tolerance_cnv_relaxed_
                               : model_.param().tolerance_cnv_);

        const bool use_relaxed_mb = iteration >= model_.param().min_strict_mb_iter_;
        const Scalar tol_mb = model_.param().local_tolerance_scaling_mb_
            * (use_relaxed_mb ? model_.param().tolerance_mb_relaxed_ : model_.param().tolerance_mb_);

        // Finish computation.
        std::vector<Scalar> CNV(numComp);
        std::vector<Scalar> mass_balance_residual(numComp);
        for (int compIdx = 0; compIdx < numComp; ++compIdx)
        {
            CNV[compIdx] = B_avg[compIdx] * dt * maxCoeff[compIdx];
            mass_balance_residual[compIdx] = std::abs(B_avg[compIdx]*R_sum[compIdx]) * dt / pvSum;
            residual_norms.push_back(CNV[compIdx]);
        }
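        // (Domain-local versions of Flow's usual measures: for each
        // component, CNV = B_avg * dt * maxCoeff, where maxCoeff is the
        // largest residual-per-pore-volume ratio over the domain's cells as
        // gathered by getMaxCoeff(), and MB = |B_avg * R_sum| * dt / pvSum.)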

        // Create convergence report.
        ConvergenceReport report{reportTime};
        using CR = ConvergenceReport;
        for (int compIdx = 0; compIdx < numComp; ++compIdx) {
            Scalar res[2] = { mass_balance_residual[compIdx], CNV[compIdx] };
            CR::ReservoirFailure::Type types[2] = { CR::ReservoirFailure::Type::MassBalance,
                                                    CR::ReservoirFailure::Type::Cnv };
            Scalar tol[2] = { tol_mb, tol_cnv };
            for (int ii : {0, 1}) {
                if (std::isnan(res[ii])) {
                    report.setReservoirFailed({types[ii], CR::Severity::NotANumber, compIdx});
                    logger.debug("NaN residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] > model_.param().max_residual_allowed_) {
                    report.setReservoirFailed({types[ii], CR::Severity::TooLarge, compIdx});
                    logger.debug("Too large residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] < 0.0) {
                    report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
                    logger.debug("Negative residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] > tol[ii]) {
                    report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
                }

                report.setReservoirConvergenceMetric(types[ii], compIdx, res[ii], tol[ii]);
            }
        }

        // Output of residuals. If converged at initial state, log nothing.
        const bool converged_at_initial_state = (report.converged() && iteration == 0);
        if (!converged_at_initial_state) {
            if (iteration == 0) {
                // Log header.
                std::string msg = fmt::format("Domain {} on rank {}, size {}, containing cell {}\n| Iter",
                                              domain.index, this->rank_, domain.cells.size(), domain.cells[0]);
                for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                    msg += " MB(";
                    msg += model_.compNames().name(compIdx)[0];
                    msg += ") ";
                }
                for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                    msg += " CNV(";
                    msg += model_.compNames().name(compIdx)[0];
                    msg += ") ";
                }
                logger.debug(msg);
            }
            // Log convergence data.
            std::ostringstream ss;
            ss << "| ";
            const std::streamsize oprec = ss.precision(3);
            const std::ios::fmtflags oflags = ss.setf(std::ios::scientific);
            ss << std::setw(4) << iteration;
            for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                ss << std::setw(11) << mass_balance_residual[compIdx];
            }
            for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                ss << std::setw(11) << CNV[compIdx];
            }
            ss.precision(oprec);
            ss.flags(oflags);
            logger.debug(ss.str());
        }

        return report;
    }

    ConvergenceReport getDomainConvergence(const Domain& domain,
                                           const SimulatorTimerInterface& timer,
                                           const int iteration,
                                           DeferredLogger& logger,
                                           std::vector<Scalar>& residual_norms)
    {
        OPM_TIMEBLOCK(getDomainConvergence);
        std::vector<Scalar> B_avg(numEq, 0.0);
        auto report = this->getDomainReservoirConvergence(timer.simulationTimeElapsed(),
                                                          timer.currentStepLength(),
                                                          iteration,
                                                          domain,
                                                          logger,
                                                          B_avg,
                                                          residual_norms);
        report += wellModel_.getWellConvergence(domain, B_avg, logger);
        return report;
    }

    //! \brief Return an ordering of the subdomains, based on the configured
    //! solve approach and ordering measure.
    std::vector<int> getSubdomainOrder()
    {
        const auto& modelSimulator = model_.simulator();
        const auto& solution = modelSimulator.model().solution(0);

        std::vector<int> domain_order(domains_.size());
        std::iota(domain_order.begin(), domain_order.end(), 0);

        if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
            // Do nothing, 0..n-1 order is fine.
            return domain_order;
        } else if (model_.param().local_solve_approach_ == DomainSolveApproach::GaussSeidel) {
            // Calculate the measure used to order the domains.
            std::vector<Scalar> measure_per_domain(domains_.size());
            switch (model_.param().local_domains_ordering_) {
            case DomainOrderingMeasure::AveragePressure: {
                // Use average pressures to order domains.
                for (const auto& domain : domains_) {
                    const Scalar press_sum =
                        std::accumulate(domain.cells.begin(), domain.cells.end(), Scalar{0},
                                        [&solution](const auto acc, const auto c)
                                        { return acc + solution[c][Indices::pressureSwitchIdx]; });
                    const Scalar avgpress = press_sum / domain.cells.size();
                    measure_per_domain[domain.index] = avgpress;
                }
                break;
            }
            case DomainOrderingMeasure::MaxPressure: {
                // Use max pressures to order domains.
                for (const auto& domain : domains_) {
                    measure_per_domain[domain.index] =
                        std::accumulate(domain.cells.begin(), domain.cells.end(), Scalar{0},
                                        [&solution](const auto acc, const auto c)
                                        { return std::max(acc, solution[c][Indices::pressureSwitchIdx]); });
                }
                break;
            }
            case DomainOrderingMeasure::Residual: {
                // Use maximum residual to order domains.
                const auto& residual = modelSimulator.model().linearizer().residual();
                const int num_vars = residual[0].size();
                for (const auto& domain : domains_) {
                    Scalar maxres = 0.0;
                    for (const int c : domain.cells) {
                        for (int ii = 0; ii < num_vars; ++ii) {
                            maxres = std::max(maxres, std::fabs(residual[c][ii]));
                        }
                    }
                    measure_per_domain[domain.index] = maxres;
                }
                break;
            }
            } // end of switch (model_.param().local_domains_ordering_)

            // Sort by largest measure, keeping index order if equal.
            const auto& m = measure_per_domain;
            std::stable_sort(domain_order.begin(), domain_order.end(),
                             [&m](const int i1, const int i2) { return m[i1] > m[i2]; });
            return domain_order;
        } else {
            throw std::logic_error("Domain solve approach must be Jacobi or Gauss-Seidel");
        }
    }

    template<class GlobalEqVector>
    void solveDomainJacobi(GlobalEqVector& solution,
                           GlobalEqVector& locally_solved,
                           SimulatorReportSingle& local_report,
                           DeferredLogger& logger,
                           const int iteration,
                           const SimulatorTimerInterface& timer,
                           const Domain& domain)
    {
        auto initial_local_well_primary_vars = wellModel_.getPrimaryVarsDomain(domain.index);
        auto initial_local_solution = Details::extractVector(solution, domain.cells);
        auto convrep = solveDomain(domain, timer, local_report, logger, iteration, false);
        if (local_report.converged) {
            auto local_solution = Details::extractVector(solution, domain.cells);
            Details::setGlobal(local_solution, domain.cells, locally_solved);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        } else {
            wellModel_.setPrimaryVarsDomain(domain.index, initial_local_well_primary_vars);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        }
    }

    template<class GlobalEqVector>
    void solveDomainGaussSeidel(GlobalEqVector& solution,
                                GlobalEqVector& locally_solved,
                                SimulatorReportSingle& local_report,
                                DeferredLogger& logger,
                                const int iteration,
                                const SimulatorTimerInterface& timer,
                                const Domain& domain)
    {
        auto initial_local_well_primary_vars = wellModel_.getPrimaryVarsDomain(domain.index);
        auto initial_local_solution = Details::extractVector(solution, domain.cells);
        auto convrep = solveDomain(domain, timer, local_report, logger, iteration, true);
        if (!local_report.converged) {
            // We look at the detailed convergence report to evaluate
            // if we should accept the unconverged solution.
            // We do not accept a solution if the wells are unconverged.
            if (!convrep.wellFailed()) {
                // Calculate the sums of the mb and cnv failures.
                Scalar mb_sum = 0.0;
                Scalar cnv_sum = 0.0;
                for (const auto& rc : convrep.reservoirConvergence()) {
                    if (rc.type() == ConvergenceReport::ReservoirFailure::Type::MassBalance) {
                        mb_sum += rc.value();
                    } else if (rc.type() == ConvergenceReport::ReservoirFailure::Type::Cnv) {
                        cnv_sum += rc.value();
                    }
                }
                // If not too high, we overrule the convergence failure.
                const Scalar acceptable_local_mb_sum = 1e-3;
                const Scalar acceptable_local_cnv_sum = 1.0;
                if (mb_sum < acceptable_local_mb_sum && cnv_sum < acceptable_local_cnv_sum) {
                    local_report.converged = true;
                    local_report.accepted_unconverged_domains += 1;
                    logger.debug(fmt::format("Accepting solution in unconverged domain {} on rank {}.", domain.index, rank_));
                    logger.debug(fmt::format("Value of mb_sum: {} cnv_sum: {}", mb_sum, cnv_sum));
                } else {
                    logger.debug("Unconverged local solution.");
                }
            } else {
                logger.debug("Unconverged local solution with well convergence failures:");
                for (const auto& wf : convrep.wellFailures()) {
                    logger.debug(to_string(wf));
                }
            }
        }
        if (local_report.converged) {
            local_report.converged_domains += 1;
            auto local_solution = Details::extractVector(solution, domain.cells);
            Details::setGlobal(local_solution, domain.cells, locally_solved);
        } else {
            local_report.unconverged_domains += 1;
            wellModel_.setPrimaryVarsDomain(domain.index, initial_local_well_primary_vars);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        }
    }

    Scalar computeCnvErrorPvLocal(const Domain& domain,
                                  const std::vector<Scalar>& B_avg, double dt) const
    {
        Scalar errorPV{};
        const auto& simulator = model_.simulator();
        const auto& model = simulator.model();
        const auto& problem = simulator.problem();
        const auto& residual = simulator.model().linearizer().residual();

        for (const int cell_idx : domain.cells) {
            const Scalar pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
                                   model.dofTotalVolume(cell_idx);
            const auto& cellResidual = residual[cell_idx];
            bool cnvViolated = false;

            for (unsigned eqIdx = 0; eqIdx < cellResidual.size(); ++eqIdx) {
                using std::fabs;
                Scalar CNV = cellResidual[eqIdx] * dt * B_avg[eqIdx] / pvValue;
                cnvViolated = cnvViolated || (fabs(CNV) > model_.param().tolerance_cnv_);
            }

            if (cnvViolated) {
                errorPV += pvValue;
            }
        }
        return errorPV;
    }

    decltype(auto) partitionCells() const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();

        using GridView = std::remove_cv_t<std::remove_reference_t<
            decltype(grid.leafGridView())>>;

        using Element = std::remove_cv_t<std::remove_reference_t<
            typename GridView::template Codim<0>::Entity>>;

        const auto& param = this->model_.param();

        auto zoltan_ctrl = ZoltanPartitioningControl<Element>{};

        zoltan_ctrl.domain_imbalance = param.local_domains_partition_imbalance_;

        zoltan_ctrl.index =
            [elementMapper = &this->model_.simulator().model().elementMapper()]
            (const Element& element)
        {
            return elementMapper->index(element);
        };

        zoltan_ctrl.local_to_global =
            [cartMapper = &this->model_.simulator().vanguard().cartesianIndexMapper()]
            (const int elemIdx)
        {
            return cartMapper->cartesianIndex(elemIdx);
        };

        // Forming the list of wells is expensive, so do this only if needed.
        const auto need_wells = param.local_domains_partition_method_ == "zoltan";

        const auto wells = need_wells
            ? this->model_.simulator().vanguard().schedule().getWellsatEnd()
            : std::vector<Well>{};

        const auto& possibleFutureConnectionSet = need_wells
            ? this->model_.simulator().vanguard().schedule().getPossibleFutureConnections()
            : std::unordered_map<std::string, std::set<int>>{};

        // If the number-of-domains parameter is defaulted, choose a reasonable value.
        constexpr int default_cells_per_domain = 1000;
        const int num_domains = (param.num_local_domains_ > 0)
            ? param.num_local_domains_
            : detail::countGlobalCells(grid) / default_cells_per_domain;

        return ::Opm::partitionCells(param.local_domains_partition_method_,
                                     num_domains, grid.leafGridView(), wells,
                                     possibleFutureConnectionSet, zoltan_ctrl,
                                     param.local_domains_partition_well_neighbor_levels_);
    }
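    // (Example: with the default of ~1000 cells per domain, a model with
    // 100,000 global cells yields about 100 subdomains unless
    // num_local_domains_ is set explicitly.)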

    void updateMobilities(const Domain& domain)
    {
        if (domain.skip || model_.param().nldd_relative_mobility_change_tol_ == 0.0) {
            return;
        }
        const auto numActivePhases = FluidSystem::numActivePhases();
        for (const auto globalDofIdx : domain.cells) {
            const auto& intQuants = model_.simulator().model().intensiveQuantities(globalDofIdx, /*time_idx=*/0);

            for (unsigned activePhaseIdx = 0; activePhaseIdx < numActivePhases; ++activePhaseIdx) {
                const auto phaseIdx = FluidSystem::activeToCanonicalPhaseIdx(activePhaseIdx);
                const auto mobIdx = globalDofIdx * numActivePhases + activePhaseIdx;
                previousMobilities_[mobIdx] = getValue(intQuants.mobility(phaseIdx));
            }
        }
    }

    bool checkIfSubdomainNeedsSolving(const Domain& domain, const int iteration)
    {
        if (domain.skip) {
            return false;
        }

        // If the domain was marked as needing solving in a previous
        // iteration, we do not need to check again.
        if (domain_needs_solving_[domain.index]) {
            return true;
        }

        // If we do not check for mobility changes, we always solve the domain.
        if (model_.param().nldd_relative_mobility_change_tol_ == 0.0) {
            return true;
        }

        // Skip the mobility check on the first iteration.
        if (iteration == 0) {
            return true;
        }

        return checkSubdomainChangeRelative(domain);
    }
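
    // The relative-change criterion below flags a domain for re-solving
    // when, for some cell and some active phase p,
    //     |mob_p - mob_p_prev| / ((1/nph) * sum_q mob_q_prev)
    // exceeds nldd_relative_mobility_change_tol_, i.e. each phase's change
    // is measured relative to the cell's mean previous mobility.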
    bool checkSubdomainChangeRelative(const Domain& domain)
    {
        const auto numActivePhases = FluidSystem::numActivePhases();

        // Check mobility changes for all cells in the domain.
        for (const auto globalDofIdx : domain.cells) {
            const auto& intQuants = model_.simulator().model().intensiveQuantities(globalDofIdx, /*time_idx=*/0);

            // Calculate average previous mobility for normalization.
            Scalar cellMob = 0.0;
            for (unsigned activePhaseIdx = 0; activePhaseIdx < numActivePhases; ++activePhaseIdx) {
                const auto mobIdx = globalDofIdx * numActivePhases + activePhaseIdx;
                cellMob += previousMobilities_[mobIdx] / numActivePhases;
            }

            // Check relative changes for each phase.
            for (unsigned activePhaseIdx = 0; activePhaseIdx < numActivePhases; ++activePhaseIdx) {
                const auto phaseIdx = FluidSystem::activeToCanonicalPhaseIdx(activePhaseIdx);
                const auto mobIdx = globalDofIdx * numActivePhases + activePhaseIdx;
                const auto mobility = getValue(intQuants.mobility(phaseIdx));
                const auto relDiff = std::abs(mobility - previousMobilities_[mobIdx]) / cellMob;
                if (relDiff > model_.param().nldd_relative_mobility_change_tol_) {
                    return true;
                }
            }
        }
        return false;
    }

    BlackoilModel<TypeTag>& model_;
    BlackoilWellModelNldd<TypeTag> wellModel_;
    std::vector<Domain> domains_;
    std::vector<std::unique_ptr<Mat>> domain_matrices_;
    std::vector<ISTLSolverType> domain_linsolvers_;
    SimulatorReport local_reports_accumulated_;
    // Mutable because we need to update the number of wells for each domain
    // in domainAccumulatedReports().
    mutable std::vector<SimulatorReport> domain_reports_accumulated_;
    int rank_ = 0;
    // Previous mobilities, used to check for changes. A single flat vector
    // indexed by (globalCellIdx * numActivePhases + activePhaseIdx).
    std::vector<Scalar> previousMobilities_;
    // Flags indicating whether each domain should be solved in the next iteration.
    std::vector<bool> domain_needs_solving_;
};

} // namespace Opm

#endif // OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED