BlackoilModelNldd.hpp
/*
  Copyright 2013, 2015 SINTEF ICT, Applied Mathematics.
  Copyright 2014, 2015 Dr. Blatt - HPC-Simulation-Software & Services
  Copyright 2014, 2015 Statoil ASA.
  Copyright 2015 NTNU
  Copyright 2015, 2016, 2017 IRIS AS

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
#define OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED

#include <dune/common/timer.hh>

#include <opm/grid/common/SubGridPart.hpp>

#include <opm/simulators/aquifers/AquiferGridUtils.hpp>

#include <opm/simulators/flow/countGlobalCells.hpp>
#include <opm/simulators/flow/partitionCells.hpp>
#include <opm/simulators/flow/priVarsPacking.hpp>
#include <opm/simulators/flow/SubDomain.hpp>

#include <opm/simulators/linalg/extractMatrix.hpp>

#if COMPILE_BDA_BRIDGE
#include <opm/simulators/linalg/ISTLSolverBda.hpp>
#else
#include <opm/simulators/linalg/ISTLSolver.hpp>
#endif

#include <opm/simulators/timestepping/ConvergenceReport.hpp>
#include <opm/simulators/timestepping/SimulatorReport.hpp>
#include <opm/simulators/timestepping/SimulatorTimerInterface.hpp>

#include <opm/simulators/utils/ComponentName.hpp>

#include <fmt/format.h>

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <filesystem>
#include <fstream>
#include <functional>
#include <iomanip>
#include <ios>
#include <memory>
#include <numeric>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

namespace Opm {

template<class TypeTag> class BlackoilModel;

/// A NLDD implementation for three-phase black oil.
template <class TypeTag>
class BlackoilModelNldd {
public:
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
    using FluidSystem = GetPropType<TypeTag, Properties::FluidSystem>;
    using Grid = GetPropType<TypeTag, Properties::Grid>;
    using Indices = GetPropType<TypeTag, Properties::Indices>;
    using ModelParameters = BlackoilModelParameters<TypeTag>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using SolutionVector = GetPropType<TypeTag, Properties::SolutionVector>;

    using BVector = typename BlackoilModel<TypeTag>::BVector;
    using Domain = SubDomain<Grid>;
    using ISTLSolverType = ISTLSolver<TypeTag>;
    using Mat = typename BlackoilModel<TypeTag>::Mat;

    static constexpr int numEq = Indices::numEq;

    //! \brief The constructor sets up the subdomains.
    BlackoilModelNldd(BlackoilModel<TypeTag>& model)
        : model_(model), rank_(model_.simulator().vanguard().grid().comm().rank())
    {
        // Create partitions.
        const auto& [partition_vector, num_domains] = this->partitionCells();

        // Scan through partitioning to get correct size for each.
        std::vector<int> sizes(num_domains, 0);
        for (const auto& p : partition_vector) {
            ++sizes[p];
        }
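
        // Note: an EntitySeed is a compact handle from which the grid can
        // recreate an element later, so the per-domain subgrid views can be
        // built from a single sweep over the grid below.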

        // Set up correctly sized vectors of entity seeds and of indices for each partition.
        using EntitySeed = typename Grid::template Codim<0>::EntitySeed;
        std::vector<std::vector<EntitySeed>> seeds(num_domains);
        std::vector<std::vector<int>> partitions(num_domains);
        for (int domain = 0; domain < num_domains; ++domain) {
            seeds[domain].resize(sizes[domain]);
            partitions[domain].resize(sizes[domain]);
        }

        // Iterate through the grid once, setting the seeds of all partitions.
        // Note: owned cells only!
        const auto& grid = model_.simulator().vanguard().grid();

        std::vector<int> count(num_domains, 0);
        const auto& gridView = grid.leafGridView();
        const auto beg = gridView.template begin<0, Dune::Interior_Partition>();
        const auto end = gridView.template end<0, Dune::Interior_Partition>();
        int cell = 0;
        for (auto it = beg; it != end; ++it, ++cell) {
            const int p = partition_vector[cell];
            seeds[p][count[p]] = it->seed();
            partitions[p][count[p]] = cell;
            ++count[p];
        }
        assert(count == sizes);

        // Create the domains.
        for (int index = 0; index < num_domains; ++index) {
            std::vector<bool> interior(partition_vector.size(), false);
            for (int ix : partitions[index]) {
                interior[ix] = true;
            }

            Dune::SubGridPart<Grid> view{grid, std::move(seeds[index])};

            this->domains_.emplace_back(index,
                                        std::move(partitions[index]),
                                        std::move(interior),
                                        std::move(view));
        }

        // Set up container for the local system matrices.
        domain_matrices_.resize(num_domains);

        // Set up container for the local linear solvers.
        for (int index = 0; index < num_domains; ++index) {
            // TODO: The ISTLSolver constructor will make
            // parallel structures appropriate for the full grid
            // only. This must be addressed before going parallel.
            const auto& eclState = model_.simulator().vanguard().eclState();
            FlowLinearSolverParameters loc_param;
            loc_param.template init<TypeTag>(eclState.getSimulationConfig().useCPR());
            // Override solver type with umfpack if small domain,
            // otherwise hardcode to ILU0.
            if (domains_[index].cells.size() < 200) {
                loc_param.linsolver_ = "umfpack";
            } else {
                loc_param.linsolver_ = "ilu0";
                loc_param.linear_solver_reduction_ = 1e-2;
            }
            loc_param.linear_solver_print_json_definition_ = false;
            const bool force_serial = true;
            domain_linsolvers_.emplace_back(model_.simulator(), loc_param, force_serial);
        }

        assert(int(domains_.size()) == num_domains);
    }

    //! \brief Called before starting a time step.
    void prepareStep()
    {
        // Set up the domain->well mapping.
        model_.wellModel().setupDomains(domains_);
    }
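
    // A minimal sketch of the intended driving sequence (`model`, `timer`
    // and `solver` are illustrative names for the owning BlackoilModel, the
    // simulator timer and the nonlinear solver; they are not defined here):
    //
    //   BlackoilModelNldd<TypeTag> nldd(model); // construct subdomains once
    //   nldd.prepareStep();                     // before each time step
    //   auto report = nldd.nonlinearIterationNldd(iteration, timer, solver);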

    //! \brief Do one non-linear NLDD iteration.
    template <class NonlinearSolverType>
    SimulatorReportSingle nonlinearIterationNldd(const int iteration,
                                                 const SimulatorTimerInterface& timer,
                                                 NonlinearSolverType& nonlinear_solver)
    {
        // ----------- Set up reports and timer -----------
        SimulatorReportSingle report;
        Dune::Timer perfTimer;

        model_.initialLinearization(report, iteration, nonlinear_solver.minIter(), nonlinear_solver.maxIter(), timer);

        if (report.converged) {
            return report;
        }

        // ----------- If not converged, do an NLDD iteration -----------

        auto& solution = model_.simulator().model().solution(0);
        auto initial_solution = solution;
        auto locally_solved = initial_solution;

        // ----------- Decide on an ordering for the domains -----------
        const auto domain_order = this->getSubdomainOrder();

        // ----------- Solve each domain separately -----------
        DeferredLogger logger;
        std::vector<SimulatorReportSingle> domain_reports(domains_.size());
        for (const int domain_index : domain_order) {
            const auto& domain = domains_[domain_index];
            SimulatorReportSingle local_report;
            try {
                switch (model_.param().local_solve_approach_) {
                case DomainSolveApproach::Jacobi:
                    solveDomainJacobi(solution, locally_solved, local_report, logger,
                                      iteration, timer, domain);
                    break;
                default:
                case DomainSolveApproach::GaussSeidel:
                    solveDomainGaussSeidel(solution, locally_solved, local_report, logger,
                                           iteration, timer, domain);
                    break;
                }
            }
            catch (...) {
                // Something went wrong during local solves.
                local_report.converged = false;
            }
            // This should have updated the global matrix to be
            // dR_i/du_j evaluated at new local solutions for
            // i == j, at old solution for i != j.
            if (!local_report.converged) {
                // TODO: more proper treatment, including in parallel.
                logger.debug(fmt::format("Convergence failure in domain {} on rank {}.", domain.index, rank_));
            }
            domain_reports[domain.index] = local_report;
        }

        // Communicate and log all messages.
        auto global_logger = gatherDeferredLogger(logger, model_.simulator().vanguard().grid().comm());
        global_logger.logMessages();

        // Accumulate local solve data.
        // Putting the counts in a single array to avoid multiple
        // comm.sum() calls. Keeping the named vars for readability.
        std::array<int, 4> counts{ 0, 0, 0, static_cast<int>(domain_reports.size()) };
        int& num_converged = counts[0];
        int& num_converged_already = counts[1];
        int& num_local_newtons = counts[2];
        int& num_domains = counts[3];
        {
            SimulatorReportSingle rep;
            for (const auto& dr : domain_reports) {
                if (dr.converged) {
                    ++num_converged;
                    if (dr.total_newton_iterations == 0) {
                        ++num_converged_already;
                    }
                }
                rep += dr;
            }
            num_local_newtons = rep.total_newton_iterations;
            local_reports_accumulated_ += rep;
        }

        if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
            solution = locally_solved;
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0);
        }

#if HAVE_MPI
        // Communicate solutions:
        // With multiple processes, this process' overlap (i.e. not
        // owned) cells' solution values have been modified by local
        // solves in the owning processes, and remain unchanged
        // here. We must therefore receive the updated solution on the
        // overlap cells and update their intensive quantities before
        // we move on.
        const auto& comm = model_.simulator().vanguard().grid().comm();
        if (comm.size() > 1) {
            const auto* ccomm = model_.simulator().model().newtonMethod().linearSolver().comm();

            // Copy numerical values from primary vars.
            ccomm->copyOwnerToAll(solution, solution);

            // Copy flags from primary vars.
            const std::size_t num = solution.size();
            Dune::BlockVector<std::size_t> allmeanings(num);
            for (std::size_t ii = 0; ii < num; ++ii) {
                allmeanings[ii] = PVUtil::pack(solution[ii]);
            }
            ccomm->copyOwnerToAll(allmeanings, allmeanings);
            for (std::size_t ii = 0; ii < num; ++ii) {
                PVUtil::unPack(solution[ii], allmeanings[ii]);
            }

            // Update intensive quantities for our overlap values.
            model_.simulator().model().invalidateAndUpdateIntensiveQuantitiesOverlap(/*timeIdx=*/0);

            // Make total counts of domains converged.
            comm.sum(counts.data(), counts.size());
        }
#endif // HAVE_MPI

        const bool is_iorank = this->rank_ == 0;
        if (is_iorank) {
            OpmLog::debug(fmt::format("Local solves finished. Converged for {}/{} domains. {} domains did no work. {} total local Newton iterations.\n",
                                      num_converged, num_domains, num_converged_already, num_local_newtons));
        }

        // Finish with a Newton step.
        // Note that the "iteration + 100" is a simple way to avoid entering
        // "if (iteration == 0)" and similar blocks, and also makes it a little
        // easier to spot the iteration residuals in the DBG file. A more sophisticated
        // approach can be done later.
        auto rep = model_.nonlinearIterationNewton(iteration + 100, timer, nonlinear_solver);
        report += rep;
        if (rep.converged) {
            report.converged = true;
        }
        return report;
    }

    //! \brief Return the statistics if the nonlinearIteration() method failed.
    const SimulatorReportSingle& localAccumulatedReports() const
    {
        return local_reports_accumulated_;
    }

    void writePartitions(const std::filesystem::path& odir) const
    {
        const auto& elementMapper = this->model_.simulator().model().elementMapper();
        const auto& cartMapper = this->model_.simulator().vanguard().cartesianIndexMapper();

        const auto& grid = this->model_.simulator().vanguard().grid();
        const auto& comm = grid.comm();
        const auto nDigit = 1 + static_cast<int>(std::floor(std::log10(comm.size())));
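
        // One file per rank; the fmt spec "{1:0>{0}}" zero-pads the rank
        // number to nDigit characters so that file names sort naturally
        // (e.g. rank 7 on a 16-rank run becomes "07"). Each line written
        // below holds: owning rank, Cartesian cell index, domain id.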
        std::ofstream pfile { odir / fmt::format("{1:0>{0}}", nDigit, comm.rank()) };

        const auto p = this->reconstitutePartitionVector();
        auto i = 0;
        for (const auto& cell : elements(grid.leafGridView(), Dune::Partitions::interior)) {
            pfile << comm.rank() << ' '
                  << cartMapper.cartesianIndex(elementMapper.index(cell)) << ' '
                  << p[i++] << '\n';
        }
    }

private:
    //! \brief Solve the equation system for a single domain.
    std::pair<SimulatorReportSingle, ConvergenceReport>
    solveDomain(const Domain& domain,
                const SimulatorTimerInterface& timer,
                DeferredLogger& logger,
                [[maybe_unused]] const int global_iteration,
                const bool initial_assembly_required)
    {
        auto& modelSimulator = model_.simulator();

        SimulatorReportSingle report;
        Dune::Timer solveTimer;
        solveTimer.start();
        Dune::Timer detailTimer;

        modelSimulator.model().newtonMethod().setIterationIndex(0);

        // When called, if assembly has already been performed
        // with the initial values, we only need to check
        // for local convergence. Otherwise, we must do a local
        // assembly.
        int iter = 0;
        if (initial_assembly_required) {
            detailTimer.start();
            modelSimulator.model().newtonMethod().setIterationIndex(iter);
            // TODO: we should have a beginIterationLocal function()
            // only handling the well model for now
            modelSimulator.problem().wellModel().assembleDomain(modelSimulator.model().newtonMethod().numIterations(),
                                                                modelSimulator.timeStepSize(),
                                                                domain);
            // Assemble reservoir locally.
            report += this->assembleReservoirDomain(domain);
            report.assemble_time += detailTimer.stop();
        }
        detailTimer.reset();
        detailTimer.start();
        std::vector<double> resnorms;
        auto convreport = this->getDomainConvergence(domain, timer, 0, logger, resnorms);
        if (convreport.converged()) {
            // TODO: set more info, timing etc.
            report.converged = true;
            return { report, convreport };
        }

        // We have already assembled for the first iteration,
        // but not done the Schur complement for the wells yet.
        detailTimer.reset();
        detailTimer.start();
        model_.wellModel().linearizeDomain(domain,
                                           modelSimulator.model().linearizer().jacobian(),
                                           modelSimulator.model().linearizer().residual());
        const double tt1 = detailTimer.stop();
        report.assemble_time += tt1;
        report.assemble_time_well += tt1;

        // Local Newton loop.
        const int max_iter = model_.param().max_local_solve_iterations_;
        const auto& grid = modelSimulator.vanguard().grid();
        do {
            // Solve local linear system.
            // Note that x has full size, we expect it to be nonzero only for in-domain cells.
            const int nc = grid.size(0);
            BVector x(nc);
            detailTimer.reset();
            detailTimer.start();
            this->solveJacobianSystemDomain(domain, x);
            model_.wellModel().postSolveDomain(x, domain);
            report.linear_solve_time += detailTimer.stop();
            report.linear_solve_setup_time += model_.linearSolveSetupTime();
            report.total_linear_iterations = model_.linearIterationsLastSolve();

            // Update local solution. // TODO: x is still full size, should we optimize it?
            detailTimer.reset();
            detailTimer.start();
            this->updateDomainSolution(domain, x);
            report.update_time += detailTimer.stop();

            // Assemble well and reservoir.
            detailTimer.reset();
            detailTimer.start();
            ++iter;
            modelSimulator.model().newtonMethod().setIterationIndex(iter);
            // TODO: we should have a beginIterationLocal function()
            // only handling the well model for now
            // Assemble reservoir locally.
            modelSimulator.problem().wellModel().assembleDomain(modelSimulator.model().newtonMethod().numIterations(),
                                                                modelSimulator.timeStepSize(),
                                                                domain);
            report += this->assembleReservoirDomain(domain);
            report.assemble_time += detailTimer.stop();

            // Check for local convergence.
            detailTimer.reset();
            detailTimer.start();
            convreport = this->getDomainConvergence(domain, timer, iter, logger, resnorms);

            // Apply the Schur complement of the well model to the
            // reservoir linearized equations.
            detailTimer.reset();
            detailTimer.start();
            model_.wellModel().linearizeDomain(domain,
                                               modelSimulator.model().linearizer().jacobian(),
                                               modelSimulator.model().linearizer().residual());
            const double tt2 = detailTimer.stop();
            report.assemble_time += tt2;
            report.assemble_time_well += tt2;
        } while (!convreport.converged() && iter <= max_iter);

        modelSimulator.problem().endIteration();

        report.converged = convreport.converged();
        report.total_newton_iterations = iter;
        report.total_linearizations = iter;
        report.total_time = solveTimer.stop();
        // TODO: set more info, timing etc.
        return { report, convreport };
    }

    //! \brief Assemble the residual and Jacobian of the nonlinear system.
    SimulatorReportSingle assembleReservoirDomain(const Domain& domain)
    {
        // -------- Mass balance equations --------
        model_.simulator().model().linearizer().linearizeDomain(domain);
        return model_.wellModel().lastReport();
    }

    //! \brief Solve the linear system for a single domain.
    void solveJacobianSystemDomain(const Domain& domain, BVector& global_x)
    {
        const auto& modelSimulator = model_.simulator();

        Dune::Timer perfTimer;
        perfTimer.start();

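        // The submatrix for each domain is cached: the first solve for a
        // domain extracts it (allocating its sparsity pattern); later solves
        // only copy the current values into the cached matrix.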
        const Mat& main_matrix = modelSimulator.model().linearizer().jacobian().istlMatrix();
        if (domain_matrices_[domain.index]) {
            Details::copySubMatrix(main_matrix, domain.cells, *domain_matrices_[domain.index]);
        } else {
            domain_matrices_[domain.index] = std::make_unique<Mat>(Details::extractMatrix(main_matrix, domain.cells));
        }
        auto& jac = *domain_matrices_[domain.index];
        auto res = Details::extractVector(modelSimulator.model().linearizer().residual(),
                                          domain.cells);
        auto x = res;

        // Set initial guess to zero.
        global_x = 0.0;
        x = 0.0;

        auto& linsolver = domain_linsolvers_[domain.index];

        linsolver.prepare(jac, res);
        model_.linearSolveSetupTime() = perfTimer.stop();
        linsolver.setResidual(res);
        linsolver.solve(x);

        Details::setGlobal(x, domain.cells, global_x);
    }

    //! \brief Apply an update to the primary variables of a domain.
    void updateDomainSolution(const Domain& domain, const BVector& dx)
    {
        auto& simulator = model_.simulator();
        auto& newtonMethod = simulator.model().newtonMethod();
        SolutionVector& solution = simulator.model().solution(/*timeIdx=*/0);

        newtonMethod.update_(/*nextSolution=*/solution,
                             /*curSolution=*/solution,
                             /*update=*/dx,
                             /*resid=*/dx,
                             domain.cells); // the update routines of the black
                                            // oil model do not care about the
                                            // residual

        // If the solution is updated, the intensive quantities need to be recalculated.
        simulator.model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
    }

    //! \brief Get reservoir quantities on this domain needed for convergence calculations.
    std::pair<double, double> localDomainConvergenceData(const Domain& domain,
                                                         std::vector<Scalar>& R_sum,
                                                         std::vector<Scalar>& maxCoeff,
                                                         std::vector<Scalar>& B_avg,
                                                         std::vector<int>& maxCoeffCell)
    {
        const auto& modelSimulator = model_.simulator();

        double pvSumLocal = 0.0;
        double numAquiferPvSumLocal = 0.0;
        const auto& model = modelSimulator.model();
        const auto& problem = modelSimulator.problem();

        const auto& modelResid = modelSimulator.model().linearizer().residual();

        ElementContext elemCtx(modelSimulator);
        const auto& gridView = domain.view;
        const auto& elemEndIt = gridView.template end</*codim=*/0>();
        IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());

        for (auto elemIt = gridView.template begin</*codim=*/0>();
             elemIt != elemEndIt;
             ++elemIt)
        {
            if (elemIt->partitionType() != Dune::InteriorEntity) {
                continue;
            }
            const auto& elem = *elemIt;
            elemCtx.updatePrimaryStencil(elem);
            elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);

            const unsigned cell_idx = elemCtx.globalSpaceIndex(/*spaceIdx=*/0, /*timeIdx=*/0);
            const auto& intQuants = elemCtx.intensiveQuantities(/*spaceIdx=*/0, /*timeIdx=*/0);
            const auto& fs = intQuants.fluidState();

            const auto pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
                                 model.dofTotalVolume(cell_idx);
            pvSumLocal += pvValue;

            if (isNumericalAquiferCell(elem))
            {
                numAquiferPvSumLocal += pvValue;
            }

            model_.getMaxCoeff(cell_idx, intQuants, fs, modelResid, pvValue,
                               B_avg, R_sum, maxCoeff, maxCoeffCell);
        }

        // Compute the B averages over the cells of this domain.
        const int bSize = B_avg.size();
        for (int i = 0; i < bSize; ++i)
        {
            B_avg[i] /= Scalar(domain.cells.size());
        }

        return {pvSumLocal, numAquiferPvSumLocal};
    }

    ConvergenceReport getDomainReservoirConvergence(const double reportTime,
                                                    const double dt,
                                                    const int iteration,
                                                    const Domain& domain,
                                                    DeferredLogger& logger,
                                                    std::vector<Scalar>& B_avg,
                                                    std::vector<Scalar>& residual_norms)
    {
        using Vector = std::vector<Scalar>;

        const int numComp = numEq;
        Vector R_sum(numComp, 0.0);
        Vector maxCoeff(numComp, std::numeric_limits<Scalar>::lowest());
        std::vector<int> maxCoeffCell(numComp, -1);
        const auto [pvSum, numAquiferPvSum]
            = this->localDomainConvergenceData(domain, R_sum, maxCoeff, B_avg, maxCoeffCell);

        auto cnvErrorPvFraction = computeCnvErrorPvLocal(domain, B_avg, dt);
        cnvErrorPvFraction /= (pvSum - numAquiferPvSum);

        // The default value of relaxed_max_pv_fraction_ is 0.03, and of min_strict_cnv_iter_ is 0.
        // For each iteration we determine whether to use the relaxed CNV tolerance;
        // setting relaxed_max_pv_fraction_ to 0 disables the relaxed tolerance.
        const bool use_relaxed_cnv = cnvErrorPvFraction < model_.param().relaxed_max_pv_fraction_ &&
                                     iteration >= model_.param().min_strict_cnv_iter_;
        // A tighter bound for local convergence should increase the
        // likelihood that local convergence implies global convergence.
        const double tol_cnv = model_.param().local_tolerance_scaling_cnv_
                               * (use_relaxed_cnv ? model_.param().tolerance_cnv_relaxed_
                                                  : model_.param().tolerance_cnv_);

        const bool use_relaxed_mb = iteration >= model_.param().min_strict_mb_iter_;
        const double tol_mb = model_.param().local_tolerance_scaling_mb_
                              * (use_relaxed_mb ? model_.param().tolerance_mb_relaxed_ : model_.param().tolerance_mb_);
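
        // Two measures are computed below for each component: the local CNV
        // metric, CNV[c] = B_avg[c] * dt * maxCoeff[c], built from the
        // largest scaled cell residual collected by getMaxCoeff() above, and
        // the domain-wide mass-balance metric,
        // MB[c] = |B_avg[c] * R_sum[c]| * dt / pvSum.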
        // Finish computation.
        std::vector<Scalar> CNV(numComp);
        std::vector<Scalar> mass_balance_residual(numComp);
        for (int compIdx = 0; compIdx < numComp; ++compIdx)
        {
            CNV[compIdx] = B_avg[compIdx] * dt * maxCoeff[compIdx];
            mass_balance_residual[compIdx] = std::abs(B_avg[compIdx]*R_sum[compIdx]) * dt / pvSum;
            residual_norms.push_back(CNV[compIdx]);
        }

        // Create convergence report.
        ConvergenceReport report{reportTime};
        using CR = ConvergenceReport;
        for (int compIdx = 0; compIdx < numComp; ++compIdx) {
            double res[2] = { mass_balance_residual[compIdx], CNV[compIdx] };
            CR::ReservoirFailure::Type types[2] = { CR::ReservoirFailure::Type::MassBalance,
                                                    CR::ReservoirFailure::Type::Cnv };
            double tol[2] = { tol_mb, tol_cnv };
            for (int ii : {0, 1}) {
                if (std::isnan(res[ii])) {
                    report.setReservoirFailed({types[ii], CR::Severity::NotANumber, compIdx});
                    logger.debug("NaN residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] > model_.param().max_residual_allowed_) {
                    report.setReservoirFailed({types[ii], CR::Severity::TooLarge, compIdx});
                    logger.debug("Too large residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] < 0.0) {
                    report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
                    logger.debug("Negative residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] > tol[ii]) {
                    report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
                }
                report.setReservoirConvergenceMetric(types[ii], compIdx, res[ii]);
            }
        }

        // Output of residuals. If converged at the initial state, log nothing.
        const bool converged_at_initial_state = (report.converged() && iteration == 0);
        if (!converged_at_initial_state) {
            if (iteration == 0) {
                // Log header.
                std::string msg = fmt::format("Domain {} on rank {}, size {}, containing cell {}\n| Iter",
                                              domain.index, this->rank_, domain.cells.size(), domain.cells[0]);
                for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                    msg += " MB(";
                    msg += model_.compNames().name(compIdx)[0];
                    msg += ") ";
                }
                for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                    msg += " CNV(";
                    msg += model_.compNames().name(compIdx)[0];
                    msg += ") ";
                }
                logger.debug(msg);
            }
            // Log convergence data.
            std::ostringstream ss;
            ss << "| ";
            const std::streamsize oprec = ss.precision(3);
            const std::ios::fmtflags oflags = ss.setf(std::ios::scientific);
            ss << std::setw(4) << iteration;
            for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                ss << std::setw(11) << mass_balance_residual[compIdx];
            }
            for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                ss << std::setw(11) << CNV[compIdx];
            }
            ss.precision(oprec);
            ss.flags(oflags);
            logger.debug(ss.str());
        }

        return report;
    }

    ConvergenceReport getDomainConvergence(const Domain& domain,
                                           const SimulatorTimerInterface& timer,
                                           const int iteration,
                                           DeferredLogger& logger,
                                           std::vector<double>& residual_norms)
    {
        std::vector<Scalar> B_avg(numEq, 0.0);
        auto report = this->getDomainReservoirConvergence(timer.simulationTimeElapsed(),
                                                          timer.currentStepLength(),
                                                          iteration,
                                                          domain,
                                                          logger,
                                                          B_avg,
                                                          residual_norms);
        report += model_.wellModel().getDomainWellConvergence(domain, B_avg, logger);
        return report;
    }

    //! \brief Returns the subdomains ordered according to the configured measure.
    std::vector<int> getSubdomainOrder()
    {
        const auto& modelSimulator = model_.simulator();
        const auto& solution = modelSimulator.model().solution(0);

        std::vector<int> domain_order(domains_.size());
        std::iota(domain_order.begin(), domain_order.end(), 0);

        if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
            // Do nothing, 0..n-1 order is fine.
            return domain_order;
        } else if (model_.param().local_solve_approach_ == DomainSolveApproach::GaussSeidel) {
            // Calculate the measure used to order the domains.
            std::vector<double> measure_per_domain(domains_.size());
            switch (model_.param().local_domain_ordering_) {
            case DomainOrderingMeasure::AveragePressure: {
                // Use average pressures to order domains.
                for (const auto& domain : domains_) {
                    double press_sum = 0.0;
                    for (const int c : domain.cells) {
                        press_sum += solution[c][Indices::pressureSwitchIdx];
                    }
                    const double avgpress = press_sum / domain.cells.size();
                    measure_per_domain[domain.index] = avgpress;
                }
                break;
            }
            case DomainOrderingMeasure::MaxPressure: {
                // Use max pressures to order domains.
                for (const auto& domain : domains_) {
                    double maxpress = 0.0;
                    for (const int c : domain.cells) {
                        maxpress = std::max(maxpress, solution[c][Indices::pressureSwitchIdx]);
                    }
                    measure_per_domain[domain.index] = maxpress;
                }
                break;
            }
            case DomainOrderingMeasure::Residual: {
                // Use maximum residual to order domains.
                const auto& residual = modelSimulator.model().linearizer().residual();
                const int num_vars = residual[0].size();
                for (const auto& domain : domains_) {
                    double maxres = 0.0;
                    for (const int c : domain.cells) {
                        for (int ii = 0; ii < num_vars; ++ii) {
                            maxres = std::max(maxres, std::fabs(residual[c][ii]));
                        }
                    }
                    measure_per_domain[domain.index] = maxres;
                }
                break;
            }
            } // end of switch (model_.param().local_domain_ordering_)

            // Sort by largest measure, keeping index order if equal.
            const auto& m = measure_per_domain;
            std::stable_sort(domain_order.begin(), domain_order.end(),
                             [&m](const int i1, const int i2) { return m[i1] > m[i2]; });
            return domain_order;
        } else {
            throw std::logic_error("Domain solve approach must be Jacobi or Gauss-Seidel");
        }
    }
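
    // In the Jacobi variant all domains are solved from the same initial
    // global solution: a converged local solution is recorded in
    // locally_solved, but `solution` itself is reset to its initial values,
    // and the caller applies the combined update once all domains are done.
    // In the Gauss-Seidel variant further below, accepted local solutions
    // stay in `solution`, so later domains see updates from earlier ones.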

    template<class GlobalEqVector>
    void solveDomainJacobi(GlobalEqVector& solution,
                           GlobalEqVector& locally_solved,
                           SimulatorReportSingle& local_report,
                           DeferredLogger& logger,
                           const int iteration,
                           const SimulatorTimerInterface& timer,
                           const Domain& domain)
    {
        auto initial_local_well_primary_vars = model_.wellModel().getPrimaryVarsDomain(domain);
        auto initial_local_solution = Details::extractVector(solution, domain.cells);
        auto res = solveDomain(domain, timer, logger, iteration, false);
        local_report = res.first;
        if (local_report.converged) {
            auto local_solution = Details::extractVector(solution, domain.cells);
            Details::setGlobal(local_solution, domain.cells, locally_solved);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        } else {
            model_.wellModel().setPrimaryVarsDomain(domain, initial_local_well_primary_vars);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        }
    }

    template<class GlobalEqVector>
    void solveDomainGaussSeidel(GlobalEqVector& solution,
                                GlobalEqVector& locally_solved,
                                SimulatorReportSingle& local_report,
                                DeferredLogger& logger,
                                const int iteration,
                                const SimulatorTimerInterface& timer,
                                const Domain& domain)
    {
        auto initial_local_well_primary_vars = model_.wellModel().getPrimaryVarsDomain(domain);
        auto initial_local_solution = Details::extractVector(solution, domain.cells);
        auto res = solveDomain(domain, timer, logger, iteration, true);
        local_report = res.first;
        if (!local_report.converged) {
            // We look at the detailed convergence report to evaluate
            // if we should accept the unconverged solution.
            const auto& convrep = res.second;
            // We do not accept a solution if the wells are unconverged.
            if (!convrep.wellFailed()) {
                // Calculate the sums of the mb and cnv failures.
                double mb_sum = 0.0;
                double cnv_sum = 0.0;
                for (const auto& rc : convrep.reservoirConvergence()) {
                    if (rc.type() == ConvergenceReport::ReservoirFailure::Type::MassBalance) {
                        mb_sum += rc.value();
                    } else if (rc.type() == ConvergenceReport::ReservoirFailure::Type::Cnv) {
                        cnv_sum += rc.value();
                    }
                }
                // If not too high, we overrule the convergence failure.
                const double acceptable_local_mb_sum = 1e-3;
                const double acceptable_local_cnv_sum = 1.0;
                if (mb_sum < acceptable_local_mb_sum && cnv_sum < acceptable_local_cnv_sum) {
                    local_report.converged = true;
                    logger.debug(fmt::format("Accepting solution in unconverged domain {} on rank {}.", domain.index, rank_));
                    logger.debug(fmt::format("Value of mb_sum: {} cnv_sum: {}", mb_sum, cnv_sum));
                } else {
                    logger.debug("Unconverged local solution.");
                }
            } else {
                logger.debug("Unconverged local solution with well convergence failures:");
                for (const auto& wf : convrep.wellFailures()) {
                    logger.debug(to_string(wf));
                }
            }
        }
        if (local_report.converged) {
            auto local_solution = Details::extractVector(solution, domain.cells);
            Details::setGlobal(local_solution, domain.cells, locally_solved);
        } else {
            model_.wellModel().setPrimaryVarsDomain(domain, initial_local_well_primary_vars);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        }
    }

    double computeCnvErrorPvLocal(const Domain& domain,
                                  const std::vector<Scalar>& B_avg, double dt) const
    {
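        // Sum the pore volume of the domain's cells whose scaled residual
        // (CNV) violates the strict tolerance. The caller divides this by
        // the domain's pore volume to get the fraction that decides whether
        // the relaxed CNV tolerance may be used.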
        double errorPV{};
        const auto& simulator = model_.simulator();
        const auto& model = simulator.model();
        const auto& problem = simulator.problem();
        const auto& residual = simulator.model().linearizer().residual();

        for (const int cell_idx : domain.cells) {
            const double pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
                                   model.dofTotalVolume(cell_idx);
            const auto& cellResidual = residual[cell_idx];
            bool cnvViolated = false;

            for (unsigned eqIdx = 0; eqIdx < cellResidual.size(); ++eqIdx) {
                using std::fabs;
                Scalar CNV = cellResidual[eqIdx] * dt * B_avg[eqIdx] / pvValue;
                cnvViolated = cnvViolated || (fabs(CNV) > model_.param().tolerance_cnv_);
            }

            if (cnvViolated) {
                errorPV += pvValue;
            }
        }
        return errorPV;
    }

    decltype(auto) partitionCells() const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();

        using GridView = std::remove_cv_t<std::remove_reference_t<decltype(grid.leafGridView())>>;
        using Element = std::remove_cv_t<std::remove_reference_t<typename GridView::template Codim<0>::Entity>>;

        const auto& param = this->model_.param();
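
        // The Zoltan-based partitioner needs two callbacks: one mapping an
        // element to its local linear index, and one mapping that index to a
        // globally unique (Cartesian) cell id, so that cells are identified
        // consistently across MPI ranks.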
        auto zoltan_ctrl = ZoltanPartitioningControl<Element>{};
        zoltan_ctrl.domain_imbalance = param.local_domain_partition_imbalance_;
        zoltan_ctrl.index =
            [elementMapper = &this->model_.simulator().model().elementMapper()]
            (const Element& element)
        {
            return elementMapper->index(element);
        };
        zoltan_ctrl.local_to_global =
            [cartMapper = &this->model_.simulator().vanguard().cartesianIndexMapper()]
            (const int elemIdx)
        {
            return cartMapper->cartesianIndex(elemIdx);
        };

        // Forming the list of wells is expensive, so do this only if needed.
        const auto need_wells = param.local_domain_partition_method_ == "zoltan";
        const auto wells = need_wells
            ? this->model_.simulator().vanguard().schedule().getWellsatEnd()
            : std::vector<Well>{};

        // If the number of domains is defaulted, choose a reasonable default.
        constexpr int default_cells_per_domain = 1000;
        const int num_cells = Opm::detail::countGlobalCells(grid);
        const int num_domains = param.num_local_domains_ > 0
            ? param.num_local_domains_
            : num_cells / default_cells_per_domain;

        return ::Opm::partitionCells(param.local_domain_partition_method_,
                                     num_domains,
                                     grid.leafGridView(), wells, zoltan_ctrl);
    }

    std::vector<int> reconstitutePartitionVector() const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();

        auto numD = std::vector<int>(grid.comm().size() + 1, 0);
        numD[grid.comm().rank() + 1] = static_cast<int>(this->domains_.size());
        grid.comm().sum(numD.data(), numD.size());
        std::partial_sum(numD.begin(), numD.end(), numD.begin());
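
        // After the global sum and prefix sum, numD[r] holds the number of
        // domains on ranks 0..r-1, i.e. the first global domain id owned by
        // rank r; the local domain ids below are offset by numD[rank].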
        auto p = std::vector<int>(grid.size(0));
        auto maxCellIdx = std::numeric_limits<int>::min();

        auto d = numD[grid.comm().rank()];
        for (const auto& domain : this->domains_) {
            for (const auto& cell : domain.cells) {
                p[cell] = d;
                if (cell > maxCellIdx) {
                    maxCellIdx = cell;
                }
            }

            ++d;
        }

        p.erase(p.begin() + maxCellIdx + 1, p.end());
        return p;
    }

    BlackoilModel<TypeTag>& model_;
    std::vector<Domain> domains_;
    std::vector<std::unique_ptr<Mat>> domain_matrices_;
    std::vector<ISTLSolverType> domain_linsolvers_;
    SimulatorReportSingle local_reports_accumulated_;
    int rank_ = 0;
};

} // namespace Opm

#endif // OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED