BlackoilModelNldd.hpp
/*
  Copyright 2013, 2015 SINTEF ICT, Applied Mathematics.
  Copyright 2014, 2015 Dr. Blatt - HPC-Simulation-Software & Services
  Copyright 2014, 2015 Statoil ASA.
  Copyright 2015 NTNU
  Copyright 2015, 2016, 2017 IRIS AS

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
#define OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED

#include <dune/common/timer.hh>

#include <opm/grid/common/SubGridPart.hpp>

// (Several OPM-internal includes were elided by the Doxygen listing here.)

#if COMPILE_BDA_BRIDGE
// (elided include: the BDA-bridge variant of the ISTL solver)
#else
// (elided include: the standard ISTL solver)
#endif

// (Further OPM-internal includes elided in the listing.)

#include <fmt/format.h>

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <filesystem>
#include <fstream>
#include <functional>
#include <iomanip>
#include <ios>
#include <memory>
#include <numeric>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

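// Usage sketch (illustrative only; the real call sites live in BlackoilModel's
// nonlinear iteration, and the exact invocation there may differ):
//
//   BlackoilModelNldd<TypeTag> nldd(model);  // partitions the grid into subdomains
//   nldd.prepareStep();                      // maps wells to subdomains
//   auto report = nldd.nonlinearIterationNldd(iteration, timer, nonlinear_solver);
//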
namespace Opm {

template<class TypeTag> class BlackoilModel;

/// A NLDD implementation for three-phase black oil.
template <class TypeTag>
class BlackoilModelNldd {
public:
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
    using FluidSystem = GetPropType<TypeTag, Properties::FluidSystem>;
    using Grid = GetPropType<TypeTag, Properties::Grid>;
    using Indices = GetPropType<TypeTag, Properties::Indices>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using SolutionVector = GetPropType<TypeTag, Properties::SolutionVector>;

    using BVector = typename BlackoilModel<TypeTag>::BVector;
    using Domain = SubDomain<Grid>;
    using ISTLSolverType = ISTLSolver<TypeTag>;
    using Mat = typename BlackoilModel<TypeTag>::Mat;

    static constexpr int numEq = Indices::numEq;

    //! \brief The constructor sets up the subdomains.
    BlackoilModelNldd(BlackoilModel<TypeTag>& model)
        : model_(model), rank_(model_.simulator().vanguard().grid().comm().rank())
    {
        // Create partitions.
        const auto& [partition_vector, num_domains] = this->partitionCells();

        // Scan through partitioning to get correct size for each.
        std::vector<int> sizes(num_domains, 0);
        for (const auto& p : partition_vector) {
            ++sizes[p];
        }

        // Set up correctly sized vectors of entity seeds and of indices for each partition.
        using EntitySeed = typename Grid::template Codim<0>::EntitySeed;
        std::vector<std::vector<EntitySeed>> seeds(num_domains);
        std::vector<std::vector<int>> partitions(num_domains);
        for (int domain = 0; domain < num_domains; ++domain) {
            seeds[domain].resize(sizes[domain]);
            partitions[domain].resize(sizes[domain]);
        }

        // Iterate through grid once, setting the seeds of all partitions.
        // Note: owned cells only!
        const auto& grid = model_.simulator().vanguard().grid();

        std::vector<int> count(num_domains, 0);
        const auto& gridView = grid.leafGridView();
        const auto beg = gridView.template begin<0, Dune::Interior_Partition>();
        const auto end = gridView.template end<0, Dune::Interior_Partition>();
        int cell = 0;
        for (auto it = beg; it != end; ++it, ++cell) {
            const int p = partition_vector[cell];
            seeds[p][count[p]] = it->seed();
            partitions[p][count[p]] = cell;
            ++count[p];
        }
        assert(count == sizes);

        // Create the domains.
        for (int index = 0; index < num_domains; ++index) {
            std::vector<bool> interior(partition_vector.size(), false);
            for (int ix : partitions[index]) {
                interior[ix] = true;
            }

            Dune::SubGridPart<Grid> view{grid, std::move(seeds[index])};

            this->domains_.emplace_back(index,
                                        std::move(partitions[index]),
                                        std::move(interior),
                                        std::move(view));
        }

        // Set up container for the local system matrices.
        domain_matrices_.resize(num_domains);

        // Set up container for the local linear solvers.
        for (int index = 0; index < num_domains; ++index) {
            // TODO: The ISTLSolver constructor will make
            // parallel structures appropriate for the full grid
            // only. This must be addressed before going parallel.
            const auto& eclState = model_.simulator().vanguard().eclState();
            FlowLinearSolverParameters loc_param;
            loc_param.is_nldd_local_solver_ = true;
            loc_param.init(eclState.getSimulationConfig().useCPR());
            // Override solver type with umfpack if small domain.
            if (domains_[index].cells.size() < 200) {
                loc_param.linsolver_ = "umfpack";
            }
            loc_param.linear_solver_print_json_definition_ = false;
            const bool force_serial = true;
            domain_linsolvers_.emplace_back(model_.simulator(), loc_param, force_serial);
        }

        assert(int(domains_.size()) == num_domains);
    }
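
    // Each Domain (a SubDomain<Grid>) created above bundles the subdomain's
    // index, the global indices of its cells, an interior mask over the full
    // grid, and a Dune::SubGridPart view used for local assembly and
    // convergence checks.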

    //! \brief Called before starting a time step.
    void prepareStep()
    {
        // Setup domain->well mapping.
        model_.wellModel().setupDomains(domains_);
    }

    //! \brief Do one non-linear NLDD iteration.
    template <class NonlinearSolverType>
    SimulatorReportSingle nonlinearIterationNldd(const int iteration,
                                                 const SimulatorTimerInterface& timer,
                                                 NonlinearSolverType& nonlinear_solver)
    {
        // ----------- Set up reports and timer -----------
        SimulatorReportSingle report;
        Dune::Timer perfTimer;

        model_.initialLinearization(report, iteration, nonlinear_solver.minIter(), nonlinear_solver.maxIter(), timer);

        if (report.converged) {
            return report;
        }

        // ----------- If not converged, do an NLDD iteration -----------

        auto& solution = model_.simulator().model().solution(0);
        auto initial_solution = solution;
        auto locally_solved = initial_solution;

        // ----------- Decide on an ordering for the domains -----------
        const auto domain_order = this->getSubdomainOrder();

        // ----------- Solve each domain separately -----------
        DeferredLogger logger;
        std::vector<SimulatorReportSingle> domain_reports(domains_.size());
        for (const int domain_index : domain_order) {
            const auto& domain = domains_[domain_index];
            SimulatorReportSingle local_report;
            try {
                switch (model_.param().local_solve_approach_) {
                case DomainSolveApproach::Jacobi:
                    solveDomainJacobi(solution, locally_solved, local_report, logger,
                                      iteration, timer, domain);
                    break;
                default:
                case DomainSolveApproach::GaussSeidel:
                    solveDomainGaussSeidel(solution, locally_solved, local_report, logger,
                                           iteration, timer, domain);
                    break;
                }
            }
            catch (...) {
                // Something went wrong during local solves.
                local_report.converged = false;
            }
            // This should have updated the global matrix to be
            // dR_i/du_j evaluated at new local solutions for
            // i == j, at old solution for i != j.
            if (!local_report.converged) {
                // TODO: more proper treatment, including in parallel.
                logger.debug(fmt::format("Convergence failure in domain {} on rank {}.", domain.index, rank_));
            }
            domain_reports[domain.index] = local_report;
        }
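
        // Note: with the (default) Gauss-Seidel approach, each domain above is
        // linearized against the solution already updated by the domains solved
        // before it, which is why the ordering from getSubdomainOrder() matters;
        // the Jacobi approach instead restores the initial solution after each
        // local solve (see solveDomainJacobi()).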

        // Communicate and log all messages.
        auto global_logger = gatherDeferredLogger(logger, model_.simulator().vanguard().grid().comm());
        global_logger.logMessages();

        // Accumulate local solve data.
        // Putting the counts in a single array to avoid multiple
        // comm.sum() calls. Keeping the named vars for readability.
        std::array<int, 4> counts{ 0, 0, 0, static_cast<int>(domain_reports.size()) };
        int& num_converged = counts[0];
        int& num_converged_already = counts[1];
        int& num_local_newtons = counts[2];
        int& num_domains = counts[3];
        {
            SimulatorReportSingle rep;
            for (const auto& dr : domain_reports) {
                if (dr.converged) {
                    ++num_converged;
                    if (dr.total_newton_iterations == 0) {
                        ++num_converged_already;
                    }
                }
                rep += dr;
            }
            num_local_newtons = rep.total_newton_iterations;
            local_reports_accumulated_ += rep;
        }

        if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
            solution = locally_solved;
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0);
        }

#if HAVE_MPI
        // Communicate solutions:
        // With multiple processes, this process' overlap (i.e. not
        // owned) cells' solution values have been modified by local
        // solves in the owning processes, and remain unchanged
        // here. We must therefore receive the updated solution on the
        // overlap cells and update their intensive quantities before
        // we move on.
        const auto& comm = model_.simulator().vanguard().grid().comm();
        if (comm.size() > 1) {
            const auto* ccomm = model_.simulator().model().newtonMethod().linearSolver().comm();

            // Copy numerical values from primary vars.
            ccomm->copyOwnerToAll(solution, solution);

            // Copy flags from primary vars.
            const std::size_t num = solution.size();
            Dune::BlockVector<std::size_t> allmeanings(num);
            for (std::size_t ii = 0; ii < num; ++ii) {
                allmeanings[ii] = PVUtil::pack(solution[ii]);
            }
            ccomm->copyOwnerToAll(allmeanings, allmeanings);
            for (std::size_t ii = 0; ii < num; ++ii) {
                PVUtil::unPack(solution[ii], allmeanings[ii]);
            }
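
            // The pack/unPack pair serializes each cell's discrete
            // primary-variable meaning flags into a single std::size_t so they
            // can be sent through the same owner-to-all copy as the numeric
            // values (see priVarsPacking.hpp).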

            // Update intensive quantities for our overlap values.
            model_.simulator().model().invalidateAndUpdateIntensiveQuantitiesOverlap(/*timeIdx=*/0);

            // Make total counts of domains converged.
            comm.sum(counts.data(), counts.size());
        }
#endif // HAVE_MPI

        const bool is_iorank = this->rank_ == 0;
        if (is_iorank) {
            OpmLog::debug(fmt::format("Local solves finished. Converged for {}/{} domains. {} domains did no work. {} total local Newton iterations.\n",
                                      num_converged, num_domains, num_converged_already, num_local_newtons));
        }

        // Finish with a Newton step.
        // Note that the "iteration + 100" is a simple way to avoid entering
        // "if (iteration == 0)" and similar blocks, and also makes it a little
        // easier to spot the iteration residuals in the DBG file. A more sophisticated
        // approach can be done later.
        auto rep = model_.nonlinearIterationNewton(iteration + 100, timer, nonlinear_solver);
        report += rep;
        if (rep.converged) {
            report.converged = true;
        }
        return report;
    }

    //! \brief return the statistics if the nonlinearIteration() method failed
    const SimulatorReportSingle& localAccumulatedReports() const
    {
        return local_reports_accumulated_;
    }

    void writePartitions(const std::filesystem::path& odir) const
    {
        const auto& elementMapper = this->model_.simulator().model().elementMapper();
        const auto& cartMapper = this->model_.simulator().vanguard().cartesianIndexMapper();

        const auto& grid = this->model_.simulator().vanguard().grid();
        const auto& comm = grid.comm();
        const auto nDigit = 1 + static_cast<int>(std::floor(std::log10(comm.size())));

        std::ofstream pfile { odir / fmt::format("{1:0>{0}}", nDigit, comm.rank()) };

        const auto p = this->reconstitutePartitionVector();
        auto i = 0;
        for (const auto& cell : elements(grid.leafGridView(), Dune::Partitions::interior)) {
            pfile << comm.rank() << ' '
                  << cartMapper.cartesianIndex(elementMapper.index(cell)) << ' '
                  << p[i++] << '\n';
        }
    }
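
    // Each output line is "<rank> <Cartesian cell index> <domain id>"; a
    // (hypothetical) line "0 1623 7" would say cell 1623 on rank 0 belongs to
    // domain 7. The file itself is named after the zero-padded rank.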

private:
    //! \brief Solve the equation system for a single subdomain.
    std::pair<SimulatorReportSingle, ConvergenceReport>
    solveDomain(const Domain& domain,
                const SimulatorTimerInterface& timer,
                DeferredLogger& logger,
                [[maybe_unused]] const int global_iteration,
                const bool initial_assembly_required)
    {
        auto& modelSimulator = model_.simulator();

        SimulatorReportSingle report;
        Dune::Timer solveTimer;
        solveTimer.start();
        Dune::Timer detailTimer;

        modelSimulator.model().newtonMethod().setIterationIndex(0);

        // When called, if assembly has already been performed
        // with the initial values, we only need to check
        // for local convergence. Otherwise, we must do a local
        // assembly.
        int iter = 0;
        if (initial_assembly_required) {
            detailTimer.start();
            modelSimulator.model().newtonMethod().setIterationIndex(iter);
            // TODO: we should have a beginIterationLocal function()
            // only handling the well model for now
            modelSimulator.problem().wellModel().assembleDomain(modelSimulator.model().newtonMethod().numIterations(),
                                                                modelSimulator.timeStepSize(),
                                                                domain);
            // Assemble reservoir locally.
            report += this->assembleReservoirDomain(domain);
            report.assemble_time += detailTimer.stop();
        }
        detailTimer.reset();
        detailTimer.start();
        std::vector<Scalar> resnorms;
        auto convreport = this->getDomainConvergence(domain, timer, 0, logger, resnorms);
        if (convreport.converged()) {
            // TODO: set more info, timing etc.
            report.converged = true;
            return { report, convreport };
        }

        // We have already assembled for the first iteration,
        // but not done the Schur complement for the wells yet.
        detailTimer.reset();
        detailTimer.start();
        model_.wellModel().linearizeDomain(domain,
                                           modelSimulator.model().linearizer().jacobian(),
                                           modelSimulator.model().linearizer().residual());
        const double tt1 = detailTimer.stop();
        report.assemble_time += tt1;
        report.assemble_time_well += tt1;

        // Local Newton loop.
        const int max_iter = model_.param().max_local_solve_iterations_;
        const auto& grid = modelSimulator.vanguard().grid();
        double damping_factor = 1.0;
        std::vector<std::vector<Scalar>> convergence_history;
        convergence_history.reserve(20);
        convergence_history.push_back(resnorms);
        do {
            // Solve local linear system.
            // Note that x has full size, we expect it to be nonzero only for in-domain cells.
            const int nc = grid.size(0);
            BVector x(nc);
            detailTimer.reset();
            detailTimer.start();
            this->solveJacobianSystemDomain(domain, x);
            model_.wellModel().postSolveDomain(x, domain);
            if (damping_factor != 1.0) {
                x *= damping_factor;
            }
            report.linear_solve_time += detailTimer.stop();
            report.linear_solve_setup_time += model_.linearSolveSetupTime();
            report.total_linear_iterations = model_.linearIterationsLastSolve();

            // Update local solution. // TODO: x is still full size, should we optimize it?
            detailTimer.reset();
            detailTimer.start();
            this->updateDomainSolution(domain, x);
            report.update_time += detailTimer.stop();

            // Assemble well and reservoir.
            detailTimer.reset();
            detailTimer.start();
            ++iter;
            modelSimulator.model().newtonMethod().setIterationIndex(iter);
            // TODO: we should have a beginIterationLocal function()
            // only handling the well model for now
            // Assemble reservoir locally.
            modelSimulator.problem().wellModel().assembleDomain(modelSimulator.model().newtonMethod().numIterations(),
                                                                modelSimulator.timeStepSize(),
                                                                domain);
            report += this->assembleReservoirDomain(domain);
            report.assemble_time += detailTimer.stop();

            // Check for local convergence.
            detailTimer.reset();
            detailTimer.start();
            resnorms.clear();
            convreport = this->getDomainConvergence(domain, timer, iter, logger, resnorms);
            convergence_history.push_back(resnorms);

            // apply the Schur complement of the well model to the
            // reservoir linearized equations
            detailTimer.reset();
            detailTimer.start();
            model_.wellModel().linearizeDomain(domain,
                                               modelSimulator.model().linearizer().jacobian(),
                                               modelSimulator.model().linearizer().residual());
            const double tt2 = detailTimer.stop();
            report.assemble_time += tt2;
            report.assemble_time_well += tt2;

            // Check if we should dampen. Only do so if wells are converged.
            if (!convreport.converged() && !convreport.wellFailed()) {
                bool oscillate = false;
                bool stagnate = false;
                const int numPhases = convergence_history.front().size();
                detail::detectOscillations(convergence_history, iter, numPhases,
                                           Scalar{0.2}, 1, oscillate, stagnate);
                if (oscillate) {
                    damping_factor *= 0.85;
                    logger.debug(fmt::format("| Damping factor is now {}", damping_factor));
                }
            }
        } while (!convreport.converged() && iter <= max_iter);
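
        // Note that damping_factor is only ever reduced (by a factor 0.85 per
        // detected oscillation) and never reset inside the loop, so once
        // damping kicks in it also applies to all subsequent local Newton
        // updates of this domain solve.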

        modelSimulator.problem().endIteration();

        report.converged = convreport.converged();
        report.total_newton_iterations = iter;
        report.total_linearizations = iter;
        report.total_time = solveTimer.stop();
        // TODO: set more info, timing etc.
        return { report, convreport };
    }

    //! \brief Assemble the residual and Jacobian of the reservoir equations for a domain.
    SimulatorReportSingle assembleReservoirDomain(const Domain& domain)
    {
        // -------- Mass balance equations --------
        model_.simulator().model().linearizer().linearizeDomain(domain);
        return model_.wellModel().lastReport();
    }

    //! \brief Solve the linearized system for a single domain.
    void solveJacobianSystemDomain(const Domain& domain, BVector& global_x)
    {
        const auto& modelSimulator = model_.simulator();

        Dune::Timer perfTimer;
        perfTimer.start();

        const Mat& main_matrix = modelSimulator.model().linearizer().jacobian().istlMatrix();
        if (domain_matrices_[domain.index]) {
            Details::copySubMatrix(main_matrix, domain.cells, *domain_matrices_[domain.index]);
        } else {
            domain_matrices_[domain.index] = std::make_unique<Mat>(Details::extractMatrix(main_matrix, domain.cells));
        }
        auto& jac = *domain_matrices_[domain.index];
        auto res = Details::extractVector(modelSimulator.model().linearizer().residual(),
                                          domain.cells);
        auto x = res;

        // set initial guess
        global_x = 0.0;
        x = 0.0;

        auto& linsolver = domain_linsolvers_[domain.index];

        linsolver.prepare(jac, res);
        model_.linearSolveSetupTime() = perfTimer.stop();
        linsolver.setResidual(res);
        linsolver.solve(x);

        Details::setGlobal(x, domain.cells, global_x);
    }
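
    // The extracted submatrix keeps only rows and columns belonging to the
    // domain's cells, so matrix couplings to out-of-domain cells are dropped;
    // their influence enters only through the residual, which was assembled
    // with the current neighbor values. Out-of-domain unknowns are thus, in
    // effect, held fixed during the local solve.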

    //! \brief Apply an update to the primary variables of a domain.
    void updateDomainSolution(const Domain& domain, const BVector& dx)
    {
        auto& simulator = model_.simulator();
        auto& newtonMethod = simulator.model().newtonMethod();
        SolutionVector& solution = simulator.model().solution(/*timeIdx=*/0);

        newtonMethod.update_(/*nextSolution=*/solution,
                             /*curSolution=*/solution,
                             /*update=*/dx,
                             /*resid=*/dx,
                             domain.cells); // the update routines of the black
                                            // oil model do not care about the
                                            // residual

        // if the solution is updated, the intensive quantities need to be recalculated
        simulator.model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
    }

    //! \brief Compute the per-domain quantities needed for the convergence checks.
    std::pair<Scalar, Scalar> localDomainConvergenceData(const Domain& domain,
                                                         std::vector<Scalar>& R_sum,
                                                         std::vector<Scalar>& maxCoeff,
                                                         std::vector<Scalar>& B_avg,
                                                         std::vector<int>& maxCoeffCell)
    {
        const auto& modelSimulator = model_.simulator();

        Scalar pvSumLocal = 0.0;
        Scalar numAquiferPvSumLocal = 0.0;
        const auto& model = modelSimulator.model();
        const auto& problem = modelSimulator.problem();

        const auto& modelResid = modelSimulator.model().linearizer().residual();

        ElementContext elemCtx(modelSimulator);
        const auto& gridView = domain.view;
        const auto& elemEndIt = gridView.template end</*codim=*/0>();
        IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());

        for (auto elemIt = gridView.template begin</*codim=*/0>();
             elemIt != elemEndIt;
             ++elemIt)
        {
            if (elemIt->partitionType() != Dune::InteriorEntity) {
                continue;
            }
            const auto& elem = *elemIt;
            elemCtx.updatePrimaryStencil(elem);
            elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);

            const unsigned cell_idx = elemCtx.globalSpaceIndex(/*spaceIdx=*/0, /*timeIdx=*/0);
            const auto& intQuants = elemCtx.intensiveQuantities(/*spaceIdx=*/0, /*timeIdx=*/0);
            const auto& fs = intQuants.fluidState();

            const auto pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
                                 model.dofTotalVolume(cell_idx);
            pvSumLocal += pvValue;

            if (isNumericalAquiferCell(elem)) {
                numAquiferPvSumLocal += pvValue;
            }

            model_.getMaxCoeff(cell_idx, intQuants, fs, modelResid, pvValue,
                               B_avg, R_sum, maxCoeff, maxCoeffCell);
        }

        // compute local average in terms of global number of elements
        const int bSize = B_avg.size();
        for (int i = 0; i < bSize; ++i) {
            B_avg[i] /= Scalar(domain.cells.size());
        }

        return {pvSumLocal, numAquiferPvSumLocal};
    }

    ConvergenceReport getDomainReservoirConvergence(const double reportTime,
                                                    const double dt,
                                                    const int iteration,
                                                    const Domain& domain,
                                                    DeferredLogger& logger,
                                                    std::vector<Scalar>& B_avg,
                                                    std::vector<Scalar>& residual_norms)
    {
        using Vector = std::vector<Scalar>;

        const int numComp = numEq;
        Vector R_sum(numComp, 0.0);
        Vector maxCoeff(numComp, std::numeric_limits<Scalar>::lowest());
        std::vector<int> maxCoeffCell(numComp, -1);
        const auto [pvSum, numAquiferPvSum]
            = this->localDomainConvergenceData(domain, R_sum, maxCoeff, B_avg, maxCoeffCell);

        auto cnvErrorPvFraction = computeCnvErrorPvLocal(domain, B_avg, dt);
        cnvErrorPvFraction /= (pvSum - numAquiferPvSum);

        // Default value of relaxed_max_pv_fraction_ is 0.03 and min_strict_cnv_iter_ is 0.
        // For each iteration, we need to determine whether to use the relaxed CNV tolerance.
        // To disable the usage of relaxed CNV tolerance, you can set the relaxed_max_pv_fraction_ to be 0.
        const bool use_relaxed_cnv = cnvErrorPvFraction < model_.param().relaxed_max_pv_fraction_ &&
                                     iteration >= model_.param().min_strict_cnv_iter_;
        // Tighter bound for local convergence should increase the
        // likelihood of: local convergence => global convergence
        const Scalar tol_cnv = model_.param().local_tolerance_scaling_cnv_
            * (use_relaxed_cnv ? model_.param().tolerance_cnv_relaxed_
                               : model_.param().tolerance_cnv_);

        const bool use_relaxed_mb = iteration >= model_.param().min_strict_mb_iter_;
        const Scalar tol_mb = model_.param().local_tolerance_scaling_mb_
            * (use_relaxed_mb ? model_.param().tolerance_mb_relaxed_ : model_.param().tolerance_mb_);

        // Finish computation
        std::vector<Scalar> CNV(numComp);
        std::vector<Scalar> mass_balance_residual(numComp);
        for (int compIdx = 0; compIdx < numComp; ++compIdx) {
            CNV[compIdx] = B_avg[compIdx] * dt * maxCoeff[compIdx];
            mass_balance_residual[compIdx] = std::abs(B_avg[compIdx] * R_sum[compIdx]) * dt / pvSum;
            residual_norms.push_back(CNV[compIdx]);
        }

        // Create convergence report.
        ConvergenceReport report{reportTime};
        using CR = ConvergenceReport;
        for (int compIdx = 0; compIdx < numComp; ++compIdx) {
            Scalar res[2] = { mass_balance_residual[compIdx], CNV[compIdx] };
            CR::ReservoirFailure::Type types[2] = { CR::ReservoirFailure::Type::MassBalance,
                                                    CR::ReservoirFailure::Type::Cnv };
            Scalar tol[2] = { tol_mb, tol_cnv };
            for (int ii : {0, 1}) {
                if (std::isnan(res[ii])) {
                    report.setReservoirFailed({types[ii], CR::Severity::NotANumber, compIdx});
                    logger.debug("NaN residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] > model_.param().max_residual_allowed_) {
                    report.setReservoirFailed({types[ii], CR::Severity::TooLarge, compIdx});
                    logger.debug("Too large residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] < 0.0) {
                    report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
                    logger.debug("Negative residual for " + model_.compNames().name(compIdx) + " equation.");
                } else if (res[ii] > tol[ii]) {
                    report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
                }

                report.setReservoirConvergenceMetric(types[ii], compIdx, res[ii]);
            }
        }

        // Output of residuals. If converged at initial state, log nothing.
        const bool converged_at_initial_state = (report.converged() && iteration == 0);
        if (!converged_at_initial_state) {
            if (iteration == 0) {
                // Log header.
                std::string msg = fmt::format("Domain {} on rank {}, size {}, containing cell {}\n| Iter",
                                              domain.index, this->rank_, domain.cells.size(), domain.cells[0]);
                for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                    msg += " MB(";
                    msg += model_.compNames().name(compIdx)[0];
                    msg += ") ";
                }
                for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                    msg += " CNV(";
                    msg += model_.compNames().name(compIdx)[0];
                    msg += ") ";
                }
                logger.debug(msg);
            }
            // Log convergence data.
            std::ostringstream ss;
            ss << "| ";
            const std::streamsize oprec = ss.precision(3);
            const std::ios::fmtflags oflags = ss.setf(std::ios::scientific);
            ss << std::setw(4) << iteration;
            for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                ss << std::setw(11) << mass_balance_residual[compIdx];
            }
            for (int compIdx = 0; compIdx < numComp; ++compIdx) {
                ss << std::setw(11) << CNV[compIdx];
            }
            ss.precision(oprec);
            ss.flags(oflags);
            logger.debug(ss.str());
        }

        return report;
    }
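
    // For reference, the two scaled norms computed above are, per component c:
    //   MB(c)  = |B_avg[c] * R_sum[c]| * dt / pvSum   (mass balance, domain sum)
    //   CNV(c) = B_avg[c] * dt * maxCoeff[c]          (worst single cell)
    // and each is compared against its (possibly relaxed) tolerance scaled by
    // the local_tolerance_scaling_* parameters.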

    ConvergenceReport getDomainConvergence(const Domain& domain,
                                           const SimulatorTimerInterface& timer,
                                           const int iteration,
                                           DeferredLogger& logger,
                                           std::vector<Scalar>& residual_norms)
    {
        std::vector<Scalar> B_avg(numEq, 0.0);
        auto report = this->getDomainReservoirConvergence(timer.simulationTimeElapsed(),
                                                          timer.currentStepLength(),
                                                          iteration,
                                                          domain,
                                                          logger,
                                                          B_avg,
                                                          residual_norms);
        report += model_.wellModel().getDomainWellConvergence(domain, B_avg, logger);
        return report;
    }

    //! \brief Returns the subdomains ordered according to the configured ordering measure.
    std::vector<int> getSubdomainOrder()
    {
        const auto& modelSimulator = model_.simulator();
        const auto& solution = modelSimulator.model().solution(0);

        std::vector<int> domain_order(domains_.size());
        std::iota(domain_order.begin(), domain_order.end(), 0);

        if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
            // Do nothing, 0..n-1 order is fine.
            return domain_order;
        } else if (model_.param().local_solve_approach_ == DomainSolveApproach::GaussSeidel) {
            // Calculate the measure used to order the domains.
            std::vector<Scalar> measure_per_domain(domains_.size());
            switch (model_.param().local_domain_ordering_) {
            case DomainOrderingMeasure::AveragePressure: {
                // Use average pressures to order domains.
                for (const auto& domain : domains_) {
                    Scalar press_sum = 0.0;
                    for (const int c : domain.cells) {
                        press_sum += solution[c][Indices::pressureSwitchIdx];
                    }
                    const Scalar avgpress = press_sum / domain.cells.size();
                    measure_per_domain[domain.index] = avgpress;
                }
                break;
            }
            case DomainOrderingMeasure::MaxPressure: {
                // Use max pressures to order domains.
                for (const auto& domain : domains_) {
                    Scalar maxpress = 0.0;
                    for (const int c : domain.cells) {
                        maxpress = std::max(maxpress, solution[c][Indices::pressureSwitchIdx]);
                    }
                    measure_per_domain[domain.index] = maxpress;
                }
                break;
            }
            case DomainOrderingMeasure::Residual: {
                // Use maximum residual to order domains.
                const auto& residual = modelSimulator.model().linearizer().residual();
                const int num_vars = residual[0].size();
                for (const auto& domain : domains_) {
                    Scalar maxres = 0.0;
                    for (const int c : domain.cells) {
                        for (int ii = 0; ii < num_vars; ++ii) {
                            maxres = std::max(maxres, std::fabs(residual[c][ii]));
                        }
                    }
                    measure_per_domain[domain.index] = maxres;
                }
                break;
            }
            } // end of switch (model_.param().local_domain_ordering_)

            // Sort by largest measure, keeping index order if equal.
            const auto& m = measure_per_domain;
            std::stable_sort(domain_order.begin(), domain_order.end(),
                             [&m](const int i1, const int i2) { return m[i1] > m[i2]; });
            return domain_order;
        } else {
            throw std::logic_error("Domain solve approach must be Jacobi or Gauss-Seidel");
        }
    }

    template<class GlobalEqVector>
    void solveDomainJacobi(GlobalEqVector& solution,
                           GlobalEqVector& locally_solved,
                           SimulatorReportSingle& local_report,
                           DeferredLogger& logger,
                           const int iteration,
                           const SimulatorTimerInterface& timer,
                           const Domain& domain)
    {
        auto initial_local_well_primary_vars = model_.wellModel().getPrimaryVarsDomain(domain);
        auto initial_local_solution = Details::extractVector(solution, domain.cells);
        auto res = solveDomain(domain, timer, logger, iteration, false);
        local_report = res.first;
        if (local_report.converged) {
            auto local_solution = Details::extractVector(solution, domain.cells);
            Details::setGlobal(local_solution, domain.cells, locally_solved);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        } else {
            model_.wellModel().setPrimaryVarsDomain(domain, initial_local_well_primary_vars);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        }
    }
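
    // Contrast with solveDomainGaussSeidel() below: the Jacobi variant always
    // restores the pre-solve state in the global solution vector (collecting
    // results in locally_solved instead), so every domain is solved from the
    // same initial state, while the Gauss-Seidel variant keeps accepted local
    // updates in place for subsequent domains to see.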

    template<class GlobalEqVector>
    void solveDomainGaussSeidel(GlobalEqVector& solution,
                                GlobalEqVector& locally_solved,
                                SimulatorReportSingle& local_report,
                                DeferredLogger& logger,
                                const int iteration,
                                const SimulatorTimerInterface& timer,
                                const Domain& domain)
    {
        auto initial_local_well_primary_vars = model_.wellModel().getPrimaryVarsDomain(domain);
        auto initial_local_solution = Details::extractVector(solution, domain.cells);
        auto res = solveDomain(domain, timer, logger, iteration, true);
        local_report = res.first;
        if (!local_report.converged) {
            // We look at the detailed convergence report to evaluate
            // if we should accept the unconverged solution.
            const auto& convrep = res.second;
            // We do not accept a solution if the wells are unconverged.
            if (!convrep.wellFailed()) {
                // Calculate the sums of the mb and cnv failures.
                Scalar mb_sum = 0.0;
                Scalar cnv_sum = 0.0;
                for (const auto& rc : convrep.reservoirConvergence()) {
                    if (rc.type() == ConvergenceReport::ReservoirFailure::Type::MassBalance) {
                        mb_sum += rc.value();
                    } else if (rc.type() == ConvergenceReport::ReservoirFailure::Type::Cnv) {
                        cnv_sum += rc.value();
                    }
                }
                // If not too high, we overrule the convergence failure.
                const Scalar acceptable_local_mb_sum = 1e-3;
                const Scalar acceptable_local_cnv_sum = 1.0;
                if (mb_sum < acceptable_local_mb_sum && cnv_sum < acceptable_local_cnv_sum) {
                    local_report.converged = true;
                    logger.debug(fmt::format("Accepting solution in unconverged domain {} on rank {}.", domain.index, rank_));
                    logger.debug(fmt::format("Value of mb_sum: {} cnv_sum: {}", mb_sum, cnv_sum));
                } else {
                    logger.debug("Unconverged local solution.");
                }
            } else {
                logger.debug("Unconverged local solution with well convergence failures:");
                for (const auto& wf : convrep.wellFailures()) {
                    logger.debug(to_string(wf));
                }
            }
        }
        if (local_report.converged) {
            auto local_solution = Details::extractVector(solution, domain.cells);
            Details::setGlobal(local_solution, domain.cells, locally_solved);
        } else {
            model_.wellModel().setPrimaryVarsDomain(domain, initial_local_well_primary_vars);
            Details::setGlobal(initial_local_solution, domain.cells, solution);
            model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
        }
    }

    Scalar computeCnvErrorPvLocal(const Domain& domain,
                                  const std::vector<Scalar>& B_avg, double dt) const
    {
        Scalar errorPV{};
        const auto& simulator = model_.simulator();
        const auto& model = simulator.model();
        const auto& problem = simulator.problem();
        const auto& residual = simulator.model().linearizer().residual();

        for (const int cell_idx : domain.cells) {
            const Scalar pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
                                   model.dofTotalVolume(cell_idx);
            const auto& cellResidual = residual[cell_idx];
            bool cnvViolated = false;

            for (unsigned eqIdx = 0; eqIdx < cellResidual.size(); ++eqIdx) {
                using std::fabs;
                Scalar CNV = cellResidual[eqIdx] * dt * B_avg[eqIdx] / pvValue;
                cnvViolated = cnvViolated || (fabs(CNV) > model_.param().tolerance_cnv_);
            }

            if (cnvViolated) {
                errorPV += pvValue;
            }
        }
        return errorPV;
    }
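
    // The returned value is the summed pore volume of cells violating the
    // strict CNV tolerance; getDomainReservoirConvergence() divides it by the
    // domain's non-aquifer pore volume to get the fraction that decides
    // whether the relaxed CNV tolerance may be used.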

    decltype(auto) partitionCells() const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();

        using GridView = std::remove_cv_t<std::remove_reference_t<
            decltype(grid.leafGridView())>>;

        using Element = std::remove_cv_t<std::remove_reference_t<
            typename GridView::template Codim<0>::Entity>>;

        const auto& param = this->model_.param();

        auto zoltan_ctrl = ZoltanPartitioningControl<Element>{};

        zoltan_ctrl.domain_imbalance = param.local_domain_partition_imbalance_;

        zoltan_ctrl.index =
            [elementMapper = &this->model_.simulator().model().elementMapper()]
            (const Element& element)
        {
            return elementMapper->index(element);
        };

        zoltan_ctrl.local_to_global =
            [cartMapper = &this->model_.simulator().vanguard().cartesianIndexMapper()]
            (const int elemIdx)
        {
            return cartMapper->cartesianIndex(elemIdx);
        };

        // Forming the list of wells is expensive, so do this only if needed.
        const auto need_wells = param.local_domain_partition_method_ == "zoltan";

        const auto wells = need_wells
            ? this->model_.simulator().vanguard().schedule().getWellsatEnd()
            : std::vector<Well>{};

        const auto& possibleFutureConnectionSet = need_wells
            ? this->model_.simulator().vanguard().schedule().getPossibleFutureConnections()
            : std::unordered_map<std::string, std::set<int>>{};

        // If defaulted parameter for number of domains, choose a reasonable default.
        constexpr int default_cells_per_domain = 1000;
        const int num_domains = (param.num_local_domains_ > 0)
            ? param.num_local_domains_
            : detail::countGlobalCells(grid) / default_cells_per_domain;

        return ::Opm::partitionCells(param.local_domain_partition_method_,
                                     num_domains, grid.leafGridView(), wells,
                                     possibleFutureConnectionSet, zoltan_ctrl);
    }
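
    // With the default of 1000 cells per domain, a 500 000-cell global model
    // would, for example, be split into roughly 500 subdomains unless
    // num_local_domains_ is set explicitly.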

    std::vector<int> reconstitutePartitionVector() const
    {
        const auto& grid = this->model_.simulator().vanguard().grid();

        auto numD = std::vector<int>(grid.comm().size() + 1, 0);
        numD[grid.comm().rank() + 1] = static_cast<int>(this->domains_.size());
        grid.comm().sum(numD.data(), numD.size());
        std::partial_sum(numD.begin(), numD.end(), numD.begin());

        auto p = std::vector<int>(grid.size(0));
        auto maxCellIdx = std::numeric_limits<int>::min();

        auto d = numD[grid.comm().rank()];
        for (const auto& domain : this->domains_) {
            for (const auto& cell : domain.cells) {
                p[cell] = d;
                if (cell > maxCellIdx) {
                    maxCellIdx = cell;
                }
            }

            ++d;
        }

        p.erase(p.begin() + maxCellIdx + 1, p.end());
        return p;
    }
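
    // The prefix sum over per-rank domain counts offsets the local domain
    // indices, so the ids written by writePartitions() are unique across all
    // MPI ranks rather than restarting at zero on each rank.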

    BlackoilModel<TypeTag>& model_;
    std::vector<Domain> domains_;
    std::vector<std::unique_ptr<Mat>> domain_matrices_;
    std::vector<ISTLSolverType> domain_linsolvers_;
    SimulatorReportSingle local_reports_accumulated_;
    int rank_ = 0;
};

} // namespace Opm

#endif // OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED