DNDSR 0.1.0.dev1+gcd065ad
Distributed Numeric Data Structure for CFV
Loading...
Searching...
No Matches
Euler.hpp
Go to the documentation of this file.
1/**
2 * @file Euler.hpp
3 * @brief Core type definitions, enumerations, and distributed array wrappers for compressible
4 * Navier-Stokes / RANS solvers in the DNDSR CFD framework.
5 *
6 * This header defines:
7 * - @ref EulerModel and @ref RANSModel enumerations for selecting physics models and
8 * turbulence closures.
9 * - Compile-time query functions (@ref getnVarsFixed, @ref getDim_Fixed, @ref getGeomDim_Fixed)
10 * that map model enumerants to their variable counts and dimensionality.
11 * - @ref EulerModelTraits, a trait struct that bundles all model properties for use in
12 * template-parameterized solver code.
13 * - MPI-parallel distributed array wrappers (@ref ArrayDOFV, @ref ArrayRECV, @ref ArrayGRADV)
14 * that extend CFV storage types with element-wise arithmetic, dot products, norms, and
15 * MPI collective reductions. These are the primary data containers for conservative
16 * variables, reconstruction coefficients, and gradients throughout the Euler/RANS solvers.
17 * - @ref JacobianValue, a placeholder for implicit-solver Jacobian storage in various formats.
18 * - The @ref DNDS_FV_EULEREVALUATOR_GET_FIXED_EIGEN_SEQS macro, which declares compile-time
19 * Eigen sequence objects for efficient sub-vector indexing into conservative state vectors.
20 */
21#pragma once
22#include "DNDS/Defines.hpp"
24#include "DNDS/EigenUtil.hpp"
25#include "DNDS/ConfigEnum.hpp"
26#include "CFV/VRDefines.hpp"
27#include "DNDS/Errors.hpp"
28
29namespace DNDS::Euler
30{
/**
 * @brief Declares compile-time Eigen index sequences for sub-vector access into conservative state vectors.
 *
 * This macro is intended to be expanded inside EulerEvaluator methods (or any templated
 * context where @c dim and @c gDim are available as integral constants). It creates
 * `static const` Eigen fixed-size sequences that enable zero-overhead slicing of Eigen
 * vectors representing the conservative state
 * \f$\mathbf{U} = [\rho,\; \rho u,\; \rho v,\; \rho w,\; E,\; \ldots]\f$.
 *
 * The naming convention encodes the index range (inclusive on both ends):
 * | Name        | Range (indices)  | Typical use                                      |
 * |-------------|------------------|--------------------------------------------------|
 * | Seq012      | [0, dim-1]       | All spatial indices (0-based), e.g. x,y,z coords |
 * | Seq12       | [1, dim-1]       | Spatial indices excluding the first              |
 * | Seq123      | [1, dim]         | Momentum components (rhoU, rhoV, rhoW)           |
 * | Seq23       | [2, dim]         | Momentum components excluding rhoU               |
 * | Seq234      | [2, dim+1]       | Momentum tail through energy index               |
 * | Seq34       | [3, dim+1]       | Last momentum component(s) and energy            |
 * | Seq01234    | [0, dim+1]       | Full base NS variables (rho through E)           |
 * | SeqG012     | [0, gDim-1]      | Geometry-dimension spatial indices               |
 * | SeqI52Last  | [I4+1, last]     | Turbulence/extension variables after energy      |
 * | I4          | dim+1            | Index of the energy variable in the state vector |
 *
 * @note Requires @c dim (physics dimension), @c gDim (geometry dimension), and
 *       @c EigenLast (alias for `Eigen::last`) to be in scope.
 * @note FIX(review): `I4` is now declared *before* `SeqI52Last`. The previous expansion
 *       order referenced `I4` before declaring it, which only compiled when an
 *       outer-scope `I4` happened to exist (and silently bound `SeqI52Last` to that
 *       outer value). Declaring the macro's own `I4` first removes the hidden
 *       dependency; the numeric value (`dim + 1`) is unchanged.
 */
#define DNDS_FV_EULEREVALUATOR_GET_FIXED_EIGEN_SEQS                                 \
    static const auto I4 = dim + 1;                                                 \
    static const auto Seq012 = Eigen::seq(Eigen::fix<0>, Eigen::fix<dim - 1>);      \
    static const auto Seq12 = Eigen::seq(Eigen::fix<1>, Eigen::fix<dim - 1>);       \
    static const auto Seq123 = Eigen::seq(Eigen::fix<1>, Eigen::fix<dim>);          \
    static const auto Seq23 = Eigen::seq(Eigen::fix<2>, Eigen::fix<dim>);           \
    static const auto Seq234 = Eigen::seq(Eigen::fix<2>, Eigen::fix<dim + 1>);      \
    static const auto Seq34 = Eigen::seq(Eigen::fix<3>, Eigen::fix<dim + 1>);       \
    static const auto Seq01234 = Eigen::seq(Eigen::fix<0>, Eigen::fix<dim + 1>);    \
    static const auto SeqG012 = Eigen::seq(Eigen::fix<0>, Eigen::fix<gDim - 1>);    \
    static const auto SeqI52Last = Eigen::seq(Eigen::fix<I4 + 1>, EigenLast);
67
68 /**
69 * @brief MPI-parallel distributed array of per-cell Degrees-of-Freedom (conservative variable vectors).
70 *
71 * Wraps `CFV::tUDof<nVarsFixed>` (which is `ArrayDof<nVarsFixed, 1>`), adding element-wise
72 * arithmetic operators, norms, dot products, and MPI-collective reductions that operate
73 * across all ranks transparently.
74 *
75 * Each cell stores a single column vector of @p nVarsFixed conservative variables
76 * (e.g. \f$[\rho,\; \rho u,\; \rho v,\; \rho w,\; E]\f$ for a 3D NS model).
77 * Instances of this class are used for the solution vector @c u, the solution increment
78 * @c uInc, the right-hand-side @c rhs, and other per-cell vector quantities throughout
79 * the EulerEvaluator.
80 *
81 * The "father" sub-array holds locally-owned cells; the "son" sub-array holds ghost
82 * cells received from neighboring MPI ranks. Reduction operations (norm, dot, min)
83 * only iterate over father cells and then call `MPI_Allreduce` for global results.
84 *
85 * @tparam nVarsFixed Compile-time number of conservative variables per cell, or
86 * `Eigen::Dynamic` for runtime-sized vectors.
87 *
88 * @see ArrayRECV for reconstruction coefficient storage.
89 * @see ArrayGRADV for gradient storage.
90 */
91 template <int nVarsFixed>
92 class ArrayDOFV : public CFV::tUDof<nVarsFixed>
93 {
94 public:
95 using t_self = ArrayDOFV<nVarsFixed>; ///< Self type alias for CRTP-style forwarding.
96 using t_base = CFV::tUDof<nVarsFixed>; ///< Base distributed DOF array type.
97 using t_element_mat = typename t_base::t_element_mat; ///< Per-element Eigen matrix/vector type.
98
99 /**
100 * @brief Set all DOF entries to a uniform scalar value.
101 * @param R The scalar value to assign to every component of every cell.
102 */
104 {
105 this->t_base::setConstant(R);
106 }
107 /**
108 * @brief Set all DOF entries to copies of a given vector.
109 * @param R Reference vector; each cell's DOF vector is set to this value.
110 */
111 void setConstant(const Eigen::Ref<const t_element_mat> &R )
112 {
113 this->t_base::setConstant(R);
114 }
115 /**
116 * @brief Element-wise addition: `this[i] += R[i]` for every cell.
117 * @param R Source array with the same distribution layout.
118 */
119 void operator+=(const t_self &R)
120 {
121 this->t_base::operator+=(R);
122 }
123 /**
124 * @brief Element-wise subtraction: `this[i] -= R[i]` for every cell.
125 * @param R Source array with the same distribution layout.
126 */
127 void operator-=(const t_self &R)
128 {
129 this->t_base::operator-=(R);
130 }
131 /**
132 * @brief Uniform scalar multiplication: `this[i] *= R` for every cell.
133 * @param R Scalar multiplier applied to all components of all cells.
134 */
136 {
137 this->t_base::operator*=(R);
138 }
139 /**
140 * @brief Deep copy assignment from another ArrayDOFV.
141 * @param R Source array to copy from. Must have the same distribution layout.
142 */
143 void operator=(const t_self &R)
144 {
145 this->t_base::operator=(R);
146 }
147
148 /**
149 * @brief Scaled addition: `this[i] += r * R[i]` for every cell (axpy-style).
150 * @param R Source array to add.
151 * @param r Scalar multiplier applied to @p R before addition.
152 */
153 void addTo(const t_self &R, real r)
154 {
155 this->t_base::addTo(R, r);
156 }
157
158 /**
159 * @brief Per-cell scalar multiplication from a vector of cell-wise weights.
160 *
161 * Multiplies each cell's DOF vector by the corresponding scalar in @p R:
162 * `this[i] *= R[i]`. Useful for applying cell-volume or time-step scaling.
163 *
164 * @param R Vector of per-cell scalar multipliers. Must have size >= number of
165 * locally-owned cells.
166 *
167 * @note Only operates on host memory. Asserts if device storage is active.
168 * @note OpenMP parallelized when `DNDS_DIST_MT_USE_OMP` is defined.
169 */
170 void operator*=(const std::vector<real> &R)
171 {
172 DNDS_assert(R.size() >= this->father->Size());
173 DNDS_assert(this->father->device() == DeviceBackend::Host ||
174 this->father->device() == DeviceBackend::Unknown);
175#if defined(DNDS_DIST_MT_USE_OMP)
176# pragma omp parallel for schedule(static)
177#endif
178 for (index i = 0; i < this->father->Size(); i++)
179 this->operator[](i) *= R[i];
180 }
181 /**
182 * @brief Component-wise multiplication by a scalar-per-cell ArrayDOFV<1>.
183 *
184 * Each cell's vector is scaled by the single scalar stored in the
185 * corresponding cell of @p R. Disabled via SFINAE when nVarsFixed == 1
186 * (to avoid ambiguity with the self-multiplication overload).
187 *
188 * @tparam nVarsFixed_T SFINAE helper; defaults to @p nVarsFixed.
189 * @param R Scalar-per-cell array (ArrayDOFV<1>).
190 */
191 template <int nVarsFixed_T = nVarsFixed>
192 std::enable_if_t<!(nVarsFixed_T == 1)>
193 operator*=(const ArrayDOFV<1> &R)
194 {
195 this->t_base::operator*=(R);
196 }
197
198 /**
199 * @brief Add a constant vector to every cell: `this[i] += R` for all cells.
200 * @param R Vector added uniformly to each cell's DOF.
201 */
202 void operator+=(const Eigen::Vector<real, nVarsFixed> &R)
203 {
204 this->t_base::operator+=(R);
205 }
206
207 /**
208 * @brief Add a scalar to every component of every cell.
209 * @param R Scalar value to add uniformly.
210 */
212 {
213 this->t_base::operator+=(R);
214 }
215
216 /**
217 * @brief Component-wise multiplication by a constant vector.
218 *
219 * Multiplies each cell's DOF vector component-wise by @p R:
220 * `this[i] = this[i].cwiseProduct(R)`.
221 *
222 * @param R Per-variable multiplier vector applied identically to every cell.
223 */
224 void operator*=(const Eigen::Vector<real, nVarsFixed> &R)
225 {
226 this->t_base::operator*=(R);
227 }
228
229 /**
230 * @brief Element-wise (Hadamard) multiplication: `this[i] = this[i] .* R[i]`.
231 * @param R Source array; each cell's vector is multiplied component-wise.
232 */
233 void operator*=(const t_self &R)
234 {
235 this->t_base::operator*=(R);
236 }
237
238 /**
239 * @brief Element-wise division: `this[i] = this[i] ./ R[i]`.
240 * @param R Divisor array; each cell's vector is divided component-wise.
241 */
242 void operator/=(const t_self &R)
243 {
244 this->t_base::operator/=(R);
245 }
246
247 /**
248 * @brief Compute the global L2 norm across all MPI ranks.
249 *
250 * Calculates \f$\sqrt{\sum_i \|\mathbf{u}_i\|^2}\f$ over all locally-owned cells,
251 * then performs an `MPI_Allreduce(SUM)` to obtain the global result.
252 *
253 * @return Global L2 norm (same value on all ranks).
254 */
256 {
257 return this->t_base::norm2();
258 }
259
260 /**
261 * @brief Compute the per-component global L1 norm.
262 *
263 * Returns a vector where each component @c j contains
264 * \f$\sum_i |u_{i,j}|\f$ summed over all locally-owned cells across all ranks
265 * via `MPI_Allreduce(SUM)`.
266 *
267 * @return Vector of component-wise L1 norms (same on all ranks).
268 */
269 Eigen::Vector<real, nVarsFixed> componentWiseNorm1()
270 {
271 return this->t_base::componentWiseNorm1();
272 }
273
274 /**
275 * @brief Compute the global component-wise minimum across all MPI ranks.
276 *
277 * For each conservative variable component, finds the minimum value across
278 * all locally-owned cells on all ranks. Uses `MPI_Allreduce(MIN)`.
279 *
280 * Useful for checking physical validity (e.g. minimum density or pressure).
281 *
282 * @return Vector of per-component global minimums (same on all ranks).
283 *
284 * @note Uses OpenMP custom reduction (`EigenVecMin`) when OMP is enabled.
285 * @note Only iterates over father (locally-owned) cells, not ghost cells.
286 */
287 Eigen::Vector<real, nVarsFixed> min()
288 {
289 Eigen::Vector<real, nVarsFixed> minLocal, min;
290 minLocal.resize(this->RowSize());
291 minLocal.setConstant(veryLargeReal);
292 min = minLocal;
293#if defined(DNDS_DIST_MT_USE_OMP)
294# pragma omp declare reduction(EigenVecMin : Eigen::Vector<real, nVarsFixed> : omp_out = omp_out.array().min(omp_in.array())) initializer(omp_priv = omp_orig)
295# pragma omp parallel for schedule(static) reduction(EigenVecMin : minLocal)
296#endif
297 for (index i = 0; i < this->father->Size(); i++) //*note that only father is included
298 minLocal = minLocal.array().min(this->operator[](i).array());
299 MPI::Allreduce(minLocal.data(), min.data(), minLocal.size(), DNDS_MPI_REAL, MPI_MIN, this->father->getMPI().comm);
300 return min;
301 }
302
303 /**
304 * @brief Compute the global dot product `<this, R>` with MPI reduction.
305 *
306 * Sums \f$\sum_i \mathbf{u}_i \cdot \mathbf{R}_i\f$ over locally-owned cells,
307 * then calls `MPI_Allreduce(SUM)`.
308 *
309 * @param R The other ArrayDOFV operand.
310 * @return Global dot product (same value on all ranks).
311 */
312 real dot(const t_self &R)
313 {
314 return this->t_base::dot(R);
315 }
316
317 /**
318 * @brief Compute a weighted global dot product with per-component multipliers.
319 *
320 * Calculates \f$\sum_i (u_i \circ m_L) \cdot (R_i \circ m_R)\f$ where
321 * \f$\circ\f$ denotes component-wise (Hadamard) multiplication by the
322 * multiplier arrays. Performs `MPI_Allreduce(SUM)` for the global result.
323 *
324 * This is useful for preconditioned inner products in iterative solvers
325 * where different variable components require different scaling.
326 *
327 * @tparam TMultL Type of the left multiplier (typically Eigen array expression).
328 * @tparam TMultR Type of the right multiplier (typically Eigen array expression).
329 * @param R The other ArrayDOFV operand.
330 * @param mL Per-component multiplier applied to `this` entries.
331 * @param mR Per-component multiplier applied to @p R entries.
332 * @return Global weighted dot product (same value on all ranks).
333 *
334 * @note Only operates on host memory. Asserts if device storage is active.
335 * @note OpenMP parallelized when `DNDS_DIST_MT_USE_OMP` is defined.
336 */
337 template <class TMultL, class TMultR>
339 {
340 DNDS_assert(this->father->device() == DeviceBackend::Host ||
341 this->father->device() == DeviceBackend::Unknown);
343#if defined(DNDS_DIST_MT_USE_OMP)
344# pragma omp parallel for schedule(static) reduction(+ : sqrSum)
345#endif
346 for (index i = 0; i < this->father->Size(); i++) //*note that only father is included
347 sqrSum += (this->operator[](i).array() * mL).matrix().dot((R.operator[](i).array() * mR).matrix());
348 MPI::Allreduce(&sqrSum, &sqrSumAll, 1, DNDS_MPI_REAL, MPI_SUM, this->father->getMPI().comm);
349 return sqrSumAll;
350 }
351
352 /**
353 * @brief Clamp all components from below: `this[i] = max(this[i], R)`.
354 *
355 * Applies a component-wise maximum with the given value @p R, ensuring no
356 * component drops below @p R. Operates over all cells including ghosts.
357 *
358 * @tparam TR Type of the lower bound (scalar or array expression broadcastable
359 * by Eigen's `.max()`).
360 * @param R Lower bound value.
361 *
362 * @note Only operates on host memory. Asserts if device storage is active.
363 * @note OpenMP parallelized when `DNDS_DIST_MT_USE_OMP` is defined.
364 */
365 template <class TR>
367 {
368 DNDS_assert(this->father->device() == DeviceBackend::Host ||
369 this->father->device() == DeviceBackend::Unknown);
370#if defined(DNDS_DIST_MT_USE_OMP)
371# pragma omp parallel for schedule(static)
372#endif
373 for (index i = 0; i < this->Size(); i++)
374 this->operator[](i).array() = this->operator[](i).array().max(R);
375 }
376 };
377
378 /**
379 * @brief MPI-parallel distributed array of per-cell reconstruction coefficient matrices.
380 *
381 * Wraps `CFV::tURec<nVarsFixed>` (which is `ArrayDof<DynamicSize, nVarsFixed>`), adding
382 * element-wise arithmetic, norms, dot products, and MPI-collective reductions.
383 *
384 * Each cell stores a matrix of shape (nRecBases x nVars), where @c nRecBases is the
385 * number of polynomial reconstruction basis functions (determined at runtime by the
386 * reconstruction order) and @c nVars is the number of conservative variables. These
387 * coefficients define the high-order polynomial representation of the solution within
388 * each cell, as used by the Compact Finite Volume variational reconstruction scheme.
389 *
390 * @tparam nVarsFixed Compile-time number of conservative variables (matrix columns),
391 * or `Eigen::Dynamic` for runtime-sized.
392 *
393 * @todo Add more arithmetic operators (see existing TODO in source).
394 *
395 * @see ArrayDOFV for per-cell DOF vectors.
396 * @see ArrayGRADV for per-cell gradient matrices.
397 */
398 ///@todo://TODO add operators
399 template <int nVarsFixed>
400 class ArrayRECV : public CFV::tURec<nVarsFixed>
401 {
402 public:
403 using t_self = ArrayRECV<nVarsFixed>; ///< Self type alias.
404 using t_base = CFV::tURec<nVarsFixed>; ///< Base distributed reconstruction array type.
405
406 /**
407 * @brief Set all reconstruction coefficients to a uniform scalar value.
408 * @param R Scalar value to fill every matrix entry with.
409 */
411 {
412 this->t_base::setConstant(R);
413 }
414 /**
415 * @brief Set all cells' reconstruction matrices to copies of a given matrix.
416 *
417 * @tparam TR Matrix type compatible with the per-cell storage (must be assignable
418 * to each cell's matrix block).
419 * @param R Reference matrix; each cell's coefficient matrix is set to this value.
420 *
421 * @note Only operates on host memory. Asserts if device storage is active.
422 * @note OpenMP parallelized when `DNDS_DIST_MT_USE_OMP` is defined.
423 */
424 template <class TR>
425 void setConstant(const TR &R)
426 {
427 DNDS_assert(this->father->device() == DeviceBackend::Host ||
428 this->father->device() == DeviceBackend::Unknown);
429#if defined(DNDS_DIST_MT_USE_OMP)
430# pragma omp parallel for schedule(static)
431#endif
432 for (index i = 0; i < this->Size(); i++)
433 this->operator[](i) = R;
434 }
435 /**
436 * @brief Element-wise addition: `this[i] += R[i]` for every cell.
437 * @param R Source array with the same distribution layout.
438 */
439 void operator+=(const t_self &R)
440 {
441 this->t_base::operator+=(R);
442 }
443 /**
444 * @brief Element-wise subtraction: `this[i] -= R[i]` for every cell.
445 * @param R Source array with the same distribution layout.
446 */
447 void operator-=(const t_self &R)
448 {
449 this->t_base::operator-=(R);
450 }
451 /**
452 * @brief Uniform scalar multiplication: `this[i] *= R` for every cell.
453 * @param R Scalar multiplier applied to all entries of all cells.
454 */
456 {
457 this->t_base::operator*=(R);
458 }
459 /**
460 * @brief Per-cell scalar multiplication from a vector of cell-wise weights.
461 *
462 * Multiplies each cell's reconstruction matrix by the corresponding scalar in @p R:
463 * `this[i] *= R[i]`. Only operates on father (locally-owned) cells.
464 *
465 * @param R Vector of per-cell scalar multipliers. Must have size >= number of
466 * locally-owned cells.
467 *
468 * @note Only operates on host memory. Asserts if device storage is active.
469 * @note OpenMP parallelized when `DNDS_DIST_MT_USE_OMP` is defined.
470 */
471 void operator*=(const std::vector<real> &R)
472 {
473 DNDS_assert(this->father->device() == DeviceBackend::Host ||
474 this->father->device() == DeviceBackend::Unknown);
475 DNDS_assert(R.size() >= this->father->Size());
476#if defined(DNDS_DIST_MT_USE_OMP)
477# pragma omp parallel for schedule(static)
478#endif
479 for (index i = 0; i < this->father->Size(); i++)
480 this->operator[](i) *= R[i];
481 }
482 /**
483 * @brief Component-wise multiplication by a scalar-per-cell ArrayDOFV<1>.
484 *
485 * Each cell's reconstruction matrix is scaled by the single scalar stored in
486 * the corresponding cell of @p R.
487 *
488 * @param R Scalar-per-cell array (ArrayDOFV<1>).
489 */
491 {
492 this->t_base::operator*=(R);
493 }
494 /**
495 * @brief Column-wise scaling by a per-variable multiplier row-vector.
496 *
497 * Each column (variable) of every cell's reconstruction matrix is multiplied by
498 * the corresponding element of @p R: `this[i].col(j) *= R(j)`.
499 * Useful for applying per-variable scaling (e.g. non-dimensionalization).
500 *
501 * @param R Row array of per-variable multipliers (1 x nVarsFixed).
502 *
503 * @note Only operates on host memory. Asserts if device storage is active.
504 * @note OpenMP parallelized when `DNDS_DIST_MT_USE_OMP` is defined.
505 */
506 void operator*=(const Eigen::Array<real, 1, nVarsFixed> &R)
507 {
508 DNDS_assert(this->father->device() == DeviceBackend::Host ||
509 this->father->device() == DeviceBackend::Unknown);
510#if defined(DNDS_DIST_MT_USE_OMP)
511# pragma omp parallel for schedule(static)
512#endif
513 for (index i = 0; i < this->Size(); i++)
514 this->operator[](i).array().rowwise() *= R;
515 }
516 /**
517 * @brief Deep copy assignment from another ArrayRECV.
518 * @param R Source array to copy from. Must have the same distribution layout.
519 */
520 void operator=(const t_self &R)
521 {
522 this->t_base::operator=(R);
523 }
524
525 /**
526 * @brief Column-wise scaled addition: `this[i] += R[i] .* r` (per-variable scaling).
527 *
528 * For each cell, adds @p R's reconstruction matrix with each column scaled by the
529 * corresponding element of @p r: `this[i].col(j) += r(j) * R[i].col(j)`.
530 *
531 * @param R Source reconstruction array to add.
532 * @param r Per-variable (column-wise) scaling factors (1 x nVarsFixed).
533 *
534 * @note Only operates on host memory. Asserts if device storage is active.
535 * @note OpenMP parallelized when `DNDS_DIST_MT_USE_OMP` is defined.
536 */
537 void addTo(const t_self &R, const Eigen::Array<real, 1, nVarsFixed> &r)
538 {
539 DNDS_assert(this->father->device() == DeviceBackend::Host ||
540 this->father->device() == DeviceBackend::Unknown);
541#if defined(DNDS_DIST_MT_USE_OMP)
542# pragma omp parallel for schedule(static)
543#endif
544 for (index i = 0; i < this->Size(); i++)
545 this->operator[](i).array() += R.operator[](i).array().rowwise() * r;
546 }
547
548 /**
549 * @brief Uniform scaled addition: `this[i] += r * R[i]` for every cell (axpy-style).
550 * @param R Source reconstruction array to add.
551 * @param r Uniform scalar multiplier applied to @p R before addition.
552 */
553 void addTo(const t_self &R, real r)
554 {
555 this->t_base::addTo(R, r);
556 }
557
558 /**
559 * @brief Compute the global L2 norm of all reconstruction coefficients with MPI reduction.
560 * @return Global Frobenius-like L2 norm (same value on all ranks).
561 */
563 {
564 return this->t_base::norm2();
565 }
566
567 /**
568 * @brief Compute the global dot product `<this, R>` with MPI reduction.
569 *
570 * Sums the element-wise product of all matrix entries across locally-owned cells,
571 * then performs `MPI_Allreduce(SUM)`.
572 *
573 * @param R The other ArrayRECV operand.
574 * @return Global dot product (same value on all ranks).
575 */
576 real dot(const t_self &R)
577 {
578 return this->t_base::dot(R);
579 }
580
581 /**
582 * @brief Compute a per-variable (column-wise) dot product with MPI reduction.
583 *
584 * Returns a row vector where each component @c j contains
585 * \f$\sum_i \sum_k \text{this}[i](k,j) \cdot R[i](k,j)\f$
586 * summed over all locally-owned cells and reconstruction bases,
587 * then reduced across MPI ranks via `MPI_Allreduce(SUM)`.
588 *
589 * Useful for monitoring per-variable convergence of reconstruction coefficients.
590 *
591 * @param R The other ArrayRECV operand.
592 * @return Row vector of per-variable dot products (1 x nVarsFixed, same on all ranks).
593 *
594 * @note Only operates on host memory. Asserts if device storage is active.
595 * @note Uses OpenMP custom reduction (`EigenVecSum`) when OMP is enabled.
596 * @note Only iterates over father (locally-owned) cells, not ghost cells.
597 */
598 auto dotV(const t_self &R)
599 {
600 DNDS_assert(this->father->device() == DeviceBackend::Host ||
601 this->father->device() == DeviceBackend::Unknown);
602 Eigen::RowVector<real, nVarsFixed> sqrSum, sqrSumAll;
603 sqrSum.resize(this->father->MatColSize());
604 sqrSumAll.resizeLike(sqrSum);
605 sqrSum.setZero();
606#if defined(DNDS_DIST_MT_USE_OMP)
607# pragma omp declare reduction(EigenVecSum : Eigen::RowVector<real, nVarsFixed> : omp_out += omp_in) initializer(omp_priv = omp_orig)
608# pragma omp parallel for schedule(static) reduction(EigenVecSum : sqrSum)
609#endif
610 for (index i = 0; i < this->father->Size(); i++) //*note that only father is included
611 sqrSum += (this->operator[](i).array() * R.operator[](i).array()).colwise().sum().matrix();
612 MPI::Allreduce(sqrSum.data(), sqrSumAll.data(), sqrSum.size(), DNDS_MPI_REAL, MPI_SUM, this->father->getMPI().comm);
613 return sqrSumAll;
614 }
615 };
616
617 /**
618 * @brief MPI-parallel distributed array of per-cell gradient matrices.
619 *
620 * Wraps `CFV::tUGrad<nVarsFixed, gDim>` (which is `ArrayDof<gDim, nVarsFixed>`), adding
621 * element-wise arithmetic operators for gradient manipulation.
622 *
623 * Each cell stores a (gDim x nVars) matrix where row @c d contains the partial derivative
624 * of all conservative variables with respect to spatial coordinate @c d:
625 * \f[
626 * \mathbf{G}_i = \begin{bmatrix}
627 * \partial u_1/\partial x_1 & \cdots & \partial u_{nVars}/\partial x_1 \\
628 * \vdots & \ddots & \vdots \\
629 * \partial u_1/\partial x_{gDim} & \cdots & \partial u_{nVars}/\partial x_{gDim}
630 * \end{bmatrix}
631 * \f]
632 *
633 * @tparam nVarsFixed Compile-time number of conservative variables (matrix columns),
634 * or `Eigen::Dynamic` for runtime-sized.
635 * @tparam gDim Geometry (mesh) spatial dimension (2 or 3), determining the
636 * number of gradient rows.
637 *
638 * @see ArrayDOFV for per-cell DOF vectors.
639 * @see ArrayRECV for reconstruction coefficient storage.
640 */
641 template <int nVarsFixed, int gDim>
642 class ArrayGRADV : public CFV::tUGrad<nVarsFixed, gDim>
643 {
644 public:
645 using t_self = ArrayGRADV<nVarsFixed, gDim>; ///< Self type alias.
646 using t_base = CFV::tUGrad<nVarsFixed, gDim>; ///< Base distributed gradient array type.
647
648 /**
649 * @brief Set all gradient entries to a uniform scalar value.
650 * @param R Scalar value to fill every matrix entry with.
651 */
653 {
654 this->t_base::setConstant(R);
655 }
656 /**
657 * @brief Set all cells' gradient matrices to copies of a given matrix.
658 *
659 * @tparam TR Matrix type compatible with per-cell gradient storage.
660 * @param R Reference matrix; each cell's gradient is set to this value.
661 *
662 * @note Only operates on host memory. Asserts if device storage is active.
663 * @note OpenMP parallelized when `DNDS_DIST_MT_USE_OMP` is defined.
664 */
665 template <class TR>
666 void setConstant(const TR &R)
667 {
668 DNDS_assert(this->father->device() == DeviceBackend::Host ||
669 this->father->device() == DeviceBackend::Unknown);
670#if defined(DNDS_DIST_MT_USE_OMP)
671# pragma omp parallel for schedule(static)
672#endif
673 for (index i = 0; i < this->Size(); i++)
674 this->operator[](i) = R;
675 }
676
677 /**
678 * @brief Element-wise addition: `this[i] += R[i]` for every cell.
679 * @param R Source gradient array with the same distribution layout.
680 */
682 {
683 this->t_base::operator+=(R);
684 }
685 /**
686 * @brief Element-wise subtraction: `this[i] -= R[i]` for every cell.
687 * @param R Source gradient array with the same distribution layout.
688 */
690 {
691 this->t_base::operator-=(R);
692 }
693 /**
694 * @brief Uniform scalar multiplication: `this[i] *= R` for every cell.
695 * @param R Scalar multiplier applied to all entries of all cells.
696 */
698 {
699 this->t_base::operator*=(R);
700 }
701 /**
702 * @brief Per-cell scalar multiplication from a vector of cell-wise weights.
703 *
704 * Multiplies each cell's gradient matrix by the corresponding scalar in @p R:
705 * `this[i] *= R[i]`. Only operates on father (locally-owned) cells.
706 *
707 * @param R Vector of per-cell scalar multipliers. Must have size >= number of
708 * locally-owned cells.
709 *
710 * @note OpenMP parallelized when `DNDS_DIST_MT_USE_OMP` is defined.
711 */
712 void operator*=(std::vector<real> &R)
713 {
714 DNDS_assert(R.size() >= this->father->Size());
715#if defined(DNDS_DIST_MT_USE_OMP)
716# pragma omp parallel for schedule(static)
717#endif
718 for (index i = 0; i < this->father->Size(); i++)
719 this->operator[](i) *= R[i];
720 }
721 /**
722 * @brief Component-wise multiplication by a scalar-per-cell ArrayDOFV<1>.
723 *
724 * Each cell's gradient matrix is scaled by the single scalar stored in
725 * the corresponding cell of @p R.
726 *
727 * @param R Scalar-per-cell array (ArrayDOFV<1>).
728 */
730 {
731 this->t_base::operator*=(R);
732 }
733 /**
734 * @brief Column-wise scaling by a per-variable multiplier row-vector.
735 *
736 * Each column (variable) of every cell's gradient matrix is multiplied by
737 * the corresponding element of @p R: `this[i].col(j) *= R(j)`.
738 *
739 * @param R Row array of per-variable multipliers (1 x nVarsFixed).
740 *
741 * @note Only operates on host memory. Asserts if device storage is active.
742 * @note OpenMP parallelized when `DNDS_DIST_MT_USE_OMP` is defined.
743 */
744 void operator*=(const Eigen::Array<real, 1, nVarsFixed> &R)
745 {
746 DNDS_assert(this->father->device() == DeviceBackend::Host ||
747 this->father->device() == DeviceBackend::Unknown);
748#if defined(DNDS_DIST_MT_USE_OMP)
749# pragma omp parallel for schedule(static)
750#endif
751 for (index i = 0; i < this->Size(); i++)
752 this->operator[](i).array().rowwise() *= R;
753 }
754 /**
755 * @brief Deep copy assignment from another ArrayGRADV.
756 * @param R Source array to copy from. Must have the same distribution layout.
757 */
759 {
760 this->t_base::operator=(R);
761 }
762 };
763
764 /**
765 * @brief Placeholder container for implicit-solver Jacobian matrix storage.
766 *
767 * Supports three storage modes for the flux Jacobian \f$\partial \mathbf{R}/\partial \mathbf{U}\f$:
768 * - **Diagonal:** One scalar per variable per cell (cheapest, first-order implicit).
769 * - **DiagonalBlock:** One dense (nVars x nVars) block per cell (block-Jacobi preconditioner).
770 * - **Full:** Sparse block structure following cell-to-cell adjacency (full implicit).
771 *
772 * @warning This class is currently a stub. The `Set*` and `InverseDiag` methods contain
773 * only allocation placeholders (marked with `// todo`). Actual Jacobian assembly
774 * and inversion are not yet implemented.
775 *
776 * @tparam nVarsFixed Compile-time number of conservative variables, determining the
777 * block size for DiagonalBlock and Full modes.
778 */
779 template <int nVarsFixed>
781 {
782 public:
783 /**
784 * @brief Jacobian storage mode selector.
785 */
786 enum Type
787 {
788 Diagonal = 0, ///< One scalar per variable per cell (diagonal approximation).
789 DiagonalBlock = 1, ///< One (nVars x nVars) dense block per cell.
790 Full = 2, ///< Sparse block structure following cell adjacency graph.
791 };
792 ArrayDOFV<nVarsFixed> diag; ///< Diagonal Jacobian values (one vector per cell).
793 ArrayDOFV<nVarsFixed> diagInv; ///< Inverse of diagonal Jacobian values.
794 ArrayEigenMatrix<nVarsFixed, nVarsFixed> diagBlock; ///< Block-diagonal Jacobian (one nVars x nVars matrix per cell).
795 ArrayEigenMatrix<nVarsFixed, nVarsFixed> diagBlockInv; ///< Inverse of block-diagonal Jacobian.
796 ArrayRECV<nVarsFixed> offDiagBlock; ///< Off-diagonal blocks for full Jacobian (sparse, adjacency-based).
797
798 /**
799 * @brief Initialize storage for diagonal (scalar-per-variable) Jacobian mode.
800 * @param uDof Reference DOF array whose distribution layout is used for allocation.
801 * @todo Implement actual allocation of diagonal storage.
802 */
804 {
805 type = Diagonal;
806 // todo ! allocate square blocks!
807 }
808
809 /**
810 * @brief Initialize storage for block-diagonal (dense block-per-cell) Jacobian mode.
811 * @param uDof Reference DOF array whose distribution layout is used for allocation.
812 * @todo Implement actual allocation of (nVars x nVars) block storage.
813 */
815 {
816 type = DiagonalBlock;
817 // todo ! allocate square blocks!
818 }
819
820 /**
821 * @brief Initialize storage for the full sparse block Jacobian following cell adjacency.
822 * @param uDof Reference DOF array whose distribution layout is used for allocation.
823 * @param cell2cell Cell-to-cell adjacency pair defining the sparsity pattern.
824 * @todo Implement actual allocation using the adjacency graph.
825 */
827 {
828 type = Full;
829 // todo ! allocate with adjacency!
830 }
831
832 /**
833 * @brief Compute the inverse of the diagonal (or block-diagonal) Jacobian.
834 *
835 * Stores the result in @ref diagInv (Diagonal mode) or @ref diagBlockInv
836 * (DiagonalBlock mode) for use as a preconditioner in iterative solvers.
837 *
838 * @todo Implement actual inversion logic.
839 */
841 {
842 // todo get inverse!
843 }
844
845 private:
846 Type type = Diagonal; ///< Currently active Jacobian storage mode.
847 };
848
849 /**
850 * @brief Enumerates the available compressible flow solver model configurations.
851 *
852 * Each enumerant selects a combination of:
853 * - **Physics dimension** (`dim`): number of velocity components in the equations.
854 * All models use `dim=3` (three velocity components) except `NS_2D` which uses `dim=2`.
855 * - **Geometry dimension** (`gDim`): mesh spatial dimension (2D or 3D).
856 * Models without `_3D` suffix use `gDim=2`; those with `_3D` use `gDim=3`.
857 * - **Turbulence model**: plain NS (no turbulence transport), Spalart-Allmaras (`SA`, +1 var),
858 * two-equation RANS (`2EQ`, +2 vars), or extended/dynamic (`EX`, runtime-determined).
859 *
860 * The compile-time number of conservative variables (`nVarsFixed`) follows from the model:
861 * | Model | nVarsFixed | Variables |
862 * |-------------|------------|------------------------------------------------------|
863 * | NS, NS_3D | 5 | \f$\rho,\; \rho u,\; \rho v,\; \rho w,\; E\f$ |
864 * | NS_2D | 4 | \f$\rho,\; \rho u,\; \rho v,\; E\f$ |
865 * | NS_SA, NS_SA_3D | 6 | NS + \f$\rho\tilde{\nu}\f$ |
866 * | NS_2EQ, NS_2EQ_3D | 7 | NS + \f$\rho k,\; \rho\omega\f$ |
867 * | NS_EX, NS_EX_3D | Dynamic | Runtime-determined (Eigen::Dynamic) |
868 *
869 * @see getnVarsFixed() for the nVarsFixed mapping.
870 * @see getDim_Fixed() for the physics dimension mapping.
871 * @see getGeomDim_Fixed() for the geometry dimension mapping.
872 * @see EulerModelTraits for a compile-time trait bundle.
873 */
875 {
876 NS = 0, ///< Navier-Stokes, 2D geometry, 3D physics (5 vars).
877 NS_SA = 1, ///< NS + Spalart-Allmaras, 2D geometry (6 vars).
878 NS_2D = 2, ///< NS with 2D physics (2 velocity components, 4 vars). The only model with dim=2.
879 NS_3D = 3, ///< Navier-Stokes, 3D geometry, 3D physics (5 vars).
880 NS_SA_3D = 4, ///< NS + Spalart-Allmaras, 3D geometry (6 vars).
881 NS_2EQ = 5, ///< NS + 2-equation RANS, 2D geometry (7 vars).
882 NS_2EQ_3D = 6, ///< NS + 2-equation RANS, 3D geometry (7 vars).
883 NS_EX = 101, ///< Extended NS, 2D geometry, dynamic nVars (Eigen::Dynamic).
884 NS_EX_3D = 102, ///< Extended NS, 3D geometry, dynamic nVars (Eigen::Dynamic).
885 };
886
887 /**
888 * @brief Enumerates the available RANS turbulence closure models.
889 *
890 * Selects which turbulence transport equations are solved at runtime. This is orthogonal
891 * to the compile-time @ref EulerModel selection: `EulerModel` determines the number of
892 * transport variables, while `RANSModel` selects the specific closure (e.g. which
893 * two-equation model to use when `EulerModel` is `NS_2EQ` or `NS_2EQ_3D`).
894 *
895 * JSON serialization is provided via @c DNDS_DEFINE_ENUM_JSON.
896 */
898 {
899 RANS_Unknown = 0, ///< Sentinel / uninitialized value.
900 RANS_None, ///< No turbulence model (laminar or DNS).
901 RANS_SA, ///< Spalart-Allmaras one-equation model.
902 RANS_KOWilcox, ///< Wilcox k-omega two-equation model.
903 RANS_KOSST, ///< Menter k-omega SST two-equation model.
904 RANS_RKE, ///< Realizable k-epsilon two-equation model.
905 };
906
908 RANSModel,
909 {
910 {RANS_Unknown, nullptr},
911 {RANS_None, "RANS_None"},
912 {RANS_SA, "RANS_SA"},
913 {RANS_KOWilcox, "RANS_KOWilcox"},
914 {RANS_KOSST, "RANS_KOSST"},
915 {RANS_RKE, "RANS_RKE"},
916 })
917
918 /**
919 * @brief Returns the compile-time (fixed) number of conservative variables for a given model.
920 *
921 * This is a `constexpr` function suitable for use as a template argument. The mapping is:
922 * - NS, NS_3D: 5 (\f$\rho, \rho u, \rho v, \rho w, E\f$)
923 * - NS_SA, NS_SA_3D: 6 (+ \f$\rho\tilde{\nu}\f$)
924 * - NS_2D: 4 (\f$\rho, \rho u, \rho v, E\f$)
925 * - NS_2EQ, NS_2EQ_3D: 7 (+ \f$\rho k, \rho\omega\f$)
926 * - NS_EX, NS_EX_3D: `Eigen::Dynamic` (determined at runtime)
927 *
928 * @param model The Euler model enumerant.
929 * @return Compile-time variable count, or `Eigen::Dynamic` (-1) for extended models.
930 *
931 * @see getNVars() for a version that always returns a positive runtime value.
932 */
933 constexpr static inline int getnVarsFixed(const EulerModel model)
934 {
935 if (model == NS || model == NS_3D)
936 return 5;
937 else if (model == NS_SA)
938 return 6;
939 else if (model == NS_SA_3D)
940 return 6;
941 else if (model == NS_2D)
942 return 4;
943 else if (model == NS_2EQ || model == NS_2EQ_3D)
944 return 7;
945 return Eigen::Dynamic;
946 }
947
948 /**
949 * @brief Returns the runtime number of conservative variables for a given model.
950 *
951 * Unlike @ref getnVarsFixed(), this function always returns a positive integer.
952 * For fixed-size models it returns the same value as `getnVarsFixed()`. For
953 * extended models (`NS_EX`, `NS_EX_3D`) where `getnVarsFixed()` returns
954 * `Eigen::Dynamic`, this function falls through to a secondary lookup that
955 * returns the base variable count (though extended models may require additional
956 * runtime configuration to determine the true count).
957 *
958 * @param model The Euler model enumerant.
959 * @return Positive number of conservative variables.
960 *
961 * @note For extended models, the returned value is a base count and may need
962 * to be augmented by runtime configuration (e.g. number of species).
963 */
964 constexpr static inline int getNVars(EulerModel model)
965 {
966 int nVars = getnVarsFixed(model);
967 if (nVars < 0)
968 {
969 if (model == NS || model == NS_3D)
970 return 5;
971 else if (model == NS_SA)
972 return 6;
973 else if (model == NS_SA_3D)
974 return 6;
975 else if (model == NS_2D)
976 return 4;
977 else if (model == NS_2EQ || model == NS_2EQ_3D)
978 return 7;
979 // *** handle variable nVars
980 }
981 return nVars;
982 }
983
984 /**
985 * @brief Returns the physics spatial dimension for a given model at compile time.
986 *
987 * The physics dimension determines how many velocity components appear in the
988 * governing equations:
989 * - `NS_2D`: returns 2 (only \f$u, v\f$ velocity components).
990 * - All other models: returns 3 (full \f$u, v, w\f$ velocity components),
991 * even for 2D-geometry models where the mesh is two-dimensional.
992 *
993 * @param model The Euler model enumerant.
994 * @return Physics dimension (2 or 3), or `Eigen::Dynamic` if unrecognized.
995 *
996 * @see getGeomDim_Fixed() for the mesh/geometry dimension.
997 */
998 constexpr static inline int getDim_Fixed(const EulerModel model)
999 {
1000 if (model == NS || model == NS_3D)
1001 return 3;
1002 else if (model == NS_SA)
1003 return 3;
1004 else if (model == NS_SA_3D)
1005 return 3;
1006 else if (model == NS_2D)
1007 return 2;
1008 else if (model == NS_2EQ || model == NS_2EQ_3D)
1009 return 3;
1010 else if (model == NS_EX || model == NS_EX_3D)
1011 return 3;
1012 return Eigen::Dynamic;
1013 }
1014
1015 /**
1016 * @brief Returns the geometry (mesh) spatial dimension for a given model at compile time.
1017 *
1018 * The geometry dimension determines whether the mesh is two-dimensional or
1019 * three-dimensional:
1020 * - 2D geometry (`gDim=2`): NS, NS_SA, NS_2D, NS_2EQ, NS_EX.
1021 * - 3D geometry (`gDim=3`): NS_3D, NS_SA_3D, NS_2EQ_3D, NS_EX_3D.
1022 *
1023 * Note that `gDim` differs from the physics dimension (`dim`). For example,
1024 * `NS` has `gDim=2` (2D mesh) but `dim=3` (3 velocity components in the equations).
1025 *
1026 * @param model The Euler model enumerant.
1027 * @return Geometry dimension (2 or 3), or `Eigen::Dynamic` if unrecognized.
1028 *
1029 * @see getDim_Fixed() for the physics dimension.
1030 */
1031 constexpr static inline int getGeomDim_Fixed(const EulerModel model)
1032 {
1033 if (model == NS)
1034 return 2;
1035 else if (model == NS_SA)
1036 return 2;
1037 else if (model == NS_2D)
1038 return 2;
1039 else if (model == NS_3D)
1040 return 3;
1041 else if (model == NS_SA_3D)
1042 return 3;
1043 else if (model == NS_2EQ)
1044 return 2;
1045 else if (model == NS_2EQ_3D)
1046 return 3;
1047 else if (model == NS_EX)
1048 return 2;
1049 else if (model == NS_EX_3D)
1050 return 3;
1051 return Eigen::Dynamic;
1052 }
1053
1054 // constexpr static inline bool ifFixedNvars(EulerModel model)
1055 // {
1056 // return (
1057 // model == NS ||
1058 // model == NS_SA);
1059 // } // use +/- is ok
1060
1061 /**
1062 * @brief Compile-time traits for EulerModel variants.
1063 *
1064 * Bundles all model-dependent compile-time constants (variable count, dimensions,
1065 * feature flags) into a single struct, replacing scattered `if constexpr` chains
1066 * with clean named traits. All members are `static constexpr`.
1067 *
1068 * Usage example:
1069 * @code
1070 * using Traits = EulerModelTraits<NS_SA_3D>;
1071 * static_assert(Traits::nVarsFixed == 6);
1072 * static_assert(Traits::dim == 3);
1073 * static_assert(Traits::gDim == 3);
1074 * static_assert(Traits::hasSA);
1075 * static_assert(Traits::hasRANS);
1076 * static_assert(Traits::nRANSVars == 1);
1077 * @endcode
1078 *
1079 * Future models (reactive, multi-species) should add traits here rather than
1080 * adding new if-constexpr chains.
1081 *
1082 * @tparam model The EulerModel enumerant to query traits for.
1083 */
1084 template <EulerModel model>
1086 {
1087 /// Number of fixed conservative variables (Eigen::Dynamic for NS_EX).
1088 static constexpr int nVarsFixed = getnVarsFixed(model);
1089 /// Physics spatial dimension (2 for NS_2D, 3 for all others).
1090 static constexpr int dim = getDim_Fixed(model);
1091 /// Geometry (mesh) spatial dimension.
1092 static constexpr int gDim = getGeomDim_Fixed(model);
1093
1094 /// True for Spalart-Allmaras models (NS_SA, NS_SA_3D).
1095 static constexpr bool hasSA = (model == NS_SA || model == NS_SA_3D);
1096 /// True for 2-equation RANS models (NS_2EQ, NS_2EQ_3D).
1097 static constexpr bool has2EQ = (model == NS_2EQ || model == NS_2EQ_3D);
1098 /// True for any RANS turbulence model (SA or 2-equation).
1099 static constexpr bool hasRANS = hasSA || has2EQ;
1100 /// Number of extra RANS transport variables (0, 1, or 2).
1101 static constexpr int nRANSVars = hasSA ? 1 : (has2EQ ? 2 : 0);
1102
1103 /// True for extended/dynamic models (NS_EX, NS_EX_3D).
1104 static constexpr bool isExtended = (model == NS_EX || model == NS_EX_3D);
1105 /// True for plain NS without turbulence or extensions.
1106 static constexpr bool isPlainNS = !hasRANS && !isExtended;
1107 /// True for 2D geometry models.
1108 static constexpr bool isGeom2D = (gDim == 2);
1109 /// True for 3D geometry models.
1110 static constexpr bool isGeom3D = (gDim == 3);
1111 };
1112
1113 /**
1114 * @brief Compile-time multiplication that propagates `Eigen::Dynamic`.
1115 *
1116 * Returns `nVarsFixed * mul` when @p nVarsFixed is a positive compile-time constant.
1117 * If @p nVarsFixed equals `Eigen::Dynamic` (i.e. -1), the result is `Eigen::Dynamic`,
1118 * preserving the dynamic-size sentinel through arithmetic. This is needed when
1119 * computing derived template sizes (e.g. Jacobian block dimensions) from `nVarsFixed`.
1120 *
1121 * @tparam nVarsFixed Base compile-time size, or `Eigen::Dynamic`.
1122 * @tparam mul Positive integer multiplier.
1123 * @return `nVarsFixed * mul`, or `Eigen::Dynamic` if input is dynamic.
1124 */
1125 template <int nVarsFixed, int mul>
1126 constexpr static inline int nvarsFixedMultiply()
1127 {
1128 return nVarsFixed != Eigen::Dynamic ? nVarsFixed * mul : Eigen::Dynamic;
1129 }
1130}
Extended enum-to-JSON serialization macro that also exposes allowed string values for JSON Schema gen...
#define DNDS_DEFINE_ENUM_JSON(EnumType_,...)
Define JSON serialization for an enum AND expose its allowed string values.
Core type aliases, constants, and metaprogramming utilities for the DNDS framework.
Device memory abstraction layer with backend-specific storage and factory creation.
Eigen extensions: to_string, an fmt-safe wrapper, and fmt formatter specialisations for dense Eigen m...
Assertion / error-handling macros and supporting helper functions.
#define DNDS_assert(expr)
Debug-only assertion (compiled out when DNDS_NDEBUG is defined). Prints the expression + file/line + ...
Definition Errors.hpp:108
Mutable device view of an ArrayDof father/son pair.
Definition ArrayDOF.hpp:34
Primary solver state container: an ArrayEigenMatrix pair with MPI-collective vector-space operations.
Definition ArrayDOF.hpp:172
real norm2()
Global L2 norm (MPI-collective). sqrt(sum_i sum_j x_ij^2).
Definition ArrayDOF.hpp:301
t_element_mat componentWiseNorm1()
Per-component global L1 norm, returned as an n_m x n_n matrix (collective).
Definition ArrayDOF.hpp:331
void operator-=(const t_self &R)
In-place element-wise subtract: this -= R.
Definition ArrayDOF.hpp:249
real dot(const t_self &R)
Global inner product: sum_i sum_j x_ij * R_ij (collective).
Definition ArrayDOF.hpp:343
void operator+=(const t_self &R)
In-place element-wise add: this += R.
Definition ArrayDOF.hpp:231
void operator/=(const t_self &R)
Element-wise divide: this /= R.
Definition ArrayDOF.hpp:283
void operator*=(real R)
Scalar multiply in place.
Definition ArrayDOF.hpp:255
void setConstant(real R)
Set every entry of every (father+son) row to the scalar R.
Definition ArrayDOF.hpp:219
void addTo(const t_self &R, real r)
AXPY: this += r * R. One of the hot-path solver primitives.
Definition ArrayDOF.hpp:295
ArrayDofDeviceView< B, n_m, n_n > t_deviceView
Mutable device view alias.
Definition ArrayDOF.hpp:179
Eigen::Matrix< real, RowSize_To_EigenSize(n_m), RowSize_To_EigenSize(n_n)> t_element_mat
Shape of one DOF row as an Eigen matrix.
Definition ArrayDOF.hpp:208
void operator=(const t_self &R)
Value-copy assignment from another ArrayDof of identical layout.
Definition ArrayDOF.hpp:289
ParArray<real> whose operator[] returns an Eigen::Map<Matrix<real, Ni, Nj>>.
MPI-parallel distributed array of per-cell Degrees-of-Freedom (conservative variable vectors).
Definition Euler.hpp:93
void operator/=(const t_self &R)
Element-wise division: this[i] = this[i] ./ R[i].
Definition Euler.hpp:242
void operator*=(const std::vector< real > &R)
Per-cell scalar multiplication from a vector of cell-wise weights.
Definition Euler.hpp:170
void addTo(const t_self &R, real r)
Scaled addition: this[i] += r * R[i] for every cell (axpy-style).
Definition Euler.hpp:153
void operator-=(const t_self &R)
Element-wise subtraction: this[i] -= R[i] for every cell.
Definition Euler.hpp:127
typename t_base::t_element_mat t_element_mat
Per-element Eigen matrix/vector type.
Definition Euler.hpp:97
real dot(const t_self &R)
Compute the global dot product <this, R> with MPI reduction.
Definition Euler.hpp:312
void setConstant(real R)
Set all DOF entries to a uniform scalar value.
Definition Euler.hpp:103
void operator+=(const Eigen::Vector< real, nVarsFixed > &R)
Add a constant vector to every cell: this[i] += R for all cells.
Definition Euler.hpp:202
void setMaxWith(TR R)
Clamp all components from below: this[i] = max(this[i], R).
Definition Euler.hpp:366
void operator*=(const Eigen::Vector< real, nVarsFixed > &R)
Component-wise multiplication by a constant vector.
Definition Euler.hpp:224
void setConstant(const Eigen::Ref< const t_element_mat > &R)
Set all DOF entries to copies of a given vector.
Definition Euler.hpp:111
void operator*=(real R)
Uniform scalar multiplication: this[i] *= R for every cell.
Definition Euler.hpp:135
real norm2()
Compute the global L2 norm across all MPI ranks.
Definition Euler.hpp:255
real dot(const t_self &R, TMultL &&mL, TMultR &&mR)
Compute a weighted global dot product with per-component multipliers.
Definition Euler.hpp:338
void operator*=(const t_self &R)
Element-wise (Hadamard) multiplication: this[i] = this[i] .* R[i].
Definition Euler.hpp:233
void operator+=(real R)
Add a scalar to every component of every cell.
Definition Euler.hpp:211
Eigen::Vector< real, nVarsFixed > componentWiseNorm1()
Compute the per-component global L1 norm.
Definition Euler.hpp:269
void operator=(const t_self &R)
Deep copy assignment from another ArrayDOFV.
Definition Euler.hpp:143
void operator+=(const t_self &R)
Element-wise addition: this[i] += R[i] for every cell.
Definition Euler.hpp:119
Eigen::Vector< real, nVarsFixed > min()
Compute the global component-wise minimum across all MPI ranks.
Definition Euler.hpp:287
MPI-parallel distributed array of per-cell gradient matrices.
Definition Euler.hpp:643
void operator+=(t_self &R)
Element-wise addition: this[i] += R[i] for every cell.
Definition Euler.hpp:681
void setConstant(real R)
Set all gradient entries to a uniform scalar value.
Definition Euler.hpp:652
void setConstant(const TR &R)
Set all cells' gradient matrices to copies of a given matrix.
Definition Euler.hpp:666
void operator*=(ArrayDOFV< 1 > &R)
Component-wise multiplication by a scalar-per-cell ArrayDOFV<1>.
Definition Euler.hpp:729
void operator*=(const Eigen::Array< real, 1, nVarsFixed > &R)
Column-wise scaling by a per-variable multiplier row-vector.
Definition Euler.hpp:744
void operator*=(real R)
Uniform scalar multiplication: this[i] *= R for every cell.
Definition Euler.hpp:697
void operator=(t_self &R)
Deep copy assignment from another ArrayGRADV.
Definition Euler.hpp:758
void operator*=(std::vector< real > &R)
Per-cell scalar multiplication from a vector of cell-wise weights.
Definition Euler.hpp:712
void operator-=(t_self &R)
Element-wise subtraction: this[i] -= R[i] for every cell.
Definition Euler.hpp:689
MPI-parallel distributed array of per-cell reconstruction coefficient matrices.
Definition Euler.hpp:401
real norm2()
Compute the global L2 norm of all reconstruction coefficients with MPI reduction.
Definition Euler.hpp:562
void setConstant(const TR &R)
Set all cells' reconstruction matrices to copies of a given matrix.
Definition Euler.hpp:425
real dot(const t_self &R)
Compute the global dot product <this, R> with MPI reduction.
Definition Euler.hpp:576
void operator=(const t_self &R)
Deep copy assignment from another ArrayRECV.
Definition Euler.hpp:520
void addTo(const t_self &R, real r)
Uniform scaled addition: this[i] += r * R[i] for every cell (axpy-style).
Definition Euler.hpp:553
void operator*=(const std::vector< real > &R)
Per-cell scalar multiplication from a vector of cell-wise weights.
Definition Euler.hpp:471
void operator*=(const Eigen::Array< real, 1, nVarsFixed > &R)
Column-wise scaling by a per-variable multiplier row-vector.
Definition Euler.hpp:506
void operator-=(const t_self &R)
Element-wise subtraction: this[i] -= R[i] for every cell.
Definition Euler.hpp:447
void setConstant(real R)
Set all reconstruction coefficients to a uniform scalar value.
Definition Euler.hpp:410
void addTo(const t_self &R, const Eigen::Array< real, 1, nVarsFixed > &r)
Column-wise scaled addition: this[i] += R[i] .* r (per-variable scaling).
Definition Euler.hpp:537
void operator*=(const ArrayDOFV< 1 > &R)
Component-wise multiplication by a scalar-per-cell ArrayDOFV<1>.
Definition Euler.hpp:490
auto dotV(const t_self &R)
Compute a per-variable (column-wise) dot product with MPI reduction.
Definition Euler.hpp:598
void operator+=(const t_self &R)
Element-wise addition: this[i] += R[i] for every cell.
Definition Euler.hpp:439
void operator*=(real R)
Uniform scalar multiplication: this[i] *= R for every cell.
Definition Euler.hpp:455
Placeholder container for implicit-solver Jacobian matrix storage.
Definition Euler.hpp:781
ArrayDOFV< nVarsFixed > diag
Diagonal Jacobian values (one vector per cell).
Definition Euler.hpp:792
ArrayEigenMatrix< nVarsFixed, nVarsFixed > diagBlockInv
Inverse of block-diagonal Jacobian.
Definition Euler.hpp:795
ArrayEigenMatrix< nVarsFixed, nVarsFixed > diagBlock
Block-diagonal Jacobian (one nVars x nVars matrix per cell).
Definition Euler.hpp:794
Type
Jacobian storage mode selector.
Definition Euler.hpp:787
@ DiagonalBlock
One (nVars x nVars) dense block per cell.
Definition Euler.hpp:789
@ Diagonal
One scalar per variable per cell (diagonal approximation).
Definition Euler.hpp:788
@ Full
Sparse block structure following cell adjacency graph.
Definition Euler.hpp:790
void SetDiagonal(ArrayDOFV< nVarsFixed > &uDof)
Initialize storage for diagonal (scalar-per-variable) Jacobian mode.
Definition Euler.hpp:803
void SetDiagonalBlock(ArrayDOFV< nVarsFixed > &uDof)
Initialize storage for block-diagonal (dense block-per-cell) Jacobian mode.
Definition Euler.hpp:814
ArrayRECV< nVarsFixed > offDiagBlock
Off-diagonal blocks for full Jacobian (sparse, adjacency-based).
Definition Euler.hpp:796
void SetFull(ArrayDOFV< nVarsFixed > &uDof, Geom::tAdjPair &cell2cell)
Initialize storage for the full sparse block Jacobian following cell adjacency.
Definition Euler.hpp:826
void InverseDiag()
Compute the inverse of the diagonal (or block-diagonal) Jacobian.
Definition Euler.hpp:840
ArrayDOFV< nVarsFixed > diagInv
Inverse of diagonal Jacobian values.
Definition Euler.hpp:793
RANSModel
Enumerates the available RANS turbulence closure models.
Definition Euler.hpp:898
@ RANS_KOSST
Menter k-omega SST two-equation model.
Definition Euler.hpp:903
@ RANS_Unknown
Sentinel / uninitialized value.
Definition Euler.hpp:899
@ RANS_RKE
Realizable k-epsilon two-equation model.
Definition Euler.hpp:904
@ RANS_SA
Spalart-Allmaras one-equation model.
Definition Euler.hpp:901
@ RANS_None
No turbulence model (laminar or DNS).
Definition Euler.hpp:900
@ RANS_KOWilcox
Wilcox k-omega two-equation model.
Definition Euler.hpp:902
EulerModel
Enumerates the available compressible flow solver model configurations.
Definition Euler.hpp:875
@ NS_2EQ_3D
NS + 2-equation RANS, 3D geometry (7 vars).
Definition Euler.hpp:882
@ NS_SA_3D
NS + Spalart-Allmaras, 3D geometry (6 vars).
Definition Euler.hpp:880
@ NS
Navier-Stokes, 2D geometry, 3D physics (5 vars).
Definition Euler.hpp:876
@ NS_EX_3D
Extended NS, 3D geometry, dynamic nVars (Eigen::Dynamic).
Definition Euler.hpp:884
@ NS_SA
NS + Spalart-Allmaras, 2D geometry (6 vars).
Definition Euler.hpp:877
@ NS_2EQ
NS + 2-equation RANS, 2D geometry (7 vars).
Definition Euler.hpp:881
@ NS_3D
Navier-Stokes, 3D geometry, 3D physics (5 vars).
Definition Euler.hpp:879
@ NS_EX
Extended NS, 2D geometry, dynamic nVars (Eigen::Dynamic).
Definition Euler.hpp:883
@ NS_2D
NS with 2D physics (2 velocity components, 4 vars). The only model with dim=2.
Definition Euler.hpp:878
MPI_int Allreduce(const void *sendbuf, void *recvbuf, MPI_int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
Wrapper over MPI_Allreduce.
Definition MPI.cpp:203
@ Unknown
Unset / sentinel.
@ Host
Plain CPU memory.
int64_t index
Global row / DOF index type (signed 64-bit; handles multi-billion-cell meshes).
Definition Defines.hpp:107
double real
Canonical floating-point scalar used throughout DNDSR (double precision).
Definition Defines.hpp:105
DNDS_CONSTANT const real veryLargeReal
Catch-all upper bound ("practically infinity") for physical scalars.
Definition Defines.hpp:190
const MPI_Datatype DNDS_MPI_REAL
MPI datatype matching real (= MPI_REAL8).
Definition MPI.hpp:92
DNDS_DEVICE_CALLABLE index Size() const
Combined father + son row count.
Definition ArrayPair.hpp:33
t_arrayDeviceView father
Convenience bundle of a father, son, and attached ArrayTransformer.
decltype(father->operator[](index(0))) operator[](index i) const
Read-only row-pointer access in the combined address space.
ssp< TArray > father
Owned-side array (must be resized before ghost setup).
index Size() const
Combined row count (father->Size() + son->Size()).
auto RowSize() const
Uniform row width (delegates to father).
Compile-time traits for EulerModel variants.
Definition Euler.hpp:1086
static constexpr bool isGeom2D
True for 2D geometry models.
Definition Euler.hpp:1108
static constexpr int nRANSVars
Number of extra RANS transport variables (0, 1, or 2).
Definition Euler.hpp:1101
static constexpr bool isPlainNS
True for plain NS without turbulence or extensions.
Definition Euler.hpp:1106
static constexpr bool hasRANS
True for any RANS turbulence model (SA or 2-equation).
Definition Euler.hpp:1099
static constexpr int gDim
Geometry (mesh) spatial dimension.
Definition Euler.hpp:1092
static constexpr bool hasSA
True for Spalart-Allmaras models (NS_SA, NS_SA_3D).
Definition Euler.hpp:1095
static constexpr bool isGeom3D
True for 3D geometry models.
Definition Euler.hpp:1110
static constexpr bool has2EQ
True for 2-equation RANS models (NS_2EQ, NS_2EQ_3D).
Definition Euler.hpp:1097
static constexpr bool isExtended
True for extended/dynamic models (NS_EX, NS_EX_3D).
Definition Euler.hpp:1104
static constexpr int dim
Physics spatial dimension (2 for NS_2D, 3 for all others).
Definition Euler.hpp:1090
static constexpr int nVarsFixed
Number of fixed conservative variables (Eigen::Dynamic for NS_EX).
Definition Euler.hpp:1088
tVec r(NCells)