// NOTE(review): fragmentary extraction — embedded source line numbers jump
// (51, 56, 59, ...), so interior lines are missing; code kept byte-identical.
// Kernel fragment: per-boundary (iBnd) generation of ghost/BC values.
// Reads the inner cell state, applies the zone's BC handler, and writes the
// ghost state into faceBCBuffer / faceBCScalarBuffer for later GG use.
51 template <DeviceBackend B = DeviceBackend::Host>
56 int nVars,
int nVarsScalar)
// Mesh/FV device views pulled from the evaluator view.
59 auto &mesh = self_view.
fv.
mesh;
60 auto &fv = self_view.
fv;
// Boundary zone id and the (single) face/cell adjacent to this boundary entry.
73 auto bcid = mesh.GetBndZone(iBnd);
77 index iFace = mesh.bnd2face(iBnd, 0);
78 index iCell = mesh.bnd2cell(iBnd, 0);
// Scalar components are indexed past the nVarsFlow flow components.
85 return uScalar[i - nVarsFlow](iCell);
// uOut: writable accessor lambda over the BC output buffers (flow, then scalars).
87 auto uOut = [faceBCBuffer, faceBCScalarBuffer, iFace]
DNDS_DEVICE(
int i)
mutable ->
real &
90 return faceBCBuffer(iFace, i);
92 return faceBCScalarBuffer[i - nVarsFlow](iFace);
// Apply the zone's boundary condition at the face quadrature point.
95 auto bc = bcHandler.id2bc(bcid);
96 bc.apply(uI, uOut, nVars,
97 fv.GetFaceQuadraturePPhys(iFace, -1),
98 fv.GetFaceNorm(iFace, -1),
// Explicit Host instantiation tail (argument list continues off-fragment).
106 int nVars,
int nVarsScalar);
// NOTE(review): fragmentary extraction — interior lines missing; bytes unchanged.
// Kernel fragment: Green-Gauss gradient accumulation per cell (iCell).
// Sums norm * (uOther - uI)^T over the cell's faces, using faceBCBuffer ghost
// values on boundary faces, then scatters the result into uGrad.
126 template <DeviceBackend B = DeviceBackend::Host>
131 int nVars,
int nVarsScalar)
133 using namespace Geom;
134 auto &mesh = self_view.
fv.
mesh;
135 auto &fv = self_view.
fv;
// CUDA path: block-shared staging buffers (128 threads, 3*nVarsFlow reals each)
// used by CUDA_Local2GlobalAssign below.
146 __shared__ CUDA::SharedBuffer<index, 128> buf_idx;
147 __shared__ CUDA::SharedBuffer<
real, 128 * (3 * nVarsFlow)> buf_val;
152 if (iCell < iCellEnd)
// Zero the scalar-gradient accumulators before the face loop.
154 for (
int iVarS = 0; iVarS < nVarsScalar; iVarS++)
155 uScalarGrad[iVarS].
father[iCell].setZero();
156 auto c2f = mesh.cell2face[iCell];
159 for (
long iFace : c2f)
161 index iCellOther = mesh.CellFaceOther(iCell, iFace);
// if2c: local side index (0 = this cell is the face's "back" cell).
162 rowsize if2c = mesh.CellIsFaceBack(iCell, iFace) ? 0 : 1;
// Outward-oriented face weight: 0.5 * n * (sign * area / cellVol).
163 tPoint norm = 0.5 * fv.GetFaceNormFromCell(iFace, iCell, if2c, -1) *
164 ((if2c ? -1.0 : 1.0) * fv.GetFaceArea(iFace) / fv.GetCellVol(iCell));
// Neighbor state: real neighbor cell, or BC ghost buffer on boundary faces.
171 uOther = u[iCellOther];
173 uOther = faceBCBuffer.father[iFace];
175 grad_flow.noalias() += norm * (uOther - uI).transpose();
// Same accumulation for each transported scalar.
176 for (
int iVarS = 0; iVarS < nVarsScalar; iVarS++)
178 real uI = uScalar[iVarS].father(iCell);
// NOTE(review): flow path above indexes u[iCellOther]; the scalar path here
// reads uScalar[iVarS](iCell) — confirm this is not meant to be (iCellOther).
181 uOther = uScalar[iVarS](iCell);
// NOTE(review): flow BC path indexes faceBCBuffer.father[iFace]; here the
// scalar BC buffer is indexed by (iCell) — confirm (iFace) was intended.
183 uOther = faceBCScalarBuffer[iVarS](iCell);
184 uScalarGrad[iVarS].father[iCell].noalias() += norm * (uOther - uI);
// NOTE(review): "&& true" looks like a leftover debug toggle — confirm.
189 if constexpr (B == DeviceBackend::CUDA &&
true)
// CUDA: coalesced scatter of the per-thread gradient into global uGrad.
213 detail::CUDA_Local2GlobalAssign<3 * nVarsFlow, 128>(
215 {
return grad_flow(i); },
217 {
return uGrad.father[iCellC](i); },
// Host fallback: direct assignment.
224 if (iCell < iCellEnd)
225 uGrad.father[iCell] = grad_flow;
// NOTE(review): fragmentary extraction — interior lines missing; bytes unchanged.
// Kernel fragment: Barth-Jespersen-style limiter on the flow-variable gradient
// of cell iCell. Stage 1 limits using magnitudes of (rho, E, |momentum|);
// stage 2 further scales by an internal-energy positivity factor alphaE.
251 template <DeviceBackend B = DeviceBackend::Host>
256 int nVars,
int nVarsScalar)
258 using namespace Geom;
259 auto &mesh = self_view.
fv.
mesh;
260 auto &fv = self_view.
fv;
// CUDA block-shared staging for the final gradient scatter.
267 __shared__ CUDA::SharedBuffer<index, 128> buf_idx;
268 __shared__ CUDA::SharedBuffer<
real, 128 * (3 * nVarsFlow)> buf_val;
273 if (iCell < iCellEnd)
275 auto c2f = mesh.cell2face[iCell];
277 grad = uGrad.father[iCell];
// uI_mag: 3-component magnitude proxy (density, energy at index I4, |velocity|).
280 uI_mag << uI(0), uI(I4),
U123(uI).norm();
284 tPoint uOtherMax = uI_mag;
285 tPoint uOtherMin = uI_mag;
288 uIIncMax.setConstant(0.0);
289 uIIncMin.setConstant(0.0);
// Track minimum internal energy among neighbors for the positivity stage.
291 real EInternalI = phy.Cons2EInternal(uI, nVarsFlow);
292 real EInternalMin = EInternalI;
294 for (
int ic2f = 0; ic2f < c2f.size(); ic2f++)
296 index iFace = c2f[ic2f];
297 index iCellOther = mesh.CellFaceOther(iCell, iFace);
// Reconstruction increment from cell center to the face quadrature point.
298 TU uIncPoint = grad.transpose() *
299 (fv.GetFaceQuadraturePPhysFromCell(iFace, iCell, -1, -1) -
300 fv.GetCellQuadraturePPhys(iCell, -1));
301 tPoint uIncPoint_mag;
302 uIncPoint_mag << uIncPoint(0), uIncPoint(I4),
U123(uIncPoint).norm();
303 uIIncMax = uIIncMax.array().max(uIncPoint_mag.array());
304 uIIncMin = uIIncMin.array().min(uIncPoint_mag.array());
// Neighbor-value envelope over the same 3 magnitude channels.
308 uIncPoint = u[iCellOther];
309 uIncPoint_mag << uIncPoint(0), uIncPoint(I4),
U123(uIncPoint).norm();
310 uOtherMax = uOtherMax.array().max(uIncPoint_mag.array());
311 uOtherMin = uOtherMin.array().min(uIncPoint_mag.array());
312 real EOther = phy.Cons2EInternal(uIncPoint, nVarsFlow);
313 EInternalMin = std::min(EOther, EInternalMin);
// Limiter ratios; verySmallReal guards division by ~zero increments.
319 uOtherMax = (uOtherMax.array().abs()) / (uIIncMax.array().abs() +
verySmallReal);
320 uOtherMin = (uOtherMin.array().abs()) / (uIIncMin.array().abs() +
verySmallReal);
321 uOtherMin = uOtherMin.array().min(uOtherMax.array());
322 uOtherMin = uOtherMin.array().max(0.0).min(1.0);
// Apply per-channel limiter: col 0 = density, col I4 = energy, cols 1..3 = momentum.
325 grad.col(0) *= uOtherMin(0);
326 grad.col(I4) *= uOtherMin(1);
327 for (
int i = 1; i < 1 + 3; i++)
328 grad.col(i) *= uOtherMin(2);
// Stage 2: recompute face-point internal-energy minimum with limited gradient.
331 real EInternalPointMin = EInternalI;
332 for (
int ic2f = 0; ic2f < c2f.size(); ic2f++)
334 index iFace = c2f[ic2f];
335 index iCellOther = mesh.CellFaceOther(iCell, iFace);
336 TU uIncPoint = grad.transpose() *
337 (fv.GetFaceQuadraturePPhysFromCell(iFace, iCell, -1, -1) -
338 fv.GetCellQuadraturePPhys(iCell, -1));
340 real EOther = phy.Cons2EInternal(uIncPoint, nVarsFlow);
341 EInternalPointMin = std::min(EOther, EInternalPointMin);
// alphaE keeps reconstructed internal energy above 0.1 * EInternalMin.
344 (EInternalI - EInternalMin * 0.1) / std::max(EInternalI - EInternalPointMin,
verySmallReal);
345 grad *= std::clamp(alphaE, 0., 1.);
// CUDA scatter of the limited gradient; Host path assigns directly.
360 if constexpr (B == DeviceBackend::CUDA)
362 detail::CUDA_Local2GlobalAssign<3 * nVarsFlow, 128>(
366 {
return uGrad.father[iCellC](i); },
373 if (iCell < iCellEnd)
374 uGrad.father[iCell] = grad;
// Explicit Host instantiation tail.
382 int nVars,
int nVarsScalar);
// NOTE(review): fragmentary extraction — interior lines missing; bytes unchanged.
// Kernel fragment: Barth-Jespersen limiter for transported scalars, processed
// in chunks of bufSize per cell. Mirrors the flow-part limiter structure.
399 template <DeviceBackend B = DeviceBackend::Host>
404 int nVars,
int nVarsScalar)
406 using namespace Geom;
407 auto &mesh = self_view.
fv.
mesh;
408 auto &fv = self_view.
fv;
// Early-out guard for threads past the cell range.
416 if (iCell >= iCellEnd)
419 auto c2f = mesh.cell2face[iCell];
// Process scalars in fixed-size chunks so the local buffers stay small.
427 constexpr int bufSize = nVarsFlow;
428 for (
int iVar = 0; iVar < nVarsScalar; iVar += bufSize)
430 int nCur = std::min(nVarsScalar - iVar, bufSize);
431 for (
int iVarS = 0; iVarS < nCur; iVarS++)
432 uI(iVarS) = uScalar[iVar + iVarS](iCell);
// NOTE(review): flow-part kernel initializes both envelopes to 0.0; "-0.0"
// here is equal in value but looks unintentional — confirm.
434 uIIncMax.setConstant(-0.0);
435 uIIncMin.setConstant(0.0);
436 uOtherMax = uOtherMin = uI;
438 for (
auto iFace : c2f)
440 index iCellOther = mesh.CellFaceOther(iCell, iFace);
441 tPoint p = (fv.GetFaceQuadraturePPhysFromCell(iFace, iCell, -1, -1) -
442 fv.GetCellQuadraturePPhys(iCell, -1));
443 for (
int iVarS = 0; iVarS < nCur; iVarS++)
445 real uIncPoint = uScalarGrad[iVar + iVarS].father[iCell].dot(p);
446 uIIncMax[iVarS] = std::max(uIIncMax[iVarS], uIncPoint);
// NOTE(review): likely bug — uIIncMin is updated with std::max; the flow-part
// analogue takes the minimum of reconstruction increments. Confirm std::min.
447 uIIncMin[iVarS] = std::max(uIIncMin[iVarS], uIncPoint);
451 for (
int iVarS = 0; iVarS < nCur; iVarS++)
453 real uIncPoint = uScalar[iVar + iVarS](iCellOther);
// NOTE(review): likely copy-paste bug — the neighbor-value envelopes compare
// against uIIncMax/uIIncMin instead of uOtherMax/uOtherMin (which were seeded
// from uI at line 436 and are otherwise never folded in). Confirm intent.
454 uOtherMax[iVarS] = std::max(uIIncMax[iVarS], uIncPoint);
455 uOtherMin[iVarS] = std::min(uIIncMin[iVarS], uIncPoint);
// Limiter ratios, clamped to [0, 1]; the smaller of the two scales the gradient.
461 uOtherMax = uOtherMax.array().abs() / (uIIncMax.array().abs() +
verySmallReal);
462 uOtherMin = uOtherMin.array().abs() / (uIIncMin.array().abs() +
verySmallReal);
463 uOtherMax = uOtherMax.array().max(0.0).min(1.0);
464 uOtherMin = uOtherMin.array().max(0.0).min(1.0);
465 for (
int iVarS = 0; iVarS < nCur; iVarS++)
466 uScalarGrad[iVar + iVarS].
father[iCell] *= std::min(uOtherMax[iVarS], uOtherMin[iVarS]);
// Explicit Host instantiation tail.
474 int nVars,
int nVarsScalar);
// NOTE(review): fragmentary extraction — interior lines missing; bytes unchanged.
// Kernel fragment: per-point (iPt) conservative-to-primitive conversion
// including gradient transformation and viscosity evaluation (muTot).
492 template <DeviceBackend B = DeviceBackend::Host>
496 index iPt,
index iPtEnd,
int nVars,
int nVarsScalar)
498 using namespace Geom;
499 auto &mesh = self_view.
fv.
mesh;
500 auto &fv = self_view.
fv;
// CUDA block-shared staging for the gradient/primitive scatters below.
520 __shared__ CUDA::SharedBuffer<index, 128> buf_idx;
521 __shared__ CUDA::SharedBuffer<
real, 128 * (3 * nVarsFlow)> buf_val;
// Local Eigen maps over thread-local primitive value / gradient storage.
525 Eigen::Map<TU> uPrimCM(uPrimC.data());
527 Eigen::Map<TDiffU> uGradPrimCM(uGradPrimC.data());
// Accessor lambdas: flow components from local maps, scalar components
// (i >= nVarsFlow) straight from the global scalar arrays.
542 return uScalar[i - nVarsFlow](iPt);
544 auto uPrimI = [uPrimCM, uScalarPrim, iPt]
DNDS_DEVICE(
int i)
mutable ->
real &
549 return uScalarPrim[i - nVarsFlow](iPt);
// (d, i) accessors over conservative / primitive gradients.
551 auto diffUConsI = [uGrad, uScalarGrad, iPt]
DNDS_DEVICE(
int d,
int i)
mutable ->
real &
554 return uGrad[iPt](d, i);
556 return uScalarGrad[i - nVarsFlow](iPt, d);
558 auto diffUPrimI = [uGradPrimCM, uScalarGradPrim, iPt]
DNDS_DEVICE(
int d,
int i)
mutable ->
real &
561 return uGradPrimCM(d, i);
563 return uScalarGradPrim[i - nVarsFlow](iPt, d);
// Physics: values, gradient transform, then derived quantities.
565 phy.Cons2Prim(uConsI, uPrimI, nVars);
566 phy.Cons2PrimDiff(uConsI, uPrimI,
567 diffUConsI, diffUPrimI, nVars);
569 real TT = phy.Prim2Temperature(uPrimI, nVars);
570 real pp = phy.Prim2Pressure(uPrimI, nVars, TT);
571 auto [gammaG, aa] = phy.Prim2GammaAcousticSpeed(uPrimI, nVars, pp);
572 real muTot = phy.getMuTot(uPrimI, diffUPrimI, nVars, pp, TT);
// CUDA: scatter local primitive gradient and primitive state to global arrays.
582 if constexpr (B == DeviceBackend::CUDA)
584 detail::CUDA_Local2GlobalAssign<3 * nVarsFlow, 128>(
586 {
return uGradPrimCM(i); },
588 {
return uGradPrim[iPtC](i); },
592 detail::CUDA_Local2GlobalAssign<nVarsFlow, 128>(
594 {
return uPrimCM(i); },
596 {
return uPrim[iPtC](i); },
// Host fallback: direct assignment.
606 uGradPrim[iPt] = uGradPrimC;
// Explicit Host instantiation tail.
615 int nVars,
int nVarsScalar);
// NOTE(review): fragmentary extraction — interior lines missing; bytes unchanged.
// Kernel fragment: lighter sibling of Cons2PrimMu_Kernel — conservative-to-
// primitive conversion per point without gradient transform or viscosity.
632 template <DeviceBackend B = DeviceBackend::Host>
637 int nVars,
int nVarsScalar)
639 using namespace Geom;
640 auto &mesh = self_view.
fv.
mesh;
641 auto &fv = self_view.
fv;
// CUDA block-shared staging (1 * nVarsFlow reals per thread here).
656 __shared__ CUDA::SharedBuffer<index, 128> buf_idx;
657 __shared__ CUDA::SharedBuffer<
real, 128 * (1 * nVarsFlow)> buf_val;
660 Eigen::Map<TU> uPrimCM(uPrimC.data());
// Accessors: flow components via the local map, scalars from global arrays.
668 return uScalar[i - nVarsFlow](iPt);
670 auto uPrimI = [uPrimCM, uScalarPrim, iPt]
DNDS_DEVICE(
int i)
mutable ->
real &
675 return uScalarPrim[i - nVarsFlow](iPt);
677 phy.Cons2Prim(uConsI, uPrimI, nVars);
// Derived thermodynamic quantities.
679 real TT = phy.Prim2Temperature(uPrimI, nVars);
680 real pp = phy.Prim2Pressure(uPrimI, nVars, TT);
681 auto [gammaG, aa] = phy.Prim2GammaAcousticSpeed(uPrimI, nVars, pp);
// CUDA: scatter the primitive state to global uPrim.
689 if constexpr (B == DeviceBackend::CUDA)
691 detail::CUDA_Local2GlobalAssign<nVarsFlow, 128>(
693 {
return uPrimCM(i); },
695 {
return uPrim[iPtC](i); },
// Explicit Host instantiation tail.
713 int nVars,
int nVarsScalar);
// NOTE(review): fragmentary extraction — interior lines missing; bytes unchanged.
// Kernel fragment: per-face eigenvalue estimates (convective + viscous) used
// for time-step computation; also a face eigenvalue jump for the entropy fix.
731 template <DeviceBackend B = DeviceBackend::Host>
736 int nVars,
int nVarsScalar)
738 using namespace Geom;
739 auto &mesh = self_view.
fv.
mesh;
740 auto &fv = self_view.
fv;
751 if (iFace >= iFaceEnd)
754 index iCellL = mesh.face2cell(iFace, 0);
755 index iCellR = mesh.face2cell(iFace, 1);
// Presumably skips faces whose cells are outside the owned range (interior
// lines of this condition are missing — confirm against the full source).
756 if ((0 > iCellL || iCellL >= mesh.NumCell()) &&
757 (0 > iCellR || iCellR >= mesh.NumCell()) && iCellR !=
UnInitIndex)
768 tPoint nFace = fv.GetFaceNorm(iFace, -1);
769 real volL = fv.GetCellVol(iCellL);
// Face-normal velocities from conservative states (momentum / density).
771 real vnL =
U123(uL).dot(nFace) / uL[0];
772 real vnR =
U123(uR).dot(nFace) / uR[0];
773 real aL = aCell(iCellL);
775 real muFace = muCell(iCellL);
// With a valid right cell: average viscosity and take its volume.
780 muFace = 0.5 * (muFace + muCell(iCellR));
781 volR = fv.GetCellVol(iCellR);
// Three convective wave-speed estimates: v-a, v, v+a.
783 faceLamEst(iFace, 0) = std::max(std::abs(vnL - aL), std::abs(vnR - aR));
784 faceLamEst(iFace, 1) = std::max(std::abs(vnL), std::abs(vnR));
785 faceLamEst(iFace, 2) = std::max(std::abs(vnL + aL), std::abs(vnR + aR));
786 real rhoMean = 0.5 * (uL[0] + uR[0]);
787 real nuVis = muFace / rhoMean * 2.;
788 faceLamVisEst(iFace) = nuVis * fv.GetFaceArea(iFace) *
789 (1. / volL + 1. / volR);
// NOTE(review): first and last entries of this max-list are identical
// (vnL - aL - vnR + aR); a middle line (793) is missing from this extract,
// but the duplicate suggests one term should be vnL + aL - vnR - aR — confirm.
792 deltaLamFace(iFace) = std::max({std::abs(vnL - aL - vnR + aR),
794 std::abs(vnL - aL - vnR + aR)});
// Explicit Host instantiation tail.
802 int nVars,
int nVarsScalar);
// NOTE(review): fragmentary extraction — interior lines missing; bytes unchanged.
// Kernel fragment: converts face eigenvalue estimates into a per-cell local
// time step dt(iCell), plus the max eigenvalue jump deltaLamCell(iCell).
820 template <DeviceBackend B = DeviceBackend::Host>
825 int nVars,
int nVarsScalar)
827 using namespace Geom;
828 auto &mesh = self_view.
fv.
mesh;
829 auto &fv = self_view.
fv;
// Early-out guard for threads past the cell range.
839 if (iCell >= iCellEnd)
842 real lambdaCellC = 0.0;
843 real dLambdaCellC = 0.0;
844 auto c2f = mesh.cell2face.father[iCell];
845 for (
auto iFace : c2f)
// Per-face speed: largest convective estimate plus the viscous estimate.
848 std::max({faceLamEst(iFace, 0),
849 faceLamEst(iFace, 1),
850 faceLamEst(iFace, 2)}) +
851 faceLamVisEst(iFace);
852 lambdaCellC += lambdaFace * fv.GetFaceArea(iFace);
853 dLambdaCellC = std::max(dLambdaCellC, deltaLamFace(iFace));
// CFL-style local time step: volume * smooth-scale ratio / accumulated speed
// (divisor line is missing from this extract).
855 dt(iCell) = fv.GetCellVol(iCell) * fv.GetCellSmoothScaleRatio(iCell) /
857 deltaLamCell(iCell) = dLambdaCellC;
// Explicit Host instantiation tail.
865 int nVars,
int nVarsScalar);
// NOTE(review): fragmentary extraction — interior lines missing; bytes unchanged.
// Kernel fragment: 2nd-order face reconstruction. Builds left/right face
// states (cell value + gradient * offset), an averaged face gradient with a
// normal-direction correction, and applies BCs on boundary faces.
885 template <DeviceBackend B = DeviceBackend::Host>
890 int nVars,
int nVarsScalar)
892 using namespace Geom;
893 auto &mesh = self_view.
fv.
mesh;
894 auto &fv = self_view.
fv;
// CUDA block-shared staging for the face-gradient scatter.
912 __shared__ CUDA::SharedBuffer<index, 128> buf_idx;
913 __shared__ CUDA::SharedBuffer<
real, 128 * (3 * nVarsFlow)> buf_val;
917 Eigen::Map<TDiffU> uGradFFCM(uGradFFC.data());
919 if (iFace < iFaceEnd)
921 index iCellL = mesh.face2cell(iFace, 0);
922 index iCellR = mesh.face2cell(iFace, 1);
// Presumably skips faces with no locally-owned cell (condition body missing).
923 if ((0 > iCellL || iCellL >= mesh.NumCell()) &&
924 (0 > iCellR || iCellR >= mesh.NumCell()) && iCellR !=
UnInitIndex)
// Offsets from each cell's quadrature point to the face quadrature point.
929 tPoint xrel_L = (fv.GetFaceQuadraturePPhys(iFace, -1) -
930 fv.GetCellQuadraturePPhys(iCellL, -1));
932 tPoint faceNorm = fv.GetFaceNorm(iFace, -1);
937 xrel_R = (fv.GetFaceQuadraturePPhysFromCell(iFace, iCellR, 1, -1) -
938 fv.GetCellQuadraturePPhys(iCellR, -1));
// Cell-center distance used to scale the normal gradient correction.
939 faceLScale = (fv.GetOtherCellBaryFromCell(iCellL, iCellR, iFace) - fv.GetCellBary(iCellL)).norm();
// Flow variables: linear reconstruction from each side, averaged gradient
// plus a jump-based normal correction.
942 uFL[iFace].noalias() = u[iCellL] + uGrad[iCellL].transpose() * xrel_L;
943 uGradFFC = uGrad[iCellL];
946 uFR[iFace].noalias() = u[iCellR] + uGrad[iCellR].transpose() * xrel_R;
947 uGradFFC.noalias() += uGrad[iCellR];
949 uGradFFC.noalias() += faceNorm * (uFR[iFace] - uFL[iFace]).transpose() * (1. / faceLScale);
// Same construction for each transported scalar.
952 for (
int iVarS = 0; iVarS < nVarsScalar; iVarS++)
// NOTE(review): unlike the flow path (line 942), no base cell value appears
// here — lines are missing from this extract, so it may be added elsewhere.
954 uScalarFL[iVarS](iFace) = uScalarGrad[iVarS][iCellL].dot(xrel_L);
955 uScalarGradFF[iVarS][iFace] = uScalarGrad[iVarS][iCellL];
// NOTE(review): likely copy-paste bug — the R-side face value uses
// iCellL/xrel_L; the flow analogue (line 946) uses iCellR/xrel_R. Confirm.
958 uScalarFR[iVarS](iFace) = uScalarGrad[iVarS][iCellL].dot(xrel_L);
959 uScalarGradFF[iVarS][iFace].noalias() += uScalarGrad[iVarS][iCellR];
960 uScalarGradFF[iVarS][iFace] *= 0.5;
961 uScalarGradFF[iVarS][iFace].noalias() += faceNorm * (uScalarFR[iVarS](iFace) - uScalarFL[iVarS](iFace)) * (1. / faceLScale);
// Accessor lambdas over the left/right face states (flow, then scalars).
968 return uFL(iFace, i);
970 return uScalarFL[i - nVarsFlow](iFace);
975 return uFR(iFace, i);
977 return uScalarFR[i - nVarsFlow](iFace);
// Boundary faces: apply the face-zone BC to produce the right state.
981 auto bc = bcHandler.id2bc(mesh.GetFaceZone(iFace));
982 bc.apply(uL, uR, nVars,
983 fv.GetFaceQuadraturePPhys(iFace, -1),
984 fv.GetFaceNorm(iFace, -1),
// CUDA scatter of the face gradient; Host path assigns directly.
990 if constexpr (B == DeviceBackend::CUDA)
992 detail::CUDA_Local2GlobalAssign<3 * nVarsFlow, 128>(
994 {
return uGradFFCM(i); },
996 {
return uGradFF[iFaceC](i); },
1003 if (iFace < iFaceEnd)
1005 uGradFF[iFace] = uGradFFC;
// Explicit Host instantiation tail.
1015 int nVars,
int nVarsScalar);
// NOTE(review): fragmentary extraction — interior lines missing; bytes unchanged.
// Kernel fragment: per-face 2nd-order Roe inviscid flux. Computes Roe averages,
// entropy-fixed eigenvalues, and the Roe flux FFlow, then stores it in fluxFF.
1034 template <DeviceBackend B = DeviceBackend::Host>
1039 int nVars,
int nVarsScalar)
1041 using namespace Geom;
1042 auto &mesh = self_view.
fv.
mesh;
1043 auto &fv = self_view.
fv;
1045 auto &phy = self_view.
physics;
// CUDA block-shared staging for the flux scatter.
1081 __shared__ CUDA::SharedBuffer<index, 128> buf_idx;
1082 __shared__ CUDA::SharedBuffer<
real, 128 * (1 * nVarsFlow)> buf_val;
1086 Eigen::Map<TU> FFlowM(FFlow.data());
1088 if (iFace < iFaceEnd)
1090 index iCellL = mesh.face2cell(iFace, 0);
1091 index iCellR = mesh.face2cell(iFace, 1);
// Presumably skips faces with no locally-owned cell (condition body missing).
1092 if ((0 > iCellL || iCellL >= mesh.NumCell()) &&
1093 (0 > iCellR || iCellR >= mesh.NumCell()) && iCellR !=
UnInitIndex)
1098 tPoint
n = fv.GetFaceNorm(iFace, -1);
// Left/right conservative and primitive accessor lambdas (flow + scalars);
// iCellRR presumably resolves ghost/boundary right cells — confirm upstream.
1104 return u(iCellL, i);
1106 return uScalar[i - nVarsFlow](iCellL);
1111 return u(iCellRR, i);
1113 return uScalar[i - nVarsFlow](iCellRR);
1115 auto uLmPrim = [uPrim, uScalarPrim, iCellL]
DNDS_DEVICE(
int i)
mutable ->
real &
1118 return uPrim(iCellL, i);
1120 return uScalarPrim[i - nVarsFlow](iCellL);
1122 auto uRmPrim = [uPrim, uScalarPrim, iCellRR]
DNDS_DEVICE(
int i)
mutable ->
real &
1125 return uPrim(iCellRR, i);
1127 return uScalarPrim[i - nVarsFlow](iCellRR);
// Roe-averaged state between left and right.
1130 real vsqrRoe{0}, HRoe{0}, rhoRoe{0}, aSqrRoe{0};
1132 RoeAverageNS(uLm, uRm, uLmPrim, uRmPrim, nVars, p(iCellL), p(iCellRR),
1133 phy, veloRoe, vsqrRoe, HRoe, rhoRoe, aSqrRoe);
1134 real veloRoeN = veloRoe.dot(
n);
// std::abs guards against a slightly negative aSqrRoe from round-off.
1135 real aRoe = std::sqrt(std::abs(aSqrRoe));
1136 real lam0 = std::abs(veloRoeN - aRoe);
1137 real lam123 = std::abs(veloRoeN);
1138 real lam4 = std::abs(veloRoeN + aRoe);
// Side velocities / sound speeds / pressures for the entropy fix.
1140 tPoint vL =
U123(uPrim[iCellL]);
1141 tPoint vR =
U123(uPrim[iCellRR]);
1142 real aL = a(iCellL);
1143 real aR = a(iCellRR);
1144 real pL = p(iCellL);
1145 real pR = p(iCellRR);
// Entropy fix on lam0/lam123/lam4 (call name on the missing line; see
// RoeEigenValueFixer in this module).
1150 aRoe, aRoe, veloRoeN, veloRoeN,
1151 std::max(deltaLamCell(iCellL), deltaLamCell(iCellRR)),
1152 1.0, lam0, lam123, lam4);
1154 real lamm = std::max(aL + std::abs(vL.dot(
n)), aR + std::abs(vR.dot(
n)));
// Roe flux evaluation into FFlow (RoeFluxFlow call spans missing lines).
1160 pFL(iFace), pFR(iFace), veloRoe,
1163 HRoe, phy, lam0, lam123, lam4, FFlow);
1165 fluxFF[iFace] = FFlow;
// CUDA scatter of the face flux; Host path assigns directly.
1171 if constexpr (B == DeviceBackend::CUDA)
1173 detail::CUDA_Local2GlobalAssign<1 * nVarsFlow, 128>(
1175 {
return FFlowM(i); },
1177 {
return fluxFF[iFaceC](i); },
1184 if (iFace < iFaceEnd)
1186 fluxFF[iFace] = FFlow;
// Explicit Host instantiation tail.
1196 int nVars,
int nVarsScalar);
// NOTE(review): fragmentary extraction — interior lines missing; bytes unchanged.
// Kernel fragment: scatters face fluxes into each cell's RHS residual with
// an orientation sign and area/volume weighting (flow + scalar fluxes).
1214 template <DeviceBackend B = DeviceBackend::Host>
1219 int nVars,
int nVarsScalar)
1221 using namespace Geom;
1222 auto &mesh = self_view.
fv.
mesh;
1223 auto &fv = self_view.
fv;
1225 auto &phy = self_view.
physics;
// CUDA block-shared staging for the RHS scatter.
1233 __shared__ CUDA::SharedBuffer<index, 128> buf_idx;
1234 __shared__ CUDA::SharedBuffer<
real, 128 * (1 * nVarsFlow)> buf_val;
1238 Eigen::Map<TU> rhsCM(rhsC.data());
1240 if (iCell < iCellEnd)
1243 auto c2f = mesh.cell2face.father[iCell];
1244 for (
auto iFace : c2f)
// Flux is stored in the face's own orientation; flip sign when this cell is
// on the face's front side.
1246 real face_sign = mesh.CellIsFaceBack(iCell, iFace) ? 1 : -1;
1247 real face_coef = -fv.GetFaceArea(iFace) / fv.GetCellVol(iCell) * face_sign;
1248 rhsC += fluxFF[iFace] * face_coef;
1249 for (
int iVarS = 0; iVarS < nVarsScalar; iVarS++)
1250 rhsScalar[iVarS](iCell) += fluxScalarFF[iVarS](iFace) * face_coef;
// CUDA scatter of the accumulated RHS; Host path (body on missing lines).
1254 if constexpr (B == DeviceBackend::CUDA)
1256 detail::CUDA_Local2GlobalAssign<1 * nVarsFlow, 128>(
1258 {
return rhsCM(i); },
1260 {
return rhs[iCellC](i); },
1267 if (iCell < iCellEnd)
// Explicit Host instantiation tail.
1279 int nVars,
int nVarsScalar);
Core type aliases, constants, and metaprogramming utilities for the DNDS framework.
Device memory abstraction layer with backend-specific storage and factory creation.
Assertion / error-handling macros and supporting helper functions.
Core type definitions and utilities for the EulerP alternative Navier-Stokes evaluator module.
Approximate Riemann Solver (Roe scheme) for the EulerP 5-equation Navier-Stokes system.
Backend-specific implementation layer for EulerP Evaluator kernel dispatch.
#define DNDS_EULERP_IMPL_ARG_GET_REF(member)
Internal utility types for the EulerP Evaluator kernel implementations.
Geom::UnstructuredMeshDeviceView< B > mesh
Device-callable view of the Evaluator, combining finite volume, BC handler, and physics views.
t_bcHandler bcHandler
Device view of the boundary condition handler.
t_fv fv
Device view of the finite volume / VFV reconstruction object.
t_physics physics
Device view of the gas physics model.
Namespace for the EulerP alternative evaluator module with GPU support.
DNDS_DEVICE void EstEigenDt_FaceLam2CellDt_Kernel(EvaluatorDeviceView< B > &self_view, typename Evaluator_impl< B >::EstEigenDt_Arg::Portable &arg, index iCell, index iCellEnd, int nVars, int nVarsScalar)
Per-cell kernel converting face eigenvalues to a local CFL time step.
DNDS_DEVICE_CALLABLE void RoeAverageNS(TUL &&UL, TUR &&UR, TULPrim &&ULPrim, TURPrim &&URPrim, int nVars, real pL, real pR, PhysicsDeviceView< B > &phy, Geom::tPoint &veloRoe, real &vsqrRoe, real &HRoe, real &rhoRoe, real &aSqrRoe)
Computes Roe-averaged quantities from left and right states.
template DNDS_DEVICE void EstEigenDt_GetFaceLam_Kernel< DeviceBackend::Host >(EvaluatorDeviceView< DeviceBackend::Host > &self_view, typename Evaluator_impl< DeviceBackend::Host >::EstEigenDt_Arg::Portable &arg, index iFace, index iFaceEnd, int nVars, int nVarsScalar)
DNDS_DEVICE void Flux2nd_Kernel_FluxFace(EvaluatorDeviceView< B > &self_view, typename Evaluator_impl< B >::Flux2nd_Arg::Portable &arg, index iFace, index iFaceEnd, int nVars, int nVarsScalar)
2nd-order Roe inviscid flux computation kernel (per-face).
template DNDS_DEVICE void Flux2nd_Kernel_FluxFace< DeviceBackend::Host >(EvaluatorDeviceView< DeviceBackend::Host > &self_view, typename Evaluator_impl< DeviceBackend::Host >::Flux2nd_Arg::Portable &arg, index iFace, index iFaceEnd, int nVars, int nVarsScalar)
template DNDS_DEVICE void RecGradient_BarthLimiter_Kernel_FlowPart< DeviceBackend::Host >(EvaluatorDeviceView< DeviceBackend::Host > &self_view, typename Evaluator_impl< DeviceBackend::Host >::RecGradient_Arg::Portable &arg, index iCell, index iCellEnd, int nVars, int nVarsScalar)
template DNDS_DEVICE void Flux2nd_Kernel_Face2Cell< DeviceBackend::Host >(EvaluatorDeviceView< DeviceBackend::Host > &self_view, typename Evaluator_impl< DeviceBackend::Host >::Flux2nd_Arg::Portable &arg, index iCell, index iCellEnd, int nVars, int nVarsScalar)
DNDS_DEVICE void RecGradient_BarthLimiter_Kernel_FlowPart(EvaluatorDeviceView< B > &self_view, typename Evaluator_impl< B >::RecGradient_Arg::Portable &arg, index iCell, index iCellEnd, int nVars, int nVarsScalar)
Barth-Jespersen gradient limiter kernel for flow variables (per-cell).
DNDS_DEVICE void RecFace2nd_Kernel(EvaluatorDeviceView< B > &self_view, typename Evaluator_impl< B >::RecFace2nd_Arg::Portable &arg, index iFace, index iFaceEnd, int nVars, int nVarsScalar)
2nd-order face value reconstruction kernel (per-face).
DNDS_DEVICE void RecGradient_GGRec_Kernel_GG(EvaluatorDeviceView< B > &self_view, typename Evaluator_impl< B >::RecGradient_Arg::Portable &arg, index iCell, index iCellEnd, int nVars, int nVarsScalar)
Green-Gauss gradient reconstruction kernel (per-cell).
template DNDS_DEVICE void RecFace2nd_Kernel< DeviceBackend::Host >(EvaluatorDeviceView< DeviceBackend::Host > &self_view, typename Evaluator_impl< DeviceBackend::Host >::RecFace2nd_Arg::Portable &arg, index iFace, index iFaceEnd, int nVars, int nVarsScalar)
DNDS_DEVICE_CALLABLE void RoeFluxFlow(const TU &UL, const TU &UR, real pL, real pR, const Geom::tPoint &veloRoe, real vsqrRoe, real vgn, const Geom::tPoint &n, real asqrRoe, real aRoe, real HRoe, PhysicsDeviceView< B > &phy, real lam0, real lam123, real lam4, TU &F)
Computes the complete Roe numerical flux for the 5-equation flow system.
DNDS_DEVICE void Cons2Prim_Kernel(EvaluatorDeviceView< B > &self_view, typename Evaluator_impl< B >::Cons2Prim_Arg::Portable &arg, index iPt, index iPtEnd, int nVars, int nVarsScalar)
Conservative-to-primitive conversion without gradient transformation or viscosity (per-cell).
DNDS_DEVICE_CALLABLE DNDS_FORCEINLINE auto U123(TU &&v)
Extracts the momentum components (indices 1,2,3) from a state vector as a 3x1 block.
DNDS_DEVICE void RecGradient_BarthLimiter_Kernel_ScalarPart(EvaluatorDeviceView< B > &self_view, typename Evaluator_impl< B >::RecGradient_Arg::Portable &arg, index iCell, index iCellEnd, int nVars, int nVarsScalar)
Barth-Jespersen gradient limiter kernel for transported scalar variables (per-cell).
template DNDS_DEVICE void EstEigenDt_FaceLam2CellDt_Kernel< DeviceBackend::Host >(EvaluatorDeviceView< DeviceBackend::Host > &self_view, typename Evaluator_impl< DeviceBackend::Host >::EstEigenDt_Arg::Portable &arg, index iCell, index iCellEnd, int nVars, int nVarsScalar)
template DNDS_DEVICE void RecGradient_GGRec_Kernel_BndVal< DeviceBackend::Host >(EvaluatorDeviceView< DeviceBackend::Host > &self_view, typename Evaluator_impl< DeviceBackend::Host >::RecGradient_Arg::Portable &arg, index iBnd, index iBndEnd, int nVars, int nVarsScalar)
template DNDS_DEVICE void RecGradient_BarthLimiter_Kernel_ScalarPart< DeviceBackend::Host >(EvaluatorDeviceView< DeviceBackend::Host > &self_view, typename Evaluator_impl< DeviceBackend::Host >::RecGradient_Arg::Portable &arg, index iCell, index iCellEnd, int nVars, int nVarsScalar)
DNDS_DEVICE void Flux2nd_Kernel_Face2Cell(EvaluatorDeviceView< B > &self_view, typename Evaluator_impl< B >::Flux2nd_Arg::Portable &arg, index iCell, index iCellEnd, int nVars, int nVarsScalar)
Scatters face fluxes to cell RHS residual (per-cell).
template DNDS_DEVICE void Cons2PrimMu_Kernel< DeviceBackend::Host >(EvaluatorDeviceView< DeviceBackend::Host > &self_view, typename Evaluator_impl< DeviceBackend::Host >::Cons2PrimMu_Arg::Portable &arg, index iPt, index iPtEnd, int nVars, int nVarsScalar)
DNDS_DEVICE void RecGradient_GGRec_Kernel_BndVal(EvaluatorDeviceView< B > &self_view, typename Evaluator_impl< B >::RecGradient_Arg::Portable &arg, index iBnd, index iBndEnd, int nVars, int nVarsScalar)
Generates boundary ghost values for Green-Gauss gradient reconstruction.
Eigen::Vector< real, nVarsFlow > TU
Fixed-size 5-component conservative state vector (rho, rhoU, rhoV, rhoW, E).
DNDS_DEVICE void EstEigenDt_GetFaceLam_Kernel(EvaluatorDeviceView< B > &self_view, typename Evaluator_impl< B >::EstEigenDt_Arg::Portable &arg, index iFace, index iFaceEnd, int nVars, int nVarsScalar)
Per-face eigenvalue estimation kernel for time-step computation.
DNDS_DEVICE_CALLABLE void RoeEigenValueFixer(real aL, real aR, real vnL, real vnR, real dLambda, real fixScale, real &lam0, real &lam123, real &lam4)
Applies entropy fix to Roe eigenvalues to prevent expansion shocks.
template DNDS_DEVICE void RecGradient_GGRec_Kernel_GG< DeviceBackend::Host >(EvaluatorDeviceView< DeviceBackend::Host > &self_view, typename Evaluator_impl< DeviceBackend::Host >::RecGradient_Arg::Portable &arg, index iCell, index iCellEnd, int nVars, int nVarsScalar)
DNDS_DEVICE void Cons2PrimMu_Kernel(EvaluatorDeviceView< B > &self_view, typename Evaluator_impl< B >::Cons2PrimMu_Arg::Portable &arg, index iPt, index iPtEnd, int nVars, int nVarsScalar)
Conservative-to-primitive conversion with gradient transformation and viscosity (per-cell).
template DNDS_DEVICE void Cons2Prim_Kernel< DeviceBackend::Host >(EvaluatorDeviceView< DeviceBackend::Host > &self_view, typename Evaluator_impl< DeviceBackend::Host >::Cons2Prim_Arg::Portable &arg, index iPt, index iPtEnd, int nVars, int nVarsScalar)
Eigen::Matrix< real, 3, nVarsFlow > TDiffU
Fixed-size 3x5 spatial gradient of the conservative state (one row per spatial dimension).
DNDS_DEVICE_CALLABLE bool FaceIDIsInternal(t_index id)
DNDS_CONSTANT const index UnInitIndex
Sentinel "not initialised" index value (= INT64_MIN).
DNDS_CONSTANT const real verySmallReal
Catch-all lower bound ("effectively zero").
int32_t rowsize
Row-width / per-row element-count type (signed 32-bit).
DNDS_CONSTANT const real UnInitReal
Sentinel "not initialised" real value (NaN). Cheap to detect with std::isnan or IsUnInitReal; survive...
int64_t index
Global row / DOF index type (signed 64-bit; handles multi-billion-cell meshes).
double real
Canonical floating-point scalar used throughout DNDSR (double precision).
DNDS_CONSTANT const real veryLargeReal
Catch-all upper bound ("practically infinity") for physical scalars.
Trivially-copyable payload for Cons2PrimMu kernel data.
Trivially-copyable payload holding device views of all kernel data.
Eigen::Vector3d n(1.0, 0.0, 0.0)