14#define DOCTEST_CONFIG_IMPLEMENT
24int main(
int argc,
char **argv)
26 MPI_Init(&argc, &argv);
28 ctx.applyCommandLine(argc, argv);
41 SUBCASE(
"setWorld populates rank, size, comm")
47 CHECK(mpi.rank < mpi.size);
49 CHECK(mpi.comm != MPI_COMM_NULL);
52 SUBCASE(
"two MPIInfos with setWorld are equal")
61 SUBCASE(
"constructor from MPI_COMM_WORLD matches setWorld")
63 MPIInfo fromCtor(MPI_COMM_WORLD);
72 SUBCASE(
"MPIWorldSize and MPIWorldRank agree with MPIInfo")
89 int expected = mpi.size * (mpi.size + 1) / 2;
91 SUBCASE(
"Allreduce MPI_SUM with real type")
95 MPI::Allreduce(&sendVal, &recvVal, 1,
DNDS_MPI_REAL, MPI_SUM, mpi.comm);
100 SUBCASE(
"Allreduce MPI_SUM with index type")
104 MPI::Allreduce(&sendVal, &recvVal, 1,
DNDS_MPI_INDEX, MPI_SUM, mpi.comm);
109 SUBCASE(
"AllreduceOneReal")
112 MPI::AllreduceOneReal(val, MPI_SUM, mpi);
117 SUBCASE(
"AllreduceOneIndex")
120 MPI::AllreduceOneIndex(val, MPI_SUM, mpi);
125 SUBCASE(
"Allreduce MPI_MAX")
143 SUBCASE(
"Scan MPI_SUM with constant 1")
147 MPI::Scan(&sendVal, &recvVal, 1,
DNDS_MPI_INDEX, MPI_SUM, mpi.comm);
152 SUBCASE(
"Scan MPI_SUM with rank value")
156 MPI::Scan(&sendVal, &recvVal, 1,
DNDS_MPI_INDEX, MPI_SUM, mpi.comm);
172 SUBCASE(
"gather rank values into ordered array")
175 std::vector<DNDS::index> recvBuf(mpi.size, -1);
180 for (
int i = 0; i < mpi.size; ++i)
186 SUBCASE(
"gather multiple elements per rank")
188 std::array<DNDS::real, 2> sendBuf = {
191 std::vector<DNDS::real> recvBuf(mpi.size * 2, -1.0);
196 for (
int i = 0; i < mpi.size; ++i)
199 CHECK(recvBuf[2 * i + 1] == doctest::Approx(
static_cast<DNDS::real>(i * 10)));
212 SUBCASE(
"broadcast real from rank 0")
214 DNDS::real val = (mpi.rank == 0) ? 42.0 : -1.0;
217 CHECK(val == doctest::Approx(42.0));
220 SUBCASE(
"broadcast index from rank 0")
228 SUBCASE(
"broadcast from last rank")
230 DNDS::real val = (mpi.rank == mpi.size - 1) ? 99.5 : 0.0;
233 CHECK(val == doctest::Approx(99.5));
246 MPI_int ret = MPI::Barrier(mpi.comm);
247 CHECK(ret == MPI_SUCCESS);
255 SUBCASE(
"scalar types")
257 auto [realType, realMult] = BasicType_To_MPIIntType<DNDS::real>();
258 CHECK(realType == MPI_DOUBLE);
259 CHECK(realMult == 1);
261 auto [idxType, idxMult] = BasicType_To_MPIIntType<DNDS::index>();
262 CHECK(idxType == MPI_INT64_T);
265 auto [fType, fMult] = BasicType_To_MPIIntType<float>();
266 CHECK(fType == MPI_FLOAT);
269 auto [i32Type, i32Mult] = BasicType_To_MPIIntType<int32_t>();
270 CHECK(i32Type == MPI_INT32_T);
274 SUBCASE(
"std::array<real, 3>")
276 auto [arrType, arrMult] = BasicType_To_MPIIntType<std::array<DNDS::real, 3>>();
277 CHECK(arrType == MPI_DOUBLE);
281 SUBCASE(
"Eigen::Matrix<real, 3, 3>")
283 using Mat33 = Eigen::Matrix<DNDS::real, 3, 3>;
284 auto [matType, matMult] = BasicType_To_MPIIntType<Mat33>();
289 CHECK(matType != MPI_DATATYPE_NULL);
293 SUBCASE(
"DNDS_MPI_INDEX and DNDS_MPI_REAL globals")
307 SUBCASE(
"default strategy is HIndexed")
314 SUBCASE(
"set to InSituPack and verify")
333 SUBCASE(
"each rank sends its rank value to every other rank")
336 std::vector<DNDS::index> sendBuf(mpi.size,
static_cast<DNDS::index>(mpi.rank));
337 std::vector<DNDS::index> recvBuf(mpi.size, -1);
343 for (
int i = 0; i < mpi.size; ++i)
349 SUBCASE(
"Alltoall with multiple elements per peer")
351 const int perPeer = 2;
352 std::vector<DNDS::real> sendBuf(mpi.size * perPeer);
353 std::vector<DNDS::real> recvBuf(mpi.size * perPeer, -1.0);
355 for (
int i = 0; i < mpi.size; ++i)
357 sendBuf[i * perPeer + 0] =
static_cast<DNDS::real>(mpi.rank);
358 sendBuf[i * perPeer + 1] =
static_cast<DNDS::real>(mpi.rank + 100);
364 for (
int i = 0; i < mpi.size; ++i)
366 CHECK(recvBuf[i * perPeer + 0] == doctest::Approx(
static_cast<DNDS::real>(i)));
367 CHECK(recvBuf[i * perPeer + 1] == doctest::Approx(
static_cast<DNDS::real>(i + 100)));
391 using T =
typename Tag::type;
396 T sendVal =
static_cast<T
>(mpi.rank + 1);
399 auto [mpiType, mpiMult] = BasicType_To_MPIIntType<T>();
400 MPI::Allreduce(&sendVal, &recvVal, mpiMult, mpiType, MPI_SUM, mpi.comm);
403 T
expected =
static_cast<T
>(mpi.size * (mpi.size + 1) / 2);
405 if constexpr (std::is_floating_point_v<T>)
419 using T =
typename Tag::type;
424 T sendVal =
static_cast<T
>(mpi.rank);
425 std::vector<T> recvBuf(mpi.size, T{});
427 auto [mpiType, mpiMult] = BasicType_To_MPIIntType<T>();
428 MPI::Allgather(&sendVal, mpiMult, mpiType,
429 recvBuf.data(), mpiMult, mpiType, mpi.comm);
431 for (
int i = 0; i < mpi.size; ++i)
434 if constexpr (std::is_floating_point_v<T>)
Core type aliases, constants, and metaprogramming utilities for the DNDS framework.
MPI wrappers: MPIInfo, collective operations, type mapping, CommStrategy.
static CommStrategy & Instance()
Access the process-wide singleton.
@ InSituPack
Manually pack / unpack into contiguous buffers.
@ HIndexed
Use MPI_Type_create_hindexed derived types (default).
The host-side operators are provided as implemented.
const MPI_Datatype DNDS_MPI_INDEX
MPI datatype matching index (= MPI_INT64_T).
MPI_int MPIWorldRank()
Convenience: MPI_Comm_rank(MPI_COMM_WORLD).
MPI_int MPIWorldSize()
Convenience: MPI_Comm_size(MPI_COMM_WORLD).
int64_t index
Global row / DOF index type (signed 64-bit; handles multi-billion-cell meshes).
double real
Canonical floating-point scalar used throughout DNDSR (double precision).
int MPI_int
Integer type used in MPI calls (= C int). Used for counts and ranks in MPI calls.
const MPI_Datatype DNDS_MPI_REAL
MPI datatype matching real (= MPI_REAL8).
Lightweight bundle of an MPI communicator and the calling rank's coordinates.
int size
Number of ranks in comm (-1 until initialised).
int rank
This rank's 0-based index within comm (-1 until initialised).
MPI_Comm comm
The underlying MPI communicator handle.
void setWorld()
Initialise the object to MPI_COMM_WORLD. Requires MPI_Init to have run.
TYPE_TO_STRING(MPITypeTag< DNDS::real >)
TEST_CASE_TEMPLATE("Parametric Allreduce MPI_SUM", Tag, MPITypeTag< DNDS::real >, MPITypeTag< DNDS::index >, MPITypeTag< int32_t >, MPITypeTag< uint16_t >)
TEST_CASE("3D: VFV P2 HQM error < P1 on sinCos3D")