DNDSR 0.1.0.dev1+gcd065ad
Distributed Numeric Data Structure for CFV
Loading...
Searching...
No Matches
test_MPI.cpp
Go to the documentation of this file.
1/**
2 * @file test_MPI.cpp
3 * @brief Doctest-based unit tests for DNDS MPI wrapper functionality.
4 *
5 * Run under mpirun with varying rank counts (1, 2, 4). Covers
6 * DNDS::MPIInfo, collective operations (Allreduce, Scan, Allgather,
7 * Bcast, Barrier, Alltoall), MPI type mapping, and CommStrategy.
8 *
9 * @see @ref dnds_unit_tests for the full test-suite overview.
10 * @test MPIInfo, Allreduce, Scan, Allgather, Bcast, Barrier, Alltoall,
11 * BasicType_To_MPIIntType, CommStrategy.
12 */
13
14#define DOCTEST_CONFIG_IMPLEMENT
15#include "doctest.h"
16#include "DNDS/MPI.hpp"
17#include "DNDS/Defines.hpp"
18
19#include <array>
20#include <cstdint>
21#include <numeric>
22#include <vector>
23
24int main(int argc, char **argv)
25{
26 MPI_Init(&argc, &argv);
27 doctest::Context ctx;
28 ctx.applyCommandLine(argc, argv);
29 int res = ctx.run();
30 MPI_Finalize();
31 return res;
32}
33
34using namespace DNDS;
35
36// ---------------------------------------------------------------------------
37// MPIInfo basics
38// ---------------------------------------------------------------------------
39TEST_CASE("MPIInfo basics")
40{
41 SUBCASE("setWorld populates rank, size, comm")
42 {
43 MPIInfo mpi;
44 mpi.setWorld();
45
46 CHECK(mpi.rank >= 0);
47 CHECK(mpi.rank < mpi.size);
48 CHECK(mpi.size >= 1);
49 CHECK(mpi.comm != MPI_COMM_NULL);
50 }
51
52 SUBCASE("two MPIInfos with setWorld are equal")
53 {
54 MPIInfo a, b;
55 a.setWorld();
56 b.setWorld();
57
58 CHECK(a == b);
59 }
60
61 SUBCASE("constructor from MPI_COMM_WORLD matches setWorld")
62 {
63 MPIInfo fromCtor(MPI_COMM_WORLD);
64 MPIInfo fromSet;
65 fromSet.setWorld();
66
67 CHECK(fromCtor.rank == fromSet.rank);
68 CHECK(fromCtor.size == fromSet.size);
69 CHECK(fromCtor.comm == fromSet.comm);
70 }
71
72 SUBCASE("MPIWorldSize and MPIWorldRank agree with MPIInfo")
73 {
74 MPIInfo mpi;
75 mpi.setWorld();
76
77 CHECK(MPIWorldSize() == mpi.size);
78 CHECK(MPIWorldRank() == mpi.rank);
79 }
80}
81
82// ---------------------------------------------------------------------------
83// MPI Allreduce
84// ---------------------------------------------------------------------------
85TEST_CASE("MPI Allreduce")
86{
87 MPIInfo mpi;
88 mpi.setWorld();
89 int expected = mpi.size * (mpi.size + 1) / 2;
90
91 SUBCASE("Allreduce MPI_SUM with real type")
92 {
93 DNDS::real sendVal = static_cast<DNDS::real>(mpi.rank + 1);
94 DNDS::real recvVal = 0.0;
95 MPI::Allreduce(&sendVal, &recvVal, 1, DNDS_MPI_REAL, MPI_SUM, mpi.comm);
96
97 CHECK(recvVal == doctest::Approx(static_cast<DNDS::real>(expected)));
98 }
99
100 SUBCASE("Allreduce MPI_SUM with index type")
101 {
102 DNDS::index sendVal = static_cast<DNDS::index>(mpi.rank + 1);
103 DNDS::index recvVal = 0;
104 MPI::Allreduce(&sendVal, &recvVal, 1, DNDS_MPI_INDEX, MPI_SUM, mpi.comm);
105
106 CHECK(recvVal == static_cast<DNDS::index>(expected));
107 }
108
109 SUBCASE("AllreduceOneReal")
110 {
111 DNDS::real val = static_cast<DNDS::real>(mpi.rank + 1);
112 MPI::AllreduceOneReal(val, MPI_SUM, mpi);
113
114 CHECK(val == doctest::Approx(static_cast<DNDS::real>(expected)));
115 }
116
117 SUBCASE("AllreduceOneIndex")
118 {
119 DNDS::index val = static_cast<DNDS::index>(mpi.rank + 1);
120 MPI::AllreduceOneIndex(val, MPI_SUM, mpi);
121
122 CHECK(val == static_cast<DNDS::index>(expected));
123 }
124
125 SUBCASE("Allreduce MPI_MAX")
126 {
127 DNDS::real val = static_cast<DNDS::real>(mpi.rank);
128 DNDS::real result = 0.0;
129 MPI::Allreduce(&val, &result, 1, DNDS_MPI_REAL, MPI_MAX, mpi.comm);
130
131 CHECK(result == doctest::Approx(static_cast<DNDS::real>(mpi.size - 1)));
132 }
133}
134
135// ---------------------------------------------------------------------------
136// MPI Scan
137// ---------------------------------------------------------------------------
138TEST_CASE("MPI Scan")
139{
140 MPIInfo mpi;
141 mpi.setWorld();
142
143 SUBCASE("Scan MPI_SUM with constant 1")
144 {
145 DNDS::index sendVal = 1;
146 DNDS::index recvVal = 0;
147 MPI::Scan(&sendVal, &recvVal, 1, DNDS_MPI_INDEX, MPI_SUM, mpi.comm);
148
149 CHECK(recvVal == static_cast<DNDS::index>(mpi.rank + 1));
150 }
151
152 SUBCASE("Scan MPI_SUM with rank value")
153 {
154 DNDS::index sendVal = static_cast<DNDS::index>(mpi.rank);
155 DNDS::index recvVal = 0;
156 MPI::Scan(&sendVal, &recvVal, 1, DNDS_MPI_INDEX, MPI_SUM, mpi.comm);
157
158 // prefix sum of 0 + 1 + ... + rank = rank*(rank+1)/2
159 DNDS::index expected = static_cast<DNDS::index>(mpi.rank) * (mpi.rank + 1) / 2;
160 CHECK(recvVal == expected);
161 }
162}
163
164// ---------------------------------------------------------------------------
165// MPI Allgather
166// ---------------------------------------------------------------------------
167TEST_CASE("MPI Allgather")
168{
169 MPIInfo mpi;
170 mpi.setWorld();
171
172 SUBCASE("gather rank values into ordered array")
173 {
174 DNDS::index sendVal = static_cast<DNDS::index>(mpi.rank);
175 std::vector<DNDS::index> recvBuf(mpi.size, -1);
176
177 MPI::Allgather(&sendVal, 1, DNDS_MPI_INDEX,
178 recvBuf.data(), 1, DNDS_MPI_INDEX, mpi.comm);
179
180 for (int i = 0; i < mpi.size; ++i)
181 {
182 CHECK(recvBuf[i] == static_cast<DNDS::index>(i));
183 }
184 }
185
186 SUBCASE("gather multiple elements per rank")
187 {
188 std::array<DNDS::real, 2> sendBuf = {
189 static_cast<DNDS::real>(mpi.rank),
190 static_cast<DNDS::real>(mpi.rank * 10)};
191 std::vector<DNDS::real> recvBuf(mpi.size * 2, -1.0);
192
193 MPI::Allgather(sendBuf.data(), 2, DNDS_MPI_REAL,
194 recvBuf.data(), 2, DNDS_MPI_REAL, mpi.comm);
195
196 for (int i = 0; i < mpi.size; ++i)
197 {
198 CHECK(recvBuf[2 * i] == doctest::Approx(static_cast<DNDS::real>(i)));
199 CHECK(recvBuf[2 * i + 1] == doctest::Approx(static_cast<DNDS::real>(i * 10)));
200 }
201 }
202}
203
204// ---------------------------------------------------------------------------
205// MPI Bcast
206// ---------------------------------------------------------------------------
207TEST_CASE("MPI Bcast")
208{
209 MPIInfo mpi;
210 mpi.setWorld();
211
212 SUBCASE("broadcast real from rank 0")
213 {
214 DNDS::real val = (mpi.rank == 0) ? 42.0 : -1.0;
215 MPI::Bcast(&val, 1, DNDS_MPI_REAL, 0, mpi.comm);
216
217 CHECK(val == doctest::Approx(42.0));
218 }
219
220 SUBCASE("broadcast index from rank 0")
221 {
222 DNDS::index val = (mpi.rank == 0) ? 12345 : -1;
223 MPI::Bcast(&val, 1, DNDS_MPI_INDEX, 0, mpi.comm);
224
225 CHECK(val == 12345);
226 }
227
228 SUBCASE("broadcast from last rank")
229 {
230 DNDS::real val = (mpi.rank == mpi.size - 1) ? 99.5 : 0.0;
231 MPI::Bcast(&val, 1, DNDS_MPI_REAL, mpi.size - 1, mpi.comm);
232
233 CHECK(val == doctest::Approx(99.5));
234 }
235}
236
237// ---------------------------------------------------------------------------
238// MPI Barrier
239// ---------------------------------------------------------------------------
240TEST_CASE("MPI Barrier")
241{
242 MPIInfo mpi;
243 mpi.setWorld();
244
245 // Barrier should return without hanging.
246 MPI_int ret = MPI::Barrier(mpi.comm);
247 CHECK(ret == MPI_SUCCESS);
248}
249
250// ---------------------------------------------------------------------------
251// BasicType_To_MPIIntType
252// ---------------------------------------------------------------------------
253TEST_CASE("MPI BasicType_To_MPIIntType")
254{
255 SUBCASE("scalar types")
256 {
257 auto [realType, realMult] = BasicType_To_MPIIntType<DNDS::real>();
258 CHECK(realType == MPI_DOUBLE);
259 CHECK(realMult == 1);
260
261 auto [idxType, idxMult] = BasicType_To_MPIIntType<DNDS::index>();
262 CHECK(idxType == MPI_INT64_T);
263 CHECK(idxMult == 1);
264
265 auto [fType, fMult] = BasicType_To_MPIIntType<float>();
266 CHECK(fType == MPI_FLOAT);
267 CHECK(fMult == 1);
268
269 auto [i32Type, i32Mult] = BasicType_To_MPIIntType<int32_t>();
270 CHECK(i32Type == MPI_INT32_T);
271 CHECK(i32Mult == 1);
272 }
273
274 SUBCASE("std::array<real, 3>")
275 {
276 auto [arrType, arrMult] = BasicType_To_MPIIntType<std::array<DNDS::real, 3>>();
277 CHECK(arrType == MPI_DOUBLE);
278 CHECK(arrMult == 3);
279 }
280
281 SUBCASE("Eigen::Matrix<real, 3, 3>")
282 {
283 using Mat33 = Eigen::Matrix<DNDS::real, 3, 3>;
284 auto [matType, matMult] = BasicType_To_MPIIntType<Mat33>();
285
286 // The MPI type should be DNDS_MPI_REAL (MPI_REAL8 / MPI_DOUBLE).
287 // Multiplicity should cover at least 9 doubles (may be rounded up
288 // due to divide_ceil over sizeof).
289 CHECK(matType != MPI_DATATYPE_NULL);
290 CHECK(matMult >= 9);
291 }
292
293 SUBCASE("DNDS_MPI_INDEX and DNDS_MPI_REAL globals")
294 {
295 CHECK(DNDS_MPI_INDEX != MPI_DATATYPE_NULL);
296 CHECK(DNDS_MPI_REAL != MPI_DATATYPE_NULL);
297 }
298}
299
300// ---------------------------------------------------------------------------
301// CommStrategy
302// ---------------------------------------------------------------------------
303TEST_CASE("CommStrategy")
304{
305 auto &cs = MPI::CommStrategy::Instance();
306
307 SUBCASE("default strategy is HIndexed")
308 {
309 // Restore known state first.
310 cs.SetArrayStrategy(MPI::CommStrategy::HIndexed);
311 CHECK(cs.GetArrayStrategy() == MPI::CommStrategy::HIndexed);
312 }
313
314 SUBCASE("set to InSituPack and verify")
315 {
316 cs.SetArrayStrategy(MPI::CommStrategy::InSituPack);
317 CHECK(cs.GetArrayStrategy() == MPI::CommStrategy::InSituPack);
318
319 // Restore default.
320 cs.SetArrayStrategy(MPI::CommStrategy::HIndexed);
321 CHECK(cs.GetArrayStrategy() == MPI::CommStrategy::HIndexed);
322 }
323}
324
325// ---------------------------------------------------------------------------
326// MPI Alltoall
327// ---------------------------------------------------------------------------
328TEST_CASE("MPI Alltoall")
329{
330 MPIInfo mpi;
331 mpi.setWorld();
332
333 SUBCASE("each rank sends its rank value to every other rank")
334 {
335 // sendBuf: every element is my rank.
336 std::vector<DNDS::index> sendBuf(mpi.size, static_cast<DNDS::index>(mpi.rank));
337 std::vector<DNDS::index> recvBuf(mpi.size, -1);
338
339 MPI::Alltoall(sendBuf.data(), 1, DNDS_MPI_INDEX,
340 recvBuf.data(), 1, DNDS_MPI_INDEX, mpi.comm);
341
342 // recvBuf[i] should be i (received from rank i, which sent its rank).
343 for (int i = 0; i < mpi.size; ++i)
344 {
345 CHECK(recvBuf[i] == static_cast<DNDS::index>(i));
346 }
347 }
348
349 SUBCASE("Alltoall with multiple elements per peer")
350 {
351 const int perPeer = 2;
352 std::vector<DNDS::real> sendBuf(mpi.size * perPeer);
353 std::vector<DNDS::real> recvBuf(mpi.size * perPeer, -1.0);
354
355 for (int i = 0; i < mpi.size; ++i)
356 {
357 sendBuf[i * perPeer + 0] = static_cast<DNDS::real>(mpi.rank);
358 sendBuf[i * perPeer + 1] = static_cast<DNDS::real>(mpi.rank + 100);
359 }
360
361 MPI::Alltoall(sendBuf.data(), perPeer, DNDS_MPI_REAL,
362 recvBuf.data(), perPeer, DNDS_MPI_REAL, mpi.comm);
363
364 for (int i = 0; i < mpi.size; ++i)
365 {
366 CHECK(recvBuf[i * perPeer + 0] == doctest::Approx(static_cast<DNDS::real>(i)));
367 CHECK(recvBuf[i * perPeer + 1] == doctest::Approx(static_cast<DNDS::real>(i + 100)));
368 }
369 }
370}
371
// ---------------------------------------------------------------------------
// Parametric Allreduce over element types
// ---------------------------------------------------------------------------

/// @brief Wrapper tag so TEST_CASE_TEMPLATE can enumerate element types.
/// NOTE(review): the struct's declaration line was lost in extraction; the
/// name MPITypeTag is reconstructed from its uses in the TEST_CASE_TEMPLATE
/// type lists — confirm against the repository source.
template <class T>
struct MPITypeTag
{
    using type = T;
};
381
386
387TEST_CASE_TEMPLATE("Parametric Allreduce MPI_SUM", Tag,
390{
391 using T = typename Tag::type;
392
393 MPIInfo mpi;
394 mpi.setWorld();
395
396 T sendVal = static_cast<T>(mpi.rank + 1);
397 T recvVal{};
398
399 auto [mpiType, mpiMult] = BasicType_To_MPIIntType<T>();
400 MPI::Allreduce(&sendVal, &recvVal, mpiMult, mpiType, MPI_SUM, mpi.comm);
401
402 // Expected: sum_{r=0}^{size-1} (r+1) = size*(size+1)/2
403 T expected = static_cast<T>(mpi.size * (mpi.size + 1) / 2);
404
405 if constexpr (std::is_floating_point_v<T>)
406 CHECK(recvVal == doctest::Approx(expected));
407 else
408 CHECK(recvVal == expected);
409}
410
411// ---------------------------------------------------------------------------
412// Parametric Allgather over element types
413// ---------------------------------------------------------------------------
414
415TEST_CASE_TEMPLATE("Parametric Allgather rank values", Tag,
418{
419 using T = typename Tag::type;
420
421 MPIInfo mpi;
422 mpi.setWorld();
423
424 T sendVal = static_cast<T>(mpi.rank);
425 std::vector<T> recvBuf(mpi.size, T{});
426
427 auto [mpiType, mpiMult] = BasicType_To_MPIIntType<T>();
428 MPI::Allgather(&sendVal, mpiMult, mpiType,
429 recvBuf.data(), mpiMult, mpiType, mpi.comm);
430
431 for (int i = 0; i < mpi.size; ++i)
432 {
433 T expected = static_cast<T>(i);
434 if constexpr (std::is_floating_point_v<T>)
435 CHECK(recvBuf[i] == doctest::Approx(expected));
436 else
437 CHECK(recvBuf[i] == expected);
438 }
439}
Core type aliases, constants, and metaprogramming utilities for the DNDS framework.
MPI wrappers: MPIInfo, collective operations, type mapping, CommStrategy.
static CommStrategy & Instance()
Access the process-wide singleton.
Definition MPI.cpp:415
@ InSituPack
Manually pack / unpack into contiguous buffers.
Definition MPI.hpp:736
@ HIndexed
Use MPI_Type_create_hindexed derived types (default).
Definition MPI.hpp:735
int main()
Definition dummy.cpp:3
the host side operators are provided as implemented
const MPI_Datatype DNDS_MPI_INDEX
MPI datatype matching index (= MPI_INT64_T).
Definition MPI.hpp:90
MPI_int MPIWorldRank()
Convenience: MPI_Comm_rank(MPI_COMM_WORLD).
Definition MPI.hpp:307
MPI_int MPIWorldSize()
Convenience: MPI_Comm_size(MPI_COMM_WORLD).
Definition MPI.hpp:299
int64_t index
Global row / DOF index type (signed 64-bit; handles multi-billion-cell meshes).
Definition Defines.hpp:107
double real
Canonical floating-point scalar used throughout DNDSR (double precision).
Definition Defines.hpp:105
int MPI_int
MPI counterpart type for MPI_int (= C int). Used for counts and ranks in MPI calls.
Definition MPI.hpp:43
const MPI_Datatype DNDS_MPI_REAL
MPI datatype matching real (= MPI_REAL8).
Definition MPI.hpp:92
Lightweight bundle of an MPI communicator and the calling rank's coordinates.
Definition MPI.hpp:215
int size
Number of ranks in comm (-1 until initialised).
Definition MPI.hpp:221
int rank
This rank's 0-based index within comm (-1 until initialised).
Definition MPI.hpp:219
MPI_Comm comm
The underlying MPI communicator handle.
Definition MPI.hpp:217
void setWorld()
Initialise the object to MPI_COMM_WORLD. Requires MPI_Init to have run.
Definition MPI.hpp:242
tVec b(NCells)
CHECK(result.size()==3)
real expected
auto result
TYPE_TO_STRING(MPITypeTag< DNDS::real >)
TEST_CASE_TEMPLATE("Parametric Allreduce MPI_SUM", Tag, MPITypeTag< DNDS::real >, MPITypeTag< DNDS::index >, MPITypeTag< int32_t >, MPITypeTag< uint16_t >)
Definition test_MPI.cpp:387
auto res
Definition test_ODE.cpp:196
TEST_CASE("3D: VFV P2 HQM error < P1 on sinCos3D")