5#include <pybind11/stl.h>
6#include <pybind11/numpy.h>
14 .def(py::init([](uintptr_t pComm)
15 {
return std::make_unique<MPIInfo>(MPI_Comm(pComm)); }))
19 .def(
"comm", [](
const MPIInfo &self)
20 {
return size_t(self.
comm); })
21 .def(
"equals", &MPIInfo::operator==);
29 auto m_MPI = m.def_submodule(
"MPI");
32 [](
const std::vector<std::string> &pArgv)
45 int initial_argc =
static_cast<int>(pArgv.size());
46 int initial_argc_mine = initial_argc;
49 char **argv_array =
new char *[initial_argc + 1];
50 char **argv_array_mine = argv_array;
53 for (
int i = 0; i < initial_argc; ++i)
55 const std::string &s = pArgv[i];
56 size_t len = s.length();
57 char *cstr =
new char[len + 1];
58 strcpy(cstr, s.c_str());
61 argv_array[initial_argc] =
nullptr;
63 int *pargc = &initial_argc;
64 char ***pargv = &argv_array;
69 std::vector<std::string> pArgvOut;
70 for (
int i = 0; i < *pargc; ++i)
71 pArgvOut.push_back(std::string(argv_array[i]));
76 for (
int i = 0; i <= initial_argc_mine; ++i)
77 delete[] argv_array_mine[i];
78 delete[] argv_array_mine;
80 return std::make_tuple(ret, pArgvOut);
93 auto m_MPI = m.def_submodule(
"MPI");
96 [](py::buffer py_sendbuf, py::buffer py_recvbuf,
const std::string &op,
const MPIInfo &mpi)
98 auto send_info = py_sendbuf.request(
false);
99 auto recv_info = py_recvbuf.request(
true);
102 fmt::format(
"send and recv buffer format incompatible: [{}], [{}]",
103 send_info.format, recv_info.format));
114 datatype, mpi_op, mpi.comm);
116 py::arg(
"send"), py::arg(
"recv"), py::arg(
"op"), py::arg(
"mpi"));
127 auto m_Debug = m.def_submodule(
"Debug");
#define DNDS_assert_info(expr, info)
Debug-only assertion with an extra std::string info message.
#define DNDS_assert(expr)
Debug-only assertion (compiled out when DNDS_NDEBUG is defined). Prints the expression + file/line + ...
MPI wrappers: MPIInfo, collective operations, type mapping, CommStrategy.
pybind11 bindings for the DNDS MPI wrappers (MPIInfo, CommStrategy, buffer-protocol datatype helpers)...
bool IsDebugged()
Whether the current process is running under a debugger. Implemented via /proc/self/status TracerPid ...
void MPIDebugHold(const MPIInfo &mpi)
If isDebugging is set, block every rank in a busy-wait loop so the user can attach a debugger and ins...
void pybind11_Debug(py::module_ &m)
void pybind11_Init_thread(py::module_ &m)
int Finalize()
Release DNDSR-registered MPI resources then call MPI_Finalize.
void pybind11_MPI_Operations(py::module_ &m)
MPI_int Allreduce(const void *sendbuf, void *recvbuf, MPI_int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
Wrapper over MPI_Allreduce.
MPI_int Init_thread(int *argc, char ***argv)
Initialise MPI with thread support, honouring the DNDS_DISABLE_ASYNC_MPI environment override.
int GetMPIThreadLevel()
Return the MPI thread-support level the current process was initialised with.
Only the host-side operators are provided, as currently implemented.
MPI_Op py_get_simple_mpi_op_by_name(const std::string &op)
py::classh< T > py_class_ssp
std::tuple< ssize_t, char > py_buffer_get_contigious_size(const py::buffer_info &info)
void pybind11_MPIInfo(py::module_ &m)
MPI_Datatype py_get_buffer_basic_mpi_datatype(const py::buffer_info &info)
void pybind11_bind_MPI_All(py::module_ &m)
int MPI_int
MPI-facing integer alias (= C int). Used for counts and ranks in MPI calls.
Lightweight bundle of an MPI communicator together with the calling process's rank and the communicator size.
int size
Number of ranks in comm (-1 until initialised).
int rank
This rank's 0-based index within comm (-1 until initialised).
MPI_Comm comm
The underlying MPI communicator handle.
void setWorld()
Initialise the object to MPI_COMM_WORLD. Requires MPI_Init to have run.