Add CHECK_COLLECTIVE to detect collective ordering mismatches across ranks (maybe I missed some call sites).
smpi_process()->mark_as_initialized();
smpi_mpi_init();
+ CHECK_COLLECTIVE(smpi_process()->comm_world(), "MPI_Init")
return MPI_SUCCESS;
}
int PMPI_Finalize()
{
smpi_bench_end();
+ CHECK_COLLECTIVE(smpi_process()->comm_world(), "MPI_Finalize")
aid_t rank_traced = simgrid::s4u::this_actor::get_pid();
smpi_process()->mark_as_finalizing();
TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::NoOpTIData("finalize"));
{
CHECK_COMM(1)
CHECK_REQUEST(2)
-
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Barrier" : "PMPI_Ibarrier")
const SmpiBenchGuard suspend_bench;
aid_t pid = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_comm_in(pid, request == MPI_REQUEST_IGNORED ? "PMPI_Barrier" : "PMPI_Ibarrier",
CHECK_BUFFER(1, buf, count, datatype)
CHECK_ROOT(4)
CHECK_REQUEST(6)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Bcast" : "PMPI_Ibcast")
const SmpiBenchGuard suspend_bench;
aid_t pid = simgrid::s4u::this_actor::get_pid();
}
CHECK_ROOT(7)
CHECK_REQUEST(9)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Gather" : "PMPI_Igather")
const void* real_sendbuf = sendbuf;
int real_sendcount = sendcount;
}
CHECK_ROOT(8)
CHECK_REQUEST(10)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Gatherv" : "PMPI_Igatherv")
if (rank == root){
for (int i = 0; i < comm->size(); i++) {
CHECK_BUFFER(1, sendbuf, sendcount, sendtype)
CHECK_BUFFER(4, recvbuf, recvcount, recvtype)
CHECK_REQUEST(8)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Allgather" : "PMPI_Iallggather")
if (sendbuf == MPI_IN_PLACE) {
sendbuf = static_cast<char*>(recvbuf) + recvtype->get_extent() * recvcount * comm->rank();
CHECK_COUNT(5, recvcounts[i])
CHECK_BUFFER(4, recvbuf, recvcounts[i], recvtype)
}
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Allgatherv" : "PMPI_Iallgatherv")
const SmpiBenchGuard suspend_bench;
if (sendbuf == MPI_IN_PLACE) {
}
CHECK_ROOT(8)
CHECK_REQUEST(9)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Scatter" : "PMPI_Iscatter")
if (recvbuf == MPI_IN_PLACE) {
recvtype = sendtype;
} else {
CHECK_NOT_IN_PLACE_ROOT(4, recvbuf)
}
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Scatterv" : "PMPI_Iscatterv")
const SmpiBenchGuard suspend_bench;
CHECK_OP(5, op, datatype)
CHECK_ROOT(7)
CHECK_REQUEST(8)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Reduce" : "PMPI_Ireduce")
const SmpiBenchGuard suspend_bench;
aid_t pid = simgrid::s4u::this_actor::get_pid();
CHECK_BUFFER(1, sendbuf, count, datatype)
CHECK_BUFFER(2, recvbuf, count, datatype)
CHECK_REQUEST(7)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Allreduce" : "PMPI_Iallreduce")
const SmpiBenchGuard suspend_bench;
std::vector<unsigned char> tmp_sendbuf;
CHECK_BUFFER(2, recvbuf, count, datatype)
CHECK_REQUEST(7)
CHECK_OP(5, op, datatype)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Scan" : "PMPI_Iscan")
const SmpiBenchGuard suspend_bench;
aid_t pid = simgrid::s4u::this_actor::get_pid();
CHECK_BUFFER(2, recvbuf, count, datatype)
CHECK_REQUEST(7)
CHECK_OP(5, op, datatype)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Exscan" : "PMPI_Iexscan")
const SmpiBenchGuard suspend_bench;
aid_t pid = simgrid::s4u::this_actor::get_pid();
CHECK_BUFFER(1, sendbuf, recvcounts[i], datatype)
CHECK_BUFFER(2, recvbuf, recvcounts[i], datatype)
}
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Reduce_scatter" : "PMPI_Ireduce_scatter")
const SmpiBenchGuard suspend_bench;
aid_t pid = simgrid::s4u::this_actor::get_pid();
CHECK_BUFFER(2, recvbuf, recvcount, datatype)
CHECK_REQUEST(7)
CHECK_OP(5, op, datatype)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Reduce_scatter_block" : "PMPI_Ireduce_scatter_block")
const SmpiBenchGuard suspend_bench;
int count = comm->size();
CHECK_COUNT(5, recvcount)
CHECK_BUFFER(4, recvbuf, recvcount, recvtype)
CHECK_REQUEST(8)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Alltoall" : "PMPI_Ialltoall")
aid_t pid = simgrid::s4u::this_actor::get_pid();
int real_sendcount = sendcount;
CHECK_NULL(6, MPI_ERR_COUNT, recvcounts)
CHECK_NULL(7, MPI_ERR_ARG, recvdispls)
CHECK_REQUEST(10)
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Alltoallv" : "PMPI_Ialltoallv")
aid_t pid = simgrid::s4u::this_actor::get_pid();
int size = comm->size();
CHECK_TYPE(8, recvtypes[i])
CHECK_BUFFER(5, recvbuf, recvcounts[i], recvtypes[i])
}
+ CHECK_COLLECTIVE(comm, request == MPI_REQUEST_IGNORED ? "PMPI_Alltoallw" : "PMPI_Ialltoallw")
const SmpiBenchGuard suspend_bench;
{
CHECK_NULL(4, MPI_ERR_ARG, comm_out)
CHECK_COMM2(1, comm)
+ CHECK_COLLECTIVE(comm, __func__)
if (color != MPI_UNDEFINED) // we use a negative value for MPI_UNDEFINED
CHECK_NEGATIVE(3, MPI_ERR_ARG, color)
const SmpiBenchGuard suspend_bench;
{
CHECK_COMM(1)
CHECK_NULL(5, MPI_ERR_ARG, newcomm)
+ CHECK_COLLECTIVE(comm, __func__)
const SmpiBenchGuard suspend_bench;
*newcomm = comm->split_type(split_type, key, info);
return MPI_SUCCESS;
int PMPI_File_open(MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh){
CHECK_COMM(1)
+ CHECK_COLLECTIVE(comm, "MPI_File_open")
CHECK_NULL(2, MPI_ERR_FILE, filename)
if (amode < 0)
return MPI_ERR_AMODE;
int PMPI_File_close(MPI_File *fh){
CHECK_NULL(2, MPI_ERR_ARG, fh)
+ CHECK_COLLECTIVE((*fh)->comm(), __func__)
const SmpiBenchGuard suspend_bench;
int ret = simgrid::smpi::File::close(fh);
*fh = MPI_FILE_NULL;
int PMPI_File_seek_shared(MPI_File fh, MPI_Offset offset, int whence){
CHECK_FILE(1, fh)
+ CHECK_COLLECTIVE(fh->comm(), __func__)
const SmpiBenchGuard suspend_bench;
int ret = fh->seek_shared(offset, whence);
return ret;
int PMPI_File_read_all(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE_INPUTS
CHECK_WRONLY(fh)
+ CHECK_COLLECTIVE(fh->comm(), __func__)
const SmpiBenchGuard suspend_bench;
aid_t rank_traced = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_all", count * datatype->size()));
int PMPI_File_read_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE_INPUTS
CHECK_WRONLY(fh)
+ CHECK_COLLECTIVE(fh->comm(), __func__)
const SmpiBenchGuard suspend_bench;
aid_t rank_traced = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_comm_in(rank_traced, __func__,
int PMPI_File_write_all(MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE_INPUTS
CHECK_RDONLY(fh)
+ CHECK_COLLECTIVE(fh->comm(), __func__)
const SmpiBenchGuard suspend_bench;
aid_t rank_traced = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_all", count * datatype->size()));
int PMPI_File_write_ordered(MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE_INPUTS
CHECK_RDONLY(fh)
+ CHECK_COLLECTIVE(fh->comm(), __func__)
const SmpiBenchGuard suspend_bench;
aid_t rank_traced = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_comm_in(rank_traced, __func__,
int PMPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE_INPUT_OFFSET
CHECK_WRONLY(fh)
+ CHECK_COLLECTIVE(fh->comm(), __func__)
const SmpiBenchGuard suspend_bench;
aid_t rank_traced = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_comm_in(rank_traced, __func__,
int PMPI_File_write_at_all(MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE_INPUT_OFFSET
CHECK_RDONLY(fh)
+ CHECK_COLLECTIVE(fh->comm(), __func__)
const SmpiBenchGuard suspend_bench;
aid_t rank_traced = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_comm_in(rank_traced, __func__,
int PMPI_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char *datarep, MPI_Info info){
CHECK_FILE(1, fh)
+ CHECK_COLLECTIVE(fh->comm(), __func__)
if (not ((fh->flags() & MPI_MODE_SEQUENTIAL) && (disp == MPI_DISPLACEMENT_CURRENT)))
CHECK_OFFSET(2, disp)
CHECK_TYPE(3, etype)
int PMPI_File_set_size(MPI_File fh, MPI_Offset size)
{
CHECK_FILE(1, fh)
+ CHECK_COLLECTIVE(fh->comm(), __func__)
fh->set_size(size);
return MPI_SUCCESS;
}
CHECK_NEGATIVE(2, MPI_ERR_ARG, ndims)
for (int i = 0; i < ndims; i++)
CHECK_NEGATIVE(2, MPI_ERR_ARG, dims[i])
+ CHECK_COLLECTIVE(comm, __func__)
const auto* topo = new simgrid::smpi::Topo_Cart(comm, ndims, dims, periodic, reorder, comm_cart);
if (*comm_cart == MPI_COMM_NULL) {
delete topo;
CHECK_COMM(1)
CHECK_NULL(1, MPI_ERR_TOPOLOGY, comm->topo())
CHECK_NULL(3, MPI_ERR_ARG, comm_new)
+ CHECK_COLLECTIVE(comm, __func__)
auto* topo = static_cast<MPIR_Cart_Topology>(comm->topo().get());
if (topo==nullptr) {
return MPI_ERR_ARG;
CHECK_BUFFER(1, base, size, MPI_BYTE)
CHECK_NEGATIVE(2, MPI_ERR_OTHER, size)
CHECK_NEGATIVE(3, MPI_ERR_OTHER, disp_unit)
+ CHECK_COLLECTIVE(comm, __func__)
const SmpiBenchGuard suspend_bench;
if (base == nullptr && size != 0){
retval = MPI_ERR_OTHER;
int PMPI_Win_free( MPI_Win* win){
CHECK_NULL(1, MPI_ERR_WIN, win)
CHECK_WIN(1, (*win))
+ CHECK_COLLECTIVE((*win)->comm(), __func__)
if (_smpi_cfg_pedantic && (*win)->opened() == 1){ // only check in pedantic mode, as it's not clear this is illegal
XBT_WARN("Attempt to destroy an MPI_Win too early - missing MPI_Win_fence?");
return MPI_ERR_WIN;
#define CHECK_COMM2(num, comm)\
CHECK_MPI_NULL((num), MPI_COMM_NULL, MPI_ERR_COMM, (comm))
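+// Check that all ranks of 'comm' enter collectives in the same order.
+// Assumed contract of simgrid::smpi::utils::check_collectives_ordering
+// (its implementation is not shown in this hunk): it records 'call' for the
+// calling rank and returns something other than MPI_SUCCESS when the
+// recorded sequence diverges across the ranks of 'comm'; CHECK_ARGS then
+// raises MPI_ERR_OTHER with a "collective mismatch" message.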
+#define CHECK_COLLECTIVE(comm, call)\
+ CHECK_ARGS((simgrid::smpi::utils::check_collectives_ordering((comm), std::string(call)) != MPI_SUCCESS), MPI_ERR_OTHER,\
+ "%s: collective mismatch", call)
+
#define CHECK_DELETED(num, err, obj)\
CHECK_ARGS((obj)->deleted(), (err), "%s: param %d %s has already been freed", __func__, (num),\
_XBT_STRINGIFY(obj))
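For illustration, here is a hypothetical reproducer (not part of the patch) of the kind of error this check is meant to flag: rank 0 enters a barrier while every other rank enters a broadcast on the same communicator, so the per-rank call names disagree and, under the assumed semantics of check_collectives_ordering, the run should abort with a "collective mismatch" error instead of deadlocking.

// Hypothetical reproducer: ranks disagree on which collective they enter.
#include <mpi.h>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  int payload = 42;
  if (rank == 0)
    MPI_Barrier(MPI_COMM_WORLD);                        // rank 0 records "PMPI_Barrier"...
  else
    MPI_Bcast(&payload, 1, MPI_INT, 0, MPI_COMM_WORLD); // ...while the others record "PMPI_Bcast"
  MPI_Finalize();
  return 0;
}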