XBT
 - Remove XBT_LOG_CONNECT; it should be useless nowadays.
+ SMPI
+ - Removed the index notion from SMPI -- no more "getPid() - 1"!
+   This also means that the trace files now contain actors called
+   rank-<PROCESS-ID>. These are NOT the real ranks, because tracing
+   several communicators is currently not supported.
+ - Because of this, smpi_process_index() is now deprecated and will be
+   removed in v3.21 (a porting sketch follows below).
+
Fixed bugs:
- #245: migrating an actor does not migrate its execution
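
A minimal porting sketch for user code (hypothetical example). Note that the
deprecated function itself now forwards to the actor PID (see the
smpi_process_index() hunk below), so its return value is shifted by one
compared to the old 0-based index:

    int id_old = smpi_process_index();                  /* deprecated, removed in v3.21 */
    int id_new = simgrid::s4u::Actor::self()->getPid(); /* replacement: 1-based actor PID */
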
> 0 1 0 MPI
> 2 2 1 MPI_STATE
> 4 3 0 1 1 MPI_LINK
-> 6 0 1 1 0 "rank-0"
+> 6 0 1 1 0 "rank-1"
> 5 4 2 computing "0 1 1"
> 5 5 2 smpi_replay_run_init "0 1 0"
-> 6 0 2 1 0 "rank-1"
-> 6 0 3 1 0 "rank-2"
+> 6 0 2 1 0 "rank-2"
+> 6 0 3 1 0 "rank-3"
> 12 0 2 1 5
> 13 0 2 1
> 12 0 2 2 5
msg_error_t res;
MSG_init(&argc, argv);
+ SMPI_init();
xbt_assert(argc > 3, "Usage: %s description_file platform_file deployment_file\n"
"\tExample: %s smpi_multiple_apps msg_platform.xml msg_deployment.xml\n", argv[0], argv[0]);
fclose(fp);
MSG_launch_application(argv[3]);
- SMPI_init();
res = MSG_main();
! timeout 120
$ ./replay_multiple description_file ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${bindir:=.}/deployment.xml --log=smpi.:info --cfg=maxmin/concurrency-limit:100
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/concurrency-limit' to '100'
+> [0.000000] [smpi_kernel/INFO] You did not set the power of the host running the simulation. The timings will certainly not be accurate. Use the option "--cfg=smpi/host-speed:<flops>" to set its value.Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information.
> [0.000000] [msg_test/INFO] Initializing instance 1 of size 32
> [0.000000] [msg_test/INFO] Initializing instance 2 of size 32
-> [0.000000] [smpi_kernel/INFO] You did not set the power of the host running the simulation. The timings will certainly not be accurate. Use the option "--cfg=smpi/host-speed:<flops>" to set its value.Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information.
> [Jupiter:2:(62) 1140698.106357] [smpi_replay/INFO] Simulation time 1124380.753685
> [1140698.106357] [msg_test/INFO] Simulation time 1.1407e+06
msg_error_t res;
MSG_init(&argc, argv);
+ SMPI_init();
xbt_assert(argc > 2,"Usage: %s platform_file deployment_file\n"
"\nexample: %s msg_platform.xml msg_deployment.xml\n", argv[0], argv[0]);
// the second performing an alltoall on 4 nodes
SMPI_app_instance_register("alltoall_mpi", alltoall_mpi,4);
MSG_launch_application(argv[2]);
- SMPI_init();
res = MSG_main();
/* Fortran specific stuff */
XBT_PUBLIC(int) smpi_main(const char* program, int argc, char *argv[]);
-XBT_PUBLIC(int) smpi_process_index();
+XBT_ATTRIB_DEPRECATED_v321("Use Actor::self()->getPid(): v3.21 will turn this warning into an error.") XBT_PUBLIC(int) smpi_process_index();
XBT_PUBLIC(void) smpi_process_init(int *argc, char ***argv);
/* Trace replay specific stuff */
name = NULL; \
}
-#define SMPI_VARGET_GLOBAL(name) name[smpi_process_index()]
+#define SMPI_VARGET_GLOBAL(name) name[SIMIX_process_self()->pid]
/**
* This is used for the old privatization method, i.e., on old
smpi_register_static(name, xbt_free_f); \
}
-#define SMPI_VARGET_STATIC(name) name[smpi_process_index()]
+#define SMPI_VARGET_STATIC(name) name[SIMIX_process_self()->pid]
SG_END_DECL()
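
For context, a hedged usage sketch of this lookup macro, assuming the companion
SMPI_VARINIT_GLOBAL macro from smpi/smpi.h (hypothetical user code):

    SMPI_VARINIT_GLOBAL(mycounter, int); /* one slot per process */

    static void bump() {
      /* now expands to mycounter[SIMIX_process_self()->pid] */
      SMPI_VARGET_GLOBAL(mycounter)++;
    }
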
if (getContainer()->getName().find("rank-") != 0)
stream << getContainer()->getName() << " " << extra_->print();
else
- stream << getContainer()->getName().erase(0, 5) << " " << extra_->print();
+    /* Subtract 1 because this is the process id, which we transform into the rank id */
+    stream << stoi(getContainer()->getName().erase(0, 5)) - 1 << " " << extra_->print();
fprintf(tracing_files.at(getContainer()), "%s\n", stream.str().c_str());
} else {
if (parent_process != nullptr) {
process->ppid = parent_process->pid;
-/* SMPI process have their own data segment and each other inherit from their father */
-#if HAVE_SMPI
- if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
- if (parent_process->pid != 0) {
- process->segment_index = parent_process->segment_index;
- } else {
- process->segment_index = process->pid - 1;
- }
- }
-#endif
}
process->code = code;
if (parent_process != nullptr) {
process->ppid = parent_process->pid;
- /* SMPI process have their own data segment and each other inherit from their father */
-#if HAVE_SMPI
- if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
- if (parent_process->pid != 0) {
- process->segment_index = parent_process->segment_index;
- } else {
- process->segment_index = process->pid - 1;
- }
- }
-#endif
}
/* Process data for auto-restart */
std::rethrow_exception(std::move(exception));
}
- if(SMPI_switch_data_segment && self->segment_index != -1){
- SMPI_switch_data_segment(self->segment_index);
+ if(SMPI_switch_data_segment){
+ SMPI_switch_data_segment(self->pid);
}
}
std::function<void()> code;
smx_timer_t kill_timer = nullptr;
- int segment_index = -1; /* Reference to an SMPI process' data segment. Default value is -1 if not in SMPI context*/
/* Refcounting */
private:
if(already_init == 0){
simgrid::smpi::Process::init(argc, argv);
smpi_process()->mark_as_initialized();
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_init(rank);
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("init"));
TRACE_smpi_comm_out(rank);
int PMPI_Finalize()
{
smpi_bench_end();
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("finalize"));
smpi_process()->finalize();
if (flag == nullptr) {
return MPI_ERR_ARG;
} else {
- *flag = smpi_process()->index() == 0; // FIXME: I don't think this is correct: This just returns true if the process ID is 1,
+ *flag = simgrid::s4u::Actor::self()->getPid() == 1; // FIXME: I don't think this is correct: This just returns true if the process ID is 1,
    // regardless of whether this process called MPI_Init_thread() or not.
return MPI_SUCCESS;
}
} else if (not datatype->is_valid()) {
retval = MPI_ERR_ARG;
} else {
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::CollTIData("bcast", comm->group()->index(root), -1.0,
+ new simgrid::instr::CollTIData("bcast", root, -1.0,
datatype->is_replayable() ? count : count * datatype->size(), -1,
encode_datatype(datatype), ""));
if (comm->size() > 1)
if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
} else {
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("barrier"));
simgrid::smpi::Colls::barrier(comm);
sendtmpcount=0;
sendtmptype=recvtype;
}
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::CollTIData(
- "gather", comm->group()->index(root), -1.0,
+ "gather", root, -1.0,
sendtmptype->is_replayable() ? sendtmpcount : sendtmpcount * sendtmptype->size(),
(comm->rank() != root || recvtype->is_replayable()) ? recvcount : recvcount * recvtype->size(),
encode_datatype(sendtmptype), encode_datatype(recvtype)));
sendtmptype=recvtype;
}
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
int dt_size_recv = recvtype->is_replayable() ? 1 : recvtype->size();
std::vector<int>* trace_recvcounts = new std::vector<int>;
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::VarCollTIData(
- "gatherV", comm->group()->index(root),
+ "gatherV", root,
sendtmptype->is_replayable() ? sendtmpcount : sendtmpcount * sendtmptype->size(), nullptr,
dt_size_recv, trace_recvcounts, encode_datatype(sendtmptype), encode_datatype(recvtype)));
sendcount=recvcount;
sendtype=recvtype;
}
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::CollTIData("allGather", -1, -1.0,
sendcount=recvcounts[comm->rank()];
sendtype=recvtype;
}
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
int dt_size_recv = recvtype->is_replayable() ? 1 : recvtype->size();
std::vector<int>* trace_recvcounts = new std::vector<int>;
recvtype = sendtype;
recvcount = sendcount;
}
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::CollTIData(
- "scatter", comm->group()->index(root), -1.0,
+ "scatter", root, -1.0,
(comm->rank() != root || sendtype->is_replayable()) ? sendcount : sendcount * sendtype->size(),
recvtype->is_replayable() ? recvcount : recvcount * recvtype->size(), encode_datatype(sendtype),
encode_datatype(recvtype)));
recvtype = sendtype;
recvcount = sendcounts[comm->rank()];
}
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
int dt_size_send = sendtype->is_replayable() ? 1 : sendtype->size();
std::vector<int>* trace_sendcounts = new std::vector<int>;
}
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::VarCollTIData(
- "scatterV", comm->group()->index(root), dt_size_send, trace_sendcounts,
+ "scatterV", root, dt_size_send, trace_sendcounts,
recvtype->is_replayable() ? recvcount : recvcount * recvtype->size(), nullptr,
encode_datatype(sendtype), encode_datatype(recvtype)));
} else if (not datatype->is_valid() || op == MPI_OP_NULL) {
retval = MPI_ERR_ARG;
} else {
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::CollTIData("reduce", comm->group()->index(root), 0,
+ new simgrid::instr::CollTIData("reduce", root, 0,
datatype->is_replayable() ? count : count * datatype->size(), -1,
encode_datatype(datatype), ""));
sendtmpbuf = static_cast<char*>(xbt_malloc(count*datatype->get_extent()));
simgrid::smpi::Datatype::copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
}
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::CollTIData("allReduce", -1, 0,
} else if (op == MPI_OP_NULL) {
retval = MPI_ERR_OP;
} else {
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData(
"scan", -1, datatype->is_replayable() ? count : count * datatype->size(),
} else if (op == MPI_OP_NULL) {
retval = MPI_ERR_OP;
} else {
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
void* sendtmpbuf = sendbuf;
if (sendbuf == MPI_IN_PLACE) {
sendtmpbuf = static_cast<void*>(xbt_malloc(count * datatype->size()));
} else if (recvcounts == nullptr) {
retval = MPI_ERR_ARG;
} else {
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
std::vector<int>* trace_recvcounts = new std::vector<int>;
int dt_send_size = datatype->is_replayable() ? 1 : datatype->size();
int totalcount = 0;
} else {
int count = comm->size();
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
int dt_send_size = datatype->is_replayable() ? 1 : datatype->size();
std::vector<int>* trace_recvcounts = new std::vector<int>(recvcount * dt_send_size); // copy data to avoid bad free
} else if ((sendbuf != MPI_IN_PLACE && sendtype == MPI_DATATYPE_NULL) || recvtype == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
} else {
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
void* sendtmpbuf = static_cast<char*>(sendbuf);
int sendtmpcount = sendcount;
MPI_Datatype sendtmptype = sendtype;
recvdisps == nullptr) {
retval = MPI_ERR_ARG;
} else {
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
int size = comm->size();
int send_size = 0;
int recv_size = 0;
return MPI_ERR_GROUP;
} else if (newcomm == nullptr) {
return MPI_ERR_ARG;
- } else if(group->rank(smpi_process()->index())==MPI_UNDEFINED){
+ } else if(group->rank(simgrid::s4u::Actor::self()->getPid())==MPI_UNDEFINED){
*newcomm= MPI_COMM_NULL;
return MPI_SUCCESS;
}else{
} else if (rank == nullptr) {
return MPI_ERR_ARG;
} else {
- *rank = group->rank(smpi_process()->index());
+ *rank = group->rank(simgrid::s4u::Actor::self()->getPid());
return MPI_SUCCESS;
}
}
if(ranks1[i]==MPI_PROC_NULL){
ranks2[i]=MPI_PROC_NULL;
}else{
- int index = group1->index(ranks1[i]);
- ranks2[i] = group2->rank(index);
+ simgrid::s4u::ActorPtr actor = group1->actor(ranks1[i]);
+ ranks2[i] = group2->rank(actor);
}
}
return MPI_SUCCESS;
XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+static int getPid(MPI_Comm comm, int id)
+{
+ simgrid::s4u::ActorPtr actor = comm->group()->actor(id);
+ return (actor == nullptr) ? MPI_UNDEFINED : actor->getPid();
+}
+
/* PMPI User level calls */
extern "C" { // Obviously, the C MPI interface should use the C linkage
retval = MPI_ERR_TAG;
} else {
- int rank = smpi_process()->index();
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::Pt2PtTIData("Irecv", comm->group()->index(src),
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("Irecv", src,
datatype->is_replayable() ? count : count * datatype->size(),
encode_datatype(datatype)));
*request = simgrid::smpi::Request::irecv(buf, count, datatype, src, tag, comm);
retval = MPI_SUCCESS;
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int rank = smpi_process()->index();
- int trace_dst = comm->group()->index(dst);
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::Pt2PtTIData("Isend", trace_dst,
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ int trace_dst = getPid(comm, dst);
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("Isend", dst,
datatype->is_replayable() ? count : count * datatype->size(),
encode_datatype(datatype)));
- TRACE_smpi_send(rank, rank, trace_dst, tag, count * datatype->size());
+ TRACE_smpi_send(my_proc_id, my_proc_id, trace_dst, tag, count * datatype->size());
*request = simgrid::smpi::Request::isend(buf, count, datatype, dst, tag, comm);
retval = MPI_SUCCESS;
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int rank = smpi_process()->index();
- int trace_dst = comm->group()->index(dst);
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::Pt2PtTIData("ISsend", trace_dst,
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ int trace_dst = getPid(comm, dst);
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("ISsend", dst,
datatype->is_replayable() ? count : count * datatype->size(),
encode_datatype(datatype)));
- TRACE_smpi_send(rank, rank, trace_dst, tag, count * datatype->size());
+ TRACE_smpi_send(my_proc_id, my_proc_id, trace_dst, tag, count * datatype->size());
*request = simgrid::smpi::Request::issend(buf, count, datatype, dst, tag, comm);
retval = MPI_SUCCESS;
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int rank = smpi_process()->index();
- int src_traced = comm->group()->index(src);
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::Pt2PtTIData("recv", src_traced,
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ int src_traced = getPid(comm, src);
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("recv", src,
datatype->is_replayable() ? count : count * datatype->size(),
encode_datatype(datatype)));
// the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
if (status != MPI_STATUS_IGNORE) {
- src_traced = comm->group()->index(status->MPI_SOURCE);
+ src_traced = getPid(comm, status->MPI_SOURCE);
if (not TRACE_smpi_view_internals()) {
- TRACE_smpi_recv(src_traced, rank, tag);
+ TRACE_smpi_recv(src_traced, my_proc_id, tag);
}
}
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
} else if(tag < 0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int rank = smpi_process()->index();
- int dst_traced = comm->group()->index(dst);
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::Pt2PtTIData("send", dst_traced,
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ int dst_traced = getPid(comm, dst);
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("send", dst,
datatype->is_replayable() ? count : count * datatype->size(),
encode_datatype(datatype)));
if (not TRACE_smpi_view_internals()) {
- TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size());
+ TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, tag, count * datatype->size());
}
simgrid::smpi::Request::send(buf, count, datatype, dst, tag, comm);
retval = MPI_SUCCESS;
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int rank = smpi_process()->index();
- int dst_traced = comm->group()->index(dst);
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::Pt2PtTIData("Ssend", dst_traced,
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ int dst_traced = getPid(comm, dst);
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("Ssend", dst,
datatype->is_replayable() ? count : count * datatype->size(),
encode_datatype(datatype)));
- TRACE_smpi_send(rank, rank, dst_traced, tag, count * datatype->size());
+ TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, tag, count * datatype->size());
simgrid::smpi::Request::ssend(buf, count, datatype, dst, tag, comm);
retval = MPI_SUCCESS;
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
} else if((sendtag<0 && sendtag != MPI_ANY_TAG)||(recvtag<0 && recvtag != MPI_ANY_TAG)){
retval = MPI_ERR_TAG;
} else {
- int rank = smpi_process()->index();
- int dst_traced = comm->group()->index(dst);
- int src_traced = comm->group()->index(src);
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ int dst_traced = getPid(comm, dst);
+ int src_traced = getPid(comm, src);
// FIXME: Hack the way to trace this one
std::vector<int>* dst_hack = new std::vector<int>;
std::vector<int>* src_hack = new std::vector<int>;
dst_hack->push_back(dst_traced);
src_hack->push_back(src_traced);
- TRACE_smpi_comm_in(rank, __FUNCTION__,
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
new simgrid::instr::VarCollTIData(
- "sendRecv", -1, sendtype->is_replayable() ? sendcount : sendcount * sendtype->size(), dst_hack,
- recvtype->is_replayable() ? recvcount : recvcount * recvtype->size(), src_hack,
+ "sendRecv", -1, sendtype->is_replayable() ? sendcount : sendcount * sendtype->size(),
+ dst_hack, recvtype->is_replayable() ? recvcount : recvcount * recvtype->size(), src_hack,
encode_datatype(sendtype), encode_datatype(recvtype)));
- TRACE_smpi_send(rank, rank, dst_traced, sendtag, sendcount * sendtype->size());
+ TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, sendtag, sendcount * sendtype->size());
simgrid::smpi::Request::sendrecv(sendbuf, sendcount, sendtype, dst, sendtag, recvbuf, recvcount, recvtype, src,
recvtag, comm, status);
retval = MPI_SUCCESS;
- TRACE_smpi_recv(src_traced, rank, recvtag);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_recv(src_traced, my_proc_id, recvtag);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
simgrid::smpi::Status::empty(status);
retval = MPI_SUCCESS;
} else {
- int rank = ((*request)->comm() != MPI_COMM_NULL) ? smpi_process()->index() : -1;
+ int my_proc_id = ((*request)->comm() != MPI_COMM_NULL) ? simgrid::s4u::Actor::self()->getPid() : -1;
- TRACE_smpi_testing_in(rank);
+ TRACE_smpi_testing_in(my_proc_id);
*flag = simgrid::smpi::Request::test(request,status);
- TRACE_smpi_testing_out(rank);
+ TRACE_smpi_testing_out(my_proc_id);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
} else if (*request == MPI_REQUEST_NULL) {
retval = MPI_SUCCESS;
} else {
- int rank = (*request)->comm() != MPI_COMM_NULL ? smpi_process()->index() : -1;
+    int my_proc_id = (*request)->comm() != MPI_COMM_NULL
+                         ? simgrid::s4u::Actor::self()->getPid()
+                         : -1; // TODO: cheinrich: Check if this is correct or if it should be MPI_UNDEFINED
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("wait"));
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("wait"));
simgrid::smpi::Request::wait(request, status);
retval = MPI_SUCCESS;
//the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
trace_smpi_recv_helper(request, status);
}
smpi_bench_end();
- int rank_traced = smpi_process()->index(); // FIXME: In PMPI_Wait, we check if the comm is null?
+ int rank_traced = simgrid::s4u::Actor::self()->getPid(); // FIXME: In PMPI_Wait, we check if the comm is null?
TRACE_smpi_comm_in(rank_traced, __FUNCTION__, new simgrid::instr::CpuTIData("waitAny", static_cast<double>(count)));
*index = simgrid::smpi::Request::waitany(count, requests, status);
{
smpi_bench_end();
- int rank_traced = smpi_process()->index(); // FIXME: In PMPI_Wait, we check if the comm is null?
+ int rank_traced = simgrid::s4u::Actor::self()->getPid(); // FIXME: In PMPI_Wait, we check if the comm is null?
TRACE_smpi_comm_in(rank_traced, __FUNCTION__, new simgrid::instr::CpuTIData("waitAll", static_cast<double>(count)));
int retval = simgrid::smpi::Request::waitall(count, requests, status);
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int rank = smpi_process()->index();
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_fence"));
- retval = win->fence(assert);
- TRACE_smpi_comm_out(rank);
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_fence"));
+ retval = win->fence(assert);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
((not origin_datatype->is_valid()) || (not target_datatype->is_valid()))) {
retval = MPI_ERR_TYPE;
} else {
- int rank = smpi_process()->index();
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Get", target_rank,
- origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("Get", target_rank, origin_datatype->is_replayable()
+ ? origin_count
+ : origin_count * origin_datatype->size(),
encode_datatype(origin_datatype)));
retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
target_datatype);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if(request == nullptr){
retval = MPI_ERR_REQUEST;
} else {
- int rank = smpi_process()->index();
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Rget", target_rank,
- origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
- encode_datatype(origin_datatype)));
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData(
+ "Rget", target_rank,
+ origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
+ encode_datatype(origin_datatype)));
retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
target_datatype, request);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
((not origin_datatype->is_valid()) || (not target_datatype->is_valid()))) {
retval = MPI_ERR_TYPE;
} else {
- int rank = smpi_process()->index();
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
- int dst_traced = group->index(target_rank);
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Put", dst_traced,
- origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
+ int dst_traced = group->actor(target_rank)->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("Put", target_rank, origin_datatype->is_replayable()
+ ? origin_count
+ : origin_count * origin_datatype->size(),
encode_datatype(origin_datatype)));
- TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size());
+ TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, SMPI_RMA_TAG, origin_count * origin_datatype->size());
retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
target_datatype);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if(request == nullptr){
retval = MPI_ERR_REQUEST;
} else {
- int rank = smpi_process()->index();
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
- int dst_traced = group->index(target_rank);
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Rput", dst_traced,
- origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
- encode_datatype(origin_datatype)));
- TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size());
+ int dst_traced = group->actor(target_rank)->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData(
+ "Rput", target_rank,
+ origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
+ encode_datatype(origin_datatype)));
+ TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, SMPI_RMA_TAG, origin_count * origin_datatype->size());
retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
target_datatype, request);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if (op == MPI_OP_NULL) {
retval = MPI_ERR_OP;
} else {
- int rank = smpi_process()->index();
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Accumulate", target_rank,
- origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
- encode_datatype(origin_datatype)));
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData(
+ "Accumulate", target_rank,
+ origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
+ encode_datatype(origin_datatype)));
retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
target_datatype, op);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if(request == nullptr){
retval = MPI_ERR_REQUEST;
} else {
- int rank = smpi_process()->index();
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Raccumulate", target_rank,
- origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
- encode_datatype(origin_datatype)));
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData(
+ "Raccumulate", target_rank,
+ origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
+ encode_datatype(origin_datatype)));
retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
target_datatype, op, request);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if (op == MPI_OP_NULL) {
retval = MPI_ERR_OP;
} else {
- int rank = smpi_process()->index();
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Get_accumulate", target_rank,
- target_datatype->is_replayable() ? target_count : target_count * target_datatype->size(),
- encode_datatype(target_datatype)));
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData(
+ "Get_accumulate", target_rank,
+ target_datatype->is_replayable() ? target_count : target_count * target_datatype->size(),
+ encode_datatype(target_datatype)));
retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
result_count, result_datatype, target_rank, target_disp,
target_count, target_datatype, op);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if(request == nullptr){
retval = MPI_ERR_REQUEST;
} else {
- int rank = smpi_process()->index();
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Rget_accumulate", target_rank,
- target_datatype->is_replayable() ? target_count : target_count * target_datatype->size(),
- encode_datatype(target_datatype)));
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData(
+ "Rget_accumulate", target_rank,
+ target_datatype->is_replayable() ? target_count : target_count * target_datatype->size(),
+ encode_datatype(target_datatype)));
retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
result_count, result_datatype, target_rank, target_disp,
target_count, target_datatype, op, request);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if ((datatype == MPI_DATATYPE_NULL) || (not datatype->is_valid())) {
retval = MPI_ERR_TYPE;
} else {
- int rank = smpi_process()->index();
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Compare_and_swap", target_rank,
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("Compare_and_swap", target_rank,
datatype->is_replayable() ? 1 : datatype->size(),
encode_datatype(datatype)));
retval = win->compare_and_swap(origin_addr, compare_addr, result_addr, datatype, target_rank, target_disp);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if (group==MPI_GROUP_NULL){
retval = MPI_ERR_GROUP;
} else {
- int rank = smpi_process()->index();
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_post"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_post"));
retval = win->post(group,assert);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if (group==MPI_GROUP_NULL){
retval = MPI_ERR_GROUP;
} else {
- int rank = smpi_process()->index();
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_start"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_start"));
retval = win->start(group,assert);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int rank = smpi_process()->index();
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_complete"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_complete"));
retval = win->complete();
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int rank = smpi_process()->index();
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_wait"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_wait"));
retval = win->wait();
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if (rank == MPI_PROC_NULL){
retval = MPI_SUCCESS;
} else {
- int myrank = smpi_process()->index();
- TRACE_smpi_comm_in(myrank, __func__, new simgrid::instr::NoOpTIData("Win_lock"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("Win_lock"));
retval = win->lock(lock_type,rank,assert);
- TRACE_smpi_comm_out(myrank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if (rank == MPI_PROC_NULL){
retval = MPI_SUCCESS;
} else {
- int myrank = smpi_process()->index();
- TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_unlock"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_unlock"));
retval = win->unlock(rank);
- TRACE_smpi_comm_out(myrank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int myrank = smpi_process()->index();
- TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_lock_all"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_lock_all"));
retval = win->lock_all(assert);
- TRACE_smpi_comm_out(myrank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int myrank = smpi_process()->index();
- TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_unlock_all"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_unlock_all"));
retval = win->unlock_all();
- TRACE_smpi_comm_out(myrank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if (rank == MPI_PROC_NULL){
retval = MPI_SUCCESS;
} else {
- int myrank = smpi_process()->index();
- TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush"));
retval = win->flush(rank);
- TRACE_smpi_comm_out(myrank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
} else if (rank == MPI_PROC_NULL){
retval = MPI_SUCCESS;
} else {
- int myrank = smpi_process()->index();
- TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_local"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_local"));
retval = win->flush_local(rank);
- TRACE_smpi_comm_out(myrank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int myrank = smpi_process()->index();
- TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_all"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_all"));
retval = win->flush_all();
- TRACE_smpi_comm_out(myrank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int myrank = smpi_process()->index();
- TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_local_all"));
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_local_all"));
retval = win->flush_local_all();
- TRACE_smpi_comm_out(myrank);
+ TRACE_smpi_comm_out(my_proc_id);
}
smpi_bench_begin();
return retval;
if (TRACE_is_enabled()) { \
simgrid::instr::EventType* type = simgrid::instr::Container::getRoot()->type_->getOrCreateEventType(#cat); \
\
- std::string cont_name = std::string("rank-" + std::to_string(smpi_process()->index())); \
+      std::string cont_name = "rank-" + std::to_string(simgrid::s4u::Actor::self()->getPid());                     \
type->addEntityValue(Colls::mpi_coll_##cat##_description[i].name, "1.0 1.0 1.0"); \
new simgrid::instr::NewEvent(SIMIX_get_clock(), simgrid::instr::Container::byName(cont_name), type, \
type->getEntityValue(Colls::mpi_coll_##cat##_description[i].name)); \
typedef SMPI_Dist_Graph_topology* MPIR_Dist_Graph_Topology;
XBT_PRIVATE SMPI_Process* smpi_process();
-XBT_PRIVATE SMPI_Process* smpi_process_remote(int index);
+XBT_PRIVATE SMPI_Process* smpi_process_remote(simgrid::s4u::ActorPtr actor);
XBT_PRIVATE int smpi_process_count();
-XBT_PRIVATE void smpi_deployment_register_process(const char* instance_id, int rank, int index);
+XBT_PRIVATE void smpi_deployment_register_process(const char* instance_id, int rank, simgrid::s4u::ActorPtr actor);
XBT_PRIVATE MPI_Comm* smpi_deployment_comm_world(const char* instance_id);
XBT_PRIVATE msg_bar_t smpi_deployment_finalization_barrier(const char* instance_id);
XBT_PRIVATE void smpi_deployment_cleanup_instances();
#endif
extern std::unordered_map<std::string, double> location2speedup;
+// TODO: Move this to the right location (if we keep this...)
+void smpi_add_process(simgrid::s4u::ActorPtr actor);
/** @brief Returns the last call location (filename, linenumber). Process-specific. */
extern "C" {
#include "smpi_f2c.hpp"
#include <smpi/smpi.h>
+#include <map>
#include <vector>
namespace simgrid{
* std::map here, but looking up a value there costs O(log(n)).
* For a vector, this costs O(1). We hence go with the vector.
*/
+ std::vector<simgrid::s4u::ActorPtr> rank_to_actor_map_;
+ std::map<simgrid::s4u::ActorPtr, int> actor_to_rank_map_;
std::vector<int> rank_to_index_map_;
std::vector<int> index_to_rank_map_;
+
int refcount_;
public:
explicit Group();
explicit Group(int size);
explicit Group(Group* origin);
- void set_mapping(int index, int rank);
- int index(int rank);
+ void set_mapping(simgrid::s4u::ActorPtr actor, int rank);
int rank(int index);
+ simgrid::s4u::ActorPtr actor(int rank);
+ int rank(const simgrid::s4u::ActorPtr process);
void ref();
static void unref(MPI_Group group);
int size();
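
A sketch of how the two lookup directions can be implemented on top of these
containers (illustrative only, not the actual smpi_group.cpp code; it assumes
rank_to_actor_map_ is pre-sized, e.g. by the Group(int size) constructor):

    void Group::set_mapping(simgrid::s4u::ActorPtr actor, int rank)
    {
      if (0 <= rank && rank < static_cast<int>(rank_to_actor_map_.size())) {
        rank_to_actor_map_[rank] = actor;         // rank -> actor: O(1) vector lookup
        actor_to_rank_map_.insert({actor, rank}); // actor -> rank: O(log n) map lookup
      }
    }

    simgrid::s4u::ActorPtr Group::actor(int rank)
    {
      return (0 <= rank && rank < static_cast<int>(rank_to_actor_map_.size())) ? rank_to_actor_map_[rank] : nullptr;
    }

    int Group::rank(const simgrid::s4u::ActorPtr actor)
    {
      auto iter = actor_to_rank_map_.find(actor);
      return (iter == actor_to_rank_map_.end()) ? MPI_UNDEFINED : iter->second;
    }
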
MPI_Comm comm_intra_ = MPI_COMM_NULL;
MPI_Comm* comm_world_ = nullptr;
void* data_ = nullptr; /* user data */
- int index_ = MPI_UNDEFINED;
char state_;
int sampling_ = 0; /* inside an SMPI_SAMPLE_ block? */
- char* instance_id_ = nullptr;
+ std::string instance_id_;
bool replaying_ = false; /* is the process replaying a trace */
msg_bar_t finalization_barrier_;
int return_value_ = 0;
smpi_trace_call_location_t trace_call_loc_;
- smx_actor_t process_ = nullptr;
+ simgrid::s4u::ActorPtr process_ = nullptr;
smpi_privatization_region_t privatized_region_;
#if HAVE_PAPI
/** Contains hardware data as read by PAPI **/
papi_counter_t papi_counter_data_;
#endif
public:
- explicit Process(int index, msg_bar_t barrier);
- void set_data(int index, int* argc, char*** argv);
+ explicit Process(simgrid::s4u::ActorPtr actor, msg_bar_t barrier);
+ void set_data(int* argc, char*** argv);
void finalize();
int finalized();
int initialized();
smpi_trace_call_location_t* call_location();
void set_privatized_region(smpi_privatization_region_t region);
smpi_privatization_region_t privatized_region();
- int index();
smx_mailbox_t mailbox();
smx_mailbox_t mailbox_small();
xbt_mutex_t mailboxes_mutex();
int return_value();
void set_return_value(int val);
static void init(int *argc, char ***argv);
- smx_actor_t process();
+ simgrid::s4u::ActorPtr process();
};
void smpi_execute_flops(double flops) {
XBT_DEBUG("Handle real computation time: %f flops", flops);
- smx_activity_t action = simcall_execution_start("computation", flops, 1, 0, smpi_process()->process()->host);
+ smx_activity_t action = simcall_execution_start("computation", flops, 1, 0, smpi_process()->process()->getImpl()->host);
simcall_set_category (action, TRACE_internal_smpi_get_category());
simcall_execution_wait(action);
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
void smpi_execute(double duration)
if (duration >= smpi_cpu_threshold) {
XBT_DEBUG("Sleep for %g to handle real computation time", duration);
double flops = duration * smpi_host_speed;
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_computing_in(rank, flops);
smpi_execute_flops(flops);
void smpi_bench_begin()
{
if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
if (MC_is_active() || MC_record_replay_is_active())
#if HAVE_PAPI
if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0' && TRACE_smpi_is_enabled()) {
container_t container =
- new simgrid::instr::Container(std::string("rank-") + std::to_string(smpi_process()->index()));
+ new simgrid::instr::Container(std::string("rank-") + std::to_string(simgrid::s4u::Actor::self()->getPid()));
papi_counter_t& counter_data = smpi_process()->papi_counters();
for (auto const& pair : counter_data) {
SampleLocation(bool global, const char* file, int line) : std::string(std::string(file) + ":" + std::to_string(line))
{
if (not global)
- this->append(":" + std::to_string(smpi_process()->index()));
+ this->append(":" + std::to_string(simgrid::s4u::Actor::self()->getPid()));
}
};
: name(name)
, size(max_no_processes)
, present_processes(0)
- , index(process_count)
, comm_world(comm)
, finalization_barrier(finalization_barrier)
- {
- }
+ { }
const char* name;
int size;
int present_processes;
- int index; // Badly named. This should be "no_processes_when_registering" ;)
MPI_Comm comm_world;
msg_bar_t finalization_barrier;
};
static std::map<std::string, Instance> smpi_instances;
extern int process_count; // How many processes have been allocated over all instances?
-extern int* index_to_process_data;
/** \ingroup smpi_simulation
* \brief Registers a running instance of a MPI program.
smpi_instances.insert(std::pair<std::string, Instance>(name, instance));
}
-void smpi_deployment_register_process(const char* instance_id, int rank, int index)
+void smpi_deployment_register_process(const char* instance_id, int rank, simgrid::s4u::ActorPtr actor)
{
- if (smpi_instances.empty()) { // no instance registered, we probably used smpirun.
- index_to_process_data[index]=index;
+ if (smpi_instances.empty()) // no instance registered, we probably used smpirun.
return;
- }
Instance& instance = smpi_instances.at(instance_id);
instance.present_processes++;
- index_to_process_data[index] = instance.index + rank;
- instance.comm_world->group()->set_mapping(index, rank);
+ instance.comm_world->group()->set_mapping(actor, rank);
}
MPI_Comm* smpi_deployment_comm_world(const char* instance_id)
#include "private.hpp"
#include "simgrid/s4u/Host.hpp"
#include "simgrid/s4u/Mailbox.hpp"
+#include "simgrid/s4u/forward.hpp"
#include "smpi_coll.hpp"
#include "smpi_comm.hpp"
#include "smpi_group.hpp"
};
#endif
+using simgrid::s4u::Actor;
+using simgrid::s4u::ActorPtr;
std::unordered_map<std::string, double> location2speedup;
-static simgrid::smpi::Process** process_data = nullptr;
+static std::map</*process_id*/ ActorPtr, simgrid::smpi::Process*> process_data;
int process_count = 0;
int smpi_universe_size = 0;
-int* index_to_process_data = nullptr;
extern double smpi_total_benched_time;
xbt_os_timer_t global_timer;
/**
void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t) = &smpi_comm_copy_buffer_callback;
+void smpi_add_process(ActorPtr actor)
+{
+ process_data.insert({actor, new simgrid::smpi::Process(actor, nullptr)});
+}
+
int smpi_process_count()
{
return process_count;
simgrid::smpi::Process* smpi_process()
{
- smx_actor_t me = SIMIX_process_self();
+ ActorPtr me = Actor::self();
if (me == nullptr) // This happens sometimes (eg, when linking against NS3 because it pulls openMPI...)
return nullptr;
- simgrid::msg::ActorExt* msgExt = static_cast<simgrid::msg::ActorExt*>(me->userdata);
+ simgrid::msg::ActorExt* msgExt = static_cast<simgrid::msg::ActorExt*>(me->getImpl()->userdata);
return static_cast<simgrid::smpi::Process*>(msgExt->data);
}
-simgrid::smpi::Process* smpi_process_remote(int index)
+simgrid::smpi::Process* smpi_process_remote(ActorPtr actor)
{
- return process_data[index_to_process_data[index]];
+ return process_data.at(actor);
}
MPI_Comm smpi_process_comm_self(){
}
int smpi_process_index(){
- return smpi_process()->index();
+ return simgrid::s4u::Actor::self()->getPid();
}
void * smpi_process_get_user_data(){
(static_cast<char*>(buff) < smpi_data_exe_start + smpi_data_exe_size)) {
XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
- smpi_switch_data_segment(
- static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->src_proc->userdata)->data))
- ->index());
+ smpi_switch_data_segment(Actor::self()->getPid());
tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
memcpy_private(tmpbuff, buff, private_blocks);
}
if ((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && ((char*)comm->dst_buff >= smpi_data_exe_start) &&
((char*)comm->dst_buff < smpi_data_exe_start + smpi_data_exe_size)) {
XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
- smpi_switch_data_segment(
- static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->dst_proc->userdata)->data))
- ->index());
+ smpi_switch_data_segment(Actor::self()->getPid());
}
XBT_DEBUG("Copying %zu bytes from %p to %p", buff_size, tmpbuff,comm->dst_buff);
memcpy_private(comm->dst_buff, tmpbuff, private_blocks);
}
int smpi_enabled() {
- return process_data != nullptr;
+ return not process_data.empty();
}
void smpi_global_init()
}
}
#endif
-
- if (index_to_process_data == nullptr) {
- index_to_process_data = new int[SIMIX_process_count()];
- }
-
- bool smpirun = 0;
- if (process_count == 0) { // The program has been dispatched but no other
- // SMPI instances have been registered. We're using smpirun.
- smpirun = true;
- SMPI_app_instance_register(smpi_default_instance_name, nullptr,
- SIMIX_process_count()); // This call has a side effect on process_count...
- MPI_COMM_WORLD = *smpi_deployment_comm_world(smpi_default_instance_name);
- }
- smpi_universe_size = process_count;
- process_data = new simgrid::smpi::Process*[process_count];
- for (int i = 0; i < process_count; i++) {
- if (smpirun) {
- process_data[i] = new simgrid::smpi::Process(i, smpi_deployment_finalization_barrier(smpi_default_instance_name));
- smpi_deployment_register_process(smpi_default_instance_name, i, i);
- } else {
- // TODO We can pass a nullptr here because Process::set_data() assigns the
- // barrier from the instance anyway. This is ugly and should be changed
- process_data[i] = new simgrid::smpi::Process(i, nullptr);
- }
- }
}
void smpi_global_destroy()
smpi_bench_destroy();
smpi_shared_destroy();
smpi_deployment_cleanup_instances();
- int count = smpi_process_count();
- for (int i = 0; i < count; i++) {
- if(process_data[i]->comm_self()!=MPI_COMM_NULL){
- simgrid::smpi::Comm::destroy(process_data[i]->comm_self());
+ for (auto& pair : process_data) {
+ auto& process = pair.second;
+ if (process->comm_self() != MPI_COMM_NULL) {
+ simgrid::smpi::Comm::destroy(process->comm_self());
}
- if(process_data[i]->comm_intra()!=MPI_COMM_NULL){
- simgrid::smpi::Comm::destroy(process_data[i]->comm_intra());
+ if (process->comm_intra() != MPI_COMM_NULL) {
+ simgrid::smpi::Comm::destroy(process->comm_intra());
}
- xbt_os_timer_free(process_data[i]->timer());
- xbt_mutex_destroy(process_data[i]->mailboxes_mutex());
- delete process_data[i];
+ xbt_os_timer_free(process->timer());
+ xbt_mutex_destroy(process->mailboxes_mutex());
}
- delete[] process_data;
- process_data = nullptr;
+ process_data.clear();
if (simgrid::smpi::Colls::smpi_coll_cleanup_callback != nullptr)
simgrid::smpi::Colls::smpi_coll_cleanup_callback();
xbt_os_timer_free(global_timer);
}
- delete[] index_to_process_data;
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
smpi_destroy_global_memory_segments();
smpi_free_static();
SMPI_switch_data_segment = &smpi_switch_data_segment;
+ // TODO This will not be executed in the case where smpi_main is not called,
+ // e.g., not for smpi_msg_masterslave. This should be moved to another location
+ // that is always called -- maybe close to Actor::onCreation?
simgrid::s4u::Host::onCreation.connect([](simgrid::s4u::Host& host) {
host.extension_set(new simgrid::smpi::SmpiHost(&host));
});
SIMIX_comm_set_copy_data_callback(smpi_comm_copy_buffer_callback);
smpi_init_options();
-
if (smpi_privatize_global_variables == SMPI_PRIVATIZE_DLOPEN) {
std::string executable_copy = executable;
}
+ SMPI_init();
SIMIX_launch_application(argv[2]);
+ SMPI_app_instance_register(smpi_default_instance_name, nullptr,
+ SIMIX_process_count()); // This call has a side effect on process_count...
+ MPI_COMM_WORLD = *smpi_deployment_comm_world(smpi_default_instance_name);
+ smpi_universe_size = process_count;
- SMPI_init();
/* Clean IO before the run */
fflush(stdout);
}
}
int ret = 0;
- int count = smpi_process_count();
- for (int i = 0; i < count; i++) {
- if(process_data[i]->return_value()!=0){
- ret=process_data[i]->return_value();//return first non 0 value
+  for (auto& pair : process_data) {
+    auto& process = pair.second;
+    if (process->return_value() != 0) {
+      ret = process->return_value(); // return the first non-zero value
break;
}
}
// Called either directly from the user code, or from the code called by smpirun
void SMPI_init(){
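+  // Attach an SMPI Process to every actor created from now on; this is why the
+  // examples above now call SMPI_init() before launching the application.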
+ simgrid::s4u::Actor::onCreation.connect([](simgrid::s4u::ActorPtr actor) {
+ smpi_add_process(actor);
+ });
smpi_init_options();
smpi_global_init();
smpi_check_options();
#if HAVE_PRIVATIZATION
// FIXME, cross-process support (mmap across process when necessary)
- simgrid::smpi::Process* process = smpi_process_remote(dest);
+ simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dest+1));
int current = process->privatized_region()->file_descriptor;
XBT_DEBUG("Switching data frame to the one of process %d", dest);
void* tmp =
#include "smpi_process.hpp"
#include "mc/mc.h"
#include "private.hpp"
+#include "simgrid/s4u/forward.hpp"
#include "smpi_comm.hpp"
#include "smpi_group.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/msg/msg_private.hpp"
#include "src/simix/smx_private.hpp"
+#include <sstream>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");
-extern int* index_to_process_data;
-
-#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
-
-static char *get_mailbox_name(char *str, int index)
-{
- snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int>(sizeof(int) * 2), static_cast<unsigned>(index));
- return str;
-}
-
-static char *get_mailbox_name_small(char *str, int index)
-{
- snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", static_cast<int>(sizeof(int) * 2), static_cast<unsigned>(index));
- return str;
-}
-
namespace simgrid{
namespace smpi{
-Process::Process(int index, msg_bar_t finalization_barrier)
+using simgrid::s4u::Actor;
+using simgrid::s4u::ActorPtr;
+
+Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
: finalization_barrier_(finalization_barrier)
{
- char name[MAILBOX_NAME_MAXLEN];
- mailbox_ = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, index));
- mailbox_small_ = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, index));
+ std::stringstream mailboxname, mailboxname_small;
+ process_ = actor;
+  mailboxname << "SMPI-" << process_->getPid();
+  mailboxname_small << "small-" << process_->getPid();
+ mailbox_ = simgrid::s4u::Mailbox::byName(mailboxname.str());
+ mailbox_small_ = simgrid::s4u::Mailbox::byName(mailboxname_small.str());
mailboxes_mutex_ = xbt_mutex_init();
timer_ = xbt_os_timer_new();
state_ = SMPI_UNINITIALIZED;
#endif
}
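
For the record, this changes the mailbox names from a fixed-width hex encoding
of the index to plain decimal PIDs (illustrative values for pid 42):

    /* old: get_mailbox_name(str, idx) -> "SMPI-0000002a" and "small0000002a"
     * new: "SMPI-" << getPid()        -> "SMPI-42"       and "small-42"      */
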
-void Process::set_data(int index, int* argc, char*** argv)
+void Process::set_data(int* argc, char*** argv)
{
- char* instance_id = (*argv)[1];
- comm_world_ = smpi_deployment_comm_world(instance_id);
- msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id);
- if (barrier != nullptr) // don't overwrite the current one if the instance has none
- finalization_barrier_ = barrier;
- instance_id_ = instance_id;
- index_ = index;
-
- process_ = SIMIX_process_self();
- static_cast<simgrid::msg::ActorExt*>(process_->userdata)->data = this;
-
- if (*argc > 3) {
- memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
- (*argv)[(*argc) - 1] = nullptr;
- (*argv)[(*argc) - 2] = nullptr;
- }
- (*argc)-=2;
- argc_ = argc;
- argv_ = argv;
- // set the process attached to the mailbox
- mailbox_small_->setReceiver(simgrid::s4u::Actor::self());
- XBT_DEBUG("<%d> New process in the game: %p", index_, process_);
+ instance_id_ = std::string((*argv)[1]);
+ comm_world_ = smpi_deployment_comm_world(instance_id_.c_str());
+ msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id_.c_str());
+ if (barrier != nullptr) // don't overwrite the current one if the instance has none
+ finalization_barrier_ = barrier;
+
+ process_ = simgrid::s4u::Actor::self();
+ static_cast<simgrid::msg::ActorExt*>(process_->getImpl()->userdata)->data = this;
+
+ if (*argc > 3) {
+ memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
+ (*argv)[(*argc) - 1] = nullptr;
+ (*argv)[(*argc) - 2] = nullptr;
+ }
+ (*argc) -= 2;
+ argc_ = argc;
+ argv_ = argv;
+ // set the process attached to the mailbox
+ mailbox_small_->setReceiver(process_);
+ XBT_DEBUG("<%lu> New process in the game: %p", process_->getPid(), process_.get());
}
/** @brief Prepares the current process for termination. */
void Process::finalize()
{
state_ = SMPI_FINALIZED;
- XBT_DEBUG("<%d> Process left the game", index_);
+ XBT_DEBUG("<%lu> Process left the game", process_->getPid());
- // This leads to an explosion of the search graph which cannot be reduced:
- if(MC_is_active() || MC_record_replay_is_active())
- return;
- // wait for all pending asynchronous comms to finish
- MSG_barrier_wait(finalization_barrier_);
+ // This leads to an explosion of the search graph which cannot be reduced:
+ if(MC_is_active() || MC_record_replay_is_active())
+ return;
+ // wait for all pending asynchronous comms to finish
+ MSG_barrier_wait(finalization_barrier_);
}
/** @brief Check if a process is finalized */
int Process::finalized()
{
- if (index_ != MPI_UNDEFINED)
- return (state_ == SMPI_FINALIZED);
- else
- return 0;
+ return (state_ == SMPI_FINALIZED);
}
/** @brief Check if a process is initialized */
int Process::initialized()
{
- if (index_to_process_data == nullptr){
- return false;
- } else{
- return ((index_ != MPI_UNDEFINED) && (state_ == SMPI_INITIALIZED));
- }
+  // TODO cheinrich: Check if we still need this. This should be a global
+  // condition, not a per-process one ...?
+ return (state_ == SMPI_INITIALIZED);
}
/** @brief Mark a process as initialized (=MPI_Init called) */
void Process::mark_as_initialized()
{
- if ((index_ != MPI_UNDEFINED) && (state_ != SMPI_FINALIZED))
+ if (state_ != SMPI_FINALIZED)
state_ = SMPI_INITIALIZED;
}
void Process::set_replaying(bool value){
- if ((index_ != MPI_UNDEFINED) && (state_ != SMPI_FINALIZED))
+ if (state_ != SMPI_FINALIZED)
replaying_ = value;
}
bool Process::replaying(){
- if (index_ != MPI_UNDEFINED)
- return replaying_;
- else
- return false;
+ return replaying_;
}
void Process::set_user_data(void *data)
return data_;
}
-smx_actor_t Process::process(){
+ActorPtr Process::process(){
return process_;
}
return privatized_region_;
}
-int Process::index()
-{
- return index_;
-}
-
MPI_Comm Process::comm_world()
{
return comm_world_==nullptr ? MPI_COMM_NULL : *comm_world_;
if(comm_self_==MPI_COMM_NULL){
MPI_Group group = new Group(1);
comm_self_ = new Comm(group, nullptr);
- group->set_mapping(index_, 0);
+ group->set_mapping(process_, 0);
}
return comm_self_;
}
xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
}
if (argc != nullptr && argv != nullptr) {
- smx_actor_t proc = SIMIX_process_self();
- proc->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
-
- int index = proc->pid - 1; // The maestro process has always ID 0 but we don't need that process here
-
- if(index_to_process_data == nullptr){
- index_to_process_data=static_cast<int*>(xbt_malloc(SIMIX_process_count()*sizeof(int)));
- }
+ simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
+ proc->getImpl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
char* instance_id = (*argv)[1];
try {
int rank = std::stoi(std::string((*argv)[2]));
- smpi_deployment_register_process(instance_id, rank, index);
+ smpi_deployment_register_process(instance_id, rank, proc);
} catch (std::invalid_argument& ia) {
throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
}
// cheinrich: I'm not sure what the impact of SMPI_switch_data_segment on this call is. I moved
// this up here so that I can set the privatized region before the switch.
- Process* process = smpi_process_remote(index);
+ Process* process = smpi_process_remote(proc);
+ int my_proc_id = proc->getPid();
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
/* Now using the segment index of this process */
- index = proc->segment_index;
process->set_privatized_region(smpi_init_global_memory_segment_process());
/* Done at the process's creation */
- SMPI_switch_data_segment(index);
+ SMPI_switch_data_segment(my_proc_id);
}
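+  // Note: the per-process segment_index bookkeeping is gone; the privatized
+  // data segment is now selected directly from the process id.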
- process->set_data(index, argc, argv);
+ process->set_data(argc, argv);
}
xbt_assert(smpi_process(),
"smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
#define KEY_SIZE (sizeof(int) * 2 + 1)
+using simgrid::s4u::Actor;
+
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_replay,smpi,"Trace Replay with SMPI");
int communicator_size = 0;
static std::vector<MPI_Request>* get_reqq_self()
{
- return reqq.at(smpi_process()->index());
+ return reqq.at(Actor::self()->getPid());
}
static void set_reqq_self(std::vector<MPI_Request> *mpi_request)
{
- reqq.insert({smpi_process()->index(), mpi_request});
+ reqq.insert({Actor::self()->getPid(), mpi_request});
}
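+// Note: the per-actor request queues are now keyed by PID instead of the old
+// SMPI index, so each replaying actor still gets its own vector of requests.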
//allocate a single buffer for all sends, growing it if needed
CHECK_ACTION_PARAMS(action, 1, 0)
double clock = smpi_process()->simulated_elapsed();
double flops= parse_double(action[2]);
- int rank = smpi_process()->index();
+ int my_proc_id = Actor::self()->getPid();
- TRACE_smpi_computing_in(rank, flops);
+ TRACE_smpi_computing_in(my_proc_id, flops);
smpi_execute_flops(flops);
- TRACE_smpi_computing_out(rank);
+ TRACE_smpi_computing_out(my_proc_id);
log_timed_action (action, clock);
}
else
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
- int rank = smpi_process()->index();
- int dst_traced = MPI_COMM_WORLD->group()->rank(to);
+ int my_proc_id = Actor::self()->getPid();
+ int dst_traced = MPI_COMM_WORLD->group()->actor(to)->getPid();
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::Pt2PtTIData("send", dst_traced, size, encode_datatype(MPI_CURRENT_TYPE)));
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("send", to, size, encode_datatype(MPI_CURRENT_TYPE)));
if (not TRACE_smpi_view_internals())
- TRACE_smpi_send(rank, rank, dst_traced, 0, size*MPI_CURRENT_TYPE->size());
+ TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, 0, size * MPI_CURRENT_TYPE->size());
Request::send(nullptr, size, MPI_CURRENT_TYPE, to , 0, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
log_timed_action(action, clock);
}
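+// Tracing note: the TIData above now records the trace-level rank ("to"),
+// while TRACE_smpi_send() is fed process ids (my_proc_id, dst_traced).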
else
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
- int rank = smpi_process()->index();
- int dst_traced = MPI_COMM_WORLD->group()->rank(to);
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::Pt2PtTIData("Isend", dst_traced, size, encode_datatype(MPI_CURRENT_TYPE)));
+ int my_proc_id = Actor::self()->getPid();
+ int dst_traced = MPI_COMM_WORLD->group()->actor(to)->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("Isend", to, size, encode_datatype(MPI_CURRENT_TYPE)));
if (not TRACE_smpi_view_internals())
- TRACE_smpi_send(rank, rank, dst_traced, 0, size*MPI_CURRENT_TYPE->size());
+ TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, 0, size * MPI_CURRENT_TYPE->size());
MPI_Request request = Request::isend(nullptr, size, MPI_CURRENT_TYPE, to, 0, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
get_reqq_self()->push_back(request);
else
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
- int rank = smpi_process()->index();
- int src_traced = MPI_COMM_WORLD->group()->rank(from);
+ int my_proc_id = Actor::self()->getPid();
+ int src_traced = MPI_COMM_WORLD->group()->actor(from)->getPid();
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::Pt2PtTIData("recv", src_traced, size, encode_datatype(MPI_CURRENT_TYPE)));
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("recv", from, size, encode_datatype(MPI_CURRENT_TYPE)));
//unknown size from the receiver point of view
if (size <= 0.0) {
Request::recv(nullptr, size, MPI_CURRENT_TYPE, from, 0, MPI_COMM_WORLD, &status);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
if (not TRACE_smpi_view_internals()) {
- TRACE_smpi_recv(src_traced, rank, 0);
+ TRACE_smpi_recv(src_traced, my_proc_id, 0);
}
log_timed_action (action, clock);
else
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
- int rank = smpi_process()->index();
- int src_traced = MPI_COMM_WORLD->group()->rank(from);
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::Pt2PtTIData("Irecv", src_traced, size, encode_datatype(MPI_CURRENT_TYPE)));
+ int my_proc_id = Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("Irecv", from, size, encode_datatype(MPI_CURRENT_TYPE)));
MPI_Status status;
//unknown size from the receiver point of view
if (size <= 0.0) {
MPI_Request request = Request::irecv(nullptr, size, MPI_CURRENT_TYPE, from, 0, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
get_reqq_self()->push_back(request);
log_timed_action (action, clock);
//Different times in the traced application and the replayed version may lead to this.
//In this case, ignore the extra calls.
if(request!=nullptr){
- int rank = smpi_process()->index();
- TRACE_smpi_testing_in(rank);
+ int my_proc_id = Actor::self()->getPid();
+ TRACE_smpi_testing_in(my_proc_id);
int flag = Request::test(&request, &status);
/* push back request in vector to be caught by a subsequent wait. if the test did succeed, the request is now nullptr.*/
get_reqq_self()->push_back(request);
- TRACE_smpi_testing_out(rank);
+ TRACE_smpi_testing_out(my_proc_id);
}
log_timed_action (action, clock);
}
if (count_requests>0) {
MPI_Status status[count_requests];
- int rank_traced = smpi_process()->index();
- TRACE_smpi_comm_in(rank_traced, __FUNCTION__, new simgrid::instr::Pt2PtTIData("waitAll", -1, count_requests, ""));
- int recvs_snd[count_requests];
- int recvs_rcv[count_requests];
- for (unsigned int i = 0; i < count_requests; i++) {
- const auto& req = (*get_reqq_self())[i];
- if (req && (req->flags () & RECV)){
- recvs_snd[i]=req->src();
- recvs_rcv[i]=req->dst();
- }else
- recvs_snd[i]=-100;
+ int my_proc_id_traced = Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id_traced, __FUNCTION__,
+ new simgrid::instr::Pt2PtTIData("waitAll", -1, count_requests, ""));
+ int recvs_snd[count_requests];
+ int recvs_rcv[count_requests];
+ for (unsigned int i = 0; i < count_requests; i++) {
+ const auto& req = (*get_reqq_self())[i];
+ if (req && (req->flags() & RECV)) {
+ recvs_snd[i] = req->src();
+ recvs_rcv[i] = req->dst();
+ } else
+ recvs_snd[i] = -100;
}
Request::waitall(count_requests, &(*get_reqq_self())[0], status);
if (recvs_snd[i]!=-100)
TRACE_smpi_recv(recvs_snd[i], recvs_rcv[i],0);
}
- TRACE_smpi_comm_out(rank_traced);
+ TRACE_smpi_comm_out(my_proc_id_traced);
}
log_timed_action (action, clock);
}
static void action_barrier(const char *const *action){
double clock = smpi_process()->simulated_elapsed();
- int rank = smpi_process()->index();
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("barrier"));
+ int my_proc_id = Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("barrier"));
Colls::barrier(MPI_COMM_WORLD);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
log_timed_action (action, clock);
}
MPI_CURRENT_TYPE=decode_datatype(action[4]);
}
- int rank = smpi_process()->index();
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->index(root), -1.0, size, -1,
- encode_datatype(MPI_CURRENT_TYPE), ""));
+ int my_proc_id = Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(root)->getPid(), -1.0, size,
+ -1, encode_datatype(MPI_CURRENT_TYPE), ""));
void *sendbuf = smpi_get_tmp_sendbuffer(size* MPI_CURRENT_TYPE->size());
Colls::bcast(sendbuf, size, MPI_CURRENT_TYPE, root, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
log_timed_action (action, clock);
}
MPI_CURRENT_TYPE=decode_datatype(action[5]);
}
- int rank = smpi_process()->index();
- TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->index(root), comp_size,
+ int my_proc_id = Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(root)->getPid(), comp_size,
comm_size, -1, encode_datatype(MPI_CURRENT_TYPE), ""));
void *recvbuf = smpi_get_tmp_sendbuffer(comm_size* MPI_CURRENT_TYPE->size());
Colls::reduce(sendbuf, recvbuf, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, root, MPI_COMM_WORLD);
smpi_execute_flops(comp_size);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
log_timed_action (action, clock);
}
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
double clock = smpi_process()->simulated_elapsed();
- int rank = smpi_process()->index();
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::CollTIData("allReduce", -1, comp_size, comm_size, -1,
- encode_datatype(MPI_CURRENT_TYPE), ""));
+ int my_proc_id = Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::CollTIData("allReduce", -1, comp_size, comm_size, -1,
+ encode_datatype(MPI_CURRENT_TYPE), ""));
void *recvbuf = smpi_get_tmp_sendbuffer(comm_size* MPI_CURRENT_TYPE->size());
void *sendbuf = smpi_get_tmp_sendbuffer(comm_size* MPI_CURRENT_TYPE->size());
Colls::allreduce(sendbuf, recvbuf, comm_size, MPI_CURRENT_TYPE, MPI_OP_NULL, MPI_COMM_WORLD);
smpi_execute_flops(comp_size);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
log_timed_action (action, clock);
}
void *send = smpi_get_tmp_sendbuffer(send_size*comm_size* MPI_CURRENT_TYPE->size());
void *recv = smpi_get_tmp_recvbuffer(recv_size*comm_size* MPI_CURRENT_TYPE2->size());
- int rank = smpi_process()->index();
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::CollTIData("allToAll", -1, -1.0, send_size, recv_size,
- encode_datatype(MPI_CURRENT_TYPE),
- encode_datatype(MPI_CURRENT_TYPE2)));
+ int my_proc_id = Actor::self()->getPid();
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::CollTIData("allToAll", -1, -1.0, send_size, recv_size,
+ encode_datatype(MPI_CURRENT_TYPE),
+ encode_datatype(MPI_CURRENT_TYPE2)));
Colls::alltoall(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
log_timed_action (action, clock);
}
Colls::gather(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, root, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(smpi_process()->index());
+ TRACE_smpi_comm_out(Actor::self()->getPid());
log_timed_action (action, clock);
}
Colls::scatter(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, root, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(smpi_process()->index());
+ TRACE_smpi_comm_out(Actor::self()->getPid());
log_timed_action(action, clock);
}
Colls::gatherv(send, send_size, MPI_CURRENT_TYPE, recv, recvcounts, disps, MPI_CURRENT_TYPE2, root, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(smpi_process()->index());
+ TRACE_smpi_comm_out(Actor::self()->getPid());
log_timed_action (action, clock);
}
Colls::scatterv(send, sendcounts, disps, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, root, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(smpi_process()->index());
+ TRACE_smpi_comm_out(Actor::self()->getPid());
log_timed_action(action, clock);
}
CHECK_ACTION_PARAMS(action, comm_size+1, 1)
int comp_size = parse_double(action[2+comm_size]);
int recvcounts[comm_size];
- int rank = smpi_process()->index();
+ int my_proc_id = Actor::self()->getPid();
int size = 0;
std::vector<int>* trace_recvcounts = new std::vector<int>;
if(action[3+comm_size])
size+=recvcounts[i];
}
- TRACE_smpi_comm_in(rank, __FUNCTION__,
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
new simgrid::instr::VarCollTIData("reduceScatter", -1, 0, nullptr, -1, trace_recvcounts,
std::to_string(comp_size), /* ugly hack to print comp_size */
encode_datatype(MPI_CURRENT_TYPE)));
Colls::reduce_scatter(sendbuf, recvbuf, recvcounts, MPI_CURRENT_TYPE, MPI_OP_NULL, MPI_COMM_WORLD);
smpi_execute_flops(comp_size);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
log_timed_action (action, clock);
}
void *sendbuf = smpi_get_tmp_sendbuffer(sendcount* MPI_CURRENT_TYPE->size());
void *recvbuf = smpi_get_tmp_recvbuffer(recvcount* MPI_CURRENT_TYPE2->size());
- int rank = smpi_process()->index();
+ int my_proc_id = Actor::self()->getPid();
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::CollTIData("allGather", -1, -1.0, sendcount, recvcount,
- encode_datatype(MPI_CURRENT_TYPE),
- encode_datatype(MPI_CURRENT_TYPE2)));
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::CollTIData("allGather", -1, -1.0, sendcount, recvcount,
+ encode_datatype(MPI_CURRENT_TYPE),
+ encode_datatype(MPI_CURRENT_TYPE2)));
Colls::allgather(sendbuf, sendcount, MPI_CURRENT_TYPE, recvbuf, recvcount, MPI_CURRENT_TYPE2, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
log_timed_action (action, clock);
}
}
void *recvbuf = smpi_get_tmp_recvbuffer(recv_sum* MPI_CURRENT_TYPE2->size());
- int rank = smpi_process()->index();
+ int my_proc_id = Actor::self()->getPid();
std::vector<int>* trace_recvcounts = new std::vector<int>;
for (int i = 0; i < comm_size; i++) // copy data to avoid bad free
trace_recvcounts->push_back(recvcounts[i]);
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::VarCollTIData(
- "allGatherV", -1, sendcount, nullptr, -1, trace_recvcounts,
- encode_datatype(MPI_CURRENT_TYPE), encode_datatype(MPI_CURRENT_TYPE2)));
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::VarCollTIData("allGatherV", -1, sendcount, nullptr, -1, trace_recvcounts,
+ encode_datatype(MPI_CURRENT_TYPE),
+ encode_datatype(MPI_CURRENT_TYPE2)));
Colls::allgatherv(sendbuf, sendcount, MPI_CURRENT_TYPE, recvbuf, recvcounts, disps, MPI_CURRENT_TYPE2,
MPI_COMM_WORLD);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
log_timed_action (action, clock);
}
else
MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
- int rank = smpi_process()->index();
+ int my_proc_id = Actor::self()->getPid();
void *sendbuf = smpi_get_tmp_sendbuffer(send_buf_size* MPI_CURRENT_TYPE->size());
void *recvbuf = smpi_get_tmp_recvbuffer(recv_buf_size* MPI_CURRENT_TYPE2->size());
recvdisps[i] = 0;
}
- TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::VarCollTIData(
- "allToAllV", -1, send_size, trace_sendcounts, recv_size, trace_recvcounts,
- encode_datatype(MPI_CURRENT_TYPE), encode_datatype(MPI_CURRENT_TYPE2)));
+ TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
+ new simgrid::instr::VarCollTIData("allToAllV", -1, send_size, trace_sendcounts, recv_size,
+ trace_recvcounts, encode_datatype(MPI_CURRENT_TYPE),
+ encode_datatype(MPI_CURRENT_TYPE2)));
Colls::alltoallv(sendbuf, sendcounts, senddisps, MPI_CURRENT_TYPE,recvbuf, recvcounts, recvdisps,
MPI_CURRENT_TYPE, MPI_COMM_WORLD);
- TRACE_smpi_comm_out(rank);
+ TRACE_smpi_comm_out(my_proc_id);
log_timed_action (action, clock);
}
smpi_process()->mark_as_initialized();
smpi_process()->set_replaying(true);
- int rank = smpi_process()->index();
- TRACE_smpi_init(rank);
- TRACE_smpi_computing_init(rank);
- TRACE_smpi_comm_in(rank, "smpi_replay_run_init", new simgrid::instr::NoOpTIData("init"));
- TRACE_smpi_comm_out(rank);
+ int my_proc_id = Actor::self()->getPid();
+ TRACE_smpi_init(my_proc_id);
+ TRACE_smpi_computing_init(my_proc_id);
+ TRACE_smpi_comm_in(my_proc_id, "smpi_replay_run_init", new simgrid::instr::NoOpTIData("init"));
+ TRACE_smpi_comm_out(my_proc_id);
xbt_replay_action_register("init", simgrid::smpi::action_init);
xbt_replay_action_register("finalize", simgrid::smpi::action_finalize);
xbt_replay_action_register("comm_size", simgrid::smpi::action_comm_size);
xbt_free(recvbuffer);
}
- TRACE_smpi_comm_in(smpi_process()->index(), "smpi_replay_run_finalize", new simgrid::instr::NoOpTIData("finalize"));
+ TRACE_smpi_comm_in(Actor::self()->getPid(), "smpi_replay_run_finalize", new simgrid::instr::NoOpTIData("finalize"));
smpi_process()->finalize();
- TRACE_smpi_comm_out(smpi_process()->index());
- TRACE_smpi_finalize(smpi_process()->index());
+ TRACE_smpi_comm_out(Actor::self()->getPid());
+ TRACE_smpi_finalize(Actor::self()->getPid());
}
/** @brief chain a replay initialization and a replay start */
simgrid::smpi::Comm mpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED;
+using simgrid::s4u::ActorPtr;
+
/* Support for cartesian topology was added, but there are 2 other types of topology: graph and dist graph. In order to
 * support them, we have to add a field SMPI_Topo_type, and replace the MPI_Topology field with a union. */
int Comm::dup(MPI_Comm* newcomm){
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
MPI_Group cp = new Group(this->group());
(*newcomm) = new Comm(cp, this->topo());
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->rank();
- return group_->rank(smpi_process()->index());
+ return group_->rank(simgrid::s4u::Actor::self());
}
void Comm::get_name (char* name, int* len)
group_root = group_out; /* Save root's group */
}
for (unsigned j = 0; j < rankmap.size(); j++) {
- int index = group->index(rankmap[j].second);
- group_out->set_mapping(index, j);
+ ActorPtr actor = group->actor(rankmap[j].second);
+ group_out->set_mapping(actor, j);
}
MPI_Request* requests = xbt_new(MPI_Request, rankmap.size());
int reqs = 0;
}
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
//identify neighbours in comm
//get the indices of all processes sharing the same simix host
- const auto& process_list = sg_host_self()->extension<simgrid::simix::Host>()->process_list;
+ auto& process_list = sg_host_self()->extension<simgrid::simix::Host>()->process_list;
int intra_comm_size = 0;
int min_index = INT_MAX; // the minimum index will be the leader
- for (auto const& actor : process_list) {
- int index = actor.pid - 1;
- if (this->group()->rank(index) != MPI_UNDEFINED) {
+ for (auto& actor : process_list) {
+ int index = actor.pid;
+ if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) { // Is this process in the current group?
intra_comm_size++;
- // the process is in the comm
if (index < min_index)
min_index = index;
}
XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
MPI_Group group_intra = new Group(intra_comm_size);
int i = 0;
- for (auto const& actor : process_list) {
- int index = actor.pid - 1;
- if(this->group()->rank(index)!=MPI_UNDEFINED){
- group_intra->set_mapping(index, i);
+ for (auto& actor : process_list) {
+ if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) {
+ group_intra->set_mapping(actor.iface(), i);
i++;
}
}
Coll_allgather_mpich::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
if(leaders_map_==nullptr){
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
//create leader_communicator
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(leader_list[i], i);
+ leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]), i);
leader_comm = new Comm(leaders_group, nullptr);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
// create intracommunicator
}else{
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(leader_list[i], i);
+ leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]), i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
leader_comm = new Comm(leaders_group, nullptr);
Coll_bcast_mpich::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
int is_blocked=1;
- int prev=this->group()->rank(comm_intra->group()->index(0));
+ int prev=this->group()->rank(comm_intra->group()->actor(0));
for (i = 1; i < my_local_size; i++) {
- int that = this->group()->rank(comm_intra->group()->index(i));
+ int that = this->group()->rank(comm_intra->group()->actor(i));
if (that != prev + 1) {
is_blocked = 0;
break;
// FIXME Handle the case of a partial shared malloc.
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
/* First check if we really have something to do */
if (recvcount > 0 && recvbuf != sendbuf) {
}
char* F2C::get_key_id(char* key, int id) {
- std::snprintf(key, KEY_SIZE, "%x_%d", static_cast<unsigned>(id), smpi_process()->index());
+ std::snprintf(key, KEY_SIZE, "%x_%lu", static_cast<unsigned>(id), simgrid::s4u::Actor::self()->getPid());
return key;
}
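+// The PID suffix makes the generated key unique per actor, so two processes
+// registering an object with the same id cannot collide.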
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
+#include "simgrid/s4u/Actor.hpp"
#include "smpi_group.hpp"
#include "smpi_comm.hpp"
#include <string>
namespace simgrid{
namespace smpi{
+using simgrid::s4u::ActorPtr;
+
Group::Group()
{
size_ = 0; /* size */
refcount_ = 1; /* refcount_: start > 0 so that this group never gets freed */
}
-Group::Group(int n) : size_(n), rank_to_index_map_(size_, MPI_UNDEFINED)
+Group::Group(int n) : size_(n), rank_to_actor_map_(size_, nullptr), rank_to_index_map_(size_, MPI_UNDEFINED), index_to_rank_map_(size_, MPI_UNDEFINED)
{
refcount_ = 1;
}
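+// During the transition, a group carries both the legacy index maps and the
+// new actor maps; set_mapping() keeps all of them consistent.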
refcount_ = 1;
rank_to_index_map_ = origin->rank_to_index_map_;
index_to_rank_map_ = origin->index_to_rank_map_;
+ rank_to_actor_map_ = origin->rank_to_actor_map_;
+ actor_to_rank_map_ = origin->actor_to_rank_map_;
}
}
-void Group::set_mapping(int index, int rank)
+void Group::set_mapping(simgrid::s4u::ActorPtr actor, int rank)
{
if (0 <= rank && rank < size_) {
+ int index = actor->getPid();
rank_to_index_map_[rank] = index;
if (index != MPI_UNDEFINED) {
if ((unsigned)index >= index_to_rank_map_.size())
index_to_rank_map_.resize(index + 1, MPI_UNDEFINED);
index_to_rank_map_[index] = rank;
}
- }
-}
-int Group::index(int rank)
-{
- int index;
- if (0 <= rank && rank < size_)
- index = rank_to_index_map_[rank];
- else
- index = MPI_UNDEFINED;
- return index;
+ rank_to_actor_map_[rank] = actor;
+ if (actor != nullptr) {
+ actor_to_rank_map_.insert({actor, rank});
+ }
+ }
}
int Group::rank(int index)
rank = index_to_rank_map_[index];
else
rank = MPI_UNDEFINED;
+
return rank;
}
+simgrid::s4u::ActorPtr Group::actor(int rank) {
+ if (0 <= rank && rank < size_)
+ return rank_to_actor_map_[rank];
+ else
+ return nullptr;
+}
+
+int Group::rank(const simgrid::s4u::ActorPtr actor) {
+ auto iterator = actor_to_rank_map_.find(actor);
+ return (iterator == actor_to_rank_map_.end()) ? MPI_UNDEFINED : (*iterator).second;
+}
+
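+// Rough usage sketch of the new mapping API (the pids below are made up):
+//   MPI_Group grp = new Group(2);
+//   grp->set_mapping(simgrid::s4u::Actor::byPid(1), 0);
+//   grp->set_mapping(simgrid::s4u::Actor::byPid(2), 1);
+//   grp->actor(1)->getPid();                   // -> 2
+//   grp->rank(simgrid::s4u::Actor::byPid(2));  // -> 1
+//   grp->rank(simgrid::s4u::Actor::byPid(7));  // -> MPI_UNDEFINED (not a member)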
void Group::ref()
{
refcount_++;
result = MPI_UNEQUAL;
} else {
for (int i = 0; i < size_; i++) {
- int index = this->index(i);
- int rank = group2->rank(index);
+ ActorPtr actor = this->actor(i);
+ int rank = group2->rank(actor);
if (rank == MPI_UNDEFINED) {
result = MPI_UNEQUAL;
break;
int Group::incl(int n, int* ranks, MPI_Group* newgroup)
{
int i=0;
- int index=0;
if (n == 0) {
*newgroup = MPI_GROUP_EMPTY;
} else if (n == size_) {
} else {
*newgroup = new Group(n);
for (i = 0; i < n; i++) {
- index = this->index(ranks[i]);
- (*newgroup)->set_mapping(index, i);
+ ActorPtr actor = this->actor(ranks[i]); // ranks[] was passed as a param!
+ (*newgroup)->set_mapping(actor, i);
}
}
return MPI_SUCCESS;
int size1 = size_;
int size2 = group2->size();
for (int i = 0; i < size2; i++) {
- int proc2 = group2->index(i);
- int proc1 = this->rank(proc2);
+ ActorPtr actor = group2->actor(i);
+ int proc1 = this->rank(actor);
if (proc1 == MPI_UNDEFINED) {
size1++;
}
*newgroup = new Group(size1);
size2 = this->size();
for (int i = 0; i < size2; i++) {
- int proc1 = this->index(i);
- (*newgroup)->set_mapping(proc1, i);
+ ActorPtr actor1 = this->actor(i);
+ (*newgroup)->set_mapping(actor1, i);
}
for (int i = size2; i < size1; i++) {
- int proc2 = group2->index(i - size2);
- (*newgroup)->set_mapping(proc2, i);
+ ActorPtr actor = group2->actor(i - size2);
+ (*newgroup)->set_mapping(actor, i);
}
}
return MPI_SUCCESS;
{
int size2 = group2->size();
for (int i = 0; i < size2; i++) {
- int proc2 = group2->index(i);
- int proc1 = this->rank(proc2);
+ ActorPtr actor = group2->actor(i);
+ int proc1 = this->rank(actor);
if (proc1 == MPI_UNDEFINED) {
size2--;
}
*newgroup = new Group(size2);
int j=0;
for (int i = 0; i < group2->size(); i++) {
- int proc2 = group2->index(i);
- int proc1 = this->rank(proc2);
+ ActorPtr actor = group2->actor(i);
+ int proc1 = this->rank(actor);
if (proc1 != MPI_UNDEFINED) {
- (*newgroup)->set_mapping(proc2, j);
+ (*newgroup)->set_mapping(actor, j);
j++;
}
}
int newsize = size_;
int size2 = size_;
for (int i = 0; i < size2; i++) {
- int proc1 = this->index(i);
- int proc2 = group2->rank(proc1);
+ ActorPtr actor = this->actor(i);
+ int proc2 = group2->rank(actor);
if (proc2 != MPI_UNDEFINED) {
newsize--;
}
} else {
*newgroup = new Group(newsize);
for (int i = 0; i < size2; i++) {
- int proc1 = this->index(i);
- int proc2 = group2->rank(proc1);
+ ActorPtr actor = this->actor(i);
+ int proc2 = group2->rank(actor);
if (proc2 == MPI_UNDEFINED) {
- (*newgroup)->set_mapping(proc1, i);
+ (*newgroup)->set_mapping(actor, i);
}
}
}
int j = 0;
for (int i = 0; i < oldsize; i++) {
if(to_exclude[i]==0){
- int index = this->index(i);
- (*newgroup)->set_mapping(index, j);
+ ActorPtr actor = this->actor(i);
+ (*newgroup)->set_mapping(actor, j);
j++;
}
}
for (int rank = ranges[i][0]; /* First */
rank >= 0 && rank < size_; /* Last */
) {
- int index = this->index(rank);
- (*newgroup)->set_mapping(index, j);
+ ActorPtr actor = this->actor(rank);
+ (*newgroup)->set_mapping(actor, j);
j++;
if(rank == ranges[i][1]){/*already last ?*/
break;
}
}
if(add==1){
- int index = this->index(oldrank);
- (*newgroup)->set_mapping(index, newrank);
+ ActorPtr actor = this->actor(oldrank);
+ (*newgroup)->set_mapping(actor, newrank);
newrank++;
}
oldrank++;
{
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){//we need to switch as the called function may silently touch global variables
XBT_DEBUG("Applying operation, switch to the right data frame ");
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
if (not smpi_process()->replaying() && *len > 0) {
MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, PERSISTENT | SEND | PREPARED);
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SEND | PREPARED);
}
MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
}
MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag,comm, PERSISTENT | ISEND | SEND | PREPARED);
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | ISEND | SEND | PREPARED);
}
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
if(op==MPI_OP_NULL){
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, src, dst, tag,
+ request = new Request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, comm->group()->actor(src)->getPid(),
+ comm->group()->actor(dst)->getPid(), tag,
comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
}else{
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag,
+ request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(),
+ comm->group()->actor(dst)->getPid(), tag,
comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);
request->op_ = op;
}
MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype,
- src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->index(src),
- smpi_process()->index(), tag, comm, PERSISTENT | RECV | PREPARED);
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV | PREPARED);
}
MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
if(op==MPI_OP_NULL){
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag, comm,
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(), comm->group()->actor(dst)->getPid(), tag, comm,
RMA | NON_PERSISTENT | RECV | PREPARED);
}else{
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag, comm,
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(), comm->group()->actor(dst)->getPid(), tag, comm,
RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
request->op_ = op;
}
MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
- src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->index(src), smpi_process()->index(), tag,
- comm, PERSISTENT | RECV | PREPARED);
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV | PREPARED);
}
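+// From here on, src_ and dst_ inside a Request hold process ids, not ranks;
+// MPI_ANY_SOURCE is kept as-is and only resolved once a message is matched.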
MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, NON_PERSISTENT | ISEND | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SEND);
request->start();
return request;
}
MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
request->start();
return request;
}
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
- src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->index(src), smpi_process()->index(),
- tag, comm, NON_PERSISTENT | RECV);
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, NON_PERSISTENT | RECV);
request->start();
return request;
}
void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, NON_PERSISTENT | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SEND);
request->start();
wait(&request, MPI_STATUS_IGNORE);
void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, NON_PERSISTENT | SSEND | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SSEND | SEND);
request->start();
wait(&request,MPI_STATUS_IGNORE);
{
MPI_Request requests[2];
MPI_Status stats[2];
- int myid=smpi_process()->index();
- if ((comm->group()->index(dst) == myid) && (comm->group()->index(src) == myid)){
+ unsigned int myid = simgrid::s4u::Actor::self()->getPid();
+ if ((comm->group()->actor(dst)->getPid() == myid) && (comm->group()->actor(src)->getPid() == myid)){
Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
return;
}
if ((flags_ & RECV) != 0) {
this->print_request("New recv");
- simgrid::smpi::Process* process = smpi_process_remote(dst_);
+ simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));
int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
real_size_=size_;
action_ = simcall_comm_irecv(
- process->process(), mailbox, buf_, &real_size_, &match_recv,
+ process->process()->getImpl(), mailbox, buf_, &real_size_, &match_recv,
process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
XBT_DEBUG("recv simcall posted");
if (async_small_thresh != 0 || (flags_ & RMA) != 0 )
xbt_mutex_release(mut);
} else { /* the RECV flag was not set, so this is a send */
- simgrid::smpi::Process* process = smpi_process_remote(dst_);
+ simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));
int rank = src_;
if (TRACE_smpi_view_internals()) {
TRACE_smpi_send(rank, rank, dst_, tag_, size_);
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
real_size_=size_;
action_ = simcall_comm_isend(
- simgrid::s4u::Actor::byPid(src_ + 1)->getImpl(), mailbox, size_, -1.0, buf, real_size_, &match_send,
+ simgrid::s4u::Actor::byPid(src_)->getImpl(), mailbox, size_, -1.0, buf, real_size_, &match_send,
&xbt_free_f, // how to free the userdata if a detached send fails
not process->replaying() ? smpi_comm_copy_data_callback : &smpi_comm_null_copy_buffer_callback, this,
// detach if msg size < eager/rdv switch limit
static int nsleeps = 1;
double speed = simgrid::s4u::Actor::self()->getHost()->getSpeed();
double maxrate = xbt_cfg_get_double("smpi/iprobe-cpu-usage");
- MPI_Request request = new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
- comm->group()->index(source), comm->rank(), tag, comm, PERSISTENT | RECV);
+ MPI_Request request = new Request(
+ nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV);
if (smpi_iprobe_sleep > 0) {
smx_activity_t iprobe_sleep = simcall_execution_start(
"iprobe", /* flops to executek*/ nsleeps * smpi_iprobe_sleep * speed * maxrate, /* priority */ 1.0,
- /* performance bound */ maxrate * speed, smpi_process()->process()->host);
+ /* performance bound */ maxrate * speed, smpi_process()->process()->getImpl()->host);
simcall_execution_wait(iprobe_sleep);
}
// behave like a receive, but don't do it
static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
XBT_VERB("Privatization : We are unserializing to a zone in global memory Switch data segment ");
- smpi_switch_data_segment(smpi_process()->index());
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
if(datatype->flags() & DT_FLAG_DERIVED){
}
if (TRACE_smpi_view_internals() && ((req->flags_ & RECV) != 0)){
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
TRACE_smpi_recv(src_traced, rank,req->tag_);
}
oldGroup = comm_old->group();
newGroup = new Group(newSize);
for (int i = 0 ; i < newSize ; i++) {
- newGroup->set_mapping(oldGroup->index(i), i);
+ newGroup->set_mapping(oldGroup->actor(i), i);
}
nnodes_ = newSize;
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_rma, smpi, "Logging specific to SMPI (RMA operations)");
+using simgrid::s4u::Actor;
+
namespace simgrid{
namespace smpi{
std::unordered_map<int, smpi_key_elem> Win::keyvals_;
Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, int allocated, int dynamic): base_(base), size_(size), disp_unit_(disp_unit), assert_(0), info_(info), comm_(comm), allocated_(allocated), dynamic_(dynamic){
int comm_size = comm->size();
- rank_ = comm->rank();
+ rank_ = comm->rank();
XBT_DEBUG("Creating window");
if(info!=MPI_INFO_NULL)
info->ref();
- name_ = nullptr;
- opened_ = 0;
- group_ = MPI_GROUP_NULL;
- requests_ = new std::vector<MPI_Request>();
- mut_=xbt_mutex_init();
- lock_mut_=xbt_mutex_init();
- atomic_mut_=xbt_mutex_init();
- connected_wins_ = new MPI_Win[comm_size];
+ name_ = nullptr;
+ opened_ = 0;
+ group_ = MPI_GROUP_NULL;
+ requests_ = new std::vector<MPI_Request>();
+ mut_ = xbt_mutex_init();
+ lock_mut_ = xbt_mutex_init();
+ atomic_mut_ = xbt_mutex_init();
+ connected_wins_ = new MPI_Win[comm_size];
connected_wins_[rank_] = this;
- count_ = 0;
+ count_ = 0;
if(rank_==0){
bar_ = MSG_barrier_init(comm_size);
}
comm_->remove_rma_win(this);
Colls::barrier(comm_);
- int rank=comm_->rank();
- if(rank == 0)
+ if (rank_ == 0)
MSG_barrier_destroy(bar_);
xbt_mutex_destroy(mut_);
xbt_mutex_destroy(lock_mut_);
return MPI_ERR_ARG;
void* recv_addr = static_cast<void*> ( static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
- XBT_DEBUG("Entering MPI_Put to %d", target_rank);
- if(target_rank != comm_->rank()){
- //prepare send_request
- MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype, smpi_process()->index(),
- comm_->group()->index(target_rank), SMPI_RMA_TAG+1, comm_, MPI_OP_NULL);
+ if (target_rank != comm_->rank()) { // This is not for myself, so we need to send messages
+ XBT_DEBUG("Entering MPI_Put to remote rank %d", target_rank);
+ // prepare send_request
+ MPI_Request sreq =
+ // TODO cheinrich Check for rank / pid conversion
+ Request::rma_send_init(origin_addr, origin_count, origin_datatype, comm_->rank(), target_rank, SMPI_RMA_TAG + 1,
+ comm_, MPI_OP_NULL);
//prepare receiver request
- MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, smpi_process()->index(),
- comm_->group()->index(target_rank), SMPI_RMA_TAG+1, recv_win->comm_, MPI_OP_NULL);
+ // TODO cheinrich Check for rank / pid conversion
+ MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, recv_win->comm_->rank(),
+ target_rank, SMPI_RMA_TAG + 1, recv_win->comm_, MPI_OP_NULL);
//start send
sreq->start();
xbt_mutex_release(recv_win->mut_);
}else{
+ XBT_DEBUG("Entering MPI_Put from myself to myself, rank %d", target_rank);
Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype);
if(request!=nullptr)
*request = MPI_REQUEST_NULL;
if(target_rank != comm_->rank()){
//prepare send_request
- MPI_Request sreq = Request::rma_send_init(send_addr, target_count, target_datatype,
- comm_->group()->index(target_rank), smpi_process()->index(), SMPI_RMA_TAG+2, send_win->comm_,
- MPI_OP_NULL);
+ MPI_Request sreq = Request::rma_send_init(send_addr, target_count, target_datatype, target_rank,
+ send_win->comm_->rank(), SMPI_RMA_TAG + 2, send_win->comm_, MPI_OP_NULL);
//prepare receiver request
- MPI_Request rreq = Request::rma_recv_init(origin_addr, origin_count, origin_datatype,
- comm_->group()->index(target_rank), smpi_process()->index(), SMPI_RMA_TAG+2, comm_,
- MPI_OP_NULL);
+ MPI_Request rreq = Request::rma_recv_init(
+ origin_addr, origin_count, origin_datatype, target_rank,
+ comm_->rank(), // TODO cheinrich Check here if comm_->rank() and above send_win->comm_->rank() are correct
+ SMPI_RMA_TAG + 2, comm_, MPI_OP_NULL);
//start the send, with another process than us as sender.
sreq->start();
int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request)
{
-
+ XBT_DEBUG("Entering MPI_Win_Accumulate");
//get receiver pointer
MPI_Win recv_win = connected_wins_[target_rank];
//As the tag will be used for ordering of the operations, subtract count from it (to avoid collisions with other SMPI tags, SMPI_RMA_TAG is set below all the other ones we use)
//prepare send_request
- MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype,
- smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG-3-count_, comm_, op);
+ MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype, comm_->rank(), target_rank,
+ SMPI_RMA_TAG - 3 - count_, comm_, op);
- //prepare receiver request
- MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype,
- smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG-3-count_, recv_win->comm_, op);
+ // prepare receiver request
+ MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, recv_win->comm_->rank(),
+ recv_win->comm_->group()->rank(comm_->group()->actor(target_rank)), SMPI_RMA_TAG - 3 - count_, recv_win->comm_, op);
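+  // The nested lookup above translates the target's rank in comm_ into its
+  // rank in the target window's communicator, going through the shared actor.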
- count_++;
+ count_++;
- //start send
- sreq->start();
- //push request to receiver's win
- xbt_mutex_acquire(recv_win->mut_);
- recv_win->requests_->push_back(rreq);
- rreq->start();
- xbt_mutex_release(recv_win->mut_);
+ // start send
+ sreq->start();
+ // push request to receiver's win
+ xbt_mutex_acquire(recv_win->mut_);
+ recv_win->requests_->push_back(rreq);
+ rreq->start();
+ xbt_mutex_release(recv_win->mut_);
- if(request!=nullptr){
- *request=sreq;
- }else{
- xbt_mutex_acquire(mut_);
- requests_->push_back(sreq);
- xbt_mutex_release(mut_);
- }
+ if (request != nullptr) {
+ *request = sreq;
+ } else {
+ xbt_mutex_acquire(mut_);
+ requests_->push_back(sreq);
+ xbt_mutex_release(mut_);
+ }
+ XBT_DEBUG("Leaving MPI_Win_Accumulate");
return MPI_SUCCESS;
}
}
int Win::start(MPI_Group group, int assert){
- /* From MPI forum advices
- The call to MPI_WIN_COMPLETE does not return until the put call has completed at the origin; and the target window
- will be accessed by the put operation only after the call to MPI_WIN_START has matched a call to MPI_WIN_POST by
- the target process. This still leaves much choice to implementors. The call to MPI_WIN_START can block until the
- matching call to MPI_WIN_POST occurs at all target processes. One can also have implementations where the call to
- MPI_WIN_START is nonblocking, but the call to MPI_PUT blocks until the matching call to MPI_WIN_POST occurred; or
- implementations where the first two calls are nonblocking, but the call to MPI_WIN_COMPLETE blocks until the call
- to MPI_WIN_POST occurred; or even implementations where all three calls can complete before any target process
- called MPI_WIN_POST --- the data put must be buffered, in this last case, so as to allow the put to complete at the
- origin ahead of its completion at the target. However, once the call to MPI_WIN_POST is issued, the sequence above
- must complete, without further dependencies. */
+  /* From the MPI forum's advice:
+ The call to MPI_WIN_COMPLETE does not return until the put call has completed at the origin; and the target window
+ will be accessed by the put operation only after the call to MPI_WIN_START has matched a call to MPI_WIN_POST by
+ the target process. This still leaves much choice to implementors. The call to MPI_WIN_START can block until the
+ matching call to MPI_WIN_POST occurs at all target processes. One can also have implementations where the call to
+ MPI_WIN_START is nonblocking, but the call to MPI_PUT blocks until the matching call to MPI_WIN_POST occurred; or
+ implementations where the first two calls are nonblocking, but the call to MPI_WIN_COMPLETE blocks until the call
+ to MPI_WIN_POST occurred; or even implementations where all three calls can complete before any target process
+ called MPI_WIN_POST --- the data put must be buffered, in this last case, so as to allow the put to complete at the
+ origin ahead of its completion at the target. However, once the call to MPI_WIN_POST is issued, the sequence above
+ must complete, without further dependencies. */
//naive, blocking implementation.
- int i = 0;
- int j = 0;
- int size = group->size();
- MPI_Request* reqs = xbt_new0(MPI_Request, size);
-
- while (j != size) {
- int src = group->index(j);
- if (src != smpi_process()->index() && src != MPI_UNDEFINED) {
- reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, MPI_COMM_WORLD);
- i++;
- }
- j++;
+ int i = 0;
+ int j = 0;
+ int size = group->size();
+ MPI_Request* reqs = xbt_new0(MPI_Request, size);
+
+ XBT_DEBUG("Entering MPI_Win_Start");
+ while (j != size) {
+ int src = comm_->group()->rank(group->actor(j));
+    if (src != rank_ && src != MPI_UNDEFINED) { // TODO cheinrich: the MPI_UNDEFINED check should be unnecessary here
+ reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, comm_);
+ i++;
+ }
+ j++;
}
- size=i;
+ size = i;
Request::startall(size, reqs);
Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
- for(i=0;i<size;i++){
+ for (i = 0; i < size; i++) {
Request::unref(&reqs[i]);
}
xbt_free(reqs);
opened_++; //we're open for business !
group_=group;
group->ref();
+ XBT_DEBUG("Leaving MPI_Win_Start");
return MPI_SUCCESS;
}
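+// The start/post/complete/wait synchronization messages now travel on the
+// window's own communicator, with ranks translated from the given group,
+// instead of on MPI_COMM_WORLD with global indexes.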
int size = group->size();
MPI_Request* reqs = xbt_new0(MPI_Request, size);
+ XBT_DEBUG("Entering MPI_Win_Post");
while(j!=size){
- int dst=group->index(j);
- if(dst!=smpi_process()->index() && dst!=MPI_UNDEFINED){
- reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+4, MPI_COMM_WORLD);
+ int dst = comm_->group()->rank(group->actor(j));
+ if (dst != rank_ && dst != MPI_UNDEFINED) {
+ reqs[i] = Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG + 4, comm_);
i++;
}
j++;
opened_++; //we're open for business !
group_=group;
group->ref();
+ XBT_DEBUG("Leaving MPI_Win_Post");
return MPI_SUCCESS;
}
XBT_DEBUG("Entering MPI_Win_Complete");
int i = 0;
int j = 0;
- int size = group_->size();
+ int size = group_->size();
MPI_Request* reqs = xbt_new0(MPI_Request, size);
while(j!=size){
- int dst=group_->index(j);
- if(dst!=smpi_process()->index() && dst!=MPI_UNDEFINED){
- reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+5, MPI_COMM_WORLD);
+ int dst = comm_->group()->rank(group_->actor(j));
+ if (dst != rank_ && dst != MPI_UNDEFINED) {
+ reqs[i] = Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG + 5, comm_);
i++;
}
j++;
MPI_Request* reqs = xbt_new0(MPI_Request, size);
while(j!=size){
- int src=group_->index(j);
- if(src!=smpi_process()->index() && src!=MPI_UNDEFINED){
- reqs[i]=Request::irecv_init(nullptr, 0, MPI_CHAR, src,SMPI_RMA_TAG+5, MPI_COMM_WORLD);
+ int src = comm_->group()->rank(group_->actor(j));
+ if (src != rank_ && src != MPI_UNDEFINED) {
+ reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 5, comm_);
i++;
}
j++;
int Win::flush(int rank){
MPI_Win target_win = connected_wins_[rank];
- int finished = finish_comms(rank);
+ int finished = finish_comms(rank_);
XBT_DEBUG("Win_flush on local %d - Finished %d RMA calls", rank_, finished);
- finished = target_win->finish_comms(rank_);
+ finished = target_win->finish_comms(rank);
XBT_DEBUG("Win_flush on remote %d - Finished %d RMA calls", rank, finished);
return MPI_SUCCESS;
}
size = 0;
std::vector<MPI_Request> myreqqs;
std::vector<MPI_Request>::iterator iter = reqqs->begin();
+ int proc_id = comm_->group()->actor(rank)->getPid();
while (iter != reqqs->end()){
- if(((*iter)!=MPI_REQUEST_NULL) && (((*iter)->src() == rank) || ((*iter)->dst() == rank))){
+    // Check whether we are either the destination or the sender of this request,
+    // because we only wait for requests that we are responsible for.
+    // Also compare against the process id: src() and dst() on a request return
+    // process ids, NOT ranks (a rank only exists in the context of a communicator).
+ if (((*iter) != MPI_REQUEST_NULL) && (((*iter)->src() == proc_id) || ((*iter)->dst() == proc_id))) {
myreqqs.push_back(*iter);
iter = reqqs->erase(iter);
size++;
int main(int argc, char *argv[])
{
- int n;
+ int n, rank;
MPI_Init(&argc, &argv);
int verbose = argc <= 1;
MPI_Comm_size(MPI_COMM_WORLD, &n);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
double d = 2.0;
for (int i = 0; i < 5; i++) {
/* I want no more than n + 1 benchs (thres < 0) */
SMPI_SAMPLE_GLOBAL(n + 1, -1) {
if (verbose)
- fprintf(stderr, "(%12.6f) [rank:%d]", MPI_Wtime(), smpi_process_index());
+ fprintf(stderr, "(%12.6f) [rank:%d]", MPI_Wtime(), rank);
else
fprintf(stderr, "(0)");
fprintf(stderr, " Run the first computation. It's globally benched, "
fprintf(stderr, "(1)");
fprintf(stderr,
" [rank:%d] Run the first (locally benched) computation. It's locally benched, and I want the "
- "standard error to go below 0.1 second (count is not >0)\n", smpi_process_index());
+ "standard error to go below 0.1 second (count is not >0)\n", rank);
}
d = compute(d);
}
}
if (verbose)
- fprintf(stderr, "(%12.6f) [rank:%d] The result of the computation is: %f\n", MPI_Wtime(), smpi_process_index(), d);
+ fprintf(stderr, "(%12.6f) [rank:%d] The result of the computation is: %f\n", MPI_Wtime(), rank, d);
else
- fprintf(stderr, "(2) [rank:%d] Done.\n", smpi_process_index());
+ fprintf(stderr, "(2) [rank:%d] Done.\n", rank);
MPI_Finalize();
return 0;