Comm::destroy(smpi_process()->comm_world());
return;
}
+ if(comm != MPI_COMM_WORLD)
+ comm->mark_as_deleted();
Comm::unref(comm);
}
if(info_!=MPI_INFO_NULL)
(*newcomm)->info_ = new simgrid::smpi::Info(info_);
//duplicate errhandler
- (*newcomm)->set_errhandler(errhandler_);
+ if (errhandlers_ != nullptr)//MPI_COMM_WORLD, only grab our own
+ (*newcomm)->set_errhandler(errhandlers_[this->rank()]);
+ else
+ (*newcomm)->set_errhandler(errhandler_);
return ret;
}
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->rank();
- return group_->rank(s4u::Actor::self());
+ return group_->rank(s4u::this_actor::get_pid());
}
int Comm::id() const
if(this == MPI_COMM_WORLD && name_.empty()) {
strncpy(name, "MPI_COMM_WORLD", 15);
*len = 14;
- } else if(this == MPI_COMM_SELF && name_.empty()) {
- strncpy(name, "MPI_COMM_SELF", 14);
- *len = 13;
} else {
*len = snprintf(name, MPI_MAX_NAME_STRING+1, "%s", name_.c_str());
}
}
+/** @brief Return this communicator's name as a std::string.
+ *
+ * Wraps get_name() (which handles the MPI_COMM_WORLD special case and
+ * otherwise copies name_), falling back to the generic label "MPI_Comm"
+ * when the communicator has no name set.
+ */
+std::string Comm::name() const
+{
+  int size; // out-param for get_name(); the length itself is not needed here
+  char name[MPI_MAX_NAME_STRING+1]; // +1: get_name() snprintf's up to MPI_MAX_NAME_STRING chars plus NUL
+  this->get_name(name, &size);
+  if (name[0]=='\0')
+    return std::string("MPI_Comm"); // unnamed communicator: generic default label
+  else
+    return std::string(name);
+}
+
+
void Comm::set_name (const char* name)
{
if (this == MPI_COMM_UNINITIALIZED){
MPI_Group group_root = nullptr;
MPI_Group group_out = nullptr;
- MPI_Group group = this->group();
+ const Group* group = this->group();
int myrank = this->rank();
int size = this->size();
/* Gather all colors and keys on rank 0 */
group_root = group_out; /* Save root's group */
}
for (unsigned j = 0; j < rankmap.size(); j++) {
- s4u::Actor* actor = group->actor(rankmap[j].second);
+ aid_t actor = group->actor(rankmap[j].second);
group_out->set_mapping(actor, j);
}
std::vector<MPI_Request> requests(rankmap.size());
if(comm->refcount_==0){
if(simgrid::smpi::F2C::lookup() != nullptr)
- F2C::free_f(comm->c2f());
+ F2C::free_f(comm->f2c_id());
comm->cleanup_smp();
comm->cleanup_attr<Comm>();
if (comm->info_ != MPI_INFO_NULL)
//get the indices of all processes sharing the same simix host
int intra_comm_size = 0;
int min_index = INT_MAX; // the minimum index will be the leader
- sg_host_self()->pimpl_->foreach_actor([this, &intra_comm_size, &min_index](auto& actor) {
- int index = actor.get_pid();
- if (this->group()->rank(actor.get_ciface()) != MPI_UNDEFINED) { // Is this process in the current group?
+ sg_host_self()->get_impl()->foreach_actor([this, &intra_comm_size, &min_index](auto& actor) {
+ if (this->group()->rank(actor.get_pid()) != MPI_UNDEFINED) { // Is this process in the current group?
intra_comm_size++;
+ int index = actor.get_pid();
if (index < min_index)
min_index = index;
}
XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
auto* group_intra = new Group(intra_comm_size);
int i = 0;
- sg_host_self()->pimpl_->foreach_actor([this, group_intra, &i](auto& actor) {
- if (this->group()->rank(actor.get_ciface()) != MPI_UNDEFINED) {
- group_intra->set_mapping(actor.get_ciface(), i);
+ sg_host_self()->get_impl()->foreach_actor([this, group_intra, &i](auto& actor) {
+ if (this->group()->rank(actor.get_pid()) != MPI_UNDEFINED) {
+ group_intra->set_mapping(actor.get_pid(), i);
i++;
}
});
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
//create leader_communicator
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
+ leaders_group->set_mapping(leader_list[i], i);
leader_comm = new Comm(leaders_group, nullptr, true);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
// create intracommunicator
}else{
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
+ leaders_group->set_mapping(leader_list[i], i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
leader_comm = new Comm(leaders_group, nullptr, true);
}
is_uniform_=is_uniform;
}
- bcast__scatter_LR_allgather(&(is_uniform_),1, MPI_INT, 0, comm_intra );
+ bcast__scatter_LR_allgather(&is_uniform_, 1, MPI_INT, 0, comm_intra);
if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
}
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
int is_blocked=1;
- int prev=this->group()->rank(comm_intra->group()->actor(0));
+ int prev = this->group()->rank(comm_intra->group()->actor(0));
for (i = 1; i < my_local_size; i++) {
int that = this->group()->rank(comm_intra->group()->actor(i));
if (that != prev + 1) {
}
int global_blocked;
- allreduce__default(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);
+ allreduce__default(&is_blocked, &global_blocked, 1, MPI_INT, MPI_LAND, this);
if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){
if(this->rank()==0){