#include "src/smpi/include/smpi_actor.hpp"
#include "src/surf/HostImpl.hpp"
-#include <climits>
+#include <limits>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->rank();
- return group_->rank(s4u::Actor::self());
+ return group_->rank(s4u::this_actor::get_pid());
}
int Comm::id() const
if(this == MPI_COMM_WORLD && name_.empty()) {
strncpy(name, "MPI_COMM_WORLD", 15);
*len = 14;
- } else if(this == MPI_COMM_SELF && name_.empty()) {
- strncpy(name, "MPI_COMM_SELF", 14);
- *len = 13;
} else {
*len = snprintf(name, MPI_MAX_NAME_STRING+1, "%s", name_.c_str());
}
}
+/** @brief Return a printable name for this communicator.
+ *
+ * Thin wrapper around get_name(): copies the stored name into a local
+ * buffer and returns it as a std::string. If the buffer comes back
+ * empty (no name was ever set and this is not a predefined
+ * communicator handled by get_name), the generic fallback "MPI_Comm"
+ * is returned instead.
+ */
+std::string Comm::name() const
+{
+  int size; // filled by get_name() with the name length; not used afterwards
+  char name[MPI_MAX_NAME_STRING+1]; // +1 leaves room for the terminating NUL
+  this->get_name(name, &size);
+  if (name[0]=='\0')
+    return std::string("MPI_Comm");
+  else
+    return std::string(name);
+}
+
+
void Comm::set_name (const char* name)
{
if (this == MPI_COMM_UNINITIALIZED){
MPI_Group group_root = nullptr;
MPI_Group group_out = nullptr;
- MPI_Group group = this->group();
+ const Group* group = this->group();
int myrank = this->rank();
int size = this->size();
/* Gather all colors and keys on rank 0 */
group_root = group_out; /* Save root's group */
}
for (unsigned j = 0; j < rankmap.size(); j++) {
- s4u::Actor* actor = group->actor(rankmap[j].second);
+ aid_t actor = group->actor(rankmap[j].second);
group_out->set_mapping(actor, j);
}
std::vector<MPI_Request> requests(rankmap.size());
if(comm->refcount_==0){
if(simgrid::smpi::F2C::lookup() != nullptr)
- F2C::free_f(comm->c2f());
+ F2C::free_f(comm->f2c_id());
comm->cleanup_smp();
comm->cleanup_attr<Comm>();
if (comm->info_ != MPI_INFO_NULL)
MPI_Comm Comm::find_intra_comm(int * leader){
//get the indices of all processes sharing the same simix host
int intra_comm_size = 0;
- int min_index = INT_MAX; // the minimum index will be the leader
+ aid_t min_index = std::numeric_limits<aid_t>::max(); // the minimum index will be the leader
sg_host_self()->get_impl()->foreach_actor([this, &intra_comm_size, &min_index](auto& actor) {
- int index = actor.get_pid();
- if (this->group()->rank(actor.get_ciface()) != MPI_UNDEFINED) { // Is this process in the current group?
+ aid_t index = actor.get_pid();
+ if (this->group()->rank(index) != MPI_UNDEFINED) { // Is this process in the current group?
intra_comm_size++;
if (index < min_index)
min_index = index;
auto* group_intra = new Group(intra_comm_size);
int i = 0;
sg_host_self()->get_impl()->foreach_actor([this, group_intra, &i](auto& actor) {
- if (this->group()->rank(actor.get_ciface()) != MPI_UNDEFINED) {
- group_intra->set_mapping(actor.get_ciface(), i);
+ if (this->group()->rank(actor.get_pid()) != MPI_UNDEFINED) {
+ group_intra->set_mapping(actor.get_pid(), i);
i++;
}
});
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
//create leader_communicator
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
+ leaders_group->set_mapping(leader_list[i], i);
leader_comm = new Comm(leaders_group, nullptr, true);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
// create intracommunicator
}else{
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
+ leaders_group->set_mapping(leader_list[i], i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
leader_comm = new Comm(leaders_group, nullptr, true);
}
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
int is_blocked=1;
- int prev=this->group()->rank(comm_intra->group()->actor(0));
+ int prev = this->group()->rank(comm_intra->group()->actor(0));
for (i = 1; i < my_local_size; i++) {
int that = this->group()->rank(comm_intra->group()->actor(i));
if (that != prev + 1) {