/* Copyright (c) 2010-2019. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_comm.hpp"
#include "smpi_coll.hpp"
#include "smpi_datatype.hpp"
#include "smpi_request.hpp"
#include "smpi_win.hpp"
#include "smpi_info.hpp"
#include "src/smpi/include/smpi_actor.hpp"
#include "src/surf/HostImpl.hpp"

#include <algorithm> // std::sort, std::fill_n
#include <climits>   // INT_MAX
#include <utility>   // std::pair
#include <vector>    // std::vector

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");

simgrid::smpi::Comm mpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED;

/* Support for cartesian topology was added, but there are 2 other types of topology: graph and dist graph. In order to
 * support them, we have to add a field SMPI_Topo_type, and replace the MPI_Topology field by a union. */

namespace simgrid{
namespace smpi{

std::unordered_map<int, smpi_key_elem> Comm::keyvals_;
int Comm::keyval_id_=0;
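
/* Build a new communicator around an existing group. Communicator ids are
 * assigned collectively: rank 0 draws a fresh id and broadcasts it, so all
 * members end up agreeing on id_. */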
Comm::Comm(MPI_Group group, MPI_Topology topo, int smp, int in_id) : group_(group), topo_(topo),is_smp_comm_(smp), id_(in_id)
{
  errhandler_ = MPI_ERRORS_ARE_FATAL;
  //First creation of comm is done before SIMIX_run, so only do comms for others
  if(in_id==MPI_UNDEFINED && smp==0 && this->rank()!=MPI_UNDEFINED ){
    int id = 0;
    if(this->rank()==0){
      static int global_id_ = 0;
      id = global_id_;
      global_id_++;
    }
    colls::bcast(&id, 1, MPI_INT, 0, this);
    XBT_DEBUG("Communicator %p has id %d", this, id);
    id_=id;//only set here, as we don't want to change it in the middle of the bcast
  }
}

void Comm::destroy(Comm* comm)
{
  if (comm == MPI_COMM_UNINITIALIZED){
    Comm::destroy(smpi_process()->comm_world());
    return;
  }
  Comm::unref(comm);
}
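
/* Duplicate this communicator: copy the group and topology, replay the
 * attribute copy callbacks (C and Fortran flavors), and clone the info and
 * errhandler objects. */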
int Comm::dup(MPI_Comm* newcomm){
  if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(s4u::Actor::self());
  }
  MPI_Group cp = new Group(this->group());
  (*newcomm)   = new Comm(cp, this->topo());
  int ret      = MPI_SUCCESS;

  if (not attributes()->empty()) {
    int flag=0;
    void* value_out=nullptr;
    for (auto const& it : *attributes()) {
      smpi_key_elem elem = keyvals_.at(it.first);
      if (elem != nullptr) {
        if( elem->copy_fn.comm_copy_fn != MPI_NULL_COPY_FN &&
            elem->copy_fn.comm_copy_fn != MPI_COMM_DUP_FN)
          ret = elem->copy_fn.comm_copy_fn(this, it.first, elem->extra_state, it.second, &value_out, &flag);
        else if ( elem->copy_fn.comm_copy_fn_fort != MPI_NULL_COPY_FN &&
                  *(int*)*elem->copy_fn.comm_copy_fn_fort != 1){
          value_out=(int*)xbt_malloc(sizeof(int));
          elem->copy_fn.comm_copy_fn_fort(this, it.first, elem->extra_state, it.second, value_out, &flag,&ret);
        }
        if (ret != MPI_SUCCESS) {
          Comm::destroy(*newcomm);
          *newcomm = MPI_COMM_NULL;
          return ret;
        }
        if (elem->copy_fn.comm_copy_fn == MPI_COMM_DUP_FN ||
            ((elem->copy_fn.comm_copy_fn_fort != MPI_NULL_COPY_FN) && *(int*)*elem->copy_fn.comm_copy_fn_fort == 1)){
          elem->refcount++;
          (*newcomm)->attributes()->insert({it.first, it.second});
        }else if (flag){
          elem->refcount++;
          (*newcomm)->attributes()->insert({it.first, value_out});
        }
      }
    }
  }
  //duplicate info if present
  if(info_!=MPI_INFO_NULL)
    (*newcomm)->info_ = new simgrid::smpi::Info(info_);
  //duplicate errhandler
  (*newcomm)->set_errhandler(errhandler_);
  return ret;
}
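
/* Like dup(), except that the duplicated communicator uses the provided info
 * object instead of a copy of ours. */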
int Comm::dup_with_info(MPI_Info info, MPI_Comm* newcomm){
  int ret = dup(newcomm);
  if(ret != MPI_SUCCESS)
    return ret;
  if((*newcomm)->info_!=MPI_INFO_NULL){
    simgrid::smpi::Info::unref((*newcomm)->info_);
    (*newcomm)->info_=MPI_INFO_NULL;
  }
  if(info != MPI_INFO_NULL){
    info->ref();
    (*newcomm)->info_=info;
  }
  return ret;
}

MPI_Group Comm::group()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->group();
  return group_;
}

int Comm::size()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->size();
  return group_->size();
}

int Comm::rank()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->rank();
  return group_->rank(s4u::Actor::self());
}

void Comm::get_name (char* name, int* len)
{
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process()->comm_world()->get_name(name, len);
    return;
  }
  if(this == MPI_COMM_WORLD && name_.empty()) {
    strncpy(name, "MPI_COMM_WORLD", 15);
    *len = 14;
  } else if(this == MPI_COMM_SELF && name_.empty()) {
    strncpy(name, "MPI_COMM_SELF", 14);
    *len = 13;
  } else {
    *len = snprintf(name, MPI_MAX_NAME_STRING+1, "%s", name_.c_str());
  }
}

void Comm::set_name (const char* name)
{
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process()->comm_world()->set_name(name);
    return;
  }
  name_.replace (0, MPI_MAX_NAME_STRING+1, name);
}

void Comm::set_leaders_comm(MPI_Comm leaders){
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process()->comm_world()->set_leaders_comm(leaders);
    return;
  }
  leaders_comm_=leaders;
}

int* Comm::get_non_uniform_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_non_uniform_map();
  return non_uniform_map_;
}

int* Comm::get_leaders_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_leaders_map();
  return leaders_map_;
}

MPI_Comm Comm::get_leaders_comm(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_leaders_comm();
  return leaders_comm_;
}

MPI_Comm Comm::get_intra_comm(){
  if (this == MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD)
    return smpi_process()->comm_intra();
  else return intra_comm_;
}

int Comm::is_uniform(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->is_uniform();
  return is_uniform_;
}

int Comm::is_blocked(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->is_blocked();
  return is_blocked_;
}

int Comm::is_smp_comm(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->is_smp_comm();
  return is_smp_comm_;
}
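
/* MPI_Comm_split: rank 0 gathers all (color, key) pairs, builds one group per
 * color with members ordered by key, and ships each non-root member its new
 * group through point-to-point messages. */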
MPI_Comm Comm::split(int color, int key)
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->split(color, key);
  int system_tag = -123;
  int* recvbuf   = nullptr;

  MPI_Group group_root = nullptr;
  MPI_Group group_out  = nullptr;
  MPI_Group group      = this->group();
  int myrank           = this->rank();
  int size             = this->size();
  /* Gather all colors and keys on rank 0 */
  int* sendbuf = xbt_new(int, 2);
  sendbuf[0]   = color;
  sendbuf[1]   = key;
  if (myrank == 0)
    recvbuf = xbt_new(int, 2 * size);
  gather__default(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
  xbt_free(sendbuf);
  /* Do the actual job */
  if (myrank == 0) {
    MPI_Group* group_snd = xbt_new(MPI_Group, size);
    std::vector<std::pair<int, int>> rankmap;
    rankmap.reserve(size);
    for (int i = 0; i < size; i++) {
      if (recvbuf[2 * i] != MPI_UNDEFINED) {
        rankmap.clear();
        for (int j = i + 1; j < size; j++) {
          if(recvbuf[2 * i] == recvbuf[2 * j]) {
            recvbuf[2 * j] = MPI_UNDEFINED;
            rankmap.push_back({recvbuf[2 * j + 1], j});
          }
        }
        /* Add self in the group */
        recvbuf[2 * i] = MPI_UNDEFINED;
        rankmap.push_back({recvbuf[2 * i + 1], i});
        std::sort(begin(rankmap), end(rankmap));
        group_out = new Group(rankmap.size());
        if (i == 0)
          group_root = group_out; /* Save root's group */
        for (unsigned j = 0; j < rankmap.size(); j++) {
          s4u::Actor* actor = group->actor(rankmap[j].second);
          group_out->set_mapping(actor, j);
        }
        MPI_Request* requests = xbt_new(MPI_Request, rankmap.size());
        int reqs              = 0;
        for (auto const& rank : rankmap) {
          if (rank.second != 0) {
            group_snd[reqs]=new Group(group_out);
            requests[reqs] = Request::isend(&(group_snd[reqs]), 1, MPI_PTR, rank.second, system_tag, this);
            reqs++;
          }
        }
        if(i != 0 && group_out != MPI_COMM_WORLD->group() && group_out != MPI_GROUP_EMPTY)
          Group::unref(group_out);

        Request::waitall(reqs, requests, MPI_STATUS_IGNORE);
        xbt_free(requests);
      }
    }
    xbt_free(recvbuf);
    xbt_free(group_snd);
    group_out = group_root; /* exit with root's group */
  } else {
    if(color != MPI_UNDEFINED) {
      Request::recv(&group_out, 1, MPI_PTR, 0, system_tag, this, MPI_STATUS_IGNORE);
    } /* otherwise, exit with group_out == nullptr */
  }
  return group_out!=nullptr ? new Comm(group_out, nullptr) : MPI_COMM_NULL;
}
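
/* Reference counting: each Comm holds one reference on its group; unref()
 * below tears everything down once the count drops to zero. */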
void Comm::ref(){
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process()->comm_world()->ref();
    return;
  }
  group_->ref();
  refcount_++;
}

void Comm::cleanup_smp(){
  if (intra_comm_ != MPI_COMM_NULL)
    Comm::unref(intra_comm_);
  if (leaders_comm_ != MPI_COMM_NULL)
    Comm::unref(leaders_comm_);
  xbt_free(non_uniform_map_);
  delete[] leaders_map_;
}
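
/* Drop one reference on the communicator. On the last one, release the SMP
 * sub-communicators, attributes, info, errhandler and topology. */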
void Comm::unref(Comm* comm){
  if (comm == MPI_COMM_UNINITIALIZED){
    Comm::unref(smpi_process()->comm_world());
    return;
  }
  comm->refcount_--;
  Group::unref(comm->group_);

  if(comm->refcount_==0){
    comm->cleanup_smp();
    comm->cleanup_attr<Comm>();
    if (comm->info_ != MPI_INFO_NULL)
      simgrid::smpi::Info::unref(comm->info_);
    if (comm->errhandler_ != MPI_ERRHANDLER_NULL)
      simgrid::smpi::Errhandler::unref(comm->errhandler_);
    delete comm->topo_; // there's no use count on topos
    delete comm;
  }
}
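
/* Build the communicator of all processes of this group running on the local
 * host; the smallest pid among them is returned as the leader. */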
MPI_Comm Comm::find_intra_comm(int * leader){
  //get the indices of all processes sharing the same simix host
  auto& actor_list    = sg_host_self()->pimpl_->actor_list_;
  int intra_comm_size = 0;
  int min_index       = INT_MAX; // the minimum index will be the leader
  for (auto& actor : actor_list) {
    int index = actor.get_pid();
    if (this->group()->rank(actor.ciface()) != MPI_UNDEFINED) { // Is this process in the current group?
      intra_comm_size++;
      if (index < min_index)
        min_index = index;
    }
  }
  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
  MPI_Group group_intra = new Group(intra_comm_size);
  int i = 0;
  for (auto& actor : actor_list) {
    if (this->group()->rank(actor.ciface()) != MPI_UNDEFINED) {
      group_intra->set_mapping(actor.ciface(), i);
      i++;
    }
  }
  *leader = min_index;
  return new Comm(group_intra, nullptr, 1);
}
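
/* Detect the SMP topology of this communicator: build the per-node intra
 * communicator and the inter-node leaders communicator, then record whether
 * nodes are uniform (same number of processes per node) and whether ranks are
 * blocked (allocated contiguously on the nodes). */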
void Comm::init_smp(){
  int leader = -1;
  int i      = 0;

  if (this == MPI_COMM_UNINITIALIZED)
    smpi_process()->comm_world()->init_smp();

  int comm_size = this->size();

  // If we are in replay - perform an ugly hack
  // tell SimGrid we are not in replay for a while, because we need the buffers to be copied for the following calls
  bool replaying = false; //cache data to set it back again after
  if(smpi_process()->replaying()){
    replaying = true;
    smpi_process()->set_replaying(false);
  }

  if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(s4u::Actor::self());
  }
  // identify neighbors in comm
  MPI_Comm comm_intra = find_intra_comm(&leader);

  int* leaders_map = new int[comm_size];
  int* leader_list = new int[comm_size];
  std::fill_n(leaders_map, comm_size, 0);
  std::fill_n(leader_list, comm_size, -1);

  allgather__ring(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);

  if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(s4u::Actor::self());
  }

  if(leaders_map_==nullptr){
    leaders_map_= leaders_map;
  }else{
    delete[] leaders_map;
  }
  int leader_group_size = 0;
  for(i=0; i<comm_size; i++){
    int already_done = 0;
    for (int j = 0; j < leader_group_size; j++) {
      if (leaders_map_[i] == leader_list[j]) {
        already_done = 1;
      }
    }
    if (already_done == 0) {
      leader_list[leader_group_size] = leaders_map_[i];
      leader_group_size++;
    }
  }
  xbt_assert(leader_group_size > 0);
  std::sort(leader_list, leader_list + leader_group_size);

  MPI_Group leaders_group = new Group(leader_group_size);

  MPI_Comm leader_comm = MPI_COMM_NULL;
  if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
    //create leader_communicator
    for (i=0; i< leader_group_size;i++)
      leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
    leader_comm = new Comm(leaders_group, nullptr,1);
    this->set_leaders_comm(leader_comm);
    this->set_intra_comm(comm_intra);

    // create intracommunicator
  }else{
    for (i=0; i< leader_group_size;i++)
      leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);

    if(this->get_leaders_comm()==MPI_COMM_NULL){
      leader_comm = new Comm(leaders_group, nullptr,1);
      this->set_leaders_comm(leader_comm);
    }else{
      leader_comm=this->get_leaders_comm();
      Group::unref(leaders_group);
    }
    smpi_process()->set_comm_intra(comm_intra);
  }

  // Are the nodes uniform ? = same number of process/node
  int my_local_size=comm_intra->size();
  if(comm_intra->rank()==0) {
    int is_uniform       = 1;
    int* non_uniform_map = xbt_new0(int,leader_group_size);
    allgather__ring(&my_local_size, 1, MPI_INT,
        non_uniform_map, 1, MPI_INT, leader_comm);
    for(i=0; i < leader_group_size; i++) {
      if(non_uniform_map[0] != non_uniform_map[i]) {
        is_uniform = 0;
        break;
      }
    }
    if(is_uniform==0 && this->is_uniform()!=0){
      non_uniform_map_ = non_uniform_map;
    }else{
      xbt_free(non_uniform_map);
    }
    is_uniform_=is_uniform;
  }
  bcast__scatter_LR_allgather(&(is_uniform_),1, MPI_INT, 0, comm_intra );

  if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(s4u::Actor::self());
  }
  // Are the ranks blocked ? = allocated contiguously on the SMP nodes
  int is_blocked = 1;
  int prev=this->group()->rank(comm_intra->group()->actor(0));
  for (i = 1; i < my_local_size; i++) {
    int that = this->group()->rank(comm_intra->group()->actor(i));
    if (that != prev + 1) {
      is_blocked = 0;
      break;
    }
    prev = that;
  }

  int global_blocked;
  allreduce__default(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);

  if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){
    if(this->rank()==0){
      is_blocked_ = global_blocked;
    }
  }else{
    is_blocked_=global_blocked;
  }
  delete[] leader_list;

  if(replaying)
    smpi_process()->set_replaying(true);
}
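
/* Fortran-to-C handle translation: reserved ids denote the predefined
 * communicators, other ids are resolved through the F2C lookup table. */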
MPI_Comm Comm::f2c(int id) {
  if(id == -2) {
    return MPI_COMM_SELF;
  } else if(id==0){
    return MPI_COMM_WORLD;
  } else if(F2C::f2c_lookup() != nullptr && id >= 0) {
    char key[KEY_SIZE];
    const auto& lookup = F2C::f2c_lookup();
    auto comm = lookup->find(get_key(key, id));
    return comm == lookup->end() ? MPI_COMM_NULL : static_cast<MPI_Comm>(comm->second);
  } else {
    return MPI_COMM_NULL;
  }
}

void Comm::free_f(int id) {
  char key[KEY_SIZE];
  F2C::f2c_lookup()->erase(get_key(key, id));
}

void Comm::add_rma_win(MPI_Win win){
  rma_wins_.push_back(win);
}

void Comm::remove_rma_win(MPI_Win win){
  rma_wins_.remove(win);
}

void Comm::finish_rma_calls(){
  for (auto const& it : rma_wins_) {
    if(it->rank()==this->rank()){//is it ours (for MPI_COMM_WORLD)?
      int finished = it->finish_comms();
      XBT_DEBUG("Barrier for rank %d - Finished %d RMA calls",this->rank(), finished);
    }
  }
}

MPI_Info Comm::info()
{
  if (info_ == MPI_INFO_NULL)
    info_ = new Info();
  info_->ref();
  return info_;
}

void Comm::set_info(MPI_Info info)
{
  if (info_ != MPI_INFO_NULL)
    simgrid::smpi::Info::unref(info_); // release the old info, not the new one
  info_ = info;
  if (info_ != MPI_INFO_NULL)
    info_->ref();
}

MPI_Errhandler Comm::errhandler()
{
  if (errhandler_ != MPI_ERRHANDLER_NULL)
    errhandler_->ref();
  return errhandler_;
}

void Comm::set_errhandler(MPI_Errhandler errhandler)
{
  if (errhandler_ != MPI_ERRHANDLER_NULL)
    simgrid::smpi::Errhandler::unref(errhandler_);
  errhandler_ = errhandler;
  if (errhandler_ != MPI_ERRHANDLER_NULL)
    errhandler_->ref();
}
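
/* MPI_Comm_split_type: only MPI_COMM_TYPE_SHARED is implemented. Ranks passing
 * MPI_UNDEFINED still take part in the collective SMP detection, but get
 * MPI_COMM_NULL back. */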
MPI_Comm Comm::split_type(int type, int /*key*/, MPI_Info)
{
  //MPI_UNDEFINED can be given to some nodes... but we need them to still perform the smp part which is collective
  if(type != MPI_COMM_TYPE_SHARED && type != MPI_UNDEFINED){
    return MPI_COMM_NULL;
  }
  int leader = 0;
  MPI_Comm res= this->find_intra_comm(&leader);
  if(type != MPI_UNDEFINED)
    return res;
  else {
    xbt_assert(res->refcount_ == 1); // ensure the next call to Comm::destroy really frees the comm
    Comm::destroy(res);
    return MPI_COMM_NULL;
  }
}

} // namespace smpi
} // namespace simgrid