/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include <climits>

#include "simgrid/s4u/Host.hpp"

#include "src/simix/smx_private.h"
#include "src/smpi/private.h"
#include "src/smpi/smpi_comm.hpp"
#include "src/smpi/smpi_coll.hpp"
#include "src/smpi/smpi_datatype.hpp"
#include "src/smpi/smpi_process.hpp"
#include "src/smpi/smpi_request.hpp"
#include "src/smpi/smpi_status.hpp"
#include "src/smpi/smpi_win.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");

simgrid::smpi::Comm mpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED = &mpi_MPI_COMM_UNINITIALIZED;
/* Support for Cartesian topology was added, but there are two other types of topology: graph and dist graph. In order
 * to support them, we have to add a field MPIR_Topo_type, and replace the MPI_Topology field by a union. */
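
/* A possible layout (sketch only; the type names below are illustrative, not existing code):
 *   typedef enum { MPIR_CART, MPIR_GRAPH, MPIR_DIST_GRAPH } MPIR_Topo_type;
 *   union MPIR_Topology_u { Topo_Cart* cart; Topo_Graph* graph; Topo_Dist_Graph* dist_graph; };
 * The communicator would then carry an MPIR_Topo_type tag telling which union member is valid. */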

static int smpi_compare_rankmap(const void *a, const void *b)
{
  const int* x = static_cast<const int*>(a);
  const int* y = static_cast<const int*>(b);
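  /* the (old rank, key) pairs are ordered as MPI_Comm_split requires: by the user-provided key,
   * ties broken by the rank in the parent communicator */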

std::unordered_map<int, smpi_key_elem> Comm::keyvals_;
int Comm::keyval_id_ = 0;

Comm::Comm(MPI_Group group, MPI_Topology topo) : group_(group), topo_(topo)
{
  topoType_ = MPI_INVALID_TOPO;
  intra_comm_ = MPI_COMM_NULL;
  leaders_comm_ = MPI_COMM_NULL;
  non_uniform_map_ = nullptr;
  leaders_map_ = nullptr;
}

void Comm::destroy(Comm* comm)
{
  if (comm == MPI_COMM_UNINITIALIZED){
    Comm::destroy(smpi_process()->comm_world());
    return;
  }
  delete comm->topo_; // there's no use count on topos
  Comm::unref(comm);
}

int Comm::dup(MPI_Comm* newcomm){
  if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process()->index());
  }
  MPI_Group cp = new Group(this->group());
  (*newcomm) = new Comm(cp, this->topo());
  int ret = MPI_SUCCESS;
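  /* MPI semantics: each attribute's copy callback decides through 'flag' whether the attribute is
   * propagated to the duplicated communicator, and may replace the stored value with 'value_out'. */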
  if(!attributes()->empty()){
    int flag; void* value_out;
    for(auto it : *attributes()){
      smpi_key_elem elem = keyvals_.at(it.first);
      if (elem != nullptr && elem->copy_fn.comm_copy_fn != MPI_NULL_COPY_FN) {
        ret = elem->copy_fn.comm_copy_fn(this, it.first, nullptr, it.second, &value_out, &flag);
        if (ret != MPI_SUCCESS) {
          Comm::destroy(*newcomm);
          *newcomm = MPI_COMM_NULL;
          return ret;
        }
        if (flag)
          (*newcomm)->attributes()->insert({it.first, value_out});
      }
    }
  }
  return ret;
}
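
/* For reference, a sketch of how the C binding is expected to drive Comm::dup (illustrative only;
 * the real wrapper lives in SMPI's PMPI layer):
 *   int PMPI_Comm_dup(MPI_Comm comm, MPI_Comm* newcomm) {
 *     if (comm == MPI_COMM_NULL)
 *       return MPI_ERR_COMM;
 *     return comm->dup(newcomm); // duplicates the group, the topology pointer and the attributes
 *   }
 */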

MPI_Group Comm::group()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->group();
  return group_;
}

MPI_Topology Comm::topo() {
  return topo_;
}

int Comm::size(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->size();
  return group_->size();
}

int Comm::rank(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->rank();
  return group_->rank(smpi_process()->index());
}

void Comm::get_name (char* name, int* len)
{
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process()->comm_world()->get_name(name, len);
    return;
  }
  if(this == MPI_COMM_WORLD) {
    strncpy(name, "WORLD", 6);
    *len = 5;
  } else
    *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", this);
}

void Comm::set_leaders_comm(MPI_Comm leaders){
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process()->comm_world()->set_leaders_comm(leaders);
    return;
  }
  leaders_comm_ = leaders;
}

void Comm::set_intra_comm(MPI_Comm leaders){
  intra_comm_ = leaders;
}

int* Comm::get_non_uniform_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_non_uniform_map();
  return non_uniform_map_;
}

int* Comm::get_leaders_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_leaders_map();
  return leaders_map_;
}

MPI_Comm Comm::get_leaders_comm(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_leaders_comm();
  return leaders_comm_;
}

MPI_Comm Comm::get_intra_comm(){
  if (this == MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD)
    return smpi_process()->comm_intra();
  else return intra_comm_;
}

int Comm::is_uniform(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->is_uniform();
  return is_uniform_;
}

int Comm::is_blocked(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->is_blocked();
  return is_blocked_;
}
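
/* is_uniform_ and is_blocked_ are computed by init_smp() below; SMP-aware collective algorithms query them
 * to decide whether the two-level (intra-node + leaders) communication scheme can be used. */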

MPI_Comm Comm::split(int color, int key)
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->split(color, key);
  int system_tag = 123;
  int* recvbuf;

  MPI_Group group_root = nullptr;
  MPI_Group group_out = nullptr;
  MPI_Group group = this->group();
  int rank = this->rank();
  int size = this->size();
  /* Gather all colors and keys on rank 0 */
  int* sendbuf = xbt_new(int, 2);
  sendbuf[0] = color;
  sendbuf[1] = key;
  if (rank == 0) {
    recvbuf = xbt_new(int, 2 * size);
  } else {
    recvbuf = nullptr;
  }
  Coll_gather_default::gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
  xbt_free(sendbuf);
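  /* rank 0 now holds every rank's (color, key) pair in recvbuf and builds one group per color below */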
  /* Do the actual job */
  if(rank == 0) {
    MPI_Group* group_snd = xbt_new(MPI_Group, size);
    int* rankmap = xbt_new(int, 2 * size);
    for (int i = 0; i < size; i++) {
      if (recvbuf[2 * i] != MPI_UNDEFINED) {
        int count = 0;
        for (int j = i + 1; j < size; j++) {
          if(recvbuf[2 * i] == recvbuf[2 * j]) {
            recvbuf[2 * j] = MPI_UNDEFINED;
            rankmap[2 * count] = j;
            rankmap[2 * count + 1] = recvbuf[2 * j + 1];
            count++;
          }
        }
        /* Add self in the group */
        recvbuf[2 * i] = MPI_UNDEFINED;
        rankmap[2 * count] = i;
        rankmap[2 * count + 1] = recvbuf[2 * i + 1];
        count++;
        qsort(rankmap, count, 2 * sizeof(int), &smpi_compare_rankmap);
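        /* rankmap now lists the members of this color sorted by key: this ordering defines the ranks of the new communicator */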
        group_out = new Group(count);
        if (i == 0)
          group_root = group_out; /* Save root's group */
        for (int j = 0; j < count; j++) {
          int index = group->index(rankmap[2 * j]);
          group_out->set_mapping(index, j);
        }
        MPI_Request* requests = xbt_new(MPI_Request, count);
        int reqs = 0;
        for (int j = 0; j < count; j++) {
          if(rankmap[2 * j] != 0) {
            group_snd[reqs] = new Group(group_out);
            requests[reqs] = Request::isend(&(group_snd[reqs]), 1, MPI_PTR, rankmap[2 * j], system_tag, this);
            reqs++;
          }
        }
        if(i != 0 && group_out != MPI_COMM_WORLD->group() && group_out != MPI_GROUP_EMPTY)
          Group::unref(group_out);
        Request::waitall(reqs, requests, MPI_STATUS_IGNORE);
        xbt_free(requests);
      }
    }
    group_out = group_root; /* exit with root's group */
  } else {
    if(color != MPI_UNDEFINED) {
      Request::recv(&group_out, 1, MPI_PTR, 0, system_tag, this, MPI_STATUS_IGNORE);
    } /* otherwise, exit with group_out == nullptr */
  }
  return group_out != nullptr ? new Comm(group_out, nullptr) : MPI_COMM_NULL;
}

void Comm::ref(){
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process()->comm_world()->ref();

void Comm::cleanup_smp(){
  if (intra_comm_ != MPI_COMM_NULL)
    Comm::unref(intra_comm_);
  if (leaders_comm_ != MPI_COMM_NULL)
    Comm::unref(leaders_comm_);
  if (non_uniform_map_ != nullptr)
    xbt_free(non_uniform_map_);
  if (leaders_map_ != nullptr)
    xbt_free(leaders_map_);
}

void Comm::unref(Comm* comm){
  if (comm == MPI_COMM_UNINITIALIZED){
    Comm::unref(smpi_process()->comm_world());
    return;
  }
  comm->refcount_--;
  Group::unref(comm->group_);
  if(comm->refcount_==0){
    comm->cleanup_attr<Comm>();
    delete comm;
  }
}
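
/* qsort() comparator: ascending order on plain ints, used below to sort the node leaders */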
static int compare_ints (const void *a, const void *b)
{
  const int *da = static_cast<const int *>(a);
  const int *db = static_cast<const int *>(b);

  return static_cast<int>(*da > *db) - static_cast<int>(*da < *db);
}

void Comm::init_smp(){
  int leader = -1;

  if (this == MPI_COMM_UNINITIALIZED)
    smpi_process()->comm_world()->init_smp();

  int comm_size = this->size();

  // If we are in replay - perform an ugly hack
  // tell SimGrid we are not in replay for a while, because we need the buffers to be copied for the following calls
  bool replaying = false; //cache data to set it back again after
  if(smpi_process()->replaying()){
    replaying = true;
    smpi_process()->set_replaying(false);
  }

  if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process()->index());
  }

  //identify neighbours in comm
  //get the indexes of all processes sharing the same simix host
  xbt_swag_t process_list = SIMIX_host_self()->extension<simgrid::simix::Host>()->process_list;
  int intra_comm_size = 0;
  int min_index = INT_MAX; //the minimum index will be the leader
  smx_actor_t actor = nullptr;
  xbt_swag_foreach(actor, process_list) {
    int index = actor->pid - 1;
    if(this->group()->rank(index) != MPI_UNDEFINED){
      intra_comm_size++;
      //the process is in the comm
      if(index < min_index)
        min_index = index;
    }
  }
  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);

  MPI_Group group_intra = new Group(intra_comm_size);
  int i = 0;
  xbt_swag_foreach(actor, process_list) {
    int index = actor->pid - 1;
    if(this->group()->rank(index) != MPI_UNDEFINED){
      group_intra->set_mapping(index, i);
      i++;
    }
  }

  MPI_Comm comm_intra = new Comm(group_intra, nullptr);
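  /* comm_intra groups the MPI processes that run on the same simulated host as this process */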
  leader = min_index;
  int* leaders_map = static_cast<int*>(xbt_malloc0(sizeof(int)*comm_size));
  int* leader_list = static_cast<int*>(xbt_malloc0(sizeof(int)*comm_size));
  for(i=0; i<comm_size; i++){
    leader_list[i] = -1;
  }
  Coll_allgather_mpich::allgather(&leader, 1, MPI_INT, leaders_map, 1, MPI_INT, this);
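  /* leaders_map[r] now gives, for every rank r of this communicator, the index of the leader of r's node */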

  if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process()->index());
  }

  if(leaders_map_ == nullptr){
    leaders_map_ = leaders_map;
  }else{
    xbt_free(leaders_map);
  }
  int leader_group_size = 0;
  for(i=0; i<comm_size; i++){
    int already_done = 0;
    for(int j=0; j<leader_group_size; j++)
      if(leaders_map_[i]==leader_list[j])
        already_done = 1;
    if(already_done == 0){
      leader_list[leader_group_size]=leaders_map_[i];
      leader_group_size++;
    }
  }
  qsort(leader_list, leader_group_size, sizeof(int), compare_ints);
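  /* leader_list now holds each node's leader exactly once, in ascending order: one entry per node spanned by this communicator */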
  MPI_Group leaders_group = new Group(leader_group_size);

  MPI_Comm leader_comm = MPI_COMM_NULL;
  if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
    //create leader_communicator
    for (i=0; i< leader_group_size;i++)
      leaders_group->set_mapping(leader_list[i], i);
    leader_comm = new Comm(leaders_group, nullptr);
    this->set_leaders_comm(leader_comm);
    this->set_intra_comm(comm_intra);
  }else{
    //create intracommunicator
    for (i=0; i< leader_group_size;i++)
      leaders_group->set_mapping(leader_list[i], i);

    if(this->get_leaders_comm()==MPI_COMM_NULL){
      leader_comm = new Comm(leaders_group, nullptr);
      this->set_leaders_comm(leader_comm);
    }else{
      leader_comm = this->get_leaders_comm();
      Group::unref(leaders_group);
    }
    smpi_process()->set_comm_intra(comm_intra);
  }

  int is_uniform = 1;

  // Are the nodes uniform ? = same number of processes per node
  int my_local_size = comm_intra->size();
  if(comm_intra->rank()==0) {
    int* non_uniform_map = xbt_new0(int,leader_group_size);
    Coll_allgather_mpich::allgather(&my_local_size, 1, MPI_INT,
                                    non_uniform_map, 1, MPI_INT, leader_comm);
    for(i=0; i < leader_group_size; i++) {
      if(non_uniform_map[0] != non_uniform_map[i]) {
        is_uniform = 0;
      }
    }
    if(is_uniform==0 && this->is_uniform()!=0){
      non_uniform_map_ = non_uniform_map;
    }else{
      xbt_free(non_uniform_map);
    }
    is_uniform_ = is_uniform;
  }
  Coll_bcast_mpich::bcast(&(is_uniform_), 1, MPI_INT, 0, comm_intra);
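  /* every process now agrees with its node leader on whether all nodes host the same number of processes */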

  if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process()->index());
  }
  // Are the ranks blocked ? = allocated contiguously on the SMP nodes
  int is_blocked = 1;
  int prev = this->group()->rank(comm_intra->group()->index(0));
  for (i = 1; i < my_local_size; i++){
    int that = this->group()->rank(comm_intra->group()->index(i));
    if(that != prev + 1)
      is_blocked = 0;
    prev = that;
  }
  int global_blocked;
  Coll_allreduce_default::allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);
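  /* MPI_LAND over all ranks: the communicator is 'blocked' only if its ranks are contiguous on every node */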
  if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){
    if(this->rank()==0)
      is_blocked_ = global_blocked;
  }else{
    is_blocked_ = global_blocked;
  }
  xbt_free(leader_list);
  if(replaying)
    smpi_process()->set_replaying(true);
}
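
/* Fortran interoperability: integer handles are translated to C communicator pointers through the F2C lookup dict */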
MPI_Comm Comm::f2c(int id) {
  if(id == -2) {
    return MPI_COMM_SELF;
  } else if(id == 0){
    return MPI_COMM_WORLD;
  } else if(F2C::f2c_lookup() != nullptr && id >= 0) {
    char key[KEY_SIZE];
    MPI_Comm tmp = static_cast<MPI_Comm>(xbt_dict_get_or_null(F2C::f2c_lookup(), get_key_id(key, id)));
    return tmp != nullptr ? tmp : MPI_COMM_NULL;
  } else {
    return MPI_COMM_NULL;
  }
}

void Comm::free_f(int id) {
  char key[KEY_SIZE];
  xbt_dict_remove(F2C::f2c_lookup(), id==0 ? get_key(key, id) : get_key_id(key, id));
}

int Comm::add_f() {
  if(F2C::f2c_lookup()==nullptr){
    F2C::set_f2c_lookup(xbt_dict_new_homogeneous(nullptr));
  }
  char key[KEY_SIZE];
  xbt_dict_set(F2C::f2c_lookup(), this==MPI_COMM_WORLD ? get_key(key, F2C::f2c_id()) : get_key_id(key, F2C::f2c_id()), this, nullptr);
  return F2C::f2c_id()-1;
}

void Comm::add_rma_win(MPI_Win win){
  rma_wins_.push_back(win);
}

void Comm::remove_rma_win(MPI_Win win){
  rma_wins_.remove(win);
}
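
/* Called at synchronization points to complete the outstanding one-sided (RMA) communications
 * of every window attached to this communicator. */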
void Comm::finish_rma_calls(){
  for(auto it : rma_wins_){
    if(it->rank()==this->rank()){ //is it ours (for MPI_COMM_WORLD)?
      int finished = it->finish_comms();
      XBT_DEBUG("Barrier for rank %d - Finished %d RMA calls", this->rank(), finished);
    }
  }
}