/* Copyright (c) 2010-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include <simgrid/s4u/host.hpp>
#include "src/simix/smx_private.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");

Comm mpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED = &mpi_MPI_COMM_UNINITIALIZED;
/* Support for cartesian topology was added, but there are two other types of topology: graph and dist graph. In
 * order to support them, we have to add a field MPIR_Topo_type, and replace the MPI_Topology field by a union. */
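/* A possible sketch of that change (hypothetical, not implemented here): keep the kind in an enum and the per-kind
 * data in a union, e.g.
 *   typedef enum { MPI_CART_TOPO, MPI_GRAPH_TOPO, MPI_DIST_GRAPH_TOPO, MPI_INVALID_TOPO } MPIR_Topo_type;
 *   union MPI_Topology_union { Topo_Cart* cart; Topo_Graph* graph; Topo_Dist_Graph* dist_graph; };
 * The communicator would store the MPIR_Topo_type tag next to the union to know which member is valid;
 * Topo_Graph and Topo_Dist_Graph are assumed names used for illustration only. */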
static int smpi_compare_rankmap(const void *a, const void *b)
{
  const int* x = static_cast<const int*>(a);
  const int* y = static_cast<const int*>(b);
  /* sort (rank, key) pairs by key, breaking ties on the original rank */
  return (x[1] != y[1]) ? ((x[1] > y[1]) - (x[1] < y[1])) : ((x[0] > y[0]) - (x[0] < y[0]));
}
std::unordered_map<int, smpi_key_elem> Comm::keyvals_;
int Comm::keyval_id_ = 0;
Comm::Comm(MPI_Group group, MPI_Topology topo) : group_(group), topo_(topo)
{
  topoType_ = MPI_INVALID_TOPO;
  intra_comm_ = MPI_COMM_NULL;
  leaders_comm_ = MPI_COMM_NULL;
  non_uniform_map_ = nullptr;
  leaders_map_ = nullptr;
}
void Comm::destroy(Comm* comm)
{
  if (comm == MPI_COMM_UNINITIALIZED){
    Comm::destroy(smpi_process_comm_world());
    return;
  }
  delete comm->topo_; // there's no use count on topos
  Comm::unref(comm);
}
int Comm::dup(MPI_Comm* newcomm){
  if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  MPI_Group cp = new Group(this->group());
  (*newcomm) = new Comm(cp, this->topo());
  int ret = MPI_SUCCESS;

  if(!attributes_.empty()){
    int flag;
    void* value_out;
    for(auto it = attributes_.begin(); it != attributes_.end(); it++){
      smpi_key_elem elem = keyvals_.at((*it).first);
      if (elem != nullptr && elem->copy_fn.comm_copy_fn != MPI_NULL_COPY_FN) {
        ret = elem->copy_fn.comm_copy_fn(this, (*it).first, nullptr, (*it).second, &value_out, &flag);
        if (ret != MPI_SUCCESS) {
          Comm::destroy(*newcomm);
          *newcomm = MPI_COMM_NULL;
          return ret;
        }
        if (flag)
          (*newcomm)->attributes_.insert({(*it).first, value_out});
      }
    }
  }
  return ret;
}
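/* A hedged usage sketch (user-level view, not part of this file): attributes attached through a keyval whose copy
 * callback is not MPI_NULL_COPY_FN are propagated by MPI_Comm_dup via that callback, e.g.
 *   int key;
 *   MPI_Comm_create_keyval(MPI_COMM_DUP_FN, MPI_COMM_NULL_DELETE_FN, &key, nullptr);
 *   MPI_Comm_set_attr(MPI_COMM_WORLD, key, &my_value);
 *   MPI_Comm dup;
 *   MPI_Comm_dup(MPI_COMM_WORLD, &dup); // the copy callback decides (via its flag) whether 'dup' sees the attribute
 * Here 'key' and 'my_value' are illustrative names only. */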
MPI_Group Comm::group()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->group();
  return group_;
}
MPI_Topology Comm::topo() {
  return topo_;
}

int Comm::size()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->size();
  return group_->size();
}
int Comm::rank()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->rank();
  return group_->rank(smpi_process_index());
}
void Comm::get_name (char* name, int* len)
{
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process_comm_world()->get_name(name, len);
    return;
  }
  if(this == MPI_COMM_WORLD) {
    strncpy(name, "WORLD", 6); // copy the terminating nul as well
    *len = 5;
  } else {
    *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", this);
  }
}
void Comm::set_leaders_comm(MPI_Comm leaders){
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process_comm_world()->set_leaders_comm(leaders);
    return;
  }
  leaders_comm_ = leaders;
}

void Comm::set_intra_comm(MPI_Comm leaders){
  intra_comm_ = leaders;
}
int* Comm::get_non_uniform_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->get_non_uniform_map();
  return non_uniform_map_;
}

int* Comm::get_leaders_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->get_leaders_map();
  return leaders_map_;
}
MPI_Comm Comm::get_leaders_comm(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->get_leaders_comm();
  return leaders_comm_;
}

MPI_Comm Comm::get_intra_comm(){
  if (this == MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD)
    return smpi_process_get_comm_intra();
  else
    return intra_comm_;
}
int Comm::is_uniform(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->is_uniform();
  return is_uniform_;
}

int Comm::is_blocked(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->is_blocked();
  return is_blocked_;
}
MPI_Comm Comm::split(int color, int key)
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->split(color, key);
  int system_tag = 123;

  MPI_Group group_root = nullptr;
  MPI_Group group_out = nullptr;
  MPI_Group group = this->group();
  int rank = this->rank();
  int size = this->size();
  /* Gather all colors and keys on rank 0 */
  int* sendbuf = xbt_new(int, 2);
  recvbuf = xbt_new(int, 2 * size);
  Coll_gather_default::gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
  /* Do the actual job */
  MPI_Group* group_snd = xbt_new(MPI_Group, size);
  int* rankmap = xbt_new(int, 2 * size);
  for (int i = 0; i < size; i++) {
    if (recvbuf[2 * i] != MPI_UNDEFINED) {
      for (int j = i + 1; j < size; j++) {
        if(recvbuf[2 * i] == recvbuf[2 * j]) {
          recvbuf[2 * j] = MPI_UNDEFINED;
          rankmap[2 * count] = j;
          rankmap[2 * count + 1] = recvbuf[2 * j + 1];
      /* Add self in the group */
      recvbuf[2 * i] = MPI_UNDEFINED;
      rankmap[2 * count] = i;
      rankmap[2 * count + 1] = recvbuf[2 * i + 1];
      qsort(rankmap, count, 2 * sizeof(int), &smpi_compare_rankmap);
      group_out = new Group(count);
      group_root = group_out; /* Save root's group */
      for (int j = 0; j < count; j++) {
        int index = group->index(rankmap[2 * j]);
        group_out->set_mapping(index, j);
      MPI_Request* requests = xbt_new(MPI_Request, count);
      for (int j = 0; j < count; j++) {
        if(rankmap[2 * j] != 0) {
          group_snd[reqs] = new Group(group_out);
          requests[reqs] = Request::isend(&(group_snd[reqs]), 1, MPI_PTR, rankmap[2 * j], system_tag, this);
  if(group_out != MPI_COMM_WORLD->group() && group_out != MPI_GROUP_EMPTY)
    Group::unref(group_out);
  Request::waitall(reqs, requests, MPI_STATUS_IGNORE);
  group_out = group_root; /* exit with root's group */
  if(color != MPI_UNDEFINED) {
    Request::recv(&group_out, 1, MPI_PTR, 0, system_tag, this, MPI_STATUS_IGNORE);
  } /* otherwise, exit with group_out == nullptr */
  return group_out != nullptr ? new Comm(group_out, nullptr) : MPI_COMM_NULL;
}
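/* A hedged usage sketch of the split semantics implemented above (not part of this file): ranks passing the same
 * color end up in the same new communicator, ordered by key, and MPI_UNDEFINED yields MPI_COMM_NULL, e.g.
 *   MPI_Comm half;
 *   MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &half); // even and odd ranks get separate communicators
 * 'half' and 'rank' are illustrative names only. */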
void Comm::ref(){
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process_comm_world()->ref();
    return;
  }
  refcount_++;
}
void Comm::cleanup_smp(){
  if (intra_comm_ != MPI_COMM_NULL)
    Comm::unref(intra_comm_);
  if (leaders_comm_ != MPI_COMM_NULL)
    Comm::unref(leaders_comm_);
  if (non_uniform_map_ != nullptr)
    xbt_free(non_uniform_map_);
  if (leaders_map_ != nullptr)
    xbt_free(leaders_map_);
}
void Comm::unref(Comm* comm){
  if (comm == MPI_COMM_UNINITIALIZED){
    Comm::unref(smpi_process_comm_world());
    return;
  }
  comm->refcount_--;
  Group::unref(comm->group_);

  if(comm->refcount_==0){
    comm->cleanup_smp();
    comm->cleanup_attr<Comm>();
    delete comm;
  }
}
static int compare_ints (const void *a, const void *b)
{
  const int *da = static_cast<const int *>(a);
  const int *db = static_cast<const int *>(b);

  return static_cast<int>(*da > *db) - static_cast<int>(*da < *db);
}
void Comm::init_smp(){
  if (this == MPI_COMM_UNINITIALIZED)
    smpi_process_comm_world()->init_smp();

  int comm_size = this->size();

  // If we are in replay, perform an ugly hack:
  // tell SimGrid we are not in replay for a while, because we need the buffers to be copied for the following calls
  bool replaying = false; // cache the flag so we can restore it afterwards
  if(smpi_process_get_replaying()){
    replaying = true;
    smpi_process_set_replaying(false);
  }

  if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  //identify neighbours in comm
  //get the indexes of all processes sharing the same simix host
  xbt_swag_t process_list = SIMIX_host_self()->extension<simgrid::simix::Host>()->process_list;
  int intra_comm_size = 0;
  int min_index = INT_MAX; //the minimum index will be the leader
  smx_actor_t actor = nullptr;
  xbt_swag_foreach(actor, process_list) {
    int index = actor->pid - 1;
    if(this->group()->rank(index) != MPI_UNDEFINED){
      intra_comm_size++;
      //the process is in the comm
      if(index < min_index)
        min_index = index;
    }
  }
  XBT_DEBUG("number of processes deployed on my node: %d", intra_comm_size);
  MPI_Group group_intra = new Group(intra_comm_size);
  int i = 0;
  int j = 0;
  xbt_swag_foreach(actor, process_list) {
    int index = actor->pid - 1;
    if(this->group()->rank(index) != MPI_UNDEFINED){
      group_intra->set_mapping(index, i);
      i++;
    }
  }

  MPI_Comm comm_intra = new Comm(group_intra, nullptr);
  // the leader of a node is the process on it with the smallest index
  int leader = min_index;

  int* leaders_map = static_cast<int*>(xbt_malloc0(sizeof(int)*comm_size));
  int* leader_list = static_cast<int*>(xbt_malloc0(sizeof(int)*comm_size));
  for(i=0; i<comm_size; i++){
    leader_list[i] = -1;
  }

  Coll_allgather_mpich::allgather(&leader, 1, MPI_INT, leaders_map, 1, MPI_INT, this);

  if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }

  if(leaders_map_==nullptr){
    leaders_map_ = leaders_map;
  }else{
    xbt_free(leaders_map);
  }
  int leader_group_size = 0;
  for(i=0; i<comm_size; i++){
    int already_done = 0;
    for(j=0; j<leader_group_size; j++){
      if(leaders_map_[i]==leader_list[j]){
        already_done = 1;
      }
    }
    if(already_done == 0){
      leader_list[leader_group_size]=leaders_map_[i];
      leader_group_size++;
    }
  }
  qsort(leader_list, leader_group_size, sizeof(int), compare_ints);

  MPI_Group leaders_group = new Group(leader_group_size);

  MPI_Comm leader_comm = MPI_COMM_NULL;
  if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
    //create leader_communicator
    for (i=0; i< leader_group_size;i++)
      leaders_group->set_mapping(leader_list[i], i);
    leader_comm = new Comm(leaders_group, nullptr);
    this->set_leaders_comm(leader_comm);
    this->set_intra_comm(comm_intra);
  }else{
    //create intracommunicator
    for (i=0; i< leader_group_size;i++)
      leaders_group->set_mapping(leader_list[i], i);

    if(this->get_leaders_comm()==MPI_COMM_NULL){
      leader_comm = new Comm(leaders_group, nullptr);
      this->set_leaders_comm(leader_comm);
    }else{
      leader_comm = this->get_leaders_comm();
      Group::unref(leaders_group);
    }
    smpi_process_set_comm_intra(comm_intra);
  }
  // Are the nodes uniform? (i.e., the same number of processes on every node)
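  // Example (illustrative): 4 processes on each of 2 nodes is uniform; 6 on one node and 2 on the other is not.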
  int my_local_size = comm_intra->size();
  int is_uniform = 1;
  if(comm_intra->rank()==0) {
    int* non_uniform_map = xbt_new0(int, leader_group_size);
    Coll_allgather_mpich::allgather(&my_local_size, 1, MPI_INT,
                                    non_uniform_map, 1, MPI_INT, leader_comm);
    for(i=0; i < leader_group_size; i++) {
      if(non_uniform_map[0] != non_uniform_map[i]) {
        is_uniform = 0;
      }
    }
    if(is_uniform==0 && this->is_uniform()!=0){
      non_uniform_map_ = non_uniform_map;
    }else{
      xbt_free(non_uniform_map);
    }
    is_uniform_ = is_uniform;
  }
  Coll_bcast_mpich::bcast(&(is_uniform_), 1, MPI_INT, 0, comm_intra);

  if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  // Are the ranks blocked? (i.e., allocated contiguously on the SMP nodes)
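  // Example (illustrative): ranks {0,1,2,3} on node A and {4,5,6,7} on node B are blocked;
  // a round-robin placement such as {0,2,4,6} on A and {1,3,5,7} on B is not.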
  int is_blocked = 1;
  int prev = this->group()->rank(comm_intra->group()->index(0));
  for (i=1; i<my_local_size; i++){
    int that = this->group()->rank(comm_intra->group()->index(i));
    if(that != prev + 1){
      is_blocked = 0;
      break;
    }
    prev = that;
  }

  int global_blocked;
  Coll_allreduce_default::allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);

  if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){
    if(this->rank()==0)
      is_blocked_ = global_blocked;
  }else{
    is_blocked_ = global_blocked;
  }
  xbt_free(leader_list);

  if(replaying)
    smpi_process_set_replaying(true);
}
MPI_Comm Comm::f2c(int id) {
  if(id == -2) {
    return MPI_COMM_SELF;
  } else if(id == 0){
    return MPI_COMM_WORLD;
  } else if(F2C::f2c_lookup_ != nullptr && id >= 0) {
    char key[KEY_SIZE];
    MPI_Comm tmp = static_cast<MPI_Comm>(xbt_dict_get_or_null(F2C::f2c_lookup_, get_key_id(key, id)));
    return tmp != nullptr ? tmp : MPI_COMM_NULL;
  } else {
    return MPI_COMM_NULL;
  }
}

void Comm::free_f(int id) {
  char key[KEY_SIZE];
  xbt_dict_remove(F2C::f2c_lookup_, id==0 ? get_key(key, id) : get_key_id(key, id));
}

int Comm::add_f() {
  if(F2C::f2c_lookup_==nullptr){
    F2C::f2c_lookup_ = xbt_dict_new_homogeneous(nullptr);
  }
  char key[KEY_SIZE];
  xbt_dict_set(F2C::f2c_lookup_, this==MPI_COMM_WORLD ? get_key(key, F2C::f2c_id_) : get_key_id(key, F2C::f2c_id_), this, nullptr);
  f2c_id_++;
  return F2C::f2c_id_-1;
}
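/* A hedged usage sketch (not part of this file): add_f() hands out the integer handle seen by Fortran, and f2c()
 * maps it back to the C-side communicator, which is roughly what MPI_Comm_c2f/MPI_Comm_f2c rely on, e.g.
 *   MPI_Fint fcomm = MPI_Comm_c2f(MPI_COMM_WORLD);
 *   MPI_Comm  comm = MPI_Comm_f2c(fcomm);
 */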