-/* Copyright (c) 2007-2021. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2022. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
colls::barrier(comm_);
Comm::unref(comm_);
-
+
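+ // The barrier is shared by all the processes of the window, so only rank 0 deletes it.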
if (rank_ == 0)
delete bar_;
MPI_Info Win::info()
{
- if (info_ == MPI_INFO_NULL)
- info_ = new Info();
- info_->ref();
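+ // info_ may still be MPI_INFO_NULL if no Info object was ever attached to this window.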
return info_;
}
int Win::fence(int assert)
{
XBT_DEBUG("Entering fence");
- if (opened_ == 0)
- opened_=1;
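+ // Count how many epochs are currently opened on this window.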
+ opened_++;
if (not (assert & MPI_MODE_NOPRECEDE)) {
// This is not the first fence => finalize what came before
bar_->wait();
- mut_->lock();
- // This (simulated) mutex ensures that no process pushes to the vector of requests during the waitall.
- // Without this, the vector could get redimensioned when another process pushes.
- // This would result in the array used by Request::waitall() to be invalidated.
- // Another solution would be to copy the data and cleanup the vector *before* Request::waitall
-
- // start all requests that have been prepared by another process
- if (not requests_.empty()) {
- int size = static_cast<int>(requests_.size());
- MPI_Request* treqs = requests_.data();
- Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
- }
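+ // Complete all the requests pending on this window; this happens under mut_ (see finish_comms()).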
+ flush_local_all();
count_=0;
- mut_->unlock();
}
if (assert & MPI_MODE_NOSUCCEED) // there should be no ops after this one, tell we are closed.
opened_=0;
}
+ // FIXME: The current implementation fails to ensure the correct ordering of the accumulate requests. The following
+ // 'flush' is a workaround to fix that.
+ flush(target_rank);
XBT_DEBUG("Leaving MPI_Win_Accumulate");
return MPI_SUCCESS;
}
}
int Win::flush(int rank){
- MPI_Win target_win = connected_wins_[rank];
- int finished = finish_comms(rank);
- XBT_DEBUG("Win_flush on local %d - Finished %d RMA calls", rank, finished);
- finished = target_win->finish_comms(rank_);
- XBT_DEBUG("Win_flush on remote %d - Finished %d RMA calls", rank, finished);
+ int finished = finish_comms(rank);
+ XBT_DEBUG("Win_flush on local %d for remote %d - Finished %d RMA calls", rank_, rank, finished);
+ if (rank != rank_) {
+ finished = connected_wins_[rank]->finish_comms(rank_);
+ XBT_DEBUG("Win_flush on remote %d for local %d - Finished %d RMA calls", rank, rank_, finished);
+ }
return MPI_SUCCESS;
}
int Win::flush_local(int rank){
int finished = finish_comms(rank);
- XBT_DEBUG("Win_flush_local for rank %d - Finished %d RMA calls", rank, finished);
+ XBT_DEBUG("Win_flush_local on local %d for remote %d - Finished %d RMA calls", rank_, rank, finished);
return MPI_SUCCESS;
}
int Win::flush_all(){
int finished = finish_comms();
- XBT_DEBUG("Win_flush_all on local - Finished %d RMA calls", finished);
+ XBT_DEBUG("Win_flush_all on local %d - Finished %d RMA calls", rank_, finished);
for (int i = 0; i < comm_->size(); i++) {
- finished = connected_wins_[i]->finish_comms(rank_);
- XBT_DEBUG("Win_flush_all on %d - Finished %d RMA calls", i, finished);
+ if (i != rank_) {
+ finished = connected_wins_[i]->finish_comms(rank_);
+ XBT_DEBUG("Win_flush_all on remote %d for local %d - Finished %d RMA calls", i, rank_, finished);
+ }
}
return MPI_SUCCESS;
}
int Win::flush_local_all(){
int finished = finish_comms();
- XBT_DEBUG("Win_flush_local_all - Finished %d RMA calls", finished);
+ XBT_DEBUG("Win_flush_local_all on local %d - Finished %d RMA calls", rank_, finished);
return MPI_SUCCESS;
}
}
int Win::finish_comms(){
+ // This (simulated) mutex ensures that no process pushes to the vector of requests during the waitall.
+ // Without this, the vector could get resized when another process pushes to it,
+ // which would invalidate the array used by Request::waitall().
+ // Another solution would be to copy the data and clean up the vector *before* Request::waitall.
mut_->lock();
//Finish own requests
int size = static_cast<int>(requests_.size());
}
int Win::finish_comms(int rank){
+ // See comment about the mutex in finish_comms() above
mut_->lock();
// Finish own requests
// Let's see if we're either the destination or the sender of this request