X-Git-Url: http://bilbo.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/b6dde5ff6f208b83545a4b4e9e81712d0d8617a1..f3b7e5f4b4d7c87ee3e8827313ec966ea8fc8387:/src/smpi/mpi/smpi_request.cpp

diff --git a/src/smpi/mpi/smpi_request.cpp b/src/smpi/mpi/smpi_request.cpp
index 90d7485744..ca295f3dff 100644
--- a/src/smpi/mpi/smpi_request.cpp
+++ b/src/smpi/mpi/smpi_request.cpp
@@ -33,26 +33,18 @@ extern void (*smpi_comm_copy_data_callback)(simgrid::kernel::activity::CommImpl*
 namespace simgrid{
 namespace smpi{
 
-Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags, MPI_Op op)
-    : buf_(const_cast<void*>(buf)), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags), op_(op)
+Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
+                 unsigned flags, MPI_Op op)
+    : buf_(const_cast<void*>(buf))
+    , old_type_(datatype)
+    , size_(datatype->size() * count)
+    , src_(src)
+    , dst_(dst)
+    , tag_(tag)
+    , comm_(comm)
+    , flags_(flags)
+    , op_(op)
 {
-  void *old_buf = nullptr;
-// FIXME Handle the case of a partial shared malloc.
-  if ((((flags & MPI_REQ_RECV) != 0) && ((flags & MPI_REQ_ACCUMULATE) != 0)) || (datatype->flags() & DT_FLAG_DERIVED)) {
-    // This part handles the problem of non-contiguous memory
-    old_buf = const_cast<void*>(buf);
-    if (count==0){
-      buf_ = nullptr;
-    }else {
-      buf_ = xbt_malloc(count*datatype->size());
-      if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & MPI_REQ_SEND) != 0)) {
-        datatype->serialize(old_buf, buf_, count);
-      }
-    }
-  }
-  // This part handles the problem of non-contiguous memory (for the unserialization at the reception)
-  old_buf_ = old_buf;
-  size_ = datatype->size() * count;
   datatype->ref();
   comm_->ref();
   if(op != MPI_REPLACE && op != MPI_OP_NULL)
@@ -72,6 +64,7 @@ Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int
   generalized_funcs=nullptr;
   nbc_requests_=nullptr;
   nbc_requests_size_=0;
+  init_buffer(count);
 }
 
 void Request::ref(){
@@ -139,21 +132,40 @@ bool Request::match_common(MPI_Request req, MPI_Request sender, MPI_Request rece
   return false;
 }
 
+void Request::init_buffer(int count){
+  void *old_buf = nullptr;
+// FIXME Handle the case of a partial shared malloc.
+  // This part handles the problem of non-contiguous memory (for the unserialization at the reception)
+  if ((((flags_ & MPI_REQ_RECV) != 0) && ((flags_ & MPI_REQ_ACCUMULATE) != 0)) || (old_type_->flags() & DT_FLAG_DERIVED)) {
+    // This part handles the problem of non-contiguous memory
+    old_buf = buf_;
+    if (count==0){
+      buf_ = nullptr;
+    }else {
+      buf_ = xbt_malloc(count*old_type_->size());
+      if ((old_type_->flags() & DT_FLAG_DERIVED) && ((flags_ & MPI_REQ_SEND) != 0)) {
+        old_type_->serialize(old_buf, buf_, count);
+      }
+    }
+  }
+  old_buf_ = old_buf;
+}
+
 bool Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl*)
 {
-  MPI_Request ref = static_cast<MPI_Request>(a);
-  MPI_Request req = static_cast<MPI_Request>(b);
+  auto ref = static_cast<MPI_Request>(a);
+  auto req = static_cast<MPI_Request>(b);
   return match_common(req, req, ref);
 }
 
 bool Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl*)
 {
-  MPI_Request ref = static_cast<MPI_Request>(a);
-  MPI_Request req = static_cast<MPI_Request>(b);
+  auto ref = static_cast<MPI_Request>(a);
+  auto req = static_cast<MPI_Request>(b);
   return match_common(req, ref, req);
 }
 
-void Request::print_request(const char *message)
+void Request::print_request(const char* message) const
 {
   XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]", message, this, buf_,
            size_, src_, dst_, tag_, flags_);
@@ -355,6 +367,11 @@ void Request::start()
   s4u::Mailbox* mailbox;
 
   xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
+  // reinitialize temporary buffer for persistent requests
+  if(real_size_ > 0 && (flags_ & MPI_REQ_FINISHED)){
+    buf_ = old_buf_;
+    init_buffer(real_size_/old_type_->size());
+  }
   flags_ &= ~MPI_REQ_PREPARED;
   flags_ &= ~MPI_REQ_FINISHED;
   this->ref();
@@ -412,7 +429,7 @@ void Request::start()
     if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
       mut->unlock();
   } else { /* the RECV flag was not set, so this is a send */
-    simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
+    const simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
     xbt_assert(process, "Actor pid=%d is gone??", dst_);
     int rank = src_;
     if (TRACE_smpi_view_internals()) {
@@ -496,8 +513,9 @@ void Request::start()
     // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
     real_size_=size_;
+    size_t payload_size = size_ + 16; // MPI envelope size (tag+dest+communicator)
     action_ = simcall_comm_isend(
-        simgrid::s4u::Actor::by_pid(src_)->get_impl(), mailbox->get_impl(), size_, -1.0, buf, real_size_, &match_send,
+        simgrid::s4u::Actor::by_pid(src_)->get_impl(), mailbox->get_impl(), payload_size, -1.0, buf, real_size_, &match_send,
         &xbt_free_f, // how to free the userdata if a detached send fails
         process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback,
         this,
         // detach if msg size < eager/rdv switch limit
@@ -559,7 +577,7 @@ int Request::test(MPI_Request * request, MPI_Status * status, int* flag) {
   Status::empty(status);
   *flag = 1;
-  if (((*request)->flags_ & MPI_REQ_PREPARED) == 0) {
+  if (((*request)->flags_ & (MPI_REQ_PREPARED | MPI_REQ_FINISHED)) == 0) {
     if ((*request)->action_ != nullptr && (*request)->cancelled_ != 1){
       try{
         *flag = simcall_comm_test((*request)->action_.get());
@@ -748,9 +766,9 @@ void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status*
   static int nsleeps = 1;
   double speed = s4u::this_actor::get_host()->get_speed();
   double maxrate = smpi_cfg_iprobe_cpu_usage();
-  MPI_Request request = new Request(nullptr, 0, MPI_CHAR,
-                                    source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
-                                    simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV);
+  auto request = new Request(nullptr, 0, MPI_CHAR,
+                             source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
+                             simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV);
   if (smpi_iprobe_sleep > 0) {
     /** Compute the number of flops we will sleep **/
     s4u::this_actor::exec_init(/*nsleeps: See comment above */ nsleeps *
@@ -846,12 +864,14 @@ void Request::finish_wait(MPI_Request* request, MPI_Status * status)
       if ((req->flags_ & MPI_REQ_RECV) && datatype->size() != 0)
         datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size() , req->op_);
       xbt_free(req->buf_);
+      req->buf_=nullptr;
     } else if (req->flags_ & MPI_REQ_RECV) { // apply op on contiguous buffer for accumulate
       if (datatype->size() != 0) {
         int n = req->real_size_ / datatype->size();
         req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
       }
       xbt_free(req->buf_);
+      req->buf_=nullptr;
     }
   }
 }
@@ -909,7 +929,7 @@ int Request::wait(MPI_Request * request, MPI_Status * status)
   }
 
   (*request)->print_request("Waiting");
-  if ((*request)->flags_ & MPI_REQ_PREPARED) {
+  if ((*request)->flags_ & (MPI_REQ_PREPARED | MPI_REQ_FINISHED)) {
     Status::empty(status);
     return ret;
   }
@@ -1006,7 +1026,7 @@ int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
   return index;
 }
 
-static int sort_accumulates(MPI_Request a, MPI_Request b)
+static int sort_accumulates(const Request* a, const Request* b)
 {
   return (a->tag() > b->tag());
 }
@@ -1035,8 +1055,8 @@ int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
       wait(&requests[c],pstat);
       index = c;
     } else {
-      index = waitany(count, (MPI_Request*)requests, pstat);
-
+      index = waitany(count, requests, pstat);
+
       if (index == MPI_UNDEFINED)
         break;
@@ -1070,7 +1090,7 @@ int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Sta
     int index = 0;
     MPI_Status stat;
     MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
-    index = waitany(incount, (MPI_Request*)requests, pstat);
+    index = waitany(incount, requests, pstat);
     if(index==MPI_UNDEFINED) return MPI_UNDEFINED;
     if(status != MPI_STATUSES_IGNORE) {
       status[count] = *pstat;
@@ -1099,7 +1119,7 @@ MPI_Request Request::f2c(int id) {
   char key[KEY_SIZE];
   if(id==MPI_FORTRAN_REQUEST_NULL)
-    return static_cast<MPI_Request>(MPI_REQUEST_NULL);
+    return MPI_REQUEST_NULL;
   return static_cast<MPI_Request>(F2C::f2c_lookup()->at(get_key(key,id)));
 }
@@ -1158,7 +1178,7 @@ int Request::grequest_start(MPI_Grequest_query_function* query_fn, MPI_Grequest_
 int Request::grequest_complete(MPI_Request request)
 {
-  if ((!(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex==NULL)
+  if ((!(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
     return MPI_ERR_REQUEST;
   request->generalized_funcs->mutex->lock();
   request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
@@ -1177,13 +1197,14 @@ void Request::set_nbc_requests(MPI_Request* reqs, int size){
   }
 }
 
-int Request::get_nbc_requests_size(){
+int Request::get_nbc_requests_size() const
+{
   return nbc_requests_size_;
 }
 
-MPI_Request* Request::get_nbc_requests(){
+MPI_Request* Request::get_nbc_requests() const
+{
   return nbc_requests_;
 }
-
 }
 }
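
The behavioral core of this patch: finish_wait() now frees the temporary contiguous buffer and resets buf_ to nullptr, while start() rebuilds it through the new init_buffer() whenever a finished persistent request is restarted (real_size_ > 0 and MPI_REQ_FINISHED set). Without this, the second MPI_Start() of a persistent request carrying a derived datatype (or a receive with accumulate) would reuse a freed buffer. A minimal user-level reproducer sketch, not taken from the patch and using only standard MPI calls, assuming exactly two participating ranks:

#include <mpi.h>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  double data[100] = {0};
  MPI_Datatype vec; // derived datatype -> takes the DT_FLAG_DERIVED branch of init_buffer()
  MPI_Type_vector(10, 1, 10, MPI_DOUBLE, &vec);
  MPI_Type_commit(&vec);

  MPI_Request req;
  if (rank == 0)
    MPI_Send_init(data, 1, vec, 1, 0, MPI_COMM_WORLD, &req);
  else if (rank == 1)
    MPI_Recv_init(data, 1, vec, 0, 0, MPI_COMM_WORLD, &req);

  if (rank < 2) {
    for (int i = 0; i < 3; i++) { // every MPI_Start() after the first re-enters init_buffer()
      MPI_Start(&req);
      MPI_Wait(&req, MPI_STATUS_IGNORE);
    }
    MPI_Request_free(&req);
  }

  MPI_Type_free(&vec);
  MPI_Finalize();
  return 0;
}

The widened guards in test() and wait() follow the same MPI semantics: waiting on an inactive (finished but not restarted) persistent request must return immediately with an empty status, which is exactly what the added MPI_REQ_FINISHED check provides.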
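
A smaller, easy-to-miss change in start(): the simulated transfer size of a send now includes a fixed 16-byte envelope (tag + destination + communicator) on top of the payload, while real_size_, the number of bytes actually copied into the matching receive buffer, stays equal to size_. A worked illustration with hypothetical message sizes, not from the patch:

// For a 1000-byte message:
//   payload_size = size_ + 16 = 1016   // bytes occupying the simulated link
//   real_size_   = size_      = 1000   // bytes copied to the receiver
// The relative timing impact is largest for small messages: a 4-byte send
// is simulated as 20 bytes on the wire (a 5x factor), while a 1 MiB send
// grows by only about 0.0015%.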