/* smpi_datatype.cpp -- MPI primitives to handle datatypes */
-/* Copyright (c) 2009-2021. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2009-2022. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
}
+// Duplicating constructor (backs MPI_Type_dup): clones size/lb/ub/flags from
+// 'datatype', registers a Fortran handle, and copies the cached attributes;
+// *ret receives the status of the attribute copy (MPI_SUCCESS or a callback error).
Datatype::Datatype(Datatype* datatype, int* ret)
- : size_(datatype->size_), lb_(datatype->lb_), ub_(datatype->ub_), flags_(datatype->flags_)
+ : size_(datatype->size_), lb_(datatype->lb_), ub_(datatype->ub_), flags_(datatype->flags_), duplicated_datatype_(datatype)
{
this->add_f();
+ // Keep the source datatype alive for the lifetime of this duplicate; the
+ // matching unref of duplicated_datatype_ is done when this type is released
+ // (see the unref added in the destruction path elsewhere in this patch).
+ datatype->ref();
*ret = this->copy_attrs(datatype);
}
return;
//prevent further usage
flags_ &= ~ DT_FLAG_COMMITED;
+ if(duplicated_datatype_ != MPI_DATATYPE_NULL)
+ unref(duplicated_datatype_);
F2C::free_f(this->f2c_id());
//if still used, mark for deletion
if(refcount_!=0){
+// Copy the attributes cached on 'datatype' onto this freshly duplicated type,
+// honoring the MPI keyval copy callbacks (C and Fortran variants), and record
+// the MPI_COMBINER_DUP contents for MPI_Type_get_contents().
+// Returns MPI_SUCCESS, or the first non-success code reported by a callback.
int Datatype::copy_attrs(Datatype* datatype){
flags_ &= ~DT_FLAG_PREDEFINED;
- int ret = MPI_SUCCESS;
-
- if (not datatype->attributes()->empty()) {
- int flag=0;
- void* value_out;
- for (auto const& it : *(datatype->attributes())) {
- smpi_key_elem elem = keyvals_.at(it.first);
- if (elem != nullptr){
- if( elem->copy_fn.type_copy_fn != MPI_NULL_COPY_FN &&
- elem->copy_fn.type_copy_fn != MPI_TYPE_DUP_FN)
- ret = elem->copy_fn.type_copy_fn(datatype, it.first, elem->extra_state, it.second, &value_out, &flag);
- else if ( elem->copy_fn.type_copy_fn_fort != MPI_NULL_COPY_FN &&
- (*(int*)*elem->copy_fn.type_copy_fn_fort) != 1){
- value_out=(int*)xbt_malloc(sizeof(int));
- elem->copy_fn.type_copy_fn_fort(datatype, it.first, elem->extra_state, it.second, value_out, &flag, &ret);
- }
- if (ret != MPI_SUCCESS) {
- break;
- }
- if(elem->copy_fn.type_copy_fn == MPI_TYPE_DUP_FN ||
- ((elem->copy_fn.type_copy_fn_fort != MPI_NULL_COPY_FN) && (*(int*)*elem->copy_fn.type_copy_fn_fort == 1))){
- elem->refcount++;
- attributes()->insert({it.first, it.second});
- } else if (flag){
- elem->refcount++;
- attributes()->insert({it.first, value_out});
- }
+
+ set_contents(MPI_COMBINER_DUP, 0, nullptr, 0, nullptr, 1, &datatype);
+ for (auto const& it : datatype->attributes()) {
+ auto elem_it = keyvals_.find(it.first);
+ xbt_assert(elem_it != keyvals_.end(), "Keyval not found for Datatype: %d", it.first);
+
+ smpi_key_elem& elem = elem_it->second;
+ int ret = MPI_SUCCESS;
+ int flag = 0;
+ void* value_out = nullptr;
+ // C-callback path: MPI_TYPE_DUP_FN is a shallow copy of the stored pointer.
+ if (elem.copy_fn.type_copy_fn == MPI_TYPE_DUP_FN) {
+ value_out = it.second;
+ flag = 1;
+ } else if (elem.copy_fn.type_copy_fn != MPI_NULL_COPY_FN) {
+ ret = elem.copy_fn.type_copy_fn(datatype, it.first, elem.extra_state, it.second, &value_out, &flag);
+ }
+ if (ret != MPI_SUCCESS)
+ return ret;
+
+ // Fortran-callback path: Fortran attribute values are stored as ints, so a
+ // fresh int is allocated and either memcpy'd (dup) or filled by the callback.
+ if (elem.copy_fn.type_copy_fn_fort != MPI_NULL_COPY_FN) {
+ value_out = xbt_new(int, 1);
+ if (*(int*)*elem.copy_fn.type_copy_fn_fort == 1) { // MPI_TYPE_DUP_FN
+ memcpy(value_out, it.second, sizeof(int));
+ flag = 1;
+ } else { // not null, nor dup
+ elem.copy_fn.type_copy_fn_fort(datatype, it.first, elem.extra_state, it.second, value_out, &flag, &ret);
+ }
+ if (ret != MPI_SUCCESS) {
+ xbt_free(value_out);
+ return ret;
+ }
+ }
+ // NOTE(review): if the Fortran callback succeeded but left flag==0, the int
+ // allocated above is neither stored nor freed — looks like a small leak;
+ // confirm and xbt_free(value_out) in that case.
+ if (flag) {
+ elem.refcount++;
+ attributes().emplace(it.first, value_out);
+ }
}
- set_contents(MPI_COMBINER_DUP, 0, nullptr, 0, nullptr, 1, &datatype);
- return ret;
+ return MPI_SUCCESS;
}
int Datatype::clone(MPI_Datatype* type){
return (flags_ & DT_FLAG_BASIC);
}
-bool Datatype::is_replayable() const
-{
- return (simgrid::instr::trace_format == simgrid::instr::TraceFormat::Ti) &&
- ((this == MPI_BYTE) || (this == MPI_DOUBLE) || (this == MPI_INT) || (this == MPI_CHAR) ||
- (this == MPI_SHORT) || (this == MPI_LONG) || (this == MPI_FLOAT));
-}
-
MPI_Datatype Datatype::decode(const std::string& datatype_id)
{
return id2type_lookup.find(datatype_id)->second;
if (static_cast<unsigned>(max_datatypes) < contents_->datatypes_.size())
return MPI_ERR_COUNT;
std::copy(begin(contents_->datatypes_), end(contents_->datatypes_), array_of_datatypes);
- std::for_each(begin(contents_->datatypes_), end(contents_->datatypes_), std::mem_fn(&Datatype::ref));
+ for (auto& datatype : contents_->datatypes_)
+ datatype->ref();
return MPI_SUCCESS;
}
{
// FIXME Handle the case of a partial shared malloc.
- if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
- }
+ smpi_switch_data_segment(simgrid::s4u::Actor::self());
+
/* First check if we really have something to do */
size_t offset = 0;
std::vector<std::pair<size_t, size_t>> private_blocks;
recvtype->unserialize(sendbuf, recvbuf, count / recvtype->size(), MPI_REPLACE);
} else if (not(recvtype->flags() & DT_FLAG_DERIVED)) {
sendtype->serialize(sendbuf, recvbuf, count / sendtype->size());
- } else {
+ } else if(sendtype->size() != 0 && recvtype->size() != 0){
void * buf_tmp = xbt_malloc(count);
-
sendtype->serialize( sendbuf, buf_tmp,count/sendtype->size());
recvtype->unserialize( buf_tmp, recvbuf,count/recvtype->size(), MPI_REPLACE);
-
xbt_free(buf_tmp);
}
}
ub=((count-1)*stride+block_length-1)*old_type->get_extent()+old_type->ub();
}
if(old_type->flags() & DT_FLAG_DERIVED || stride != block_length){
- *new_type = new Type_Vector(count * block_length * old_type->size(), lb, ub, DT_FLAG_DERIVED, count, block_length,
+ *new_type = new Type_Vector(old_type->size() * block_length * count, lb, ub, DT_FLAG_DERIVED, count, block_length,
stride, old_type);
retval=MPI_SUCCESS;
}else{
/* in this situation the data are contiguous thus it's not required to serialize and unserialize it*/
- *new_type = new Datatype(count * block_length * old_type->size(), 0, ((count -1) * stride + block_length)*
- old_type->size(), DT_FLAG_CONTIGUOUS);
+ *new_type =
+ new Datatype(old_type->size() * block_length * count, 0,
+ old_type->size() * ((count - 1) * stride + block_length), DT_FLAG_CONTIGUOUS | DT_FLAG_DERIVED);
const std::array<int, 3> ints = {{count, block_length, stride}};
(*new_type)->set_contents(MPI_COMBINER_VECTOR, 3, ints.data(), 0, nullptr, 1, &old_type);
retval=MPI_SUCCESS;
ub=((count-1)*stride)+(block_length-1)*old_type->get_extent()+old_type->ub();
}
if(old_type->flags() & DT_FLAG_DERIVED || stride != block_length*old_type->get_extent()){
- *new_type = new Type_Hvector(count * block_length * old_type->size(), lb, ub, DT_FLAG_DERIVED, count, block_length,
+ *new_type = new Type_Hvector(old_type->size() * block_length * count, lb, ub, DT_FLAG_DERIVED, count, block_length,
stride, old_type);
retval=MPI_SUCCESS;
}else{
/* in this situation the data are contiguous thus it's not required to serialize and unserialize it*/
- *new_type = new Datatype(count * block_length * old_type->size(), 0, count * block_length * old_type->size(), DT_FLAG_CONTIGUOUS);
+ *new_type = new Datatype(old_type->size() * block_length * count, 0, old_type->size() * block_length * count,
+ DT_FLAG_CONTIGUOUS | DT_FLAG_DERIVED);
const std::array<int, 2> ints = {{count, block_length}};
(*new_type)->set_contents(MPI_COMBINER_HVECTOR, 2, ints.data(), 1, &stride, 1, &old_type);
retval=MPI_SUCCESS;
if(indices[i]+block_lengths[i]*old_type->ub()>ub)
ub = indices[i]+block_lengths[i]*old_type->ub();
- if ( (i< count -1) && (indices[i]+block_lengths[i]*(static_cast<int>(old_type->size())) != indices[i+1]) )
+ if ((i < count - 1) && (indices[i] + static_cast<MPI_Aint>(old_type->size()) * block_lengths[i] != indices[i + 1]))
contiguous=false;
}
if (old_type->flags_ & DT_FLAG_DERIVED || lb!=0)
if (not forced_ub && indices[i] + block_lengths[i] * old_types[i]->ub() > ub)
ub = indices[i]+block_lengths[i]*old_types[i]->ub();
- if ( (i< count -1) && (indices[i]+block_lengths[i]*static_cast<int>(old_types[i]->size()) != indices[i+1]) )
+ if ((i < count - 1) &&
+ (indices[i] + static_cast<MPI_Aint>(old_types[i]->size() * block_lengths[i]) != indices[i + 1]))
contiguous=false;
}
if (not contiguous) {