int j = 0;
do {
// We choose a random peer to unchoke.
- std::unordered_map<int, Connection>::iterator chosen_peer_it = connected_peers.begin();
+ auto chosen_peer_it = connected_peers.begin();
std::advance(chosen_peer_it, random.uniform_int(0, static_cast<int>(connected_peers.size() - 1)));
chosen_peer = &chosen_peer_it->second;
if (not chosen_peer->interested || not chosen_peer->choked_upload)
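For context, a minimal self-contained sketch of the random-selection idiom this hunk touches (pick_random is illustrative, not a SimGrid function): std::unordered_map only has forward iterators, so std::advance walks O(n) steps, and the map is assumed non-empty.

#include <iostream>
#include <iterator>
#include <random>
#include <string>
#include <unordered_map>

// Pick a uniformly random element; 'auto' spares spelling out Map::iterator.
template <class Map> typename Map::iterator pick_random(Map& map, std::mt19937& gen)
{
  std::uniform_int_distribution<std::size_t> dist(0, map.size() - 1);
  auto it = map.begin();
  std::advance(it, dist(gen));
  return it;
}

int main()
{
  std::unordered_map<int, std::string> peers{{1, "alice"}, {2, "bob"}, {3, "carol"}};
  std::mt19937 gen(std::random_device{}());
  std::cout << "unchoking peer " << pick_random(peers, gen)->second << '\n';
}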
double deadline;
xbt_assert(argc == 3 || argc == 4, "Wrong number of arguments");
/* Node initialization */
- unsigned int node_id = static_cast<unsigned int>(strtoul(argv[1], nullptr, 0));
+ auto node_id = static_cast<unsigned int>(strtoul(argv[1], nullptr, 0));
kademlia::Node node(node_id);
if (argc == 4) {
XBT_INFO("Hi, I'm going to join the network with id %u", node.getId());
- unsigned int known_id = static_cast<unsigned int>(strtoul(argv[2], nullptr, 0));
- join_success = node.join(known_id);
- deadline = std::stod(argv[3]);
+ auto known_id = static_cast<unsigned int>(strtoul(argv[2], nullptr, 0));
+ join_success = node.join(known_id);
+ deadline = std::stod(argv[3]);
} else {
deadline = std::stod(argv[2]);
XBT_INFO("Hi, I'm going to create the network with id %u", node.getId());
masternames[id] = simgrid::s4u::Host::current()->get_cname();
- double* payload = new double();
- *payload = msg_size;
+ auto* payload = new double(msg_size);
count_finished++;
timer_start = 1;
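Note that the payload hunk above does more than introduce auto: it folds the allocate-then-assign pair into a single direct-initialized new-expression. A minimal sketch of the equivalence:

#include <iostream>

int main()
{
  double msg_size = 1e6;
  double* p1 = new double(); // before: value-initialize...
  *p1        = msg_size;     // ...then assign in a second statement
  auto* p2 = new double(msg_size); // after: initialize in place; auto* deduces double*
  std::cout << std::boolalpha << (*p1 == *p2) << '\n'; // true
  delete p1;
  delete p2;
}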
args.emplace_back("java");
for (int index = 1; index < argc; index++) {
- jstring jval = (jstring)env->GetObjectArrayElement(jargs, index - 1);
+ auto jval = (jstring)env->GetObjectArrayElement(jargs, index - 1);
const char* tmp = env->GetStringUTFChars(jval, nullptr);
args.emplace_back(tmp);
env->ReleaseStringUTFChars(jval, tmp);
sg_host_t* hosts = sg_host_list();
size_t host_count = sg_host_count();
for (size_t index = 0; index < host_count - 1; index++) {
- jobject jhost = (jobject)hosts[index]->extension(JAVA_HOST_LEVEL);
+ auto jhost = (jobject)hosts[index]->extension(JAVA_HOST_LEVEL);
if (jhost)
jhost_unref(env, jhost);
}
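The JNI hunks keep their C-style casts because GetObjectArrayElement and friends return a plain jobject; auto then deduces the cast's target type, so the type is spelled once instead of twice. A generic sketch of that deduction rule (Base/Derived are illustrative stand-ins, since a runnable JNI example would need a JVM):

#include <iostream>

struct Base {
  virtual ~Base() = default;
};
struct Derived : Base {
  int value = 42;
};

int main()
{
  Base* b = new Derived();
  auto* d = static_cast<Derived*>(b); // 'auto*' deduces Derived* from the cast
  std::cout << d->value << '\n'; // 42
  delete b;
}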
static void java_main(int argc, char* argv[])
{
JNIEnv *env = get_current_thread_env();
- simgrid::kernel::context::JavaContext* context =
- static_cast<simgrid::kernel::context::JavaContext*>(simgrid::kernel::context::Context::self());
+ auto* context = static_cast<simgrid::kernel::context::JavaContext*>(simgrid::kernel::context::Context::self());
//Change the "." in class name for "/".
std::string arg0 = argv[0];
//Retrieve the name of the process.
jstring jname = env->NewStringUTF(argv[0]);
//Build the arguments
- jobjectArray args = static_cast<jobjectArray>(env->NewObjectArray(argc - 1, env->FindClass("java/lang/String"),
- env->NewStringUTF("")));
+ auto args = static_cast<jobjectArray>(
+ env->NewObjectArray(argc - 1, env->FindClass("java/lang/String"), env->NewStringUTF("")));
for (int i = 1; i < argc; i++)
env->SetObjectArrayElement(args,i - 1, env->NewStringUTF(argv[i]));
//Retrieve the host for the process.
void java_main_jprocess(jobject jprocess)
{
JNIEnv *env = get_current_thread_env();
- JavaContext* context = static_cast<JavaContext*>(Context::self());
+ auto* context = static_cast<JavaContext*>(Context::self());
context->jprocess_ = jprocess;
jprocess_bind(context->jprocess_, sg_actor_self(), env);
//bind the task object.
msg_task_t task = MSG_comm_get_task(comm);
xbt_assert(task != nullptr, "Task is nullptr");
- jobject jtask_global = static_cast<jobject>(MSG_task_get_data(task));
+ auto jtask_global = static_cast<jobject>(MSG_task_get_data(task));
//case where the data has already been retrieved
if (jtask_global == nullptr) {
return;
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Comm_waitCompletion(JNIEnv *env, jobject jcomm, jdouble timeout) {
- msg_comm_t comm = (msg_comm_t) (uintptr_t) env->GetLongField(jcomm, jcomm_field_Comm_bind);
+ auto comm = (msg_comm_t)(uintptr_t)env->GetLongField(jcomm, jcomm_field_Comm_bind);
if (not comm) {
jxbt_throw_null(env, "comm is null");
return;
static msg_comm_t* jarray_to_commArray(JNIEnv *env, jobjectArray jcomms, /* OUT */ int *count)
{
*count = env->GetArrayLength(jcomms);
- msg_comm_t* comms = new msg_comm_t[*count];
+ auto* comms = new msg_comm_t[*count];
for (int i=0; i < *count; i++) {
jobject jcomm = env->GetObjectArrayElement(jcomms, i);
}
for (int index = 0; index < count; index++) {
- jobject jhost = static_cast<jobject>(table[index]->extension(JAVA_HOST_LEVEL));
+ auto jhost = static_cast<jobject>(table[index]->extension(JAVA_HOST_LEVEL));
if (not jhost) {
jstring jname = env->NewStringUTF(table[index]->get_cname());
jobject jprocess = jprocess_ref(jprocess_arg, env);
/* Actually build the MSG process */
- jstring jname = (jstring)env->GetObjectField(jprocess, jprocess_field_Process_name);
+ auto jname = (jstring)env->GetObjectField(jprocess, jprocess_field_Process_name);
const char* name = env->GetStringUTFChars(jname, nullptr);
auto actor_code = [jprocess]() { simgrid::kernel::context::java_main_jprocess(jprocess); };
smx_actor_t self = SIMIX_process_self();
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Mutex_acquire(JNIEnv * env, jobject obj) {
- sg_mutex_t mutex = (sg_mutex_t)(uintptr_t)env->GetLongField(obj, jsynchro_field_Mutex_bind);
+ auto mutex = (sg_mutex_t)(uintptr_t)env->GetLongField(obj, jsynchro_field_Mutex_bind);
sg_mutex_lock(mutex);
}
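Mutex_acquire recovers a native pointer from a Java long field; the double cast goes through uintptr_t so the integer/pointer conversion is well defined on both 32- and 64-bit JVMs. A JVM-free sketch of the round-trip, with a hypothetical Mutex standing in for sg_mutex_t:

#include <cstdint>
#include <iostream>

struct Mutex {
  int id = 7; // stand-in for the native sg_mutex_t state
};

int main()
{
  Mutex m;
  // Store the pointer in a 64-bit integer, as the *_bind Java fields do...
  auto bind = static_cast<long long>(reinterpret_cast<std::uintptr_t>(&m));
  // ...then round-trip it back through uintptr_t before dereferencing.
  auto* mutex = reinterpret_cast<Mutex*>(static_cast<std::uintptr_t>(bind));
  std::cout << mutex->id << '\n'; // 7
}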
int host_count = env->GetArrayLength(jhosts);
jdouble* jcomputeDurations = env->GetDoubleArrayElements(jcomputeDurations_arg, nullptr);
- msg_host_t* hosts = new msg_host_t[host_count];
- double* computeDurations = new double[host_count];
+ auto* hosts = new msg_host_t[host_count];
+ auto* computeDurations = new double[host_count];
for (int index = 0; index < host_count; index++) {
jobject jhost = env->GetObjectArrayElement(jhosts, index);
hosts[index] = jhost_get_native(env, jhost);
env->ReleaseDoubleArrayElements(jcomputeDurations_arg, jcomputeDurations, 0);
jdouble* jmessageSizes = env->GetDoubleArrayElements(jmessageSizes_arg, nullptr);
- double* messageSizes = new double[host_count * host_count];
+ auto* messageSizes = new double[host_count * host_count];
for (int index = 0; index < host_count * host_count; index++) {
messageSizes[index] = jmessageSizes[index];
}
jmsg_throw_status(env, rv);
return nullptr;
}
- jobject jtask_global = (jobject) MSG_task_get_data(task);
+ auto jtask_global = (jobject)MSG_task_get_data(task);
/* Convert the global ref into a local ref so that the JVM can free the stuff */
jobject jtask_local = env->NewLocalRef(jtask_global);
return nullptr;
//pointer to store the task object pointer.
- msg_task_t* task = new msg_task_t(nullptr);
+ auto* task = new msg_task_t(nullptr);
/* There should be a cache here */
jobject jcomm = env->NewObject(comm_class, jtask_method_Comm_constructor);
jmsg_throw_status(env, res);
return nullptr;
}
- jobject jtask_global = (jobject)MSG_task_get_data(task);
+ auto jtask_global = (jobject)MSG_task_get_data(task);
/* Convert the global ref into a local ref so that the JVM can free the stuff */
jobject jtask_local = env->NewLocalRef(jtask_global);
return nullptr;
// pointer to store the task object pointer.
- msg_task_t* task = new msg_task_t(nullptr);
+ auto* task = new msg_task_t(nullptr);
jobject jcomm = env->NewObject(comm_class, jtask_method_Comm_constructor);
if (not jcomm) {
}
static void msg_task_cancel_on_failed_dsend(void* t) {
- msg_task_t task = (msg_task_t) t;
+ auto task = (msg_task_t)t;
JNIEnv* env = get_current_thread_env();
if (env) {
- jobject jtask_global = (jobject)MSG_task_get_data(task);
+ auto jtask_global = (jobject)MSG_task_get_data(task);
/* Destroy the global ref so that the JVM can free the stuff */
env->DeleteGlobalRef(jtask_global);
/* Don't free the C data here, to avoid a race condition with the GC also sometimes doing so.
std::vector<jobject> vms;
for (size_t i = 0; i < host_count; i++) {
- simgrid::s4u::VirtualMachine* vm = dynamic_cast<simgrid::s4u::VirtualMachine*>(hosts[i]);
+ auto* vm = dynamic_cast<simgrid::s4u::VirtualMachine*>(hosts[i]);
if (vm != nullptr && vm->get_state() != simgrid::s4u::VirtualMachine::state::DESTROYED) {
- jobject jvm = static_cast<jobject>(vm->extension(JAVA_HOST_LEVEL));
+ auto jvm = static_cast<jobject>(vm->extension(JAVA_HOST_LEVEL));
vms.push_back(jvm);
}
}
if (not id) {
jmethodID tostr_id = env->GetMethodID(cls, "getName", "()Ljava/lang/String;");
- jstring jclassname = (jstring) env->CallObjectMethod(cls, tostr_id, nullptr);
+ auto jclassname = (jstring)env->CallObjectMethod(cls, tostr_id, nullptr);
const char* classname = env->GetStringUTFChars(jclassname, nullptr);
env->ReleaseStringUTFChars(jclassname, classname);
if (not id) {
jmethodID tostr_id = env->GetMethodID(cls, "getName", "()Ljava/lang/String;");
- jstring jclassname = (jstring) env->CallObjectMethod(cls, tostr_id, nullptr);
+ auto jclassname = (jstring)env->CallObjectMethod(cls, tostr_id, nullptr);
const char* classname = env->GetStringUTFChars(jclassname, nullptr);
env->ReleaseStringUTFChars(jclassname, classname);
if (not id) {
jmethodID getname_id = env->GetMethodID(cls, "getName", "()Ljava/lang/String;");
- jstring jclassname = (jstring) env->CallObjectMethod(cls, getname_id, nullptr);
+ auto jclassname = (jstring)env->CallObjectMethod(cls, getname_id, nullptr);
const char* classname = env->GetStringUTFChars(jclassname, nullptr);
env->ReleaseStringUTFChars(jclassname, classname);
}
buffer.clear();
} else {
- std::vector<PajeEvent*>::iterator i = buffer.begin();
+ auto i = buffer.begin();
for (auto const& event : buffer) {
double head_timestamp = event->timestamp_;
if (head_timestamp > last_timestamp_to_dump)
xbt_assert(Container::get_root() == root);
if (TRACE_smpi_is_enabled()) {
- ContainerType* mpi = root->type_->by_name_or_create<ContainerType>("MPI");
+ auto* mpi = root->type_->by_name_or_create<ContainerType>("MPI");
if (not TRACE_smpi_is_grouped())
mpi->by_name_or_create<StateType>("MPI_STATE");
root->type_->by_name_or_create("MPI_LINK", mpi, mpi);
container->type_->by_name_or_create("speed_used", "0.5 0.5 0.5");
if (TRACE_smpi_is_enabled() && TRACE_smpi_is_grouped()) {
- ContainerType* mpi = container->type_->by_name_or_create<ContainerType>("MPI");
+ auto* mpi = container->type_->by_name_or_create<ContainerType>("MPI");
mpi->by_name_or_create<StateType>("MPI_STATE");
// TODO See if we can move this to the LoadBalancer plugin
root->type_->by_name_or_create("MIGRATE_LINK", mpi, mpi);
std::string container_name = instr_pid(actor);
container->create_child(container_name, "ACTOR");
- ContainerType* actor_type = container->type_->by_name_or_create<ContainerType>("ACTOR");
- StateType* state = actor_type->by_name_or_create<StateType>("ACTOR_STATE");
+ auto* actor_type = container->type_->by_name_or_create<ContainerType>("ACTOR");
+ auto* state = actor_type->by_name_or_create<StateType>("ACTOR_STATE");
state->add_entity_value("suspend", "1 0 1");
state->add_entity_value("sleep", "1 1 0");
state->add_entity_value("receive", "1 0 0");
{
const Container* container = new HostContainer(host, currentContainer.back());
const Container* root = Container::get_root();
- ContainerType* vm = container->type_->by_name_or_create<ContainerType>("VM");
- StateType* state = vm->by_name_or_create<StateType>("VM_STATE");
+ auto* vm = container->type_->by_name_or_create<ContainerType>("VM");
+ auto* state = vm->by_name_or_create<StateType>("VM_STATE");
state->add_entity_value("suspend", "1 0 1");
state->add_entity_value("sleep", "1 1 0");
state->add_entity_value("receive", "1 0 0");
int FatTreeZone::connect_node_to_parents(FatTreeNode* node)
{
- std::vector<FatTreeNode*>::iterator currentParentNode = this->nodes_.begin();
+ auto currentParentNode = this->nodes_.begin();
int connectionsNumber = 0;
const int level = node->level;
XBT_DEBUG("We are connecting node %d(%u,%u) to his parents.", node->id, node->level, node->position);
static std::vector<double>* netpoint_get_coords(NetPoint* np)
{
- vivaldi::Coords* coords = np->extension<vivaldi::Coords>();
+ auto* coords = np->extension<vivaldi::Coords>();
xbt_assert(coords, "Please specify the Vivaldi coordinates of %s %s (%p)",
(np->is_netzone() ? "Netzone" : (np->is_host() ? "Host" : "Router")), np->get_cname(), np);
return &coords->coords;
}
// assign a file descriptor id to the newly opened File
- FileDescriptorHostExt* ext = host->extension<simgrid::s4u::FileDescriptorHostExt>();
+ auto* ext = host->extension<simgrid::s4u::FileDescriptorHostExt>();
if (ext->file_descriptor_table == nullptr) {
ext->file_descriptor_table = std::make_unique<std::vector<int>>(sg_storage_max_file_descriptors);
std::iota(ext->file_descriptor_table->rbegin(), ext->file_descriptor_table->rend(), 0); // Fill with ..., 1, 0.
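The descriptor-table hunk fills the vector through reverse iterators: writing 0, 1, 2, ... back-to-front leaves N-1, ..., 1, 0 front-to-back, so back() always exposes the smallest free id. A standalone sketch:

#include <iostream>
#include <numeric>
#include <vector>

int main()
{
  std::vector<int> table(5);
  std::iota(table.rbegin(), table.rend(), 0); // table is now 4 3 2 1 0
  std::cout << "next free id: " << table.back() << '\n'; // 0
  table.pop_back(); // hand out id 0
  std::cout << "next free id: " << table.back() << '\n'; // 1
}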
host = vm->get_pm();
// Get the host_energy extension for the relevant host
- HostEnergy* host_energy = host->extension<HostEnergy>();
+ auto* host_energy = host->extension<HostEnergy>();
if (host_energy->get_last_update_time() < surf_get_clock())
host_energy->update();
if (dynamic_cast<simgrid::s4u::VirtualMachine const*>(&host)) // Ignore virtual machines
return;
- HostEnergy* host_energy = host.extension<HostEnergy>();
+ auto* host_energy = host.extension<HostEnergy>();
host_energy->update();
}
continue;
XBT_DEBUG("Update link %s", link->get_cname());
- LinkEnergy* link_energy = link->get_iface()->extension<LinkEnergy>();
+ auto* link_energy = link->get_iface()->extension<LinkEnergy>();
link_energy->init_watts_range_list();
link_energy->update();
}
inline XBT_PRIVATE
typename C::mapped_type* find_map_ptr(C& c, K const& k)
{
- typename C::iterator i = c.find(k);
+ auto i = c.find(k);
if (i == c.end())
return nullptr;
else
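For reference, the helper reads as follows once the truncated else-branch is restored (the main is a usage sketch, not from the source):

#include <iostream>
#include <map>
#include <string>

// Return a pointer to the mapped value, or nullptr when the key is absent.
template <class C, class K> typename C::mapped_type* find_map_ptr(C& c, K const& k)
{
  auto i = c.find(k);
  if (i == c.end())
    return nullptr;
  else
    return &i->second;
}

int main()
{
  std::map<std::string, int> m{{"answer", 42}};
  if (auto* v = find_map_ptr(m, std::string("answer")))
    std::cout << *v << '\n'; // 42
  std::cout << (find_map_ptr(m, std::string("missing")) == nullptr) << '\n'; // 1
}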
if (Y > X)
num_reqs = Y;
- MPI_Request* req = new MPI_Request[num_reqs];
+ auto* req = new MPI_Request[num_reqs];
MPI_Request* req_ptr = req;
// do local allgather/local copy
block_size = extent * send_count;
- MPI_Request* req = new MPI_Request[num_reqs];
+ auto* req = new MPI_Request[num_reqs];
MPI_Request* req_ptr = req;
// do local allgather/local copy
size = comm->size();
rextent = rtype->get_extent();
sextent = stype->get_extent();
- MPI_Request* rrequest_array = new MPI_Request[size];
- MPI_Request* srequest_array = new MPI_Request[size];
+ auto* rrequest_array = new MPI_Request[size];
+ auto* srequest_array = new MPI_Request[size];
// irregular case use default MPI functions
if (scount * sextent != rcount * rextent) {
// root of each SMP
if (intra_rank == 0) {
- MPI_Request* rrequest_array = new MPI_Request[inter_comm_size - 1];
- MPI_Request* srequest_array = new MPI_Request[inter_comm_size - 1];
+ auto* rrequest_array = new MPI_Request[inter_comm_size - 1];
+ auto* srequest_array = new MPI_Request[inter_comm_size - 1];
src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * num_core;
dst = ((inter_rank + 1) % inter_comm_size) * num_core;
if (intra_rank == 0) {
int num_req = (inter_comm_size - 1) * 2;
- MPI_Request* reqs = new MPI_Request[num_req];
+ auto* reqs = new MPI_Request[num_req];
MPI_Request* req_ptr = reqs;
- MPI_Status* stat = new MPI_Status[num_req];
+ auto* stat = new MPI_Status[num_req];
for (i = 1; i < inter_comm_size; i++) {
extent = send_type->get_extent();
num_reqs = (2 * num_procs) - 2;
- MPI_Request* reqs = new MPI_Request[num_reqs];
+ auto* reqs = new MPI_Request[num_reqs];
MPI_Request* req_ptr = reqs;
Request::sendrecv(send_buff, send_count, send_type, rank, tag,
(char *) recv_buff + rank * recv_count * extent, recv_count,
if (Y > X)
num_reqs = Y;
- MPI_Status* statuses = new MPI_Status[num_reqs];
- MPI_Request* reqs = new MPI_Request[num_reqs];
+ auto* statuses = new MPI_Status[num_reqs];
+ auto* reqs = new MPI_Request[num_reqs];
MPI_Request* req_ptr = reqs;
count = send_count * num_procs;
unsigned char* tmp_buff1 = smpi_get_tmp_sendbuffer(block_size * num_procs * two_dsize);
unsigned char* tmp_buff2 = smpi_get_tmp_recvbuffer(block_size * two_dsize);
- MPI_Status* statuses = new MPI_Status[num_reqs];
- MPI_Request* reqs = new MPI_Request[num_reqs];
+ auto* statuses = new MPI_Status[num_reqs];
+ auto* reqs = new MPI_Request[num_reqs];
MPI_Request* req_ptr = reqs;
recv_offset = (rank % two_dsize) * block_size * num_procs;
static_cast<char *>(recvbuf) + rank * recvcount * recvext, recvcount, recvtype);
if (err == MPI_SUCCESS && size > 1) {
/* Initiate all send/recv to/from others. */
- MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+ auto* requests = new MPI_Request[2 * (size - 1)];
/* Post all receives first -- a simple optimization */
count = 0;
for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
/* FIXME: This should use the memory macros (there are storage
leaks here if there is an error, for example) */
- MPI_Request* reqarray = new MPI_Request[2 * bblock];
- MPI_Status* starray = new MPI_Status[2 * bblock];
+ auto* reqarray = new MPI_Request[2 * bblock];
+ auto* starray = new MPI_Status[2 * bblock];
for (ii=0; ii<comm_size; ii+=bblock) {
ss = comm_size-ii < bblock ? comm_size-ii : bblock;
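The ternary above caps each batch of posted requests at bblock, with a shorter final window. A standalone sketch of the same windowing (the sizes are illustrative):

#include <algorithm>
#include <iostream>

int main()
{
  const int comm_size = 10;
  const int bblock    = 4; // post at most this many isends/irecvs at a time
  for (int ii = 0; ii < comm_size; ii += bblock) {
    int ss = std::min(comm_size - ii, bblock); // same as the ternary above
    std::cout << "posting requests for ranks [" << ii << ", " << ii + ss << ")\n";
  }
}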
int ii, ss, dst;
/* post only bblock isends/irecvs at a time as suggested by Tony Ladd */
for (ii = 0; ii < size; ii += bblock) {
- MPI_Request* requests = new MPI_Request[2 * bblock];
+ auto* requests = new MPI_Request[2 * bblock];
ss = size - ii < bblock ? size - ii : bblock;
count = 0;
}
/* Now, initiate all send/recv to/from others. */
- MPI_Request* ireqs = new MPI_Request[size * 2];
+ auto* ireqs = new MPI_Request[size * 2];
int nreqs = 0;
MPI_Request* preq = ireqs;
// pipelining
else {
- MPI_Request* send_request_array = new MPI_Request[2 * (size + pipe_length)];
- MPI_Request* recv_request_array = new MPI_Request[size + pipe_length];
- MPI_Status* send_status_array = new MPI_Status[2 * (size + pipe_length)];
- MPI_Status* recv_status_array = new MPI_Status[size + pipe_length];
+ auto* send_request_array = new MPI_Request[2 * (size + pipe_length)];
+ auto* recv_request_array = new MPI_Request[size + pipe_length];
+ auto* send_status_array = new MPI_Status[2 * (size + pipe_length)];
+ auto* recv_status_array = new MPI_Status[size + pipe_length];
/* case: root */
if (rank == 0) {
/* pipeline bcast */
else {
- MPI_Request* send_request_array = new MPI_Request[size + pipe_length];
- MPI_Request* recv_request_array = new MPI_Request[size + pipe_length];
- MPI_Status* send_status_array = new MPI_Status[size + pipe_length];
- MPI_Status* recv_status_array = new MPI_Status[size + pipe_length];
+ auto* send_request_array = new MPI_Request[size + pipe_length];
+ auto* recv_request_array = new MPI_Request[size + pipe_length];
+ auto* send_status_array = new MPI_Status[size + pipe_length];
+ auto* recv_status_array = new MPI_Status[size + pipe_length];
/* root send data */
if (rank == 0) {
/* pipeline bcast */
else {
- MPI_Request* send_request_array = new MPI_Request[size + pipe_length];
- MPI_Request* recv_request_array = new MPI_Request[size + pipe_length];
- MPI_Status* send_status_array = new MPI_Status[size + pipe_length];
- MPI_Status* recv_status_array = new MPI_Status[size + pipe_length];
+ auto* send_request_array = new MPI_Request[size + pipe_length];
+ auto* recv_request_array = new MPI_Request[size + pipe_length];
+ auto* send_status_array = new MPI_Status[size + pipe_length];
+ auto* recv_status_array = new MPI_Status[size + pipe_length];
/* root send data */
if (rank == 0) {
// pipeline bcast
else {
- MPI_Request* request_array = new MPI_Request[size + pipe_length];
- MPI_Status* status_array = new MPI_Status[size + pipe_length];
+ auto* request_array = new MPI_Request[size + pipe_length];
+ auto* status_array = new MPI_Status[size + pipe_length];
// case ROOT-of-each-SMP
if (rank % host_num_core == 0) {
}
// pipeline bcast
else {
- MPI_Request* request_array = new MPI_Request[size + pipe_length];
- MPI_Status* status_array = new MPI_Status[size + pipe_length];
+ auto* request_array = new MPI_Request[size + pipe_length];
+ auto* status_array = new MPI_Status[size + pipe_length];
// case ROOT of each SMP
if (rank % num_core == 0) {
/* start pipeline bcast */
- MPI_Request* send_request_array = new MPI_Request[size + pipe_length];
- MPI_Request* recv_request_array = new MPI_Request[size + pipe_length];
- MPI_Status* send_status_array = new MPI_Status[size + pipe_length];
- MPI_Status* recv_status_array = new MPI_Status[size + pipe_length];
+ auto* send_request_array = new MPI_Request[size + pipe_length];
+ auto* recv_request_array = new MPI_Request[size + pipe_length];
+ auto* send_status_array = new MPI_Status[size + pipe_length];
+ auto* recv_status_array = new MPI_Status[size + pipe_length];
/* root */
if (rank == 0) {
}
/* pipeline bcast */
else {
- MPI_Request* send_request_array = new MPI_Request[size + pipe_length];
- MPI_Request* recv_request_array = new MPI_Request[size + pipe_length];
- MPI_Status* send_status_array = new MPI_Status[size + pipe_length];
- MPI_Status* recv_status_array = new MPI_Status[size + pipe_length];
+ auto* send_request_array = new MPI_Request[size + pipe_length];
+ auto* recv_request_array = new MPI_Request[size + pipe_length];
+ auto* send_status_array = new MPI_Status[size + pipe_length];
+ auto* recv_status_array = new MPI_Status[size + pipe_length];
if (rank == 0) {
//double start2 = MPI_Wtime();
rank = comm->rank();
num_procs = comm->size();
- MPI_Request* request_array = new MPI_Request[pipe_length];
- MPI_Status* status_array = new MPI_Status[pipe_length];
+ auto* request_array = new MPI_Request[pipe_length];
+ auto* status_array = new MPI_Status[pipe_length];
if (rank != root) {
for (i = 0; i < pipe_length; i++) {
}
else {
- MPI_Request* reqs = new MPI_Request[num_procs - 1];
+ auto* reqs = new MPI_Request[num_procs - 1];
MPI_Request* req_ptr = reqs;
// Root sends data to all others
local_size = comm->size();
rank = comm->rank();
- MPI_Request* reqarray = new MPI_Request[2 * mv2_intra_node_knomial_factor];
- MPI_Status* starray = new MPI_Status[2 * mv2_intra_node_knomial_factor];
+ auto* reqarray = new MPI_Request[2 * mv2_intra_node_knomial_factor];
+ auto* starray = new MPI_Status[2 * mv2_intra_node_knomial_factor];
/* intra-node k-nomial bcast */
if (local_size > 1) {
*/
char* ptmp;
MPI_Request first_segment_req;
- MPI_Request* reqs = new (std::nothrow) MPI_Request[size];
+ auto* reqs = new (std::nothrow) MPI_Request[size];
if (nullptr == reqs) {
ret = -1;
line = __LINE__;
/* pipeline */
else {
- MPI_Request* send_request_array = new MPI_Request[size + pipe_length];
- MPI_Request* recv_request_array = new MPI_Request[size + pipe_length];
- MPI_Status* send_status_array = new MPI_Status[size + pipe_length];
- MPI_Status* recv_status_array = new MPI_Status[size + pipe_length];
+ auto* send_request_array = new MPI_Request[size + pipe_length];
+ auto* recv_request_array = new MPI_Request[size + pipe_length];
+ auto* send_status_array = new MPI_Status[size + pipe_length];
+ auto* recv_status_array = new MPI_Status[size + pipe_length];
/* root recv data */
if (rank == root) {
else {
// printf("node %d start\n",rank);
- MPI_Request* send_request_array = new MPI_Request[size + pipe_length];
- MPI_Request* recv_request_array = new MPI_Request[size + pipe_length];
- MPI_Status* send_status_array = new MPI_Status[size + pipe_length];
- MPI_Status* recv_status_array = new MPI_Status[size + pipe_length];
+ auto* send_request_array = new MPI_Request[size + pipe_length];
+ auto* recv_request_array = new MPI_Request[size + pipe_length];
+ auto* send_status_array = new MPI_Status[size + pipe_length];
+ auto* recv_status_array = new MPI_Status[size + pipe_length];
if (rank == 0) {
sent_count = 0;
&dst, &expected_send_count, &expected_recv_count, &src_array);
if(expected_recv_count > 0 ) {
- unsigned char** tmp_buf = new unsigned char*[expected_recv_count];
- MPI_Request* requests = new MPI_Request[expected_recv_count];
+ auto** tmp_buf = new unsigned char*[expected_recv_count];
+ auto* requests = new MPI_Request[expected_recv_count];
for (k = 0; k < expected_recv_count; k++) {
tmp_buf[k] = smpi_get_tmp_sendbuffer(count * std::max(extent, true_extent));
tmp_buf[k] = tmp_buf[k] - true_lb;
else {
int creq = 0;
- MPI_Request* sreq = new (std::nothrow) MPI_Request[max_outstanding_reqs];
+ auto* sreq = new (std::nothrow) MPI_Request[max_outstanding_reqs];
if (nullptr == sreq) {
line = __LINE__;
ret = -1;
Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);
// Send/Recv buffers to/from others
- MPI_Request* requests = new MPI_Request[size - 1];
- unsigned char** tmpbufs = new unsigned char*[rank];
+ auto* requests = new MPI_Request[size - 1];
+ auto** tmpbufs = new unsigned char*[rank];
int index = 0;
for (int other = 0; other < rank; other++) {
tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
datatype->extent(&lb, &dataext);
// Send/Recv buffers to/from others
- MPI_Request* requests = new MPI_Request[size - 1];
- unsigned char** tmpbufs = new unsigned char*[rank];
+ auto* requests = new MPI_Request[size - 1];
+ auto** tmpbufs = new unsigned char*[rank];
int index = 0;
for (int other = 0; other < rank; other++) {
tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
simgrid::smpi::colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_allgather_num_ppn_conf = 3;
mv2_allgather_thresholds_table = new mv2_allgather_tuning_table*[mv2_allgather_num_ppn_conf];
- mv2_allgather_tuning_table** table_ptrs = new mv2_allgather_tuning_table*[mv2_allgather_num_ppn_conf];
+ auto** table_ptrs = new mv2_allgather_tuning_table*[mv2_allgather_num_ppn_conf];
mv2_size_allgather_tuning_table = new int[mv2_allgather_num_ppn_conf];
mv2_allgather_table_ppn_conf = new int[mv2_allgather_num_ppn_conf];
mv2_allgather_table_ppn_conf[0] = 1;
int agg_table_sum = 0;
mv2_scatter_num_ppn_conf = 3;
mv2_scatter_thresholds_table = new mv2_scatter_tuning_table*[mv2_scatter_num_ppn_conf];
- mv2_scatter_tuning_table** table_ptrs = new mv2_scatter_tuning_table*[mv2_scatter_num_ppn_conf];
+ auto** table_ptrs = new mv2_scatter_tuning_table*[mv2_scatter_num_ppn_conf];
mv2_size_scatter_tuning_table = new int[mv2_scatter_num_ppn_conf];
mv2_scatter_table_ppn_conf = new int[mv2_scatter_num_ppn_conf];
mv2_scatter_table_ppn_conf[0] = 1;
(*request) = new Request( nullptr, 0, MPI_BYTE,
rank,rank, system_tag, comm, MPI_REQ_PERSISTENT);
if (rank > 0) {
- MPI_Request* requests = new MPI_Request[2];
+ auto* requests = new MPI_Request[2];
requests[0] = Request::isend (nullptr, 0, MPI_BYTE, 0,
system_tag,
comm);
(*request)->set_nbc_requests(requests, 2);
}
else {
- MPI_Request* requests = new MPI_Request[(size - 1) * 2];
+ auto* requests = new MPI_Request[(size - 1) * 2];
for (int i = 1; i < 2 * size - 1; i += 2) {
requests[i - 1] = Request::irecv(nullptr, 0, MPI_BYTE, MPI_ANY_SOURCE, system_tag, comm);
requests[i] = Request::isend(nullptr, 0, MPI_BYTE, (i + 1) / 2, system_tag, comm);
(*request) = new Request( nullptr, 0, MPI_BYTE,
rank,rank, system_tag, comm, MPI_REQ_PERSISTENT);
if (rank != root) {
- MPI_Request* requests = new MPI_Request[1];
+ auto* requests = new MPI_Request[1];
requests[0] = Request::irecv (buf, count, datatype, root,
system_tag,
comm);
(*request)->set_nbc_requests(requests, 1);
}
else {
- MPI_Request* requests = new MPI_Request[size - 1];
+ auto* requests = new MPI_Request[size - 1];
int n = 0;
for (int i = 0; i < size; i++) {
if(i!=root){
Datatype::copy(sendbuf, sendcount, sendtype, static_cast<char *>(recvbuf) + rank * recvcount * recvext, recvcount,
recvtype);
// Send/Recv buffers to/from others;
- MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+ auto* requests = new MPI_Request[2 * (size - 1)];
int index = 0;
for (int other = 0; other < size; other++) {
if(other != rank) {
(*request) = new Request( nullptr, 0, MPI_BYTE,
rank,rank, system_tag, comm, MPI_REQ_PERSISTENT);
if(rank != root) {
- MPI_Request* requests = new MPI_Request[1];
+ auto* requests = new MPI_Request[1];
// Recv buffer from root
requests[0] = Request::irecv(recvbuf, recvcount, recvtype, root, system_tag, comm);
(*request)->set_nbc_requests(requests, 1);
sendcount, sendtype, recvbuf, recvcount, recvtype);
}
// Send buffers to receivers
- MPI_Request* requests = new MPI_Request[size - 1];
+ auto* requests = new MPI_Request[size - 1];
int index = 0;
for(int dst = 0; dst < size; dst++) {
if(dst != root) {
Datatype::copy(sendbuf, sendcount, sendtype,
static_cast<char *>(recvbuf) + displs[rank] * recvext,recvcounts[rank], recvtype);
// Send buffers to others;
- MPI_Request *requests = new MPI_Request[2 * (size - 1)];
+ auto* requests = new MPI_Request[2 * (size - 1)];
int index = 0;
for (int other = 0; other < size; other++) {
if(other != rank) {
static_cast<char *>(recvbuf) + rank * recvcount * recvext, recvcount, recvtype);
if (err == MPI_SUCCESS && size > 1) {
/* Initiate all send/recv to/from others. */
- MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+ auto* requests = new MPI_Request[2 * (size - 1)];
/* Post all receives first -- a simple optimization */
int count = 0;
for (int i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
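The loop header above is the usual ring walk: start just after our own rank and wrap modulo size until we come back around, visiting every other rank exactly once. Sketch:

#include <iostream>

int main()
{
  const int size = 5;
  const int rank = 2;
  for (int i = (rank + 1) % size; i != rank; i = (i + 1) % size)
    std::cout << i << ' '; // prints: 3 4 0 1
  std::cout << '\n';
}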
static_cast<char *>(recvbuf) + recvdisps[rank] * recvext, recvcounts[rank], recvtype);
if (err == MPI_SUCCESS && size > 1) {
/* Initiate all send/recv to/from others. */
- MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+ auto* requests = new MPI_Request[2 * (size - 1)];
int count = 0;
/* Create all receives that will be posted first */
for (int i = 0; i < size; ++i) {
static_cast<char *>(recvbuf) + recvdisps[rank], recvcounts[rank], recvtypes[rank]): MPI_SUCCESS;
if (err == MPI_SUCCESS && size > 1) {
/* Initiate all send/recv to/from others. */
- MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+ auto* requests = new MPI_Request[2 * (size - 1)];
int count = 0;
/* Create all receives that will be posted first */
for (int i = 0; i < size; ++i) {
rank,rank, system_tag, comm, MPI_REQ_PERSISTENT);
if(rank != root) {
// Send buffer to root
- MPI_Request* requests = new MPI_Request[1];
+ auto* requests = new MPI_Request[1];
requests[0]=Request::isend(sendbuf, sendcount, sendtype, root, system_tag, comm);
(*request)->set_nbc_requests(requests, 1);
} else {
Datatype::copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + root * recvcount * recvext,
recvcount, recvtype);
// Receive buffers from senders
- MPI_Request* requests = new MPI_Request[size - 1];
+ auto* requests = new MPI_Request[size - 1];
int index = 0;
for (int src = 0; src < size; src++) {
if(src != root) {
rank,rank, system_tag, comm, MPI_REQ_PERSISTENT);
if (rank != root) {
// Send buffer to root
- MPI_Request* requests = new MPI_Request[1];
+ auto* requests = new MPI_Request[1];
requests[0]=Request::isend(sendbuf, sendcount, sendtype, root, system_tag, comm);
(*request)->set_nbc_requests(requests, 1);
} else {
Datatype::copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + displs[root] * recvext,
recvcounts[root], recvtype);
// Receive buffers from senders
- MPI_Request* requests = new MPI_Request[size - 1];
+ auto* requests = new MPI_Request[size - 1];
int index = 0;
for (int src = 0; src < size; src++) {
if(src != root) {
rank,rank, system_tag, comm, MPI_REQ_PERSISTENT);
if(rank != root) {
// Recv buffer from root
- MPI_Request* requests = new MPI_Request[1];
+ auto* requests = new MPI_Request[1];
requests[0]=Request::irecv(recvbuf, recvcount, recvtype, root, system_tag, comm);
(*request)->set_nbc_requests(requests, 1);
} else {
sendtype, recvbuf, recvcount, recvtype);
}
// Send buffers to receivers
- MPI_Request *requests = new MPI_Request[size - 1];
+ auto* requests = new MPI_Request[size - 1];
int index = 0;
for (int dst = 0; dst < size; dst++) {
if (dst != root) {
if(rank != root) {
// Send buffer to root
- MPI_Request* requests = new MPI_Request[1];
+ auto* requests = new MPI_Request[1];
requests[0] = Request::isend(real_sendbuf, count, datatype, root, system_tag, comm);
(*request)->set_nbc_requests(requests, 1);
} else {
if (real_sendbuf != nullptr && recvbuf != nullptr)
Datatype::copy(real_sendbuf, count, datatype, recvbuf, count, datatype);
// Receive buffers from senders
- MPI_Request *requests = new MPI_Request[size - 1];
+ auto* requests = new MPI_Request[size - 1];
int index = 0;
for (int src = 0; src < size; src++) {
if (src != root) {
// Local copy from self
Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);
// Send/Recv buffers to/from others;
- MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+ auto* requests = new MPI_Request[2 * (size - 1)];
int index = 0;
for (int other = 0; other < size; other++) {
if(other != rank) {
Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);
// Send/Recv buffers to/from others
- MPI_Request *requests = new MPI_Request[size - 1];
+ auto* requests = new MPI_Request[size - 1];
int index = 0;
for (int other = 0; other < rank; other++) {
requests[index] = Request::irecv_init(smpi_get_tmp_sendbuffer(count * dataext), count, datatype, other, system_tag, comm);
memset(recvbuf, 0, count*dataext);
// Send/Recv buffers to/from others
- MPI_Request *requests = new MPI_Request[size - 1];
+ auto* requests = new MPI_Request[size - 1];
int index = 0;
for (int other = 0; other < rank; other++) {
requests[index] = Request::irecv_init(smpi_get_tmp_sendbuffer(count * dataext), count, datatype, other, system_tag, comm);
datatype->extent(&lb, &dataext);
// Send/Recv buffers to/from others;
- MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+ auto* requests = new MPI_Request[2 * (size - 1)];
int index = 0;
int recvdisp=0;
for (int other = 0; other < size; other++) {
smpi_bench_end();
double now = SIMIX_get_clock();
- unsigned long long sec = static_cast<unsigned long long>(now);
+ auto sec = static_cast<unsigned long long>(now);
unsigned long long pre = (now - sec) * smpi_rastro_resolution();
smpi_bench_begin();
return sec * smpi_rastro_resolution() + pre;
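The timestamp hunk splits the floating-point clock into whole seconds and a fractional tick count before recombining them as an integer. A sketch assuming a microsecond tick rate (the real value comes from smpi_rastro_resolution()):

#include <iostream>

int main()
{
  const unsigned long long resolution = 1000000; // assumed; see lead-in
  double now = 12.345678; // simulated clock, in seconds
  auto sec = static_cast<unsigned long long>(now);                      // whole seconds
  auto pre = static_cast<unsigned long long>((now - sec) * resolution); // fraction, in ticks
  std::cout << sec * resolution + pre << '\n'; // ~12345678 (truncated, not rounded)
}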
MPI_Request find(int src, int dst, int tag)
{
- req_storage_t::iterator it = store.find(req_key_t(src, dst, tag));
+ auto it = store.find(req_key_t(src, dst, tag));
return (it == store.end()) ? MPI_REQUEST_NULL : it->second;
}
if (size > 0) {
size = 0;
std::vector<MPI_Request> myreqqs;
- std::vector<MPI_Request>::iterator iter = reqqs->begin();
+ auto iter = reqqs->begin();
int proc_id = comm_->group()->actor(rank)->get_pid();
while (iter != reqqs->end()){
// Let's see if we're either the destination or the sender of this request
to->ActiveCommsDown[from] -= 1;
to->nbActiveCommsDown--;
- std::vector<ActiveComm*>::iterator it =
- std::find_if(begin(from->ActiveCommsUp), end(from->ActiveCommsUp),
- [action](const ActiveComm* comm) { return comm->action == action; });
+ auto it = std::find_if(begin(from->ActiveCommsUp), end(from->ActiveCommsUp),
+ [action](const ActiveComm* comm) { return comm->action == action; });
if (it != std::end(from->ActiveCommsUp)) {
delete *it;
from->ActiveCommsUp.erase(it);
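The hunk above pairs std::find_if with delete-then-erase because the vector owns raw pointers: the object must be freed before its pointer is removed. A standalone sketch, with ActiveComm reduced to the one field the predicate needs:

#include <algorithm>
#include <iostream>
#include <vector>

struct ActiveComm {
  int action; // stand-in for the simulation action being matched
};

int main()
{
  std::vector<ActiveComm*> up{new ActiveComm{1}, new ActiveComm{2}, new ActiveComm{3}};
  int action = 2;
  auto it    = std::find_if(begin(up), end(up),
                            [action](const ActiveComm* c) { return c->action == action; });
  if (it != std::end(up)) {
    delete *it;   // free the object first...
    up.erase(it); // ...then drop the dangling pointer
  }
  std::cout << up.size() << '\n'; // 2
  for (auto* c : up)
    delete c;
}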
ns3::NetDeviceContainer netA;
WifiZone* zone = WifiZone::by_name(name);
xbt_assert(zone != nullptr, "Link name '%s' does not match the 'wifi_link' property of a host.", name.c_str());
- NetPointNs3* netpoint_ns3 = zone->get_host()->get_netpoint()->extension<NetPointNs3>();
+ auto* netpoint_ns3 = zone->get_host()->get_netpoint()->extension<NetPointNs3>();
wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager", "ControlMode", ns3::StringValue("HtMcs0"), "DataMode",
ns3::StringValue("HtMcs" + std::to_string(zone->get_mcs())));