From 5e1fa53a0d0ae78cebae28364e6802aa1db1cba3 Mon Sep 17 00:00:00 2001
From: Arnaud Giersch
Date: Mon, 5 Oct 2020 22:59:22 +0200
Subject: [PATCH] Modern C++: use "auto".

Clang-tidy enabled checks: modernize-use-auto
---
 examples/s4u/app-bittorrent/s4u-peer.cpp      |  2 +-
 .../s4u/dht-kademlia/s4u-dht-kademlia.cpp     |  8 ++--
 examples/s4u/network-ns3/s4u-network-ns3.cpp  |  3 +-
 src/bindings/java/jmsg.cpp                    | 13 +++---
 src/bindings/java/jmsg_comm.cpp               |  6 +--
 src/bindings/java/jmsg_host.cpp               |  2 +-
 src/bindings/java/jmsg_process.cpp            |  2 +-
 src/bindings/java/jmsg_synchro.cpp            |  2 +-
 src/bindings/java/jmsg_task.cpp               | 18 ++++----
 src/bindings/java/jmsg_vm.cpp                 |  4 +-
 src/bindings/java/jxbt_utilities.cpp          |  6 +--
 src/instr/instr_paje_trace.cpp                |  2 +-
 src/instr/instr_platform.cpp                  | 12 ++---
 src/kernel/routing/FatTreeZone.cpp            |  2 +-
 src/kernel/routing/VivaldiZone.cpp            |  2 +-
 src/plugins/file_system/s4u_FileSystem.cpp    |  2 +-
 src/plugins/host_energy.cpp                   |  4 +-
 src/plugins/link_energy.cpp                   |  2 +-
 src/simgrid/util.hpp                          |  2 +-
 src/smpi/colls/allgather/allgather-2dmesh.cpp |  2 +-
 src/smpi/colls/allgather/allgather-3dmesh.cpp |  2 +-
 .../colls/allgather/allgather-NTSLR-NB.cpp    |  4 +-
 .../colls/allgather/allgather-SMP-NTS.cpp     |  4 +-
 .../colls/allgather/allgather-smp-simple.cpp  |  4 +-
 .../allgather/allgather-spreading-simple.cpp  |  2 +-
 src/smpi/colls/alltoall/alltoall-2dmesh.cpp   |  4 +-
 src/smpi/colls/alltoall/alltoall-3dmesh.cpp   |  4 +-
 .../colls/alltoall/alltoall-basic-linear.cpp  |  2 +-
 .../alltoall-mvapich-scatter-dest.cpp         |  4 +-
 src/smpi/colls/alltoallv/alltoallv-bruck.cpp  |  2 +-
 .../alltoallv/alltoallv-ompi-basic-linear.cpp |  2 +-
 src/smpi/colls/bcast/bcast-NTSB.cpp           |  8 ++--
 src/smpi/colls/bcast/bcast-NTSL-Isend.cpp     |  8 ++--
 src/smpi/colls/bcast/bcast-NTSL.cpp           |  8 ++--
 src/smpi/colls/bcast/bcast-SMP-binary.cpp     |  4 +-
 src/smpi/colls/bcast/bcast-SMP-linear.cpp     |  4 +-
 .../bcast-arrival-pattern-aware-wait.cpp      |  8 ++--
 .../bcast/bcast-arrival-pattern-aware.cpp     |  8 ++--
 .../colls/bcast/bcast-flattree-pipeline.cpp   |  4 +-
 src/smpi/colls/bcast/bcast-flattree.cpp       |  2 +-
 src/smpi/colls/bcast/bcast-mvapich-smp.cpp    |  4 +-
 src/smpi/colls/gather/gather-ompi.cpp         |  2 +-
 src/smpi/colls/reduce/reduce-NTSL.cpp         |  8 ++--
 .../reduce/reduce-arrival-pattern-aware.cpp   |  8 ++--
 .../colls/reduce/reduce-mvapich-knomial.cpp   |  4 +-
 src/smpi/colls/reduce/reduce-ompi.cpp         |  2 +-
 src/smpi/colls/smpi_coll.cpp                  |  8 ++--
 .../colls/smpi_mvapich2_selector_stampede.hpp |  4 +-
 src/smpi/colls/smpi_nbc_impl.cpp              | 46 +++++++++----------
 src/smpi/internals/smpi_bench.cpp             |  2 +-
 src/smpi/internals/smpi_replay.cpp            |  2 +-
 src/smpi/mpi/smpi_win.cpp                     |  2 +-
 src/surf/network_ib.cpp                       |  5 +-
 src/surf/network_ns3.cpp                      |  2 +-
 54 files changed, 140 insertions(+), 143 deletions(-)

diff --git a/examples/s4u/app-bittorrent/s4u-peer.cpp b/examples/s4u/app-bittorrent/s4u-peer.cpp
index fe653ac776..1b34b49466 100644
--- a/examples/s4u/app-bittorrent/s4u-peer.cpp
+++ b/examples/s4u/app-bittorrent/s4u-peer.cpp
@@ -552,7 +552,7 @@ void Peer::updateChokedPeers()
     int j = 0;
     do { // We choose a random peer to unchoke.
-      std::unordered_map<int, Connection>::iterator chosen_peer_it = connected_peers.begin();
+      auto chosen_peer_it = connected_peers.begin();
       std::advance(chosen_peer_it, random.uniform_int(0, static_cast<int>(connected_peers.size() - 1)));
       chosen_peer = &chosen_peer_it->second;
       if (not chosen_peer->interested || not chosen_peer->choked_upload)
diff --git a/examples/s4u/dht-kademlia/s4u-dht-kademlia.cpp b/examples/s4u/dht-kademlia/s4u-dht-kademlia.cpp
index 6d595d8704..60362b8a3c 100644
--- a/examples/s4u/dht-kademlia/s4u-dht-kademlia.cpp
+++ b/examples/s4u/dht-kademlia/s4u-dht-kademlia.cpp
@@ -23,14 +23,14 @@ static void node(int argc, char* argv[])
   double deadline;
   xbt_assert(argc == 3 || argc == 4, "Wrong number of arguments");
   /* Node initialization */
-  unsigned int node_id = static_cast<unsigned int>(strtoul(argv[1], nullptr, 0));
+  auto node_id = static_cast<unsigned int>(strtoul(argv[1], nullptr, 0));
   kademlia::Node node(node_id);

   if (argc == 4) {
     XBT_INFO("Hi, I'm going to join the network with id %u", node.getId());
-    unsigned int known_id = static_cast<unsigned int>(strtoul(argv[2], nullptr, 0));
-    join_success          = node.join(known_id);
-    deadline              = std::stod(argv[3]);
+    auto known_id = static_cast<unsigned int>(strtoul(argv[2], nullptr, 0));
+    join_success  = node.join(known_id);
+    deadline      = std::stod(argv[3]);
   } else {
     deadline = std::stod(argv[2]);
     XBT_INFO("Hi, I'm going to create the network with id %u", node.getId());
diff --git a/examples/s4u/network-ns3/s4u-network-ns3.cpp b/examples/s4u/network-ns3/s4u-network-ns3.cpp
index 3c150a4ba6..83e4f0a01d 100644
--- a/examples/s4u/network-ns3/s4u-network-ns3.cpp
+++ b/examples/s4u/network-ns3/s4u-network-ns3.cpp
@@ -32,8 +32,7 @@ static void master(int argc, char* argv[])
   masternames[id] = simgrid::s4u::Host::current()->get_cname();

-  double* payload = new double();
-  *payload        = msg_size;
+  auto* payload = new double(msg_size);

   count_finished++;
   timer_start = 1;
diff --git a/src/bindings/java/jmsg.cpp b/src/bindings/java/jmsg.cpp
index 92f1b4678f..6c984d7af4 100644
--- a/src/bindings/java/jmsg.cpp
+++ b/src/bindings/java/jmsg.cpp
@@ -106,7 +106,7 @@ JNIEXPORT void JNICALL Java_org_simgrid_msg_Msg_init(JNIEnv* env, jclass, jobjec
   args.emplace_back("java");

   for (int index = 1; index < argc; index++) {
-    jstring jval    = (jstring)env->GetObjectArrayElement(jargs, index - 1);
+    auto jval       = (jstring)env->GetObjectArrayElement(jargs, index - 1);
     const char* tmp = env->GetStringUTFChars(jval, nullptr);
     args.emplace_back(tmp);
     env->ReleaseStringUTFChars(jval, tmp);
@@ -138,7 +138,7 @@ JNIEXPORT void JNICALL JNICALL Java_org_simgrid_msg_Msg_run(JNIEnv* env, jclass)
   sg_host_t* hosts  = sg_host_list();
   size_t host_count = sg_host_count();
   for (size_t index = 0; index < host_count - 1; index++) {
-    jobject jhost = (jobject)hosts[index]->extension(JAVA_HOST_LEVEL);
+    auto jhost = (jobject)hosts[index]->extension(JAVA_HOST_LEVEL);
     if (jhost)
       jhost_unref(env, jhost);
   }
@@ -277,8 +277,7 @@ static void run_jprocess(JNIEnv *env, jobject jprocess)
 static void java_main(int argc, char* argv[])
 {
   JNIEnv *env = get_current_thread_env();
-  simgrid::kernel::context::JavaContext* context =
-      static_cast<simgrid::kernel::context::JavaContext*>(simgrid::kernel::context::Context::self());
+  auto* context = static_cast<simgrid::kernel::context::JavaContext*>(simgrid::kernel::context::Context::self());

   //Change the "." in class name for "/".
   std::string arg0 = argv[0];
@@ -292,8 +291,8 @@ static void java_main(int argc, char* argv[])
   //Retrieve the name of the process.
   jstring jname = env->NewStringUTF(argv[0]);
   //Build the arguments
-  jobjectArray args = static_cast<jobjectArray>(env->NewObjectArray(argc - 1, env->FindClass("java/lang/String"),
-                                                                    env->NewStringUTF("")));
+  auto args = static_cast<jobjectArray>(
+      env->NewObjectArray(argc - 1, env->FindClass("java/lang/String"), env->NewStringUTF("")));
   for (int i = 1; i < argc; i++)
     env->SetObjectArrayElement(args, i - 1, env->NewStringUTF(argv[i]));
   //Retrieve the host for the process.
@@ -323,7 +322,7 @@ namespace context {
 void java_main_jprocess(jobject jprocess)
 {
   JNIEnv *env = get_current_thread_env();
-  JavaContext* context = static_cast<JavaContext*>(Context::self());
+  auto* context        = static_cast<JavaContext*>(Context::self());
   context->jprocess_   = jprocess;
   jprocess_bind(context->jprocess_, sg_actor_self(), env);
diff --git a/src/bindings/java/jmsg_comm.cpp b/src/bindings/java/jmsg_comm.cpp
index 76f56d9d14..0e4ed684b5 100644
--- a/src/bindings/java/jmsg_comm.cpp
+++ b/src/bindings/java/jmsg_comm.cpp
@@ -27,7 +27,7 @@ void jcomm_bind_task(JNIEnv *env, jobject jcomm) {
   //bind the task object.
   msg_task_t task = MSG_comm_get_task(comm);
   xbt_assert(task != nullptr, "Task is nullptr");
-  jobject jtask_global = static_cast<jobject>(MSG_task_get_data(task));
+  auto jtask_global = static_cast<jobject>(MSG_task_get_data(task));
   //case where the data has already been retrieved
   if (jtask_global == nullptr) {
     return;
@@ -95,7 +95,7 @@ JNIEXPORT jboolean JNICALL Java_org_simgrid_msg_Comm_test(JNIEnv *env, jobject j
 }

 JNIEXPORT void JNICALL Java_org_simgrid_msg_Comm_waitCompletion(JNIEnv *env, jobject jcomm, jdouble timeout) {
-  msg_comm_t comm = (msg_comm_t) (uintptr_t) env->GetLongField(jcomm, jcomm_field_Comm_bind);
+  auto comm = (msg_comm_t)(uintptr_t)env->GetLongField(jcomm, jcomm_field_Comm_bind);
   if (not comm) {
     jxbt_throw_null(env, "comm is null");
     return;
@@ -119,7 +119,7 @@ JNIEXPORT void JNICALL Java_org_simgrid_msg_Comm_waitCompletion(JNIEnv *env, job
 static msg_comm_t* jarray_to_commArray(JNIEnv *env, jobjectArray jcomms, /* OUT */ int *count)
 {
   *count = env->GetArrayLength(jcomms);
-  msg_comm_t* comms = new msg_comm_t[*count];
+  auto* comms = new msg_comm_t[*count];
   for (int i=0; i < *count; i++) {
     jobject jcomm = env->GetObjectArrayElement(jcomms, i);
diff --git a/src/bindings/java/jmsg_host.cpp b/src/bindings/java/jmsg_host.cpp
index 75c2ad4db5..655f1ed813 100644
--- a/src/bindings/java/jmsg_host.cpp
+++ b/src/bindings/java/jmsg_host.cpp
@@ -309,7 +309,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_simgrid_msg_Host_all(JNIEnv * env, jclas
   }

   for (int index = 0; index < count; index++) {
-    jobject jhost = static_cast<jobject>(table[index]->extension(JAVA_HOST_LEVEL));
+    auto jhost = static_cast<jobject>(table[index]->extension(JAVA_HOST_LEVEL));
     if (not jhost) {
       jstring jname = env->NewStringUTF(table[index]->get_cname());
diff --git a/src/bindings/java/jmsg_process.cpp b/src/bindings/java/jmsg_process.cpp
index bf00535b1d..2e6c77bc54 100644
--- a/src/bindings/java/jmsg_process.cpp
+++ b/src/bindings/java/jmsg_process.cpp
@@ -70,7 +70,7 @@ JNIEXPORT void JNICALL Java_org_simgrid_msg_Process_create(JNIEnv* env, jobject
   jobject jprocess = jprocess_ref(jprocess_arg, env);

   /* Actually build the MSG process */
-  jstring jname    = (jstring)env->GetObjectField(jprocess, jprocess_field_Process_name);
+  auto jname       = (jstring)env->GetObjectField(jprocess, jprocess_field_Process_name);
   const char* name = env->GetStringUTFChars(jname, nullptr);
   auto actor_code  = [jprocess]() { simgrid::kernel::context::java_main_jprocess(jprocess); };
   smx_actor_t self = SIMIX_process_self();
diff --git a/src/bindings/java/jmsg_synchro.cpp b/src/bindings/java/jmsg_synchro.cpp
index 157f37b895..fc4604212f 100644
--- a/src/bindings/java/jmsg_synchro.cpp
+++ b/src/bindings/java/jmsg_synchro.cpp
@@ -28,7 +28,7 @@ JNIEXPORT void JNICALL Java_org_simgrid_msg_Mutex_init(JNIEnv * env, jobject obj
 }

 JNIEXPORT void JNICALL Java_org_simgrid_msg_Mutex_acquire(JNIEnv * env, jobject obj) {
-  sg_mutex_t mutex = (sg_mutex_t)(uintptr_t)env->GetLongField(obj, jsynchro_field_Mutex_bind);
+  auto mutex = (sg_mutex_t)(uintptr_t)env->GetLongField(obj, jsynchro_field_Mutex_bind);
   sg_mutex_lock(mutex);
 }
diff --git a/src/bindings/java/jmsg_task.cpp b/src/bindings/java/jmsg_task.cpp
index 884a6ccdda..0bd50d6925 100644
--- a/src/bindings/java/jmsg_task.cpp
+++ b/src/bindings/java/jmsg_task.cpp
@@ -76,8 +76,8 @@ JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_parallelCreate(JNIEnv * env, jo
   int host_count = env->GetArrayLength(jhosts);

   jdouble* jcomputeDurations = env->GetDoubleArrayElements(jcomputeDurations_arg, nullptr);
-  msg_host_t* hosts          = new msg_host_t[host_count];
-  double* computeDurations   = new double[host_count];
+  auto* hosts            = new msg_host_t[host_count];
+  auto* computeDurations = new double[host_count];
   for (int index = 0; index < host_count; index++) {
     jobject jhost = env->GetObjectArrayElement(jhosts, index);
     hosts[index]  = jhost_get_native(env, jhost);
@@ -86,7 +86,7 @@ JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_parallelCreate(JNIEnv * env, jo
   env->ReleaseDoubleArrayElements(jcomputeDurations_arg, jcomputeDurations, 0);

   jdouble* jmessageSizes = env->GetDoubleArrayElements(jmessageSizes_arg, nullptr);
-  double* messageSizes   = new double[host_count * host_count];
+  auto* messageSizes     = new double[host_count * host_count];
   for (int index = 0; index < host_count * host_count; index++) {
     messageSizes[index] = jmessageSizes[index];
   }
@@ -294,7 +294,7 @@ JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_receive(JNIEnv* env, jclass
     jmsg_throw_status(env, rv);
     return nullptr;
   }
-  jobject jtask_global = (jobject) MSG_task_get_data(task);
+  auto jtask_global = (jobject)MSG_task_get_data(task);

   /* Convert the global ref into a local ref so that the JVM can free the stuff */
   jobject jtask_local = env->NewLocalRef(jtask_global);
@@ -310,7 +310,7 @@ JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_irecv(JNIEnv * env, jclass c
     return nullptr;

   //pointer to store the task object pointer.
-  msg_task_t* task = new msg_task_t(nullptr);
+  auto* task = new msg_task_t(nullptr);
   /* There should be a cache here */

   jobject jcomm = env->NewObject(comm_class, jtask_method_Comm_constructor);
@@ -343,7 +343,7 @@ JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_receiveBounded(JNIEnv* env,
     jmsg_throw_status(env, res);
     return nullptr;
   }
-  jobject jtask_global = (jobject)MSG_task_get_data(task);
+  auto jtask_global = (jobject)MSG_task_get_data(task);

   /* Convert the global ref into a local ref so that the JVM can free the stuff */
   jobject jtask_local = env->NewLocalRef(jtask_global);
@@ -363,7 +363,7 @@ JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_irecvBounded(JNIEnv * env, j
     return nullptr;

   // pointer to store the task object pointer.
-  msg_task_t* task = new msg_task_t(nullptr);
+  auto* task = new msg_task_t(nullptr);

   jobject jcomm = env->NewObject(comm_class, jtask_method_Comm_constructor);
   if (not jcomm) {
@@ -464,10 +464,10 @@ JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_nativeFinalize(JNIEnv * env, jo
 }

 static void msg_task_cancel_on_failed_dsend(void*t) {
-  msg_task_t task = (msg_task_t) t;
+  auto task = (msg_task_t)t;
   JNIEnv* env = get_current_thread_env();
   if (env) {
-    jobject jtask_global = (jobject)MSG_task_get_data(task);
+    auto jtask_global = (jobject)MSG_task_get_data(task);
     /* Destroy the global ref so that the JVM can free the stuff */
     env->DeleteGlobalRef(jtask_global);
     /* Don't free the C data here, to avoid a race condition with the GC also sometimes doing so.
diff --git a/src/bindings/java/jmsg_vm.cpp b/src/bindings/java/jmsg_vm.cpp
index 6cebf90ea0..8e3193b441 100644
--- a/src/bindings/java/jmsg_vm.cpp
+++ b/src/bindings/java/jmsg_vm.cpp
@@ -89,9 +89,9 @@ JNIEXPORT jobjectArray JNICALL Java_org_simgrid_msg_VM_all(JNIEnv* env, jclass c
   std::vector<jobject> vms;

   for (size_t i = 0; i < host_count; i++) {
-    simgrid::s4u::VirtualMachine* vm = dynamic_cast<simgrid::s4u::VirtualMachine*>(hosts[i]);
+    auto* vm = dynamic_cast<simgrid::s4u::VirtualMachine*>(hosts[i]);
     if (vm != nullptr && vm->get_state() != simgrid::s4u::VirtualMachine::state::DESTROYED) {
-      jobject jvm = static_cast<jobject>(vm->extension(JAVA_HOST_LEVEL));
+      auto jvm = static_cast<jobject>(vm->extension(JAVA_HOST_LEVEL));
       vms.push_back(jvm);
     }
   }
diff --git a/src/bindings/java/jxbt_utilities.cpp b/src/bindings/java/jxbt_utilities.cpp
index 9eb4936451..9907ef0fc0 100644
--- a/src/bindings/java/jxbt_utilities.cpp
+++ b/src/bindings/java/jxbt_utilities.cpp
@@ -33,7 +33,7 @@ jmethodID jxbt_get_jmethod(JNIEnv * env, jclass cls, const char *name, const cha
   if (not id) {
     jmethodID tostr_id = env->GetMethodID(cls, "getName", "()Ljava/lang/String;");
-    jstring jclassname = (jstring) env->CallObjectMethod(cls, tostr_id, nullptr);
+    auto jclassname    = (jstring)env->CallObjectMethod(cls, tostr_id, nullptr);
     const char* classname = env->GetStringUTFChars(jclassname, nullptr);

     env->ReleaseStringUTFChars(jclassname, classname);
@@ -55,7 +55,7 @@ jmethodID jxbt_get_static_jmethod(JNIEnv * env, jclass cls, const char *name, co
   if (not id) {
     jmethodID tostr_id = env->GetMethodID(cls, "getName", "()Ljava/lang/String;");
-    jstring jclassname = (jstring) env->CallObjectMethod(cls, tostr_id, nullptr);
+    auto jclassname    = (jstring)env->CallObjectMethod(cls, tostr_id, nullptr);
     const char* classname = env->GetStringUTFChars(jclassname, nullptr);

     env->ReleaseStringUTFChars(jclassname, classname);
@@ -114,7 +114,7 @@ jfieldID jxbt_get_jfield(JNIEnv * env, jclass cls, const char *name, const char
   if (not id) {
     jmethodID getname_id = env->GetMethodID(cls, "getName", "()Ljava/lang/String;");
-    jstring jclassname = (jstring) env->CallObjectMethod(cls, getname_id, nullptr);
+    auto jclassname      = (jstring)env->CallObjectMethod(cls, getname_id, nullptr);
     const char* classname = env->GetStringUTFChars(jclassname, nullptr);

     env->ReleaseStringUTFChars(jclassname, classname);
diff --git a/src/instr/instr_paje_trace.cpp b/src/instr/instr_paje_trace.cpp
index 45113de4b3..1c6c1c407b 100644
--- a/src/instr/instr_paje_trace.cpp
+++ b/src/instr/instr_paje_trace.cpp
@@ -32,7 +32,7 @@ void dump_buffer(bool force)
     }
     buffer.clear();
   } else {
-    std::vector<simgrid::instr::PajeEvent*>::iterator i = buffer.begin();
+    auto i = buffer.begin();
     for (auto const& event : buffer) {
       double head_timestamp = event->timestamp_;
       if (head_timestamp > last_timestamp_to_dump)
diff --git a/src/instr/instr_platform.cpp b/src/instr/instr_platform.cpp
index 26acc23a17..6e825621cc 100644
--- a/src/instr/instr_platform.cpp
+++ b/src/instr/instr_platform.cpp
@@ -270,7 +270,7 @@ static void on_netzone_creation(s4u::NetZone const& netzone)
     xbt_assert(Container::get_root() == root);

     if (TRACE_smpi_is_enabled()) {
-      ContainerType* mpi = root->type_->by_name_or_create<ContainerType>("MPI");
+      auto* mpi = root->type_->by_name_or_create<ContainerType>("MPI");
       if (not TRACE_smpi_is_grouped())
         mpi->by_name_or_create<StateType>("MPI_STATE");
       root->type_->by_name_or_create("MPI_LINK", mpi, mpi);
@@ -332,7 +332,7 @@ static void on_host_creation(s4u::Host const& host)
     container->type_->by_name_or_create("speed_used", "0.5 0.5 0.5");

   if (TRACE_smpi_is_enabled() && TRACE_smpi_is_grouped()) {
-    ContainerType* mpi = container->type_->by_name_or_create<ContainerType>("MPI");
+    auto* mpi = container->type_->by_name_or_create<ContainerType>("MPI");
     mpi->by_name_or_create<StateType>("MPI_STATE");
     // TODO See if we can move this to the LoadBalancer plugin
     root->type_->by_name_or_create("MIGRATE_LINK", mpi, mpi);
@@ -381,8 +381,8 @@ static void on_actor_creation(s4u::Actor const& actor)
   std::string container_name = instr_pid(actor);

   container->create_child(container_name, "ACTOR");
-  ContainerType* actor_type = container->type_->by_name_or_create<ContainerType>("ACTOR");
-  StateType* state          = actor_type->by_name_or_create<StateType>("ACTOR_STATE");
+  auto* actor_type = container->type_->by_name_or_create<ContainerType>("ACTOR");
+  auto* state      = actor_type->by_name_or_create<StateType>("ACTOR_STATE");
   state->add_entity_value("suspend", "1 0 1");
   state->add_entity_value("sleep", "1 1 0");
   state->add_entity_value("receive", "1 0 0");
@@ -418,8 +418,8 @@ static void on_vm_creation(s4u::Host const& host)
 {
   const Container* container = new HostContainer(host, currentContainer.back());
   const Container* root      = Container::get_root();
-  ContainerType* vm = container->type_->by_name_or_create<ContainerType>("VM");
-  StateType* state  = vm->by_name_or_create<StateType>("VM_STATE");
+  auto* vm    = container->type_->by_name_or_create<ContainerType>("VM");
+  auto* state = vm->by_name_or_create<StateType>("VM_STATE");
   state->add_entity_value("suspend", "1 0 1");
   state->add_entity_value("sleep", "1 1 0");
   state->add_entity_value("receive", "1 0 0");
diff --git a/src/kernel/routing/FatTreeZone.cpp b/src/kernel/routing/FatTreeZone.cpp
index d891b6fd79..a10cbabe84 100644
--- a/src/kernel/routing/FatTreeZone.cpp
+++ b/src/kernel/routing/FatTreeZone.cpp
@@ -178,7 +178,7 @@ void FatTreeZone::seal()
 int FatTreeZone::connect_node_to_parents(FatTreeNode* node)
 {
-  std::vector<FatTreeNode*>::iterator currentParentNode = this->nodes_.begin();
+  auto currentParentNode = this->nodes_.begin();
   int connectionsNumber  = 0;
   const int level        = node->level;
   XBT_DEBUG("We are connecting node %d(%u,%u) to his parents.", node->id, node->level, node->position);
diff --git a/src/kernel/routing/VivaldiZone.cpp b/src/kernel/routing/VivaldiZone.cpp
index be055b470e..0cfec03597 100644
--- a/src/kernel/routing/VivaldiZone.cpp
+++ b/src/kernel/routing/VivaldiZone.cpp
@@ -55,7 +55,7 @@ static inline double euclidean_dist_comp(int index, std::vector<double>* src, st
 static std::vector<double>* netpoint_get_coords(NetPoint* np)
 {
-  vivaldi::Coords* coords = np->extension<vivaldi::Coords>();
+  auto* coords = np->extension<vivaldi::Coords>();
   xbt_assert(coords, "Please specify the Vivaldi coordinates of %s %s (%p)",
"Host" : "Router")), np->get_cname(), np); return &coords->coords; diff --git a/src/plugins/file_system/s4u_FileSystem.cpp b/src/plugins/file_system/s4u_FileSystem.cpp index 628c813a7c..516b680a63 100644 --- a/src/plugins/file_system/s4u_FileSystem.cpp +++ b/src/plugins/file_system/s4u_FileSystem.cpp @@ -106,7 +106,7 @@ File::File(const std::string& fullpath, sg_host_t host, void* userdata) : fullpa } // assign a file descriptor id to the newly opened File - FileDescriptorHostExt* ext = host->extension(); + auto* ext = host->extension(); if (ext->file_descriptor_table == nullptr) { ext->file_descriptor_table = std::make_unique>(sg_storage_max_file_descriptors); std::iota(ext->file_descriptor_table->rbegin(), ext->file_descriptor_table->rend(), 0); // Fill with ..., 1, 0. diff --git a/src/plugins/host_energy.cpp b/src/plugins/host_energy.cpp index 9f6dc51b7a..1fe704348c 100644 --- a/src/plugins/host_energy.cpp +++ b/src/plugins/host_energy.cpp @@ -481,7 +481,7 @@ static void on_action_state_change(simgrid::kernel::resource::CpuAction const& a host = vm->get_pm(); // Get the host_energy extension for the relevant host - HostEnergy* host_energy = host->extension(); + auto* host_energy = host->extension(); if (host_energy->get_last_update_time() < surf_get_clock()) host_energy->update(); @@ -496,7 +496,7 @@ static void on_host_change(simgrid::s4u::Host const& host) if (dynamic_cast(&host)) // Ignore virtual machines return; - HostEnergy* host_energy = host.extension(); + auto* host_energy = host.extension(); host_energy->update(); } diff --git a/src/plugins/link_energy.cpp b/src/plugins/link_energy.cpp index 3d68a3bbc2..8b8699e98b 100644 --- a/src/plugins/link_energy.cpp +++ b/src/plugins/link_energy.cpp @@ -156,7 +156,7 @@ static void on_communicate(const simgrid::kernel::resource::NetworkAction& actio continue; XBT_DEBUG("Update link %s", link->get_cname()); - LinkEnergy* link_energy = link->get_iface()->extension(); + auto* link_energy = link->get_iface()->extension(); link_energy->init_watts_range_list(); link_energy->update(); } diff --git a/src/simgrid/util.hpp b/src/simgrid/util.hpp index 634a6f2e99..d022134997 100644 --- a/src/simgrid/util.hpp +++ b/src/simgrid/util.hpp @@ -17,7 +17,7 @@ template inline XBT_PRIVATE typename C::mapped_type* find_map_ptr(C& c, K const& k) { - typename C::iterator i = c.find(k); + auto i = c.find(k); if (i == c.end()) return nullptr; else diff --git a/src/smpi/colls/allgather/allgather-2dmesh.cpp b/src/smpi/colls/allgather/allgather-2dmesh.cpp index 10f41b50be..6e6ffd66ed 100644 --- a/src/smpi/colls/allgather/allgather-2dmesh.cpp +++ b/src/smpi/colls/allgather/allgather-2dmesh.cpp @@ -139,7 +139,7 @@ allgather__2dmesh(const void *send_buff, int send_count, MPI_Datatype if (Y > X) num_reqs = Y; - MPI_Request* req = new MPI_Request[num_reqs]; + auto* req = new MPI_Request[num_reqs]; MPI_Request* req_ptr = req; // do local allgather/local copy diff --git a/src/smpi/colls/allgather/allgather-3dmesh.cpp b/src/smpi/colls/allgather/allgather-3dmesh.cpp index b0a10020ea..c642b58e86 100644 --- a/src/smpi/colls/allgather/allgather-3dmesh.cpp +++ b/src/smpi/colls/allgather/allgather-3dmesh.cpp @@ -131,7 +131,7 @@ int allgather__3dmesh(const void *send_buff, int send_count, block_size = extent * send_count; - MPI_Request* req = new MPI_Request[num_reqs]; + auto* req = new MPI_Request[num_reqs]; MPI_Request* req_ptr = req; // do local allgather/local copy diff --git a/src/smpi/colls/allgather/allgather-NTSLR-NB.cpp b/src/smpi/colls/allgather/allgather-NTSLR-NB.cpp 
index 116e152d83..623efe8393 100644
--- a/src/smpi/colls/allgather/allgather-NTSLR-NB.cpp
+++ b/src/smpi/colls/allgather/allgather-NTSLR-NB.cpp
@@ -25,8 +25,8 @@ allgather__NTSLR_NB(const void *sbuf, int scount, MPI_Datatype stype,
   size = comm->size();
   rextent = rtype->get_extent();
   sextent = stype->get_extent();
-  MPI_Request* rrequest_array = new MPI_Request[size];
-  MPI_Request* srequest_array = new MPI_Request[size];
+  auto* rrequest_array = new MPI_Request[size];
+  auto* srequest_array = new MPI_Request[size];

   // irregular case use default MPI functions
   if (scount * sextent != rcount * rextent) {
diff --git a/src/smpi/colls/allgather/allgather-SMP-NTS.cpp b/src/smpi/colls/allgather/allgather-SMP-NTS.cpp
index f6c91abb7b..a185059b5d 100644
--- a/src/smpi/colls/allgather/allgather-SMP-NTS.cpp
+++ b/src/smpi/colls/allgather/allgather-SMP-NTS.cpp
@@ -85,8 +85,8 @@ int allgather__SMP_NTS(const void *sbuf, int scount,
   // root of each SMP
   if (intra_rank == 0) {
-    MPI_Request* rrequest_array = new MPI_Request[inter_comm_size - 1];
-    MPI_Request* srequest_array = new MPI_Request[inter_comm_size - 1];
+    auto* rrequest_array = new MPI_Request[inter_comm_size - 1];
+    auto* srequest_array = new MPI_Request[inter_comm_size - 1];

     src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * num_core;
     dst = ((inter_rank + 1) % inter_comm_size) * num_core;
diff --git a/src/smpi/colls/allgather/allgather-smp-simple.cpp b/src/smpi/colls/allgather/allgather-smp-simple.cpp
index b279d4d9ac..338f831e14 100644
--- a/src/smpi/colls/allgather/allgather-smp-simple.cpp
+++ b/src/smpi/colls/allgather/allgather-smp-simple.cpp
@@ -76,9 +76,9 @@ int allgather__smp_simple(const void *send_buf, int scount,
   if (intra_rank == 0) {
     int num_req = (inter_comm_size - 1) * 2;
-    MPI_Request* reqs = new MPI_Request[num_req];
+    auto* reqs = new MPI_Request[num_req];
     MPI_Request* req_ptr = reqs;
-    MPI_Status* stat = new MPI_Status[num_req];
+    auto* stat = new MPI_Status[num_req];

     for (i = 1; i < inter_comm_size; i++) {
diff --git a/src/smpi/colls/allgather/allgather-spreading-simple.cpp b/src/smpi/colls/allgather/allgather-spreading-simple.cpp
index 4d1972b1bb..6a0b5f4dc7 100644
--- a/src/smpi/colls/allgather/allgather-spreading-simple.cpp
+++ b/src/smpi/colls/allgather/allgather-spreading-simple.cpp
@@ -89,7 +89,7 @@ allgather__spreading_simple(const void *send_buff, int send_count,
   extent = send_type->get_extent();
   num_reqs = (2 * num_procs) - 2;

-  MPI_Request* reqs = new MPI_Request[num_reqs];
+  auto* reqs = new MPI_Request[num_reqs];
   MPI_Request* req_ptr = reqs;

   Request::sendrecv(send_buff, send_count, send_type, rank, tag,
                     (char *) recv_buff + rank * recv_count * extent, recv_count,
diff --git a/src/smpi/colls/alltoall/alltoall-2dmesh.cpp b/src/smpi/colls/alltoall/alltoall-2dmesh.cpp
index c427d2cdee..2da2653654 100644
--- a/src/smpi/colls/alltoall/alltoall-2dmesh.cpp
+++ b/src/smpi/colls/alltoall/alltoall-2dmesh.cpp
@@ -87,8 +87,8 @@ int alltoall__2dmesh(const void *send_buff, int send_count,
   if (Y > X)
     num_reqs = Y;

-  MPI_Status* statuses = new MPI_Status[num_reqs];
-  MPI_Request* reqs    = new MPI_Request[num_reqs];
+  auto* statuses = new MPI_Status[num_reqs];
+  auto* reqs     = new MPI_Request[num_reqs];
   MPI_Request* req_ptr = reqs;

   count = send_count * num_procs;
diff --git a/src/smpi/colls/alltoall/alltoall-3dmesh.cpp b/src/smpi/colls/alltoall/alltoall-3dmesh.cpp
index 87a79b0d83..742c1ab76d 100644
--- a/src/smpi/colls/alltoall/alltoall-3dmesh.cpp
+++ b/src/smpi/colls/alltoall/alltoall-3dmesh.cpp
@@ -83,8 +83,8 @@ int alltoall__3dmesh(const void *send_buff, int send_count,
   unsigned char* tmp_buff1 = smpi_get_tmp_sendbuffer(block_size * num_procs * two_dsize);
   unsigned char* tmp_buff2 = smpi_get_tmp_recvbuffer(block_size * two_dsize);

-  MPI_Status* statuses = new MPI_Status[num_reqs];
-  MPI_Request* reqs    = new MPI_Request[num_reqs];
+  auto* statuses = new MPI_Status[num_reqs];
+  auto* reqs     = new MPI_Request[num_reqs];
   MPI_Request* req_ptr = reqs;

   recv_offset = (rank % two_dsize) * block_size * num_procs;
diff --git a/src/smpi/colls/alltoall/alltoall-basic-linear.cpp b/src/smpi/colls/alltoall/alltoall-basic-linear.cpp
index 272ffb9e66..b6fda99805 100644
--- a/src/smpi/colls/alltoall/alltoall-basic-linear.cpp
+++ b/src/smpi/colls/alltoall/alltoall-basic-linear.cpp
@@ -32,7 +32,7 @@ int alltoall__basic_linear(const void *sendbuf, int sendcount, MPI_Datatype send
                       static_cast<char*>(recvbuf) + rank * recvcount * recvext, recvcount, recvtype);
   if (err == MPI_SUCCESS && size > 1) {
     /* Initiate all send/recv to/from others. */
-    MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+    auto* requests = new MPI_Request[2 * (size - 1)];
     /* Post all receives first -- a simple optimization */
     count = 0;
     for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
diff --git a/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp b/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp
index 15dd553323..f9335112b8 100644
--- a/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp
+++ b/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp
@@ -90,9 +90,9 @@ int alltoall__mvapich2_scatter_dest(const void *sendbuf,
     /* FIXME: This should use the memory macros (there are storage
        leaks here if there is an error, for example) */
-    MPI_Request* reqarray = new MPI_Request[2 * bblock];
+    auto* reqarray = new MPI_Request[2 * bblock];

-    MPI_Status* starray = new MPI_Status[2 * bblock];
+    auto* starray = new MPI_Status[2 * bblock];

     for (ii=0; ii<comm_size; ii+=bblock) {
diff --git a/src/smpi/colls/bcast/bcast-flattree-pipeline.cpp b/src/smpi/colls/bcast/bcast-flattree-pipeline.cpp
--- a/src/smpi/colls/bcast/bcast-flattree-pipeline.cpp
+++ b/src/smpi/colls/bcast/bcast-flattree-pipeline.cpp
   rank = comm->rank();
   num_procs = comm->size();

-  MPI_Request* request_array = new MPI_Request[pipe_length];
-  MPI_Status* status_array   = new MPI_Status[pipe_length];
+  auto* request_array = new MPI_Request[pipe_length];
+  auto* status_array  = new MPI_Status[pipe_length];

   if (rank != root) {
     for (i = 0; i < pipe_length; i++) {
diff --git a/src/smpi/colls/bcast/bcast-flattree.cpp b/src/smpi/colls/bcast/bcast-flattree.cpp
index 8b367f3271..0fbbac4d8e 100644
--- a/src/smpi/colls/bcast/bcast-flattree.cpp
+++ b/src/smpi/colls/bcast/bcast-flattree.cpp
@@ -21,7 +21,7 @@ int bcast__flattree(void *buff, int count, MPI_Datatype data_type,
   }
   else {
-    MPI_Request* reqs = new MPI_Request[num_procs - 1];
+    auto* reqs = new MPI_Request[num_procs - 1];
     MPI_Request* req_ptr = reqs;

     // Root sends data to all others
diff --git a/src/smpi/colls/bcast/bcast-mvapich-smp.cpp b/src/smpi/colls/bcast/bcast-mvapich-smp.cpp
index e61d7e27a6..cfed7b771d 100644
--- a/src/smpi/colls/bcast/bcast-mvapich-smp.cpp
+++ b/src/smpi/colls/bcast/bcast-mvapich-smp.cpp
@@ -193,9 +193,9 @@ int bcast__mvapich2_knomial_intra_node(void *buffer,
   local_size = comm->size();
   rank = comm->rank();

-  MPI_Request* reqarray = new MPI_Request[2 * mv2_intra_node_knomial_factor];
+  auto* reqarray = new MPI_Request[2 * mv2_intra_node_knomial_factor];

-  MPI_Status* starray = new MPI_Status[2 * mv2_intra_node_knomial_factor];
+  auto* starray = new MPI_Status[2 * mv2_intra_node_knomial_factor];

   /* intra-node k-nomial bcast */
   if (local_size > 1) {
diff --git a/src/smpi/colls/gather/gather-ompi.cpp b/src/smpi/colls/gather/gather-ompi.cpp
index 3963e23274..e88c4f43c0 100644
--- a/src/smpi/colls/gather/gather-ompi.cpp
+++ b/src/smpi/colls/gather/gather-ompi.cpp
@@ -270,7 +270,7 @@ int gather__ompi_linear_sync(const void *sbuf, int scount,
      */
     char* ptmp;
     MPI_Request first_segment_req;
-    MPI_Request* reqs = new (std::nothrow) MPI_Request[size];
+    auto* reqs = new (std::nothrow) MPI_Request[size];
     if (nullptr == reqs) {
       ret = -1;
       line = __LINE__;
diff --git a/src/smpi/colls/reduce/reduce-NTSL.cpp b/src/smpi/colls/reduce/reduce-NTSL.cpp
index 3b0e3afdd5..a40b751c79 100644
--- a/src/smpi/colls/reduce/reduce-NTSL.cpp
+++ b/src/smpi/colls/reduce/reduce-NTSL.cpp
@@ -83,10 +83,10 @@ int reduce__NTSL(const void *buf, void *rbuf, int count,
   /* pipeline */
   else {
-    MPI_Request* send_request_array = new MPI_Request[size + pipe_length];
-    MPI_Request* recv_request_array = new MPI_Request[size + pipe_length];
-    MPI_Status* send_status_array   = new MPI_Status[size + pipe_length];
-    MPI_Status* recv_status_array   = new MPI_Status[size + pipe_length];
+    auto* send_request_array = new MPI_Request[size + pipe_length];
+    auto* recv_request_array = new MPI_Request[size + pipe_length];
+    auto* send_status_array  = new MPI_Status[size + pipe_length];
+    auto* recv_status_array  = new MPI_Status[size + pipe_length];

     /* root recv data */
     if (rank == root) {
diff --git a/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp b/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp
index e2f142b1aa..941bd87560 100644
--- a/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp
+++ b/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp
@@ -184,10 +184,10 @@ int reduce__arrival_pattern_aware(const void *buf, void *rbuf,
   else {
     //    printf("node %d start\n",rank);

-    MPI_Request* send_request_array = new MPI_Request[size + pipe_length];
-    MPI_Request* recv_request_array = new MPI_Request[size + pipe_length];
-    MPI_Status* send_status_array   = new MPI_Status[size + pipe_length];
-    MPI_Status* recv_status_array   = new MPI_Status[size + pipe_length];
+    auto* send_request_array = new MPI_Request[size + pipe_length];
+    auto* recv_request_array = new MPI_Request[size + pipe_length];
+    auto* send_status_array  = new MPI_Status[size + pipe_length];
+    auto* recv_status_array  = new MPI_Status[size + pipe_length];

     if (rank == 0) {
       sent_count = 0;
diff --git a/src/smpi/colls/reduce/reduce-mvapich-knomial.cpp b/src/smpi/colls/reduce/reduce-mvapich-knomial.cpp
index ffc0d250b1..6386bde954 100644
--- a/src/smpi/colls/reduce/reduce-mvapich-knomial.cpp
+++ b/src/smpi/colls/reduce/reduce-mvapich-knomial.cpp
@@ -173,8 +173,8 @@ int reduce__mvapich2_knomial(
           &dst, &expected_send_count, &expected_recv_count, &src_array);

   if(expected_recv_count > 0 ) {
-    unsigned char** tmp_buf = new unsigned char*[expected_recv_count];
-    MPI_Request* requests   = new MPI_Request[expected_recv_count];
+    auto** tmp_buf = new unsigned char*[expected_recv_count];
+    auto* requests = new MPI_Request[expected_recv_count];
     for (k = 0; k < expected_recv_count; k++) {
       tmp_buf[k] = smpi_get_tmp_sendbuffer(count * std::max(extent, true_extent));
       tmp_buf[k] = tmp_buf[k] - true_lb;
diff --git a/src/smpi/colls/reduce/reduce-ompi.cpp b/src/smpi/colls/reduce/reduce-ompi.cpp
index dec6161bc2..8f87360b09 100644
--- a/src/smpi/colls/reduce/reduce-ompi.cpp
+++ b/src/smpi/colls/reduce/reduce-ompi.cpp
@@ -266,7 +266,7 @@ int smpi_coll_tuned_ompi_reduce_generic(const void* sendbuf, void* recvbuf, int
     else {
       int creq = 0;
-      MPI_Request* sreq = new (std::nothrow) MPI_Request[max_outstanding_reqs];
+      auto* sreq = new (std::nothrow) MPI_Request[max_outstanding_reqs];
      if (nullptr == sreq) {
         line = __LINE__;
         ret  = -1;
diff --git a/src/smpi/colls/smpi_coll.cpp b/src/smpi/colls/smpi_coll.cpp
index f5672c29d7..6a213c0f84 100644
--- a/src/smpi/colls/smpi_coll.cpp
+++ b/src/smpi/colls/smpi_coll.cpp
@@ -360,8 +360,8 @@ int colls::scan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype data
   Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);

   // Send/Recv buffers to/from others
-  MPI_Request* requests   = new MPI_Request[size - 1];
-  unsigned char** tmpbufs = new unsigned char*[rank];
+  auto* requests = new MPI_Request[size - 1];
+  auto** tmpbufs = new unsigned char*[rank];
   int index = 0;
   for (int other = 0; other < rank; other++) {
     tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
@@ -418,8 +418,8 @@ int colls::exscan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype da
   datatype->extent(&lb, &dataext);

   // Send/Recv buffers to/from others
-  MPI_Request* requests   = new MPI_Request[size - 1];
-  unsigned char** tmpbufs = new unsigned char*[rank];
+  auto* requests = new MPI_Request[size - 1];
+  auto** tmpbufs = new unsigned char*[rank];
   int index = 0;
   for (int other = 0; other < rank; other++) {
     tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
diff --git a/src/smpi/colls/smpi_mvapich2_selector_stampede.hpp b/src/smpi/colls/smpi_mvapich2_selector_stampede.hpp
index 955b9e3125..38f329adf8 100644
--- a/src/smpi/colls/smpi_mvapich2_selector_stampede.hpp
+++ b/src/smpi/colls/smpi_mvapich2_selector_stampede.hpp
@@ -371,7 +371,7 @@ static void init_mv2_allgather_tables_stampede()
   simgrid::smpi::colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
   mv2_allgather_num_ppn_conf     = 3;
   mv2_allgather_thresholds_table = new mv2_allgather_tuning_table*[mv2_allgather_num_ppn_conf];
-  mv2_allgather_tuning_table** table_ptrs = new mv2_allgather_tuning_table*[mv2_allgather_num_ppn_conf];
+  auto** table_ptrs = new mv2_allgather_tuning_table*[mv2_allgather_num_ppn_conf];
   mv2_size_allgather_tuning_table = new int[mv2_allgather_num_ppn_conf];
   mv2_allgather_table_ppn_conf    = new int[mv2_allgather_num_ppn_conf];
   mv2_allgather_table_ppn_conf[0] = 1;
@@ -1516,7 +1516,7 @@ static void init_mv2_scatter_tables_stampede()
   int agg_table_sum = 0;
   mv2_scatter_num_ppn_conf     = 3;
   mv2_scatter_thresholds_table = new mv2_scatter_tuning_table*[mv2_scatter_num_ppn_conf];
-  mv2_scatter_tuning_table** table_ptrs = new mv2_scatter_tuning_table*[mv2_scatter_num_ppn_conf];
+  auto** table_ptrs = new mv2_scatter_tuning_table*[mv2_scatter_num_ppn_conf];
   mv2_size_scatter_tuning_table = new int[mv2_scatter_num_ppn_conf];
   mv2_scatter_table_ppn_conf    = new int[mv2_scatter_num_ppn_conf];
   mv2_scatter_table_ppn_conf[0] = 1;
diff --git a/src/smpi/colls/smpi_nbc_impl.cpp b/src/smpi/colls/smpi_nbc_impl.cpp
index 4a174df611..b475317488 100644
--- a/src/smpi/colls/smpi_nbc_impl.cpp
+++ b/src/smpi/colls/smpi_nbc_impl.cpp
@@ -19,7 +19,7 @@ int colls::ibarrier(MPI_Comm comm, MPI_Request* request, int external)
   (*request) = new Request(nullptr, 0, MPI_BYTE, rank, rank, system_tag, comm, MPI_REQ_PERSISTENT);
   if (rank > 0) {
-    MPI_Request* requests = new MPI_Request[2];
+    auto* requests = new MPI_Request[2];
     requests[0] = Request::isend(nullptr, 0, MPI_BYTE, 0, system_tag, comm);
@@ -29,7 +29,7 @@ int colls::ibarrier(MPI_Comm comm, MPI_Request* request, int external)
     (*request)->set_nbc_requests(requests, 2);
   } else {
-    MPI_Request* requests = new MPI_Request[(size - 1) * 2];
+    auto* requests = new MPI_Request[(size - 1) * 2];
1; i < 2 * size - 1; i += 2) { requests[i - 1] = Request::irecv(nullptr, 0, MPI_BYTE, MPI_ANY_SOURCE, system_tag, comm); requests[i] = Request::isend(nullptr, 0, MPI_BYTE, (i + 1) / 2, system_tag, comm); @@ -48,14 +48,14 @@ int colls::ibcast(void* buf, int count, MPI_Datatype datatype, int root, MPI_Com (*request) = new Request( nullptr, 0, MPI_BYTE, rank,rank, system_tag, comm, MPI_REQ_PERSISTENT); if (rank != root) { - MPI_Request* requests = new MPI_Request[1]; + auto* requests = new MPI_Request[1]; requests[0] = Request::irecv (buf, count, datatype, root, system_tag, comm); (*request)->set_nbc_requests(requests, 1); } else { - MPI_Request* requests = new MPI_Request[size - 1]; + auto* requests = new MPI_Request[size - 1]; int n = 0; for (int i = 0; i < size; i++) { if(i!=root){ @@ -89,7 +89,7 @@ int colls::iallgather(const void* sendbuf, int sendcount, MPI_Datatype sendtype, Datatype::copy(sendbuf, sendcount, sendtype, static_cast(recvbuf) + rank * recvcount * recvext, recvcount, recvtype); // Send/Recv buffers to/from others; - MPI_Request* requests = new MPI_Request[2 * (size - 1)]; + auto* requests = new MPI_Request[2 * (size - 1)]; int index = 0; for (int other = 0; other < size; other++) { if(other != rank) { @@ -117,7 +117,7 @@ int colls::iscatter(const void* sendbuf, int sendcount, MPI_Datatype sendtype, v (*request) = new Request( nullptr, 0, MPI_BYTE, rank,rank, system_tag, comm, MPI_REQ_PERSISTENT); if(rank != root) { - MPI_Request* requests = new MPI_Request[1]; + auto* requests = new MPI_Request[1]; // Recv buffer from root requests[0] = Request::irecv(recvbuf, recvcount, recvtype, root, system_tag, comm); (*request)->set_nbc_requests(requests, 1); @@ -129,7 +129,7 @@ int colls::iscatter(const void* sendbuf, int sendcount, MPI_Datatype sendtype, v sendcount, sendtype, recvbuf, recvcount, recvtype); } // Send buffers to receivers - MPI_Request* requests = new MPI_Request[size - 1]; + auto* requests = new MPI_Request[size - 1]; int index = 0; for(int dst = 0; dst < size; dst++) { if(dst != root) { @@ -161,7 +161,7 @@ int colls::iallgatherv(const void* sendbuf, int sendcount, MPI_Datatype sendtype Datatype::copy(sendbuf, sendcount, sendtype, static_cast(recvbuf) + displs[rank] * recvext,recvcounts[rank], recvtype); // Send buffers to others; - MPI_Request *requests = new MPI_Request[2 * (size - 1)]; + auto* requests = new MPI_Request[2 * (size - 1)]; int index = 0; for (int other = 0; other < size; other++) { if(other != rank) { @@ -199,7 +199,7 @@ int colls::ialltoall(const void* sendbuf, int sendcount, MPI_Datatype sendtype, static_cast(recvbuf) + rank * recvcount * recvext, recvcount, recvtype); if (err == MPI_SUCCESS && size > 1) { /* Initiate all send/recv to/from others. */ - MPI_Request* requests = new MPI_Request[2 * (size - 1)]; + auto* requests = new MPI_Request[2 * (size - 1)]; /* Post all receives first -- a simple optimization */ int count = 0; for (int i = (rank + 1) % size; i != rank; i = (i + 1) % size) { @@ -245,7 +245,7 @@ int colls::ialltoallv(const void* sendbuf, const int* sendcounts, const int* sen static_cast(recvbuf) + recvdisps[rank] * recvext, recvcounts[rank], recvtype); if (err == MPI_SUCCESS && size > 1) { /* Initiate all send/recv to/from others. 
-    MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+    auto* requests = new MPI_Request[2 * (size - 1)];
     int count = 0;
     /* Create all receives that will be posted first */
     for (int i = 0; i < size; ++i) {
@@ -290,7 +290,7 @@ int colls::ialltoallw(const void* sendbuf, const int* sendcounts, const int* sen
                        static_cast<char*>(recvbuf) + recvdisps[rank], recvcounts[rank], recvtypes[rank]): MPI_SUCCESS;
   if (err == MPI_SUCCESS && size > 1) {
     /* Initiate all send/recv to/from others. */
-    MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+    auto* requests = new MPI_Request[2 * (size - 1)];
     int count = 0;
     /* Create all receives that will be posted first */
     for (int i = 0; i < size; ++i) {
@@ -332,7 +332,7 @@ int colls::igather(const void* sendbuf, int sendcount, MPI_Datatype sendtype, vo
                            rank, rank, system_tag, comm, MPI_REQ_PERSISTENT);
   if(rank != root) {
     // Send buffer to root
-    MPI_Request* requests = new MPI_Request[1];
+    auto* requests = new MPI_Request[1];
     requests[0]=Request::isend(sendbuf, sendcount, sendtype, root, system_tag, comm);
     (*request)->set_nbc_requests(requests, 1);
   } else {
@@ -341,7 +341,7 @@ int colls::igather(const void* sendbuf, int sendcount, MPI_Datatype sendtype, vo
     Datatype::copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + root * recvcount * recvext,
                    recvcount, recvtype);
     // Receive buffers from senders
-    MPI_Request* requests = new MPI_Request[size - 1];
+    auto* requests = new MPI_Request[size - 1];
     int index = 0;
     for (int src = 0; src < size; src++) {
       if(src != root) {
@@ -371,7 +371,7 @@ int colls::igatherv(const void* sendbuf, int sendcount, MPI_Datatype sendtype, v
                            rank, rank, system_tag, comm, MPI_REQ_PERSISTENT);
   if (rank != root) {
     // Send buffer to root
-    MPI_Request* requests = new MPI_Request[1];
+    auto* requests = new MPI_Request[1];
     requests[0]=Request::isend(sendbuf, sendcount, sendtype, root, system_tag, comm);
     (*request)->set_nbc_requests(requests, 1);
   } else {
@@ -380,7 +380,7 @@ int colls::igatherv(const void* sendbuf, int sendcount, MPI_Datatype sendtype, v
     Datatype::copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + displs[root] * recvext,
                    recvcounts[root], recvtype);
     // Receive buffers from senders
-    MPI_Request* requests = new MPI_Request[size - 1];
+    auto* requests = new MPI_Request[size - 1];
     int index = 0;
     for (int src = 0; src < size; src++) {
       if(src != root) {
@@ -409,7 +409,7 @@ int colls::iscatterv(const void* sendbuf, const int* sendcounts, const int* disp
                            rank, rank, system_tag, comm, MPI_REQ_PERSISTENT);
   if(rank != root) {
     // Recv buffer from root
-    MPI_Request* requests = new MPI_Request[1];
+    auto* requests = new MPI_Request[1];
     requests[0]=Request::irecv(recvbuf, recvcount, recvtype, root, system_tag, comm);
     (*request)->set_nbc_requests(requests, 1);
   } else {
@@ -420,7 +420,7 @@ int colls::iscatterv(const void* sendbuf, const int* sendcounts, const int* disp
                      sendtype, recvbuf, recvcount, recvtype);
     }
     // Send buffers to receivers
-    MPI_Request *requests = new MPI_Request[size - 1];
+    auto* requests = new MPI_Request[size - 1];
     int index = 0;
     for (int dst = 0; dst < size; dst++) {
       if (dst != root) {
@@ -468,7 +468,7 @@ int colls::ireduce(const void* sendbuf, void* recvbuf, int count, MPI_Datatype d
   if(rank != root) {
     // Send buffer to root
-    MPI_Request* requests = new MPI_Request[1];
+    auto* requests = new MPI_Request[1];
     requests[0] = Request::isend(real_sendbuf, count, datatype, root, system_tag, comm);
     (*request)->set_nbc_requests(requests, 1);
   } else {
@@ -477,7 +477,7 @@ int colls::ireduce(const void* sendbuf, void* recvbuf, int count, MPI_Datatype d
     if (real_sendbuf != nullptr && recvbuf != nullptr)
       Datatype::copy(real_sendbuf, count, datatype, recvbuf, count, datatype);
     // Receive buffers from senders
-    MPI_Request *requests = new MPI_Request[size - 1];
+    auto* requests = new MPI_Request[size - 1];
     int index = 0;
     for (int src = 0; src < size; src++) {
       if (src != root) {
@@ -513,7 +513,7 @@ int colls::iallreduce(const void* sendbuf, void* recvbuf, int count, MPI_Datatyp
   // Local copy from self
   Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);
   // Send/Recv buffers to/from others;
-  MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+  auto* requests = new MPI_Request[2 * (size - 1)];
   int index = 0;
   for (int other = 0; other < size; other++) {
     if(other != rank) {
@@ -546,7 +546,7 @@ int colls::iscan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype dat
   Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);

   // Send/Recv buffers to/from others
-  MPI_Request *requests = new MPI_Request[size - 1];
+  auto* requests = new MPI_Request[size - 1];
   int index = 0;
   for (int other = 0; other < rank; other++) {
     requests[index] = Request::irecv_init(smpi_get_tmp_sendbuffer(count * dataext), count, datatype, other,
                                           system_tag, comm);
@@ -577,7 +577,7 @@ int colls::iexscan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype d
   memset(recvbuf, 0, count*dataext);

   // Send/Recv buffers to/from others
-  MPI_Request *requests = new MPI_Request[size - 1];
+  auto* requests = new MPI_Request[size - 1];
   int index = 0;
   for (int other = 0; other < rank; other++) {
     requests[index] = Request::irecv_init(smpi_get_tmp_sendbuffer(count * dataext), count, datatype, other,
                                           system_tag, comm);
@@ -609,7 +609,7 @@ int colls::ireduce_scatter(const void* sendbuf, void* recvbuf, const int* recvco
   datatype->extent(&lb, &dataext);

   // Send/Recv buffers to/from others;
-  MPI_Request* requests = new MPI_Request[2 * (size - 1)];
+  auto* requests = new MPI_Request[2 * (size - 1)];
   int index = 0;
   int recvdisp=0;
   for (int other = 0; other < size; other++) {
diff --git a/src/smpi/internals/smpi_bench.cpp b/src/smpi/internals/smpi_bench.cpp
index 369445dfdd..1f10512a3f 100644
--- a/src/smpi/internals/smpi_bench.cpp
+++ b/src/smpi/internals/smpi_bench.cpp
@@ -280,7 +280,7 @@ unsigned long long smpi_rastro_timestamp ()
   smpi_bench_end();
   double now = SIMIX_get_clock();

-  unsigned long long sec = static_cast<unsigned long long>(now);
+  auto sec               = static_cast<unsigned long long>(now);
   unsigned long long pre = (now - sec) * smpi_rastro_resolution();
   smpi_bench_begin();
   return sec * smpi_rastro_resolution() + pre;
diff --git a/src/smpi/internals/smpi_replay.cpp b/src/smpi/internals/smpi_replay.cpp
index 10bb834385..925b506073 100644
--- a/src/smpi/internals/smpi_replay.cpp
+++ b/src/smpi/internals/smpi_replay.cpp
@@ -111,7 +111,7 @@ public:
   MPI_Request find(int src, int dst, int tag)
   {
-    req_storage_t::iterator it = store.find(req_key_t(src, dst, tag));
+    auto it = store.find(req_key_t(src, dst, tag));
     return (it == store.end()) ? MPI_REQUEST_NULL : it->second;
   }
diff --git a/src/smpi/mpi/smpi_win.cpp b/src/smpi/mpi/smpi_win.cpp
index 9cf535d043..806b363d87 100644
--- a/src/smpi/mpi/smpi_win.cpp
+++ b/src/smpi/mpi/smpi_win.cpp
@@ -709,7 +709,7 @@ int Win::finish_comms(int rank){
   if (size > 0) {
     size = 0;
     std::vector<MPI_Request> myreqqs;
-    std::vector<MPI_Request>::iterator iter = reqqs->begin();
+    auto iter = reqqs->begin();
     int proc_id = comm_->group()->actor(rank)->get_pid();
     while (iter != reqqs->end()){
       // Let's see if we're either the destination or the sender of this request
diff --git a/src/surf/network_ib.cpp b/src/surf/network_ib.cpp
index ce8a68c499..81d054956f 100644
--- a/src/surf/network_ib.cpp
+++ b/src/surf/network_ib.cpp
@@ -186,9 +186,8 @@ void NetworkIBModel::updateIBfactors(NetworkAction* action, IBNode* from, IBNode
   to->ActiveCommsDown[from] -= 1;
   to->nbActiveCommsDown--;

-  std::vector<ActiveComm*>::iterator it =
-      std::find_if(begin(from->ActiveCommsUp), end(from->ActiveCommsUp),
-                   [action](const ActiveComm* comm) { return comm->action == action; });
+  auto it = std::find_if(begin(from->ActiveCommsUp), end(from->ActiveCommsUp),
+                         [action](const ActiveComm* comm) { return comm->action == action; });
   if (it != std::end(from->ActiveCommsUp)) {
     delete *it;
     from->ActiveCommsUp.erase(it);
diff --git a/src/surf/network_ns3.cpp b/src/surf/network_ns3.cpp
index 6ecd3a1711..b44c56a99e 100644
--- a/src/surf/network_ns3.cpp
+++ b/src/surf/network_ns3.cpp
@@ -378,7 +378,7 @@ LinkNS3::LinkNS3(NetworkNS3Model* model, const std::string& name, double bandwid
     ns3::NetDeviceContainer netA;
     WifiZone* zone = WifiZone::by_name(name);
     xbt_assert(zone != nullptr, "Link name '%s' does not match the 'wifi_link' property of a host.", name.c_str());
-    NetPointNs3* netpoint_ns3 = zone->get_host()->get_netpoint()->extension<NetPointNs3>();
+    auto* netpoint_ns3 = zone->get_host()->get_netpoint()->extension<NetPointNs3>();

     wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager", "ControlMode", ns3::StringValue("HtMcs0"),
                                  "DataMode", ns3::StringValue("HtMcs" + std::to_string(zone->get_mcs())));
--
2.20.1
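
For readers unfamiliar with the check: modernize-use-auto only introduces `auto` where the
type is already spelled out in the initializer (casts, new-expressions, iterator-returning
calls), so none of the rewrites above lose type information. A minimal, self-contained
sketch of the pattern follows; this is hypothetical illustration code, not taken from the
SimGrid tree:

    #include <map>
    #include <string>

    int main()
    {
      std::map<int, std::string> numbers{{1, "one"}, {2, "two"}};

      // Before: std::map<int, std::string>::iterator it = numbers.find(2);
      // After the check, the type remains visible in the initializer:
      auto it = numbers.find(2);

      // Casts and new-expressions follow the same rule: the target type stays
      // spelled on the right-hand side, exactly as in the hunks above.
      auto key = static_cast<unsigned int>(it->first); // it->first is an int
      auto* values = new double[key + 1];              // pointee type stays explicit

      delete[] values;
      return 0;
    }

A patch like the one above can typically be regenerated mechanically with an invocation
along the lines of `clang-tidy --checks='-*,modernize-use-auto' --fix <file>`; the exact
flag spelling varies slightly across clang-tidy versions.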