teshsuite/smpi/macro-partial-shared-communication/macro-partial-shared-communication
teshsuite/smpi/type-struct/type-struct
teshsuite/smpi/type-vector/type-vector
+teshsuite/s4u/actor/actor
+teshsuite/s4u/concurrent_rw/concurrent_rw
+teshsuite/s4u/host_on_off_wait/host_on_off_wait
+teshsuite/s4u/listen_async/listen_async
+teshsuite/s4u/pid/pid
+teshsuite/s4u/storage_client_server/storage_client_server
teshsuite/surf/lmm_usage/lmm_usage
teshsuite/surf/maxmin_bench/maxmin_bench
teshsuite/surf/surf_usage/surf_usage
name: "simgrid/simgrid"
description: "Build submitted via Travis CI"
notification_email: martin.quinson@ens-rennes.fr
- build_command_prepend: "cmake -Denable_documentation=OFF -Denable_coverage=ON -Denable_java=OFF -Denable_model-checking=OFF -Denable_lua=OFF -Denable_compile_optimizations=OFF -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=OFF -Denable_compile_warnings=OFF ."
+ build_command_prepend: "cmake -Denable_documentation=OFF -Denable_coverage=ON -Denable_java=OFF -Denable_model-checking=OFF -Denable_lua=OFF -Denable_compile_optimizations=ON -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=OFF -Denable_compile_warnings=OFF ."
build_command: "make VERBOSE=1"
branch_pattern: coverity
script:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update ; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install python3; fi
- - cmake -Denable_documentation=OFF -Denable_coverage=ON -Denable_java=ON -Denable_model-checking=OFF -Denable_lua=OFF -Denable_compile_optimizations=OFF -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=OFF -Denable_compile_warnings=ON .
+ - cmake -Denable_documentation=OFF -Denable_coverage=ON -Denable_java=ON -Denable_model-checking=OFF -Denable_lua=OFF -Denable_compile_optimizations=ON -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=OFF -Denable_compile_warnings=ON .
# run make in the sonar wrapper && run the tests before sonar to get coverage info
- ./tools/internal/travis-sonarqube.sh make VERBOSE=1
# if sonar was not run (and if the build succeeded), run the tests manually
set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:${NS3_LIBRARY_PATH}")
endif()
set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}\"")
+set(SMPIMAIN smpimain)
configure_file(${CMAKE_HOME_DIRECTORY}/include/smpi/mpif.h.in ${CMAKE_BINARY_DIR}/include/smpi/mpif.h @ONLY)
foreach(script cc cxx ff f90 run)
set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:${NS3_LIBRARY_PATH}")
endif()
set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}\"")
+set(SMPIMAIN ${CMAKE_BINARY_DIR}/bin/smpimain)
foreach(script cc cxx ff f90 run)
configure_file(${CMAKE_HOME_DIRECTORY}/src/smpi/smpi${script}.in ${CMAKE_BINARY_DIR}/smpi_script/bin/smpi${script} @ONLY)
[online on GitLab](https://gitlab.inria.fr/simgrid/simgrid/tree/master/doc/msg-tuto-src).
If you find the right button on the top right of the interface, you can download the whole
directory in one archive file. If you wish, you can find other platform files in
-[this GitLab directory](https://gitlab.inria.fr/simgrid/simgrid/tree/master/doc/examples/platforms).
+[this GitLab directory](https://gitlab.inria.fr/simgrid/simgrid/tree/master/examples/platforms).
As you can see, there is already a little Makefile that compiles
everything for you. If you struggle with the compilation, then you should double check
set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/${example}/s4u_${example}.cpp)
endforeach()
-set(examples_src ${examples_src} PARENT_SCOPE)
-set(tesh_files ${tesh_files} PARENT_SCOPE)
+# CHORD EXAMPLE
+add_executable (s4u_dht-chord dht-chord/s4u_dht-chord.cpp dht-chord/node.cpp)
+target_link_libraries(s4u_dht-chord simgrid)
+set_target_properties(s4u_dht-chord PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/dht-chord)
+foreach (file s4u_dht-chord node)
+ set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/dht-chord/${file}.cpp)
+endforeach()
+set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/dht-chord/s4u_dht-chord.hpp)
+
+set(examples_src ${examples_src} PARENT_SCOPE)
+set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/dht-chord/s4u_dht-chord.tesh PARENT_SCOPE)
set(xml_files ${xml_files} ${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/s4u_actions-comm_split_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/s4u_actions-comm_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/actions-storage/s4u_actions-storage_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/actor-create/s4u_actor-create_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/app-masterworker/s4u_app-masterworker_d.xml PARENT_SCOPE)
+ ${CMAKE_CURRENT_SOURCE_DIR}/app-masterworker/s4u_app-masterworker_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/dht-chord/s4u_dht-chord_d.xml PARENT_SCOPE)
set(txt_files ${txt_files} ${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/s4u_actions-comm_split_p0.txt
${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/s4u_actions-comm_split_p1.txt
${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/s4u_actions-comm.txt
${CMAKE_CURRENT_SOURCE_DIR}/README.doc PARENT_SCOPE)
foreach(example actions-comm actions-storage actor-create actor-kill actor-migration actor-suspend
- app-masterworker app-token-ring io mutex )
+ app-masterworker app-token-ring dht-chord io mutex )
ADD_TESH_FACTORIES(s4u-${example} "thread;ucontext;raw;boost" --setenv bindir=${CMAKE_CURRENT_BINARY_DIR}/${example} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --cd ${CMAKE_HOME_DIRECTORY}/examples/s4u/${example} s4u_${example}.tesh)
endforeach()
--- /dev/null
+/* Copyright (c) 2010-2016. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "s4u_dht-chord.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(s4u_chord);
+
+/* Returns whether an id belongs to the interval [start, end].
+ *
+ * The parameters are normalized to make sure they are between 0 and nb_keys - 1).
+ * 1 belongs to [62, 3]
+ * 1 does not belong to [3, 62]
+ * 63 belongs to [62, 3]
+ * 63 does not belong to [3, 62]
+ * 24 belongs to [21, 29]
+ * 24 does not belong to [29, 21]
+ *
+ * \param id id to check
+ * \param start lower bound
+ * \param end upper bound
+ * \return a non-zero value if id in in [start, end]
+ */
+static int is_in_interval(int id, int start, int end)
+{
+  // NOTE(review): assumes non-negative inputs; C++ '%' yields a negative result
+  // for a negative left operand, which would break the normalization below -- TODO confirm callers.
+  int i = id % nb_keys;
+  int s = start % nb_keys;
+  int e = end % nb_keys;
+
+  // make sure end >= start and id >= start
+  if (e < s) {
+    e += nb_keys;
+  }
+
+  if (i < s) {
+    i += nb_keys;
+  }
+
+  return i <= e; // id falls inside the unwrapped interval
+}
+
+/* Initializes the current node as the first one of the system.
+ *
+ * Deployment arguments: args[1] = my id; then either
+ *   args[2] = deadline (creator of a new ring, 3 args total), or
+ *   args[2] = id of a known node, args[3] = start time, args[4] = deadline (joining node, 5 args).
+ */
+Node::Node(std::vector<std::string> args)
+{
+  xbt_assert(args.size() == 3 || args.size() == 5, "Wrong number of arguments for this node");
+
+  // initialize my node
+  id_ = std::stoi(args[1]);
+  // per-host random stream, installed by chord_init() as a Host extension
+  stream = simgrid::s4u::this_actor::host()->extension<HostChord>()->getStream();
+  mailbox_ = simgrid::s4u::Mailbox::byName(std::to_string(id_));
+  next_finger_to_fix = 0;
+  fingers_ = new int[nb_bits];
+
+  // initially every finger points to myself
+  for (int i = 0; i < nb_bits; i++) {
+    fingers_[i] = id_;
+  }
+
+  if (args.size() == 3) { // first ring
+    deadline_ = std::stod(args[2]);
+    start_time_ = simgrid::s4u::Engine::getClock();
+    XBT_DEBUG("Create a new Chord ring...");
+  } else {
+    known_id_ = std::stoi(args[2]);
+    start_time_ = std::stod(args[3]);
+    deadline_ = std::stod(args[4]);
+    XBT_DEBUG("Hey! Let's join the system in %f seconds (shall leave at time %f)", start_time_,
+              start_time_ + deadline_);
+  }
+}
+
+// Frees the finger table allocated in the constructor.
+Node::~Node()
+{
+  delete[] fingers_;
+}
+/* Makes the current node join the ring, knowing the id of a node already in the ring
+ *
+ * \param known_id id of a node already in the ring
+ * \return true if the join operation succeeded
+ * */
+
+void Node::join(int known_id)
+{
+  XBT_INFO("Joining the ring with id %d, knowing node %d", id_, known_id);
+  setPredecessor(-1); // no predecessor (yet)
+
+  // ask the known node for my successor; -1 means the remote lookup failed
+  int successor_id = remoteFindSuccessor(known_id, id_);
+  if (successor_id == -1) {
+    XBT_INFO("Cannot join the ring.");
+  } else {
+    setFinger(0, successor_id); // fingers_[0] is my successor
+    printFingerTable();
+    joined = true; // operator() only enters the main loop when joined
+  }
+}
+
+/* Makes the current node quit the system */
+void Node::leave()
+{
+  XBT_INFO("Well Guys! I Think it's time for me to leave ;)");
+  notifyAndQuit(); // tell successor and predecessor so they can patch the ring
+  joined = false;
+}
+
+/* Notifies the successor and the predecessor of the current node before leaving.
+ *
+ * Message ownership: on a successful send the receiver deletes the message
+ * (see handleMessage); we only delete it here when the send timed out.
+ * NOTE(review): if the send raises a non-timeout xbt_ex, the message is
+ * neither delivered nor deleted -- potential leak, TODO confirm intended. */
+void Node::notifyAndQuit()
+{
+  // send the PREDECESSOR_LEAVING to our successor
+  ChordMessage* pred_msg = new ChordMessage(PREDECESSOR_LEAVING);
+  pred_msg->request_id = pred_id_; // tell my successor who its new predecessor is
+  pred_msg->answer_to = mailbox_;
+
+  XBT_DEBUG("Sending a 'PREDECESSOR_LEAVING' to my successor %d", fingers_[0]);
+  try {
+    simgrid::s4u::this_actor::send(simgrid::s4u::Mailbox::byName(std::to_string(fingers_[0])), pred_msg, 10, timeout);
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Timeout expired when sending a 'PREDECESSOR_LEAVING' to my successor %d", fingers_[0]);
+      delete pred_msg;
+    }
+  }
+
+  // send the SUCCESSOR_LEAVING to our predecessor
+  ChordMessage* succ_msg = new ChordMessage(SUCCESSOR_LEAVING);
+  succ_msg->request_id = fingers_[0]; // tell my predecessor who its new successor is
+  succ_msg->answer_to = mailbox_;
+  XBT_DEBUG("Sending a 'SUCCESSOR_LEAVING' to my predecessor %d", pred_id_);
+
+  try {
+    simgrid::s4u::this_actor::send(simgrid::s4u::Mailbox::byName(std::to_string(pred_id_)), succ_msg, 10, timeout);
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Timeout expired when sending a 'SUCCESSOR_LEAVING' to my predecessor %d", pred_id_);
+      delete succ_msg;
+    }
+  }
+}
+
+/* Performs a find successor request to a random id (exercises the routing; result is only logged) */
+void Node::randomLookup()
+{
+  int res = id_;
+  // pick a random entry of my finger table as the key to look up
+  int random_index = RngStream_RandInt(stream, 0, nb_bits - 1);
+  int random_id = fingers_[random_index];
+  XBT_DEBUG("Making a lookup request for id %d", random_id);
+  if (random_id != id_)
+    res = findSuccessor(random_id);
+  XBT_DEBUG("The successor of node %d is %d", random_id, res);
+}
+
+/* Sets a finger of the current node.
+ *
+ * \param finger_index index of the finger to set (0 to nb_bits - 1)
+ * \param id the id to set for this finger
+ */
+void Node::setFinger(int finger_index, int id)
+{
+  // only touch (and log) the table when the value actually changes
+  if (id != fingers_[finger_index]) {
+    fingers_[finger_index] = id;
+    XBT_VERB("My new finger #%d is %d", finger_index, id);
+  }
+}
+
+/* Sets the predecessor of the current node.
+ * \param predecessor_id the id of the new predecessor, or -1 to unset the predecessor
+ */
+void Node::setPredecessor(int predecessor_id)
+{
+  // only update (and log) on an actual change
+  if (predecessor_id != pred_id_) {
+    pred_id_ = predecessor_id;
+    XBT_VERB("My new predecessor is %d", predecessor_id);
+  }
+}
+
+/** Refreshes one entry of the finger table of the current node (called periodically).
+ * Entries are fixed round-robin, one per call, via next_finger_to_fix. */
+void Node::fixFingers()
+{
+  XBT_DEBUG("Fixing fingers");
+  // finger i must point to the successor of id_ + 2^i
+  int id = findSuccessor(id_ + powers2[next_finger_to_fix]);
+  if (id != -1) { // -1 means the lookup failed: retry the same finger next time
+    if (id != fingers_[next_finger_to_fix]) {
+      setFinger(next_finger_to_fix, id);
+      printFingerTable();
+    }
+    next_finger_to_fix = (next_finger_to_fix + 1) % nb_bits;
+  }
+}
+
+/** Displays the finger table of a node (only when verbose logging is enabled). */
+void Node::printFingerTable()
+{
+  // guard avoids formatting work when the verbose channel is off
+  if (XBT_LOG_ISENABLED(s4u_chord, xbt_log_priority_verbose)) {
+    XBT_VERB("My finger table:");
+    XBT_VERB("Start | Succ");
+    for (int i = 0; i < nb_bits; i++) {
+      XBT_VERB(" %3d  | %3d", (id_ + powers2[i]) % nb_keys, fingers_[i]);
+    }
+
+    XBT_VERB("Predecessor: %d", pred_id_);
+  }
+}
+
+/* Checks whether the predecessor has failed (called periodically).
+ *
+ * Sends a PREDECESSOR_ALIVE ping; the peer echoes the very same message object
+ * back as PREDECESSOR_ALIVE_ANSWER (see handleMessage), so deleting 'message'
+ * after a successful wait frees the received answer as well.
+ * On answer timeout the predecessor is considered dead (pred_id_ = -1). */
+void Node::checkPredecessor()
+{
+  XBT_DEBUG("Checking whether my predecessor is alive");
+  void* data = nullptr;
+  if (pred_id_ == -1)
+    return; // no predecessor to check
+
+  simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(std::to_string(pred_id_));
+  simgrid::s4u::MailboxPtr return_mailbox = simgrid::s4u::Mailbox::byName(std::to_string(id_) + "_is_alive");
+
+  ChordMessage* message = new ChordMessage(PREDECESSOR_ALIVE);
+  message->request_id = pred_id_;
+  message->answer_to = return_mailbox;
+
+  XBT_DEBUG("Sending a 'Predecessor Alive' request to my predecessor %d", pred_id_);
+  try {
+    simgrid::s4u::this_actor::send(mailbox, message, 10, timeout);
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to send the 'Predecessor Alive' request to %d", pred_id_);
+      delete message; // never delivered: still ours to free
+      return;
+    }
+  }
+  // receive the answer
+  XBT_DEBUG("Sent 'Predecessor Alive' request to %d, waiting for the answer on my mailbox '%s'", pred_id_,
+            message->answer_to->name());
+  simgrid::s4u::Comm& comm = simgrid::s4u::this_actor::irecv(return_mailbox, &data);
+
+  try {
+    comm.wait(timeout);
+    XBT_DEBUG("Received the answer to my 'Predecessor Alive': my predecessor %d is alive", pred_id_);
+    delete message; // the echoed answer is this same object
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to receive the answer to my 'Predecessor Alive' request");
+      pred_id_ = -1; // assume the predecessor is dead
+    }
+  }
+}
+
+/* Asks its predecessor to a remote node
+ *
+ * The peer answers by echoing the same message object back with answer_id set
+ * (see handleMessage GET_PREDECESSOR), so 'answer' below is our own message.
+ *
+ * \param ask_to the node to ask to
+ * \return the id of its predecessor node, or -1 if the request failed (or if the node does not know its predecessor)
+ */
+int Node::remoteGetPredecessor(int ask_to)
+{
+  int predecessor_id = -1;
+  void* data = nullptr;
+  simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(std::to_string(ask_to));
+  simgrid::s4u::MailboxPtr return_mailbox = simgrid::s4u::Mailbox::byName(std::to_string(id_) + "_pred");
+
+  ChordMessage* message = new ChordMessage(GET_PREDECESSOR);
+  message->request_id = id_;
+  message->answer_to = return_mailbox;
+
+  // send a "Get Predecessor" request to ask_to_id
+  XBT_DEBUG("Sending a 'Get Predecessor' request to %d", ask_to);
+  try {
+    simgrid::s4u::this_actor::send(mailbox, message, 10, timeout);
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to send the 'Get Predecessor' request to %d", ask_to);
+      delete message; // never delivered: still ours to free
+      return predecessor_id;
+    }
+  }
+
+  // receive the answer
+  XBT_DEBUG("Sent 'Get Predecessor' request to %d, waiting for the answer on my mailbox '%s'", ask_to,
+            message->answer_to->name());
+  simgrid::s4u::Comm& comm = simgrid::s4u::this_actor::irecv(return_mailbox, &data);
+
+  try {
+    comm.wait(timeout);
+    ChordMessage* answer = static_cast<ChordMessage*>(data);
+    XBT_DEBUG("Received the answer to my 'Get Predecessor' request: the predecessor of node %d is %d", ask_to,
+              answer->answer_id);
+    predecessor_id = answer->answer_id;
+    delete answer;
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to receive the answer to my 'Get Predecessor' request");
+      delete static_cast<ChordMessage*>(data); // safe: data is nullptr if nothing arrived
+    }
+  }
+
+  return predecessor_id;
+}
+
+/* Returns the closest preceding finger of an id with respect to the finger table of the current node.
+ *
+ * \param id the id to find
+ * \return the closest preceding finger of that id
+ */
+int Node::closestPrecedingFinger(int id)
+{
+  // scan the table from the farthest finger down to the nearest one
+  for (int i = nb_bits - 1; i >= 0; i--) {
+    if (is_in_interval(fingers_[i], id_ + 1, id - 1)) {
+      return fingers_[i];
+    }
+  }
+  return id_; // no better candidate: myself
+}
+
+/* Makes the current node find the successor node of an id.
+ *
+ * \param id the id to find
+ * \return the id of the successor node, or -1 if the request failed
+ */
+int Node::findSuccessor(int id)
+{
+  // is my successor the successor?
+  if (is_in_interval(id, id_ + 1, fingers_[0])) {
+    return fingers_[0];
+  }
+
+  // otherwise, ask the closest preceding finger in my table
+  return remoteFindSuccessor(closestPrecedingFinger(id), id);
+}
+
+/* Asks a remote node for the successor of an id.
+ *
+ * The peer answers by echoing the same message object back with answer_id set
+ * (or forwards it along the ring first -- see handleMessage FIND_SUCCESSOR).
+ *
+ * \param ask_to the node to ask to
+ * \param id the id whose successor is looked up
+ * \return the id of the successor node, or -1 if the request failed
+ */
+int Node::remoteFindSuccessor(int ask_to, int id)
+{
+  int successor = -1;
+  void* data = nullptr;
+  simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(std::to_string(ask_to));
+  simgrid::s4u::MailboxPtr return_mailbox = simgrid::s4u::Mailbox::byName(std::to_string(id_) + "_succ");
+
+  ChordMessage* message = new ChordMessage(FIND_SUCCESSOR);
+  // FIXED: the request must carry the id to look up, not our own id (id_);
+  // the FIND_SUCCESSOR handler uses request_id as the key to resolve.
+  message->request_id = id;
+  message->answer_to = return_mailbox;
+
+  // send a "Find Successor" request to ask_to_id
+  XBT_DEBUG("Sending a 'Find Successor' request to %d for id %d", ask_to, id);
+  try {
+    simgrid::s4u::this_actor::send(mailbox, message, 10, timeout);
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to send the 'Find Successor' request to %d for id %d", ask_to, id);
+      delete message; // never delivered: still ours to free
+      return successor;
+    }
+  }
+  // receive the answer
+  XBT_DEBUG("Sent a 'Find Successor' request to %d for key %d, waiting for the answer", ask_to, id);
+  simgrid::s4u::Comm& comm = simgrid::s4u::this_actor::irecv(return_mailbox, &data);
+
+  try {
+    comm.wait(timeout);
+    ChordMessage* answer = static_cast<ChordMessage*>(data);
+    XBT_DEBUG("Received the answer to my 'Find Successor' request: the successor of key %d is %d",
+              answer->request_id, answer->answer_id);
+    successor = answer->answer_id;
+    delete answer;
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to receive the answer to my 'Find Successor' request");
+      delete static_cast<ChordMessage*>(data); // safe: data is nullptr if nothing arrived
+    }
+  }
+  return successor;
+}
+
+/* Notifies the current node that its predecessor may have changed. */
+void Node::notify(int predecessor_candidate_id)
+{
+  // accept the candidate if I have no predecessor, or if it sits between my current predecessor and me
+  if (pred_id_ == -1 || is_in_interval(predecessor_candidate_id, pred_id_ + 1, id_ - 1)) {
+    setPredecessor(predecessor_candidate_id);
+    printFingerTable();
+  } else {
+    XBT_DEBUG("I don't have to change my predecessor to %d", predecessor_candidate_id);
+  }
+}
+
+/* Notifies a remote node that its predecessor may have changed (fire-and-forget: no answer expected). */
+void Node::remoteNotify(int notify_id, int predecessor_candidate_id)
+{
+  ChordMessage* message = new ChordMessage(NOTIFY);
+  message->request_id = predecessor_candidate_id;
+  message->answer_to = nullptr; // no answer expected
+
+  // send a "Notify" request to notify_id
+  XBT_DEBUG("Sending a 'Notify' request to %d", notify_id);
+  simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(std::to_string(notify_id));
+  try {
+    // TODO make it a dsend
+    // NOTE(review): the CommPtr returned by isend is dropped; whether the comm
+    // completes detached here depends on s4u semantics -- TODO confirm.
+    simgrid::s4u::this_actor::isend(mailbox, message, 10);
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Send of 'Notify' failed due to an expired timeout on receiver side");
+      delete message;
+    }
+  }
+}
+
+/* This function is called periodically. It checks the immediate successor of the current node. */
+void Node::stabilize()
+{
+  XBT_DEBUG("Stabilizing node");
+
+  // get the predecessor of my immediate successor
+  int candidate_id;
+  int successor_id = fingers_[0];
+  if (successor_id != id_) {
+    candidate_id = remoteGetPredecessor(successor_id);
+  } else {
+    candidate_id = pred_id_; // I am my own successor: use my own predecessor
+  }
+
+  // this node is a candidate to become my new successor
+  if (candidate_id != -1 && is_in_interval(candidate_id, id_ + 1, successor_id - 1)) {
+    setFinger(0, candidate_id);
+  }
+  if (successor_id != id_) {
+    remoteNotify(successor_id, id_); // tell my successor that I may be its predecessor
+  }
+}
+
+/* This function is called when a node receives a message.
+ *
+ * Answers (FIND_SUCCESSOR, GET_PREDECESSOR, PREDECESSOR_ALIVE) reuse and echo
+ * the incoming message object back to the issuer, which then deletes it;
+ * one-way messages (NOTIFY, *_LEAVING) are deleted here.
+ *
+ * \param message the message to handle (don't touch it afterward: it will be destroyed, reused or forwarded)
+ */
+void Node::handleMessage(ChordMessage* message)
+{
+  switch (message->type) {
+  case FIND_SUCCESSOR:
+    XBT_DEBUG("Received a 'Find Successor' request from %s for id %d", message->issuer_host_name.c_str(),
+              message->request_id);
+    // is my successor the successor?
+    if (is_in_interval(message->request_id, id_ + 1, fingers_[0])) {
+      message->type = FIND_SUCCESSOR_ANSWER;
+      message->answer_id = fingers_[0];
+      XBT_DEBUG("Sending back a 'Find Successor Answer' to %s (mailbox %s): the successor of %d is %d",
+                message->issuer_host_name.c_str(), message->answer_to->name(), message->request_id,
+                message->answer_id);
+      // TODO Replace by dsend
+      try {
+        simgrid::s4u::this_actor::isend(message->answer_to, message, 10);
+      } catch (xbt_ex& e) {
+        if (e.category == timeout_error) {
+          XBT_DEBUG("Send of 'Find Successor Answer' failed due to an expired timeout on receiver side");
+        }
+      }
+    } else {
+      // otherwise, forward the request to the closest preceding finger in my table
+      int closest = closestPrecedingFinger(message->request_id);
+      XBT_DEBUG("Forwarding the 'Find Successor' request for id %d to my closest preceding finger %d",
+                message->request_id, closest);
+      simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(std::to_string(closest));
+      // TODO make it a dsend
+      try {
+        simgrid::s4u::this_actor::isend(mailbox, message, 10);
+      } catch (xbt_ex& e) {
+        if (e.category == timeout_error) {
+          XBT_DEBUG("Forward of 'Find Successor' failed due to an expired timeout on receiver side");
+        }
+      }
+    }
+    break;
+
+  case GET_PREDECESSOR:
+    XBT_DEBUG("Receiving a 'Get Predecessor' request from %s", message->issuer_host_name.c_str());
+    message->type = GET_PREDECESSOR_ANSWER;
+    message->answer_id = pred_id_;
+    XBT_DEBUG("Sending back a 'Get Predecessor Answer' to %s via mailbox '%s': my predecessor is %d",
+              message->issuer_host_name.c_str(), message->answer_to->name(), message->answer_id);
+    // TODO make it a dsend
+    try {
+      simgrid::s4u::this_actor::isend(message->answer_to, message, 10);
+    } catch (xbt_ex& e) {
+      if (e.category == timeout_error) {
+        XBT_DEBUG("Send of 'Get Predecessor Answer' failed due to an expired timeout on receiver side");
+      }
+    }
+    break;
+
+  case NOTIFY:
+    // someone is telling me that he may be my new predecessor
+    XBT_DEBUG("Receiving a 'Notify' request from %s", message->issuer_host_name.c_str());
+    notify(message->request_id);
+    delete message;
+    break;
+
+  case PREDECESSOR_LEAVING:
+    // my predecessor is about to quit
+    XBT_DEBUG("Receiving a 'Predecessor Leaving' message from %s", message->issuer_host_name.c_str());
+    // modify my predecessor
+    setPredecessor(message->request_id);
+    delete message;
+    /*TODO :
+      >> notify my new predecessor
+      >> send a notify_predecessors !!
+     */
+    break;
+
+  case SUCCESSOR_LEAVING:
+    // my successor is about to quit
+    XBT_DEBUG("Receiving a 'Successor Leaving' message from %s", message->issuer_host_name.c_str());
+    // modify my successor FIXME : this should be implicit ?
+    setFinger(0, message->request_id);
+    delete message;
+    /* TODO
+      >> notify my new successor
+      >> update my table & predecessors table */
+    break;
+
+  case PREDECESSOR_ALIVE:
+    XBT_DEBUG("Receiving a 'Predecessor Alive' request from %s", message->issuer_host_name.c_str());
+    message->type = PREDECESSOR_ALIVE_ANSWER;
+    XBT_DEBUG("Sending back a 'Predecessor Alive Answer' to %s (mailbox %s)",
+              message->issuer_host_name.c_str(), message->answer_to->name());
+    // TODO Make it a dsend
+    try {
+      simgrid::s4u::this_actor::isend(message->answer_to, message, 10);
+    } catch (xbt_ex& e) {
+      if (e.category == timeout_error) {
+        XBT_DEBUG("Send of 'Predecessor Alive' failed due to an expired timeout on receiver side");
+      }
+    }
+    break;
+
+  default:
+    XBT_DEBUG("Ignoring unexpected message: %d from %s", message->type, message->issuer_host_name.c_str());
+    delete message;
+  }
+}
--- /dev/null
+/* Copyright (c) 2010-2016. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "s4u_dht-chord.hpp"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_chord, "Messages specific for this s4u example");
+simgrid::xbt::Extension<simgrid::s4u::Host, HostChord> HostChord::EXTENSION_ID;
+
+int nb_bits = 24;       // number of bits of the ids (default; overridden by -nb_bits)
+int nb_keys = 0;        // number of different ids: 2^nb_bits, computed in chord_init()
+int timeout = 50;       // communication timeout (overridden by -timeout)
+int* powers2 = nullptr; // powers2[i] == 2^i, precomputed in chord_init(), freed in chord_exit()
+
+/* Global initialization of the Chord simulation: precomputes the powers of 2,
+ * derives nb_keys = 2^nb_bits, and attaches a HostChord extension (per-host
+ * random stream) to every host of the platform. */
+static void chord_init()
+{
+  // compute the powers of 2 once for all
+  powers2 = new int[nb_bits];
+  int pow = 1;
+  for (int i = 0; i < nb_bits; i++) {
+    powers2[i] = pow;
+    pow = pow << 1;
+  }
+  nb_keys = pow; // 2^nb_bits
+  XBT_DEBUG("Sets nb_keys to %d", nb_keys);
+
+  HostChord::EXTENSION_ID = simgrid::s4u::Host::extension_create<HostChord>();
+
+  std::vector<simgrid::s4u::Host*> list;
+  simgrid::s4u::Engine::instance()->hostList(&list);
+  for (auto host : list)
+    host->extension_set(new HostChord(host));
+}
+
+// Releases the global state allocated by chord_init().
+static void chord_exit()
+{
+  delete[] powers2;
+}
+
+/* Entry point: parses the optional -nb_bits=n / -timeout=t flags, then expects
+ * a platform file and a deployment file, and runs the Chord simulation. */
+int main(int argc, char* argv[])
+{
+  simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+  xbt_assert(argc > 2, "Usage: %s [-nb_bits=n] [-timeout=t] platform_file deployment_file\n"
+                       "\tExample: %s ../msg_platform.xml chord.xml\n",
+             argv[0], argv[0]);
+  char** options = &argv[1];
+  // FIXED: guard against walking past the NULL terminator of argv when every
+  // remaining argument is an option (the old strncmp(options[0], ...) would
+  // dereference NULL in that case).
+  while (options[0] != nullptr && !strncmp(options[0], "-", 1)) {
+    unsigned int length = strlen("-nb_bits=");
+    if (!strncmp(options[0], "-nb_bits=", length) && strlen(options[0]) > length) {
+      nb_bits = xbt_str_parse_int(options[0] + length, "Invalid nb_bits parameter: %s");
+      XBT_DEBUG("Set nb_bits to %d", nb_bits);
+    } else {
+      length = strlen("-timeout=");
+      if (!strncmp(options[0], "-timeout=", length) && strlen(options[0]) > length) {
+        timeout = xbt_str_parse_int(options[0] + length, "Invalid timeout parameter: %s");
+        XBT_DEBUG("Set timeout to %d", timeout);
+      } else {
+        xbt_die("Invalid chord option '%s'", options[0]);
+      }
+    }
+    options++;
+  }
+  // the two remaining arguments are the platform and deployment files
+  xbt_assert(options[0] != nullptr && options[1] != nullptr,
+             "Missing platform and/or deployment file. Usage: %s [-nb_bits=n] [-timeout=t] platform_file deployment_file",
+             argv[0]);
+
+  e->loadPlatform(options[0]);
+
+  chord_init();
+
+  e->registerFunction<Node>("node");
+  e->loadDeployment(options[1]);
+
+  e->run();
+
+  XBT_INFO("Simulated time: %g", e->getClock());
+
+  chord_exit();
+
+  return 0;
+}
--- /dev/null
+/* Copyright (c) 2016-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#ifndef S4U_CHORD_HPP
+#define S4U_CHORD_HPP
+#include "simgrid/s4u.hpp"
+#include <string>
+#include <xbt/RngStream.h>
+#include <xbt/ex.hpp>
+#include <xbt/str.h>
+
+#define MAX_SIMULATION_TIME 1000
+#define PERIODIC_STABILIZE_DELAY 20
+#define PERIODIC_FIX_FINGERS_DELAY 120
+#define PERIODIC_CHECK_PREDECESSOR_DELAY 120
+#define PERIODIC_LOOKUP_DELAY 10
+#define SLEEP_DELAY 4.9999
+
+extern int nb_bits;
+extern int nb_keys;
+extern int timeout;
+extern int* powers2;
+
+/* Host extension carrying a per-host random stream, installed on every host by
+ * chord_init() and looked up by nodes through Host::extension<HostChord>(). */
+class HostChord {
+  RngStream stream_;
+  simgrid::s4u::Host* host = nullptr;
+
+public:
+  static simgrid::xbt::Extension<simgrid::s4u::Host, HostChord> EXTENSION_ID;
+
+  explicit HostChord(simgrid::s4u::Host* ptr) : host(ptr)
+  {
+    // FIXED typo in the stream label: "RngSream" -> "RngStream" (internal name only)
+    std::string descr = std::string("RngStream<") + host->cname() + ">";
+    stream_ = RngStream_CreateStream(descr.c_str());
+  }
+
+  ~HostChord() { RngStream_DeleteStream(&stream_); };
+
+  RngStream getStream() { return stream_; };
+};
+
+/* Types of tasks exchanged between nodes. */
+typedef enum {
+  FIND_SUCCESSOR,           // lookup request: find the successor of request_id
+  FIND_SUCCESSOR_ANSWER,    // answer carrying the successor in answer_id
+  GET_PREDECESSOR,          // ask a node for its predecessor
+  GET_PREDECESSOR_ANSWER,   // answer carrying the predecessor in answer_id
+  NOTIFY,                   // "I may be your new predecessor" (one-way)
+  SUCCESSOR_LEAVING,        // sent to the predecessor of a leaving node
+  PREDECESSOR_LEAVING,      // sent to the successor of a leaving node
+  PREDECESSOR_ALIVE,        // liveness ping sent to the predecessor
+  PREDECESSOR_ALIVE_ANSWER  // liveness ping echoed back
+} e_message_type_t;
+
+/* Payload exchanged between Chord nodes. Answer messages reuse the request
+ * object: the receiver mutates 'type'/'answer_id' and sends it back. */
+class ChordMessage {
+public:
+  e_message_type_t type;              // type of message
+  std::string issuer_host_name;       // used for logging
+  int request_id = -1;                // id (used by some types of messages)
+  int request_finger = 1;             // finger parameter (not used in the visible code)
+  int answer_id = -1;                 // answer (used by some types of messages)
+  simgrid::s4u::MailboxPtr answer_to; // mailbox to send an answer to (if any; nullptr for NOTIFY)
+
+  explicit ChordMessage(e_message_type_t type) : type(type)
+  {
+    // record the sender's host name once, for logging on the receiver side
+    issuer_host_name = simgrid::s4u::this_actor::host()->name();
+  }
+
+  ~ChordMessage() = default;
+};
+
+/* A Chord node: joins the ring (or creates it), then loops handling incoming
+ * messages and performing the periodic stabilize / fix-fingers / check-predecessor /
+ * lookup maintenance until its deadline, and finally leaves the ring. */
+class Node {
+  int known_id_ = -1;                // id of a node already in the ring (-1: I create the ring)
+  double start_time_ = -1;           // simulated time at which I start
+  double deadline_ = -1;             // how long I stay in the ring
+  bool joined = false;               // whether the join succeeded
+  int id_; // my id
+  int pred_id_ = -1; // predecessor id
+  simgrid::s4u::MailboxPtr mailbox_; // my mailbox
+  int* fingers_; // finger table,(fingers[0] is my successor)
+  int next_finger_to_fix; // index of the next finger to fix in fix_fingers()
+  RngStream stream;                  // per-host random stream (from the HostChord extension)
+
+public:
+  explicit Node(std::vector<std::string> args);
+  ~Node();
+  void join(int known_id);
+  void leave();
+  void notifyAndQuit();
+
+  void randomLookup();
+  void setFinger(int finger_index, int id);
+  void fixFingers();
+  void printFingerTable();
+
+  void setPredecessor(int predecessor_id);
+  void checkPredecessor();
+  int remoteGetPredecessor(int ask_to);
+  int closestPrecedingFinger(int id);
+  int findSuccessor(int id);
+  int remoteFindSuccessor(int ask_to, int id);
+
+  void notify(int predecessor_candidate_id);
+  void remoteNotify(int notify_id, int predecessor_candidate_id);
+  void stabilize();
+  void handleMessage(ChordMessage* message);
+
+  // Actor body: main event loop of the node.
+  void operator()()
+  {
+    simgrid::s4u::this_actor::sleep_for(start_time_); // actors start at t=0, so this waits until start_time_
+    if (known_id_ == -1) {
+      // I create the ring: I am my own ring of one node
+      setPredecessor(-1); // -1 means that I have no predecessor
+      printFingerTable();
+      joined = true;
+    } else {
+      join(known_id_);
+    }
+
+    if (!joined)
+      return; // could not join the ring: give up
+    ChordMessage* message = nullptr;
+    void* data = nullptr;
+    double now = simgrid::s4u::Engine::getClock();
+    double next_stabilize_date = start_time_ + PERIODIC_STABILIZE_DELAY;
+    double next_fix_fingers_date = start_time_ + PERIODIC_FIX_FINGERS_DELAY;
+    double next_check_predecessor_date = start_time_ + PERIODIC_CHECK_PREDECESSOR_DELAY;
+    double next_lookup_date = start_time_ + PERIODIC_LOOKUP_DELAY;
+
+    while ((now < (start_time_ + deadline_)) && now < MAX_SIMULATION_TIME) {
+      data = nullptr;
+      simgrid::s4u::Comm& comm_receive = simgrid::s4u::this_actor::irecv(mailbox_, &data);
+      // poll the pending receive; between polls, run whichever periodic task is due, else sleep
+      // NOTE(review): if the outer loop exits while comm_receive is still pending,
+      // that communication is abandoned without cancel/wait -- TODO confirm this is safe.
+      while ((now < (start_time_ + deadline_)) && now < MAX_SIMULATION_TIME && !comm_receive.test()) {
+        // no task was received: make some periodic calls
+        if (now >= next_stabilize_date) {
+          stabilize();
+          next_stabilize_date = simgrid::s4u::Engine::getClock() + PERIODIC_STABILIZE_DELAY;
+        } else if (now >= next_fix_fingers_date) {
+          fixFingers();
+          next_fix_fingers_date = simgrid::s4u::Engine::getClock() + PERIODIC_FIX_FINGERS_DELAY;
+        } else if (now >= next_check_predecessor_date) {
+          checkPredecessor();
+          next_check_predecessor_date = simgrid::s4u::Engine::getClock() + PERIODIC_CHECK_PREDECESSOR_DELAY;
+        } else if (now >= next_lookup_date) {
+          randomLookup();
+          next_lookup_date = simgrid::s4u::Engine::getClock() + PERIODIC_LOOKUP_DELAY;
+        } else {
+          // nothing to do: sleep for a while
+          simgrid::s4u::this_actor::sleep_for(SLEEP_DELAY);
+        }
+        now = simgrid::s4u::Engine::getClock();
+      }
+
+      if (data != nullptr) {
+        message = static_cast<ChordMessage*>(data);
+        handleMessage(message); // handleMessage owns the message from here on
+      }
+      now = simgrid::s4u::Engine::getClock();
+    }
+    if (data != nullptr) {
+      delete static_cast<ChordMessage*>(data);
+    }
+    // leave the ring
+    leave();
+  }
+};
+
+#endif
--- /dev/null
+#! ./tesh
+
+p Testing the Chord implementation with S4U
+
+! output sort 19
+$ $SG_TEST_EXENV ${bindir:=.}/s4u_dht-chord$EXEEXT -nb_bits=3 ${srcdir:=.}/cluster.xml ${srcdir:=.}/../s4u/dht-chord/s4u_dht-chord_d.xml --log=s4u_chord.thres:verbose "--log=root.fmt:[%10.5r]%e(%P@%h)%e%m%n"
+> [ 0.00000] (node@node-0.acme.org) My finger table:
+> [ 0.00000] (node@node-0.acme.org) Start | Succ
+> [ 0.00000] (node@node-0.acme.org) 3 | 42
+> [ 0.00000] (node@node-0.acme.org) 4 | 42
+> [ 0.00000] (node@node-0.acme.org) 6 | 42
+> [ 0.00000] (node@node-0.acme.org) Predecessor: -1
+> [ 10.00000] (node@node-1.acme.org) Joining the ring with id 366680, knowing node 42
+> [ 15.00751] (node@node-1.acme.org) My new finger #0 is 42
+> [ 15.00751] (node@node-1.acme.org) My finger table:
+> [ 15.00751] (node@node-1.acme.org) Start | Succ
+> [ 15.00751] (node@node-1.acme.org) 1 | 42
+> [ 15.00751] (node@node-1.acme.org) 2 | 366680
+> [ 15.00751] (node@node-1.acme.org) 4 | 366680
+> [ 15.00751] (node@node-1.acme.org) Predecessor: -1
+> [ 20.00000] (node@node-2.acme.org) Joining the ring with id 533744, knowing node 366680
+> [ 30.00000] (node@node-3.acme.org) Joining the ring with id 1319738, knowing node 42
+> [ 30.00721] (node@node-2.acme.org) My new finger #0 is 42
+> [ 30.00721] (node@node-2.acme.org) My finger table:
+> [ 30.00721] (node@node-2.acme.org) Start | Succ
+> [ 30.00721] (node@node-2.acme.org) 1 | 42
+> [ 30.00721] (node@node-2.acme.org) 2 | 533744
+> [ 30.00721] (node@node-2.acme.org) 4 | 533744
+> [ 30.00721] (node@node-2.acme.org) Predecessor: -1
+> [ 35.00711] (node@node-3.acme.org) My new finger #0 is 42
+> [ 35.00711] (node@node-3.acme.org) My finger table:
+> [ 35.00711] (node@node-3.acme.org) Start | Succ
+> [ 35.00711] (node@node-3.acme.org) 3 | 42
+> [ 35.00711] (node@node-3.acme.org) 4 | 1319738
+> [ 35.00711] (node@node-3.acme.org) 6 | 1319738
+> [ 35.00711] (node@node-3.acme.org) Predecessor: -1
+> [ 40.00000] (node@node-4.acme.org) Joining the ring with id 16509405, knowing node 366680
+> [ 49.99900] (node@node-0.acme.org) My new predecessor is 366680
+> [ 49.99900] (node@node-0.acme.org) My finger table:
+> [ 49.99900] (node@node-0.acme.org) Start | Succ
+> [ 49.99900] (node@node-0.acme.org) 3 | 42
+> [ 49.99900] (node@node-0.acme.org) 4 | 42
+> [ 49.99900] (node@node-0.acme.org) 6 | 42
+> [ 49.99900] (node@node-0.acme.org) Predecessor: 366680
+> [ 49.99900] (node@node-0.acme.org) My new finger #0 is 366680
+> [ 55.00671] (node@node-4.acme.org) My new finger #0 is 366680
+> [ 55.00671] (node@node-4.acme.org) My finger table:
+> [ 55.00671] (node@node-4.acme.org) Start | Succ
+> [ 55.00671] (node@node-4.acme.org) 6 | 366680
+> [ 55.00671] (node@node-4.acme.org) 7 | 16509405
+> [ 55.00671] (node@node-4.acme.org) 1 | 16509405
+> [ 55.00671] (node@node-4.acme.org) Predecessor: -1
+> [ 60.00000] (node@node-6.acme.org) Joining the ring with id 16728096, knowing node 1319738
+> [ 65.00651] (node@node-3.acme.org) My new finger #0 is 366680
+> [ 65.01431] (node@node-6.acme.org) My new finger #0 is 366680
+> [ 65.01431] (node@node-6.acme.org) My finger table:
+> [ 65.01431] (node@node-6.acme.org) Start | Succ
+> [ 65.01431] (node@node-6.acme.org) 1 | 366680
+> [ 65.01431] (node@node-6.acme.org) 2 | 16728096
+> [ 65.01431] (node@node-6.acme.org) 4 | 16728096
+> [ 65.01431] (node@node-6.acme.org) Predecessor: -1
+> [ 70.00641] (node@node-1.acme.org) My new predecessor is 16509405
+> [ 70.00641] (node@node-1.acme.org) My finger table:
+> [ 70.00641] (node@node-1.acme.org) Start | Succ
+> [ 70.00641] (node@node-1.acme.org) 1 | 42
+> [ 70.00641] (node@node-1.acme.org) 2 | 366680
+> [ 70.00641] (node@node-1.acme.org) 4 | 366680
+> [ 70.00641] (node@node-1.acme.org) Predecessor: 16509405
+> [ 80.01401] (node@node-0.acme.org) My new finger #0 is 16509405
+> [ 85.01391] (node@node-6.acme.org) My new finger #0 is 16509405
+> [ 100.02922] (node@node-3.acme.org) My new finger #0 is 16509405
+> [ 110.02902] (node@node-4.acme.org) My new predecessor is 42
+> [ 110.02902] (node@node-4.acme.org) My finger table:
+> [ 110.02902] (node@node-4.acme.org) Start | Succ
+> [ 110.02902] (node@node-4.acme.org) 6 | 366680
+> [ 110.02902] (node@node-4.acme.org) 7 | 16509405
+> [ 110.02902] (node@node-4.acme.org) 1 | 16509405
+> [ 110.02902] (node@node-4.acme.org) Predecessor: 42
+> [ 115.03673] (node@node-6.acme.org) My new finger #0 is 42
+> [ 200.05164] (node@node-3.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 210.04364] (node@node-1.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 210.05925] (node@node-4.acme.org) My new predecessor is -1
+> [ 220.05905] (node@node-4.acme.org) My new predecessor is 42
+> [ 220.05905] (node@node-4.acme.org) My finger table:
+> [ 220.05905] (node@node-4.acme.org) Start | Succ
+> [ 220.05905] (node@node-4.acme.org) 6 | 366680
+> [ 220.05905] (node@node-4.acme.org) 7 | 16509405
+> [ 220.05905] (node@node-4.acme.org) 1 | 16509405
+> [ 220.05905] (node@node-4.acme.org) Predecessor: 42
+> [ 220.07466] (node@node-0.acme.org) My new predecessor is 16509405
+> [ 225.05895] (node@node-4.acme.org) My new finger #0 is 42
+> [ 230.07446] (node@node-0.acme.org) My new predecessor is 533744
+> [ 230.07446] (node@node-0.acme.org) My finger table:
+> [ 230.07446] (node@node-0.acme.org) Start | Succ
+> [ 230.07446] (node@node-0.acme.org) 3 | 16509405
+> [ 230.07446] (node@node-0.acme.org) 4 | 42
+> [ 230.07446] (node@node-0.acme.org) 6 | 42
+> [ 230.07446] (node@node-0.acme.org) Predecessor: 533744
+> [ 235.08217] (node@node-4.acme.org) My new finger #0 is 533744
+> [ 240.08987] (node@node-0.acme.org) My new finger #1 is 16509405
+> [ 240.08987] (node@node-0.acme.org) My finger table:
+> [ 240.08987] (node@node-0.acme.org) Start | Succ
+> [ 240.08987] (node@node-0.acme.org) 3 | 16509405
+> [ 240.08987] (node@node-0.acme.org) 4 | 16509405
+> [ 240.08987] (node@node-0.acme.org) 6 | 42
+> [ 240.08987] (node@node-0.acme.org) Predecessor: 533744
+> [ 250.00000] (node@node-5.acme.org) Joining the ring with id 10874876, knowing node 533744
+> [ 255.11299] (node@node-5.acme.org) My new finger #0 is 16509405
+> [ 255.11299] (node@node-5.acme.org) My finger table:
+> [ 255.11299] (node@node-5.acme.org) Start | Succ
+> [ 255.11299] (node@node-5.acme.org) 5 | 16509405
+> [ 255.11299] (node@node-5.acme.org) 6 | 10874876
+> [ 255.11299] (node@node-5.acme.org) 0 | 10874876
+> [ 255.11299] (node@node-5.acme.org) Predecessor: -1
+> [ 265.09718] (node@node-2.acme.org) My new predecessor is 16509405
+> [ 265.09718] (node@node-2.acme.org) My finger table:
+> [ 265.09718] (node@node-2.acme.org) Start | Succ
+> [ 265.09718] (node@node-2.acme.org) 1 | 42
+> [ 265.09718] (node@node-2.acme.org) 2 | 533744
+> [ 265.09718] (node@node-2.acme.org) 4 | 533744
+> [ 265.09718] (node@node-2.acme.org) Predecessor: 16509405
+> [ 275.11259] (node@node-5.acme.org) My new finger #0 is 42
+> [ 280.10468] (node@node-4.acme.org) My new predecessor is 10874876
+> [ 280.10468] (node@node-4.acme.org) My finger table:
+> [ 280.10468] (node@node-4.acme.org) Start | Succ
+> [ 280.10468] (node@node-4.acme.org) 6 | 533744
+> [ 280.10468] (node@node-4.acme.org) 7 | 16509405
+> [ 280.10468] (node@node-4.acme.org) 1 | 16509405
+> [ 280.10468] (node@node-4.acme.org) Predecessor: 10874876
+> [ 285.13581] (node@node-4.acme.org) My new predecessor is 42
+> [ 285.13581] (node@node-4.acme.org) My finger table:
+> [ 285.13581] (node@node-4.acme.org) Start | Succ
+> [ 285.13581] (node@node-4.acme.org) 6 | 533744
+> [ 285.13581] (node@node-4.acme.org) 7 | 16509405
+> [ 285.13581] (node@node-4.acme.org) 1 | 16509405
+> [ 285.13581] (node@node-4.acme.org) Predecessor: 42
+> [ 300.13551] (node@node-4.acme.org) My new finger #1 is 533744
+> [ 300.13551] (node@node-4.acme.org) My finger table:
+> [ 300.13551] (node@node-4.acme.org) Start | Succ
+> [ 300.13551] (node@node-4.acme.org) 6 | 533744
+> [ 300.13551] (node@node-4.acme.org) 7 | 533744
+> [ 300.13551] (node@node-4.acme.org) 1 | 16509405
+> [ 300.13551] (node@node-4.acme.org) Predecessor: 42
+> [ 300.14332] (node@node-2.acme.org) My new finger #1 is 42
+> [ 300.14332] (node@node-2.acme.org) My finger table:
+> [ 300.14332] (node@node-2.acme.org) Start | Succ
+> [ 300.14332] (node@node-2.acme.org) 1 | 42
+> [ 300.14332] (node@node-2.acme.org) 2 | 42
+> [ 300.14332] (node@node-2.acme.org) 4 | 533744
+> [ 300.14332] (node@node-2.acme.org) Predecessor: 16509405
+> [ 305.14322] (node@node-5.acme.org) My new finger #0 is 533744
+> [ 305.15102] (node@node-0.acme.org) My new finger #0 is 10874876
+> [ 310.15873] (node@node-6.acme.org) My new finger #1 is 42
+> [ 310.15873] (node@node-6.acme.org) My finger table:
+> [ 310.15873] (node@node-6.acme.org) Start | Succ
+> [ 310.15873] (node@node-6.acme.org) 1 | 42
+> [ 310.15873] (node@node-6.acme.org) 2 | 42
+> [ 310.15873] (node@node-6.acme.org) 4 | 16728096
+> [ 310.15873] (node@node-6.acme.org) Predecessor: -1
+> [ 330.16613] (node@node-5.acme.org) My new finger #0 is 16509405
+> [ 335.16603] (node@node-5.acme.org) My new predecessor is 42
+> [ 335.16603] (node@node-5.acme.org) My finger table:
+> [ 335.16603] (node@node-5.acme.org) Start | Succ
+> [ 335.16603] (node@node-5.acme.org) 5 | 16509405
+> [ 335.16603] (node@node-5.acme.org) 6 | 10874876
+> [ 335.16603] (node@node-5.acme.org) 0 | 10874876
+> [ 335.16603] (node@node-5.acme.org) Predecessor: 42
+> [ 340.16593] (node@node-4.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 350.15793] (node@node-2.acme.org) My new predecessor is 42
+> [ 350.16573] (node@node-0.acme.org) My new finger #0 is 533744
+> [ 360.18115] (node@node-0.acme.org) My new finger #2 is 533744
+> [ 360.18115] (node@node-0.acme.org) My finger table:
+> [ 360.18115] (node@node-0.acme.org) Start | Succ
+> [ 360.18115] (node@node-0.acme.org) 3 | 533744
+> [ 360.18115] (node@node-0.acme.org) 4 | 16509405
+> [ 360.18115] (node@node-0.acme.org) 6 | 533744
+> [ 360.18115] (node@node-0.acme.org) Predecessor: 533744
+> [ 420.23459] (node@node-2.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 425.22668] (node@node-0.acme.org) My new predecessor is 42
+> [ 475.23449] (node@node-0.acme.org) My new finger #0 is 42
+> [ 480.23439] (node@node-0.acme.org) My new predecessor is 16728096
+> [ 480.23439] (node@node-0.acme.org) My finger table:
+> [ 480.23439] (node@node-0.acme.org) Start | Succ
+> [ 480.23439] (node@node-0.acme.org) 3 | 42
+> [ 480.23439] (node@node-0.acme.org) 4 | 16509405
+> [ 480.23439] (node@node-0.acme.org) 6 | 533744
+> [ 480.23439] (node@node-0.acme.org) Predecessor: 16728096
+> [ 485.24209] (node@node-6.acme.org) My new finger #2 is 42
+> [ 485.24209] (node@node-6.acme.org) My finger table:
+> [ 485.24209] (node@node-6.acme.org) Start | Succ
+> [ 485.24209] (node@node-6.acme.org) 1 | 42
+> [ 485.24209] (node@node-6.acme.org) 2 | 42
+> [ 485.24209] (node@node-6.acme.org) 4 | 42
+> [ 485.24209] (node@node-6.acme.org) Predecessor: -1
+> [ 495.24970] (node@node-0.acme.org) My new finger #0 is 16728096
+> [ 575.26471] (node@node-6.acme.org) My new predecessor is 42
+> [ 575.26471] (node@node-6.acme.org) My finger table:
+> [ 575.26471] (node@node-6.acme.org) Start | Succ
+> [ 575.26471] (node@node-6.acme.org) 1 | 42
+> [ 575.26471] (node@node-6.acme.org) 2 | 42
+> [ 575.26471] (node@node-6.acme.org) 4 | 42
+> [ 575.26471] (node@node-6.acme.org) Predecessor: 42
+> [ 600.27202] (node@node-0.acme.org) My new finger #1 is 16728096
+> [ 600.27202] (node@node-0.acme.org) My finger table:
+> [ 600.27202] (node@node-0.acme.org) Start | Succ
+> [ 600.27202] (node@node-0.acme.org) 3 | 16728096
+> [ 600.27202] (node@node-0.acme.org) 4 | 16728096
+> [ 600.27202] (node@node-0.acme.org) 6 | 533744
+> [ 600.27202] (node@node-0.acme.org) Predecessor: 16728096
+> [ 720.36329] (node@node-0.acme.org) My new finger #2 is 16728096
+> [ 720.36329] (node@node-0.acme.org) My finger table:
+> [ 720.36329] (node@node-0.acme.org) Start | Succ
+> [ 720.36329] (node@node-0.acme.org) 3 | 16728096
+> [ 720.36329] (node@node-0.acme.org) 4 | 16728096
+> [ 720.36329] (node@node-0.acme.org) 6 | 16728096
+> [ 720.36329] (node@node-0.acme.org) Predecessor: 16728096
+> [ 855.46207] (node@node-6.acme.org) My new finger #2 is 16728096
+> [ 855.46207] (node@node-6.acme.org) My finger table:
+> [ 855.46207] (node@node-6.acme.org) Start | Succ
+> [ 855.46207] (node@node-6.acme.org) 1 | 42
+> [ 855.46207] (node@node-6.acme.org) 2 | 42
+> [ 855.46207] (node@node-6.acme.org) 4 | 16728096
+> [ 855.46207] (node@node-6.acme.org) Predecessor: 42
+> [ 860.46197] (node@node-6.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 865.45406] (node@node-0.acme.org) My new predecessor is 42
+> [ 890.43115] (node@node-5.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 915.45406] (node@node-0.acme.org) My new finger #0 is 42
+> [ 940.45356] (node@node-0.acme.org) My new finger #0 is 16509405
+> [ 990.45356] (node@node-0.acme.org) My new finger #1 is 16509405
+> [ 990.45356] (node@node-0.acme.org) My finger table:
+> [ 990.45356] (node@node-0.acme.org) Start | Succ
+> [ 990.45356] (node@node-0.acme.org) 3 | 16509405
+> [ 990.45356] (node@node-0.acme.org) 4 | 16509405
+> [ 990.45356] (node@node-0.acme.org) 6 | 16728096
+> [ 990.45356] (node@node-0.acme.org) Predecessor: 42
+> [1040.45356] (node@node-0.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [1090.46137] (maestro@) Simulated time: 1090.46
--- /dev/null
+<?xml version='1.0'?>
+<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
+<platform version="4">
+ <process host="node-0.acme.org" function="node">
+ <argument value="42"/>
+ <argument value="1000"/>
+ </process>
+ <process host="node-1.acme.org" function="node">
+ <argument value="366680" />
+ <argument value="42" />
+ <argument value="10" />
+ <argument value="200" />
+ </process>
+ <process host="node-2.acme.org" function="node">
+ <argument value="533744" />
+ <argument value="366680" />
+ <argument value="20" />
+ <argument value="400" />
+ </process>
+ <process host="node-3.acme.org" function="node">
+ <argument value="1319738" />
+ <argument value="42" />
+ <argument value="30" />
+ <argument value="150" />
+ </process>
+ <process host="node-4.acme.org" function="node">
+ <argument value="16509405" />
+ <argument value="366680" />
+ <argument value="40" />
+ <argument value="300" />
+ </process>
+ <process host="node-5.acme.org" function="node">
+ <argument value="10874876" />
+ <argument value="533744" />
+ <argument value="250" />
+ <argument value="600" />
+ </process>
+ <process host="node-6.acme.org" function="node">
+ <argument value="16728096" />
+ <argument value="1319738" />
+ <argument value="60" />
+ <argument value="800" />
+ </process>
+</platform>
\ No newline at end of file
* See \ref Comm for the full communication API (including non blocking communications).
*/
XBT_PUBLIC(void*) recv(MailboxPtr chan);
+ XBT_PUBLIC(Comm&) irecv(MailboxPtr chan, void** data);
/** Block the actor until it delivers a message of the given simulated size to the given mailbox
*
* See \ref Comm for the full communication API (including non blocking communications).
*/
XBT_PUBLIC(void) send(MailboxPtr chan, void* payload, double simulatedSize);
+ XBT_PUBLIC(void) send(MailboxPtr chan, void* payload, double simulatedSize, double timeout);
XBT_PUBLIC(Comm&) isend(MailboxPtr chan, void* payload, double simulatedSize);
MPI_CALL(XBT_PUBLIC(int), MPI_Get_accumulate,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
void* result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win));
+
+MPI_CALL(XBT_PUBLIC(int), MPI_Rget,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request));
+MPI_CALL(XBT_PUBLIC(int), MPI_Rput,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request));
+MPI_CALL(XBT_PUBLIC(int), MPI_Raccumulate,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
+ int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request));
+MPI_CALL(XBT_PUBLIC(int), MPI_Rget_accumulate,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
+ void* result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
+ int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request));
+
+MPI_CALL(XBT_PUBLIC(int), MPI_Fetch_and_op,( void *origin_addr, void* result_addr, MPI_Datatype datatype,
+ int target_rank, MPI_Aint target_disp, MPI_Op op, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Compare_and_swap, (void *origin_addr, void *compare_addr,
+ void *result_addr, MPI_Datatype datatype, int target_rank, MPI_Aint target_disp, MPI_Win win));
+
MPI_CALL(XBT_PUBLIC(int), MPI_Alloc_mem, (MPI_Aint size, MPI_Info info, void *baseptr));
MPI_CALL(XBT_PUBLIC(int), MPI_Free_mem, (void *base));
MPI_Comm comm, MPI_Comm *intercomm, int* array_of_errcodes));
MPI_CALL(XBT_PUBLIC(int), MPI_Comm_get_parent,( MPI_Comm *parent));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_complete,(MPI_Win win));
-MPI_CALL(XBT_PUBLIC(int), MPI_Win_lock,(int lock_type, int rank, int assert, MPI_Win win));
+
MPI_CALL(XBT_PUBLIC(int), MPI_Win_post,(MPI_Group group, int assert, MPI_Win win));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_start,(MPI_Group group, int assert, MPI_Win win));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_test,(MPI_Win win, int *flag));
-MPI_CALL(XBT_PUBLIC(int), MPI_Win_unlock,(int rank, MPI_Win win));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_wait,(MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_lock,(int lock_type, int rank, int assert, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_lock_all,(int assert, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_unlock,(int rank, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_unlock_all,(MPI_Win win));
+
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_flush,(int rank, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_flush_local,(int rank, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_flush_all,(MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_flush_local_all,(MPI_Win win));
MPI_CALL(XBT_PUBLIC(int), MPI_File_get_errhandler , (MPI_File file, MPI_Errhandler *errhandler));
MPI_CALL(XBT_PUBLIC(int), MPI_File_set_errhandler, (MPI_File file, MPI_Errhandler errhandler));
/* Fortran specific stuff */
-XBT_PUBLIC(int) __attribute__((weak)) smpi_simulated_main_(int argc, char** argv);
-XBT_PUBLIC(int) __attribute__((weak)) MAIN__();
-XBT_PUBLIC(int) smpi_main(int (*realmain) (int argc, char *argv[]),int argc, char *argv[]);
-XBT_PUBLIC(void) __attribute__((weak)) user_main_();
+XBT_PUBLIC(int) smpi_main(const char* program, int argc, char *argv[]);
XBT_PUBLIC(int) smpi_process_index();
XBT_PUBLIC(void) smpi_process_init(int *argc, char ***argv);
#define MPI_Testall(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Testall(__VA_ARGS__); })
#define MPI_Op_create(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Op_create(__VA_ARGS__); })
#define MPI_Op_free(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Op_free(__VA_ARGS__); })
+#define MPI_Op_commutative(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Op_commutative(__VA_ARGS__); })
#define MPI_Group_free(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Group_free(__VA_ARGS__); })
#define MPI_Group_size(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Group_size(__VA_ARGS__); })
#define MPI_Group_rank(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Group_rank(__VA_ARGS__); })
#define MPI_Reduce_local(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Reduce_local(__VA_ARGS__); })
#define MPI_Win_free(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_free(__VA_ARGS__); })
#define MPI_Win_create(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_create(__VA_ARGS__); })
+#define MPI_Win_allocate(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_allocate(__VA_ARGS__); })
+#define MPI_Win_create_dynamic(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_create_dynamic(__VA_ARGS__); })
+#define MPI_Win_attach(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_attach(__VA_ARGS__); })
+#define MPI_Win_detach(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_detach(__VA_ARGS__); })
#define MPI_Win_set_name(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_set_name(__VA_ARGS__); })
#define MPI_Win_get_name(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_get_name(__VA_ARGS__); })
+#define MPI_Win_set_info(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_set_info(__VA_ARGS__); })
+#define MPI_Win_get_info(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_get_info(__VA_ARGS__); })
#define MPI_Win_get_group(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_get_group(__VA_ARGS__); })
#define MPI_Win_fence(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_fence(__VA_ARGS__); })
#define MPI_Win_get_attr(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_get_attr(__VA_ARGS__); })
#define MPI_Get(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Get(__VA_ARGS__); })
#define MPI_Put(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Put(__VA_ARGS__); })
#define MPI_Accumulate(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Accumulate(__VA_ARGS__); })
+#define MPI_Get_accumulate(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Get_accumulate(__VA_ARGS__); })
+#define MPI_Rget(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Rget(__VA_ARGS__); })
+#define MPI_Rput(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Rput(__VA_ARGS__); })
+#define MPI_Raccumulate(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Raccumulate(__VA_ARGS__); })
+#define MPI_Rget_accumulate(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Rget_accumulate(__VA_ARGS__); })
+#define MPI_Fetch_and_op(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Fetch_and_op(__VA_ARGS__); })
+#define MPI_Compare_and_swap(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Compare_and_swap(__VA_ARGS__); })
#define MPI_Alloc_mem(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Alloc_mem(__VA_ARGS__); })
#define MPI_Free_mem(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Free_mem(__VA_ARGS__); })
#define MPI_Type_f2c(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Type_f2c(__VA_ARGS__); })
#define MPI_Comm_spawn_multiple(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Comm_spawn_multiple(__VA_ARGS__); })
#define MPI_Comm_get_parent(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Comm_get_parent(__VA_ARGS__); })
#define MPI_Win_complete(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_complete(__VA_ARGS__); })
-#define MPI_Win_lock(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_lock(__VA_ARGS__); })
#define MPI_Win_post(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_post(__VA_ARGS__); })
#define MPI_Win_start(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_start(__VA_ARGS__); })
#define MPI_Win_test(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_test(__VA_ARGS__); })
-#define MPI_Win_unlock(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_unlock(__VA_ARGS__); })
#define MPI_Win_wait(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_wait(__VA_ARGS__); })
+#define MPI_Win_lock(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_lock(__VA_ARGS__); })
+#define MPI_Win_lock_all(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_lock_all(__VA_ARGS__); })
+#define MPI_Win_unlock(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_unlock(__VA_ARGS__); })
+#define MPI_Win_unlock_all(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_unlock_all(__VA_ARGS__); })
+#define MPI_Win_flush(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_flush(__VA_ARGS__); })
+#define MPI_Win_flush_local(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_flush_local(__VA_ARGS__); })
+#define MPI_Win_flush_all(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_flush_all(__VA_ARGS__); })
+#define MPI_Win_flush_local_all(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_flush_local_all(__VA_ARGS__); })
#define MPI_File_get_errhandler(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_File_get_errhandler(__VA_ARGS__); })
#define MPI_File_set_errhandler(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_File_set_errhandler(__VA_ARGS__); })
#define MPI_File_open(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_File_open(__VA_ARGS__); })
#define MPI_TESTALL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_testall
#define MPI_OP_CREATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_op_create
#define MPI_OP_FREE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_op_free
+#define MPI_OP_COMMUTATIVE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_op_commutative
#define MPI_GROUP_FREE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_group_free
#define MPI_GROUP_SIZE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_group_size
#define MPI_GROUP_RANK smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_group_rank
#define MPI_REDUCE_LOCAL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_reduce_local
#define MPI_WIN_FREE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_free
#define MPI_WIN_CREATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_create
+#define MPI_WIN_ALLOCATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_allocate
+#define MPI_WIN_CREATE_DYNAMIC smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_create_dynamic
+#define MPI_WIN_ATTACH smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_attach
+#define MPI_WIN_DETACH smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_detach
#define MPI_WIN_SET_NAME smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_set_name
#define MPI_WIN_GET_NAME smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_get_name
+#define MPI_WIN_SET_INFO smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_set_info
+#define MPI_WIN_GET_INFO smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_get_info
#define MPI_WIN_GET_GROUP smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_get_group
#define MPI_WIN_FENCE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_fence
#define MPI_WIN_GET_ATTR smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_get_attr
#define MPI_GET smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_get
#define MPI_PUT smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_put
#define MPI_ACCUMULATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_accumulate
+#define MPI_GET_ACCUMULATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_get_accumulate
+#define MPI_RGET smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_rget
+#define MPI_RPUT smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_rput
+#define MPI_RACCUMULATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_raccumulate
+#define MPI_RGET_ACCUMULATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_rget_accumulate
+#define MPI_FETCH_AND_OP smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_fetch_and_op
+#define MPI_COMPARE_AND_SWAP smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_compare_and_swap
#define MPI_ALLOC_MEM smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_alloc_mem
#define MPI_FREE_MEM smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_free_mem
#define MPI_TYPE_F2C smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_type_f2c
#define MPI_COMM_SPAWN_MULTIPLE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_comm_spawn_multiple
#define MPI_COMM_GET_PARENT smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_comm_get_parent
#define MPI_WIN_COMPLETE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_complete
-#define MPI_WIN_LOCK smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_lock
#define MPI_WIN_POST smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_post
#define MPI_WIN_START smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_start
#define MPI_WIN_TEST smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_test
-#define MPI_WIN_UNLOCK smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_unlock
#define MPI_WIN_WAIT smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_wait
+#define MPI_WIN_LOCK smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_lock
+#define MPI_WIN_LOCK_ALL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_lock_all
+#define MPI_WIN_UNLOCK smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_unlock
+#define MPI_WIN_UNLOCK_ALL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_unlock_all
+#define MPI_WIN_FLUSH smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_flush
+#define MPI_WIN_FLUSH_LOCAL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_flush_local
+#define MPI_WIN_FLUSH_ALL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_flush_all
+#define MPI_WIN_FLUSH_LOCAL_ALL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_flush_local_all
#define MPI_FILE_GET_ERRHANDLER smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_file_get_errhandler
#define MPI_FILE_SET_ERRHANDLER smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_file_set_errhandler
#define MPI_FILE_OPEN smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_file_open
fprintf(trace_file, "%s send %d %d %s\n", process_id, extra->dst, extra->send_size, extra->datatype1);
break;
case TRACING_ISEND:
- fprintf(trace_file, "%s isend %d %d %s\n", process_id, extra->dst, extra->send_size, extra->datatype1);
+ fprintf(trace_file, "%s Isend %d %d %s\n", process_id, extra->dst, extra->send_size, extra->datatype1);
break;
case TRACING_RECV:
fprintf(trace_file, "%s recv %d %d %s\n", process_id, extra->src, extra->send_size, extra->datatype1);
break;
case TRACING_IRECV:
- fprintf(trace_file, "%s irecv %d %d %s\n", process_id, extra->src, extra->send_size, extra->datatype1);
+ fprintf(trace_file, "%s Irecv %d %d %s\n", process_id, extra->src, extra->send_size, extra->datatype1);
break;
case TRACING_TEST:
fprintf(trace_file, "%s test\n", process_id);
fprintf(trace_file, "%s wait\n", process_id);
break;
case TRACING_WAITALL:
- fprintf(trace_file, "%s waitall\n", process_id);
+ fprintf(trace_file, "%s waitAll\n", process_id);
break;
case TRACING_BARRIER:
fprintf(trace_file, "%s barrier\n", process_id);
fprintf(trace_file, "\n");
break;
case TRACING_ALLREDUCE: // rank allreduce comm_size comp_size (datatype)
- fprintf(trace_file, "%s allreduce %d %f %s\n", process_id, extra->send_size, extra->comp_size, extra->datatype1);
+ fprintf(trace_file, "%s allReduce %d %f %s\n", process_id, extra->send_size, extra->comp_size, extra->datatype1);
break;
case TRACING_ALLTOALL: // rank alltoall send_size recv_size (sendtype) (recvtype)
- fprintf(trace_file, "%s alltoall %d %d %s %s\n", process_id, extra->send_size, extra->recv_size, extra->datatype1,
+ fprintf(trace_file, "%s allToAll %d %d %s %s\n", process_id, extra->send_size, extra->recv_size, extra->datatype1,
extra->datatype2);
break;
case TRACING_ALLTOALLV: // rank alltoallv send_size [sendcounts] recv_size [recvcounts] (sendtype) (recvtype)
- fprintf(trace_file, "%s alltoallv %d ", process_id, extra->send_size);
+ fprintf(trace_file, "%s allToAllV %d ", process_id, extra->send_size);
for (i = 0; i < extra->num_processes; i++)
fprintf(trace_file, "%d ", extra->sendcounts[i]);
fprintf(trace_file, "%d ", extra->recv_size);
extra->datatype1, extra->datatype2);
break;
case TRACING_ALLGATHERV: // rank allgatherv send_size [recvcounts] (sendtype) (recvtype)
- fprintf(trace_file, "%s allgatherv %d ", process_id, extra->send_size);
+ fprintf(trace_file, "%s allGatherV %d ", process_id, extra->send_size);
for (i = 0; i < extra->num_processes; i++)
fprintf(trace_file, "%d ", extra->recvcounts[i]);
fprintf(trace_file, "%s %s \n", extra->datatype1, extra->datatype2);
break;
case TRACING_REDUCE_SCATTER: // rank reducescatter [recvcounts] comp_size (sendtype)
- fprintf(trace_file, "%s reducescatter ", process_id);
+ fprintf(trace_file, "%s reduceScatter ", process_id);
for (i = 0; i < extra->num_processes; i++)
fprintf(trace_file, "%d ", extra->recvcounts[i]);
fprintf(trace_file, "%f %s\n", extra->comp_size, extra->datatype1);
fprintf(trace_file, "%s sleep %f\n", process_id, extra->sleep_duration);
break;
case TRACING_GATHERV: // rank gatherv send_size [recvcounts] root (sendtype) (recvtype)
- fprintf(trace_file, "%s gatherv %d ", process_id, extra->send_size);
+ fprintf(trace_file, "%s gatherV %d ", process_id, extra->send_size);
for (i = 0; i < extra->num_processes; i++)
fprintf(trace_file, "%d ", extra->recvcounts[i]);
fprintf(trace_file, "%d %s %s\n", extra->root, extra->datatype1, extra->datatype2);
close(this->memory_file);
if (this->unw_underlying_addr_space != unw_local_addr_space) {
- unw_destroy_addr_space(this->unw_underlying_addr_space);
- _UPT_destroy(this->unw_underlying_context);
+ if (this->unw_underlying_addr_space)
+ unw_destroy_addr_space(this->unw_underlying_addr_space);
+ if (this->unw_underlying_context)
+ _UPT_destroy(this->unw_underlying_context);
}
unw_destroy_addr_space(this->unw_addr_space);
{
std::unique_ptr<simgrid::mc::Process> process(new simgrid::mc::Process(pid, socket));
// TODO, automatic detection of the config from the process
- process->privatized(
- xbt_cfg_get_boolean("smpi/privatize-global-variables"));
+ process->privatized(smpi_privatize_global_variables != SMPI_PRIVATIZE_NONE);
modelChecker_ = std::unique_ptr<ModelChecker>(
new simgrid::mc::ModelChecker(std::move(process)));
xbt_assert(mc_model_checker == nullptr);
-/* Copyright (c) 2011-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2011-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "src/mc/mc_smx.h"
#include "src/mc/VisitedState.hpp"
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(mc_VisitedState, mc,
- "Logging specific to state equaity detection mechanisms");
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(mc_VisitedState, mc, "Logging specific to state equality detection mechanisms");
namespace simgrid {
namespace mc {
return snapshot_compare(num1, s1, num2, s2);
}
-/**
- * \brief Save the current state
- * \return Snapshot of the current state.
- */
+/** @brief Save the current state */
VisitedState::VisitedState(unsigned long state_number)
{
simgrid::mc::Process* process = &(mc_model_checker->process());
this->checkNonTermination(next_state.get());
/* Check whether we already explored next_state in the past (but only if interested in state-equality reduction) */
- if (_sg_mc_max_visited_states == true)
+ if (_sg_mc_max_visited_states > 0)
visitedState_ = visitedStates_.addVisitedState(expandedStatesCount_, next_state.get(), true);
/* If this is a new state (or if we don't care about state-equality reduction) */
} else {
const smx_actor_t previous_issuer = MC_smx_simcall_get_issuer(&prev_state->internal_req);
- XBT_DEBUG("Simcall %d, process %lu (state %d) and simcall %d, process %lu (state %d) are independant",
+ XBT_DEBUG("Simcall %d, process %lu (state %d) and simcall %d, process %lu (state %d) are independent",
req->call, issuer->pid, state->num,
prev_state->internal_req.call,
previous_issuer->pid,
void ProcessComparisonState::initHeapInformation(xbt_mheap_t heap,
std::vector<simgrid::mc::IgnoredHeapRegion>* i)
{
- auto heaplimit = ((struct mdesc *) heap)->heaplimit;
- this->heapsize = ((struct mdesc *) heap)->heapsize;
+ auto heaplimit = heap->heaplimit;
+ this->heapsize = heap->heapsize;
this->to_ignore = i;
this->equals_to.assign(heaplimit * MAX_FRAGMENT_PER_BLOCK, HeapArea());
this->types.assign(heaplimit * MAX_FRAGMENT_PER_BLOCK, nullptr);
std::vector<simgrid::mc::IgnoredHeapRegion>* i1,
std::vector<simgrid::mc::IgnoredHeapRegion>* i2)
{
- if ((((struct mdesc *) heap1)->heaplimit !=
- ((struct mdesc *) heap2)->heaplimit)
- ||
- ((((struct mdesc *) heap1)->heapsize !=
- ((struct mdesc *) heap2)->heapsize)))
+ if ((heap1->heaplimit != heap2->heaplimit) || (heap1->heapsize != heap2->heapsize))
return -1;
- this->heaplimit = ((struct mdesc *) heap1)->heaplimit;
+ this->heaplimit = heap1->heaplimit;
this->std_heap_copy = *mc_model_checker->process().get_heap();
this->processStates[0].initHeapInformation(heap1, i1);
this->processStates[1].initHeapInformation(heap2, i2);
int equal, res_compare = 0;
/* Check busy blocks */
-
i1 = 1;
malloc_info heapinfo_temp1, heapinfo_temp2;
}
i2++;
-
}
if (!equal) {
}
if (heapinfo2b->type < 0) {
- fprintf(stderr, "Unkown mmalloc block type.\n");
+ fprintf(stderr, "Unknown mmalloc block type.\n");
abort();
}
equal = 1;
break;
}
-
}
i2++;
-
}
if (!equal) {
nb_diff1++;
break;
}
-
}
i1++;
-
}
-
}
/* All blocks/fragments are equal to another block/fragment ? */
// Compare the global variables separately for each simulates process:
for (size_t process_index = 0; process_index < process_count; process_index++) {
- int is_diff = compare_global_variables(state,
- object_info, process_index,
- &r1->privatized_data()[process_index],
- &r2->privatized_data()[process_index],
- snapshot1, snapshot2);
- if (is_diff) return 1;
+ if (compare_global_variables(state,
+ object_info, process_index,
+ &r1->privatized_data()[process_index],
+ &r2->privatized_data()[process_index],
+ snapshot1, snapshot2))
+ return 1;
}
return 0;
}
simgrid::mc::Process* process = &mc_model_checker->process();
int errors = 0;
- int res_init;
int hash_result = 0;
if (_sg_mc_hash) {
/* Compare enabled processes */
if (s1->enabled_processes != s2->enabled_processes) {
- XBT_VERB("(%d - %d) Different enabled processes", num1, num2);
- // return 1; ??
+ XBT_VERB("(%d - %d) Different amount of enabled processes", num1, num2);
+ return 1;
}
- unsigned long i = 0;
- size_t size_used1, size_used2;
- int is_diff = 0;
-
/* Compare size of stacks */
- while (i < s1->stacks.size()) {
- size_used1 = s1->stack_sizes[i];
- size_used2 = s2->stack_sizes[i];
+ int is_diff = 0;
+ for (unsigned long i = 0; i < s1->stacks.size(); i++) {
+ size_t size_used1 = s1->stack_sizes[i];
+ size_t size_used2 = s2->stack_sizes[i];
if (size_used1 != size_used2) {
#ifdef MC_DEBUG
XBT_DEBUG("(%d - %d) Different size used in stacks: %zu - %zu", num1, num2, size_used1, size_used2);
return 1;
#endif
}
- i++;
}
+ if (is_diff) // do not proceed if there is any stacks that don't match
+ return 1;
/* Init heap information used in heap comparison algorithm */
xbt_mheap_t heap1 = (xbt_mheap_t)s1->read_bytes(
alloca(sizeof(struct mdesc)), sizeof(struct mdesc),
remote(process->heap_address),
simgrid::mc::ProcessIndexMissing, simgrid::mc::ReadOptions::lazy());
- res_init = state_comparator->initHeapInformation(
- heap1, heap2, &s1->to_ignore, &s2->to_ignore);
+ int res_init = state_comparator->initHeapInformation(heap1, heap2, &s1->to_ignore, &s2->to_ignore);
if (res_init == -1) {
#ifdef MC_DEBUG
/* Stacks comparison */
int diff_local = 0;
-#ifdef MC_DEBUG
- is_diff = 0;
-#endif
for (unsigned int cursor = 0; cursor < s1->stacks.size(); cursor++) {
mc_snapshot_stack_t stack1 = &s1->stacks[cursor];
mc_snapshot_stack_t stack2 = &s2->stacks[cursor];
std::string const& name = region1->object_info()->file_name;
/* Compare global variables */
- is_diff =
- compare_global_variables(*state_comparator,
- region1->object_info(), simgrid::mc::ProcessIndexDisabled,
- region1, region2, s1, s2);
+ if (compare_global_variables(*state_comparator, region1->object_info(), simgrid::mc::ProcessIndexDisabled, region1,
+ region2, s1, s2)) {
- if (is_diff != 0) {
#ifdef MC_DEBUG
XBT_DEBUG("(%d - %d) Different global variables in %s",
num1, num2, name.c_str());
if (r1->issuer == r2->issuer)
return false;
- /* Wait with timeout transitions are not considered by the independence theorem, thus we consider them as dependant with all other transitions */
+ /* Wait with timeout transitions are not considered by the independence theorem, thus we consider them as dependent with all other transitions */
if ((r1->call == SIMCALL_COMM_WAIT && simcall_comm_wait__get__timeout(r1) > 0)
|| (r2->call == SIMCALL_COMM_WAIT
&& simcall_comm_wait__get__timeout(r2) > 0))
break;
default:
- THROW_UNIMPLEMENTED;
+ type = SIMIX_simcall_name(req->call);
+ args = bprintf("??");
+ break;
}
std::string str;
region.size = size;
region.block = ((char*)stack - (char*)heap->heapbase) / BLOCKSIZE + 1;
#if HAVE_SMPI
- if (smpi_privatize_global_variables && process)
+ if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP && process)
region.process_index = smpi_process_index_of_smx_process(process);
else
#endif
c.wait();
}
+/** Blocking send on mailbox @p chan, bounded by @p timeout.
+ *  Initializes a communication carrying @p payload, sets the amount of
+ *  simulated data to exchange, then waits at most @p timeout for completion.
+ *  NOTE(review): per the inline comment, c.start() is optional here —
+ *  c.wait() presumably starts the communication itself; confirm in Comm. */
+void send(MailboxPtr chan, void* payload, double simulatedSize, double timeout)
+{
+  Comm& c = Comm::send_init(chan);
+  c.setRemains(simulatedSize);
+  c.setSrcData(payload);
+  // c.start() is optional.
+  c.wait(timeout);
+}
+
/** Non-blocking send on mailbox @p chan: thin wrapper around Comm::send_async().
 *  Returns immediately with the Comm object tracking the transfer. */
Comm& isend(MailboxPtr chan, void* payload, double simulatedSize)
{
  return Comm::send_async(chan, payload, simulatedSize);
}
+/** Non-blocking receive on mailbox @p chan: thin wrapper around
+ *  Comm::recv_async(). The incoming payload pointer is delivered through
+ *  @p data. */
+Comm& irecv(MailboxPtr chan, void** data)
+{
+  return Comm::recv_async(chan, data);
+}
+
int pid()
{
return SIMIX_process_self()->pid;
/** Initialize an asynchronous receive on mailbox @p dest and start it at once.
 *  @p data is the slot where the incoming payload pointer will be stored.
 *  NOTE(review): the size passed below is sizeof(*data), i.e. the size of a
 *  void* (the pointer slot), not of the payload itself — confirm intended. */
s4u::Comm &Comm::recv_async(MailboxPtr dest, void **data) {
  s4u::Comm &res = s4u::Comm::recv_init(dest);
-  res.setDstData(data);
+  res.setDstData(data, sizeof(*data));
  res.start();
  return res;
}
xbt_cfg_register_alias("smpi/send-is-detached-thresh","smpi/send_is_detached_thresh");
xbt_cfg_register_alias("smpi/send-is-detached-thresh","smpi/send_is_detached_thres");
- xbt_cfg_register_boolean("smpi/privatize-global-variables", "no", nullptr, "Whether we should privatize global variable at runtime.");
+ const char* default_privatization = std::getenv("SMPI_PRIVATIZATION");
+ if (default_privatization == nullptr)
+ default_privatization = "no";
+
+ xbt_cfg_register_string("smpi/privatize-global-variables", default_privatization, nullptr, "Whether we should privatize global variable at runtime (no, yes, mmap, dlopen).");
+
xbt_cfg_register_alias("smpi/privatize-global-variables", "smpi/privatize_global_variables");
xbt_cfg_register_boolean("smpi/grow-injected-times", "yes", nullptr, "Whether we want to make the injected time in MPI_Iprobe and MPI_Test grow, to allow faster simulation. This can make simulation less precise, though.");
*
* \return the process created
*/
-smx_actor_t SIMIX_process_create(
- const char *name,
- std::function<void()> code,
- void *data,
- sg_host_t host,
- xbt_dict_t properties,
- smx_actor_t parent_process)
+smx_actor_t SIMIX_process_create(const char* name, std::function<void()> code, void* data, simgrid::s4u::Host* host,
+ xbt_dict_t properties, smx_actor_t parent_process)
{
XBT_DEBUG("Start process %s on host '%s'", name, host->cname());
process->ppid = parent_process->pid;
/* SMPI process have their own data segment and each other inherit from their father */
#if HAVE_SMPI
- if (smpi_privatize_global_variables) {
+ if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
if (parent_process->pid != 0) {
SIMIX_segment_index_set(process, parent_process->segment_index);
} else {
/* Add properties */
process->properties = properties;
+ /* Make sure that the process is initialized for simix, in case we are called from the Host::onCreation signal */
+ if (host->extension<simgrid::simix::Host>() == nullptr)
+ host->extension_set<simgrid::simix::Host>(new simgrid::simix::Host());
+
/* Add the process to it's host process list */
xbt_swag_insert(process, host->extension<simgrid::simix::Host>()->process_list);
process->ppid = parent_process->pid;
/* SMPI process have their own data segment and each other inherit from their father */
#if HAVE_SMPI
- if (smpi_privatize_global_variables) {
+ if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
if (parent_process->pid != 0) {
SIMIX_segment_index_set(process, parent_process->segment_index);
} else {
} else if (siginfo->si_signo == SIGSEGV) {
fprintf(stderr, "Segmentation fault.\n");
#if HAVE_SMPI
- if (smpi_enabled() && !smpi_privatize_global_variables) {
+ if (smpi_enabled() && smpi_privatize_global_variables == SMPI_PRIVATIZE_NONE) {
#if HAVE_PRIVATIZATION
fprintf(stderr, "Try to enable SMPI variable privatization with --cfg=smpi/privatize-global-variables:yes.\n");
#else
sg_platf_init();
simgrid::s4u::onPlatformCreated.connect(SIMIX_post_create_environment);
simgrid::s4u::Host::onCreation.connect([](simgrid::s4u::Host& host) {
- host.extension_set<simgrid::simix::Host>(new simgrid::simix::Host());
+ if (host.extension<simgrid::simix::Host>() == nullptr) // another callback to the same signal may have created it
+ host.extension_set<simgrid::simix::Host>(new simgrid::simix::Host());
});
simgrid::surf::storageCreatedCallbacks.connect([](simgrid::surf::Storage* storage) {
SIMIX_wake_processes();
} while (SIMIX_execute_tasks());
+ /* If only daemon processes remain, cancel their actions, mark them to die and reschedule them */
+ if (simix_global->process_list.size() == simix_global->daemons.size())
+ for (const auto& dmon : simix_global->daemons) {
+ XBT_DEBUG("Kill %s", dmon->cname());
+ SIMIX_process_kill(dmon, simix_global->maestro_process);
+ }
}
time = SIMIX_timer_next();
XBT_DEBUG("### time %f, #processes %zu, #to_run %lu", time, simix_global->process_list.size(),
xbt_dynar_length(simix_global->process_to_run));
- /* If only daemon processes remain, cancel their actions, mark them to die and reschedule them */
- if (simix_global->process_list.size() == simix_global->daemons.size())
- for (const auto& dmon : simix_global->daemons) {
- XBT_DEBUG("Kill %s", dmon->cname());
- SIMIX_process_kill(dmon, simix_global->maestro_process);
- }
if (xbt_dynar_is_empty(simix_global->process_to_run) &&
!simix_global->process_list.empty())
// utilities
extern XBT_PRIVATE double smpi_cpu_threshold;
extern XBT_PRIVATE double smpi_host_speed;
-extern XBT_PRIVATE bool smpi_privatize_global_variables;
+
+#define SMPI_PRIVATIZE_NONE 0
+#define SMPI_PRIVATIZE_MMAP 1
+#define SMPI_PRIVATIZE_DLOPEN 2
+#define SMPI_PRIVATIZE_DEFAULT SMPI_PRIVATIZE_MMAP
+extern XBT_PRIVATE int smpi_privatize_global_variables;
+
extern XBT_PRIVATE char* smpi_start_data_exe; //start of the data+bss segment of the executable
extern XBT_PRIVATE int smpi_size_data_exe; //size of the data+bss segment of the executable
void smpi_bench_begin()
{
- if (smpi_privatize_global_variables) {
+ if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
smpi_switch_data_segment(smpi_process()->index());
}
}
int Comm::dup(MPI_Comm* newcomm){
- if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process()->index());
}
MPI_Group cp = new Group(this->group());
smpi_process()->set_replaying(false);
}
- if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process()->index());
}
//identify neighbours in comm
Coll_allgather_mpich::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
- if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process()->index());
}
}
Coll_bcast_mpich::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );
- if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process()->index());
}
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
}
#endif
- if(smpi_privatize_global_variables){
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
smpi_switch_data_segment(smpi_process()->index());
}
/* First check if we really have something to do */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
+#include <spawn.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <dlfcn.h>
+
#include "mc/mc.h"
#include "private.h"
#include "private.hpp"
#include <stdio.h>
#include <stdlib.h>
#include <string>
+#include <utility>
#include <vector>
+#include <memory>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_kernel, smpi, "Logging specific to SMPI (kernel)");
#include <boost/tokenizer.hpp>
#include <boost/algorithm/string.hpp> /* trim_right / trim_left */
+#ifndef RTLD_DEEPBIND
+/* RTLD_DEEPBIND is a bad idea of GNU ld that obviously does not exist on other platforms
+ * See https://www.akkadia.org/drepper/dsohowto.pdf
+ * and https://lists.freebsd.org/pipermail/freebsd-current/2016-March/060284.html
+*/
+#define RTLD_DEEPBIND 0
+#endif
+
+/* Mac OSX does not have any header file providing that definition so we have to duplicate it here. Bummers. */
+extern char** environ; /* we use it in posix_spawnp below */
+
#if HAVE_PAPI
#include "papi.h"
const char* papi_default_config_name = "default";
*/
check_blocks(private_blocks, buff_size);
void* tmpbuff=buff;
- if((smpi_privatize_global_variables) && (static_cast<char*>(buff) >= smpi_start_data_exe)
+ if((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && (static_cast<char*>(buff) >= smpi_start_data_exe)
&& (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )
){
XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
memcpy_private(tmpbuff, buff, buff_size, private_blocks);
}
- if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
+ if((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && ((char*)comm->dst_buff >= smpi_start_data_exe)
&& ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
smpi_switch_data_segment(
(static_cast<simgrid::smpi::Process*>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index()));
}
-
XBT_DEBUG("Copying %zu bytes from %p to %p", buff_size, tmpbuff,comm->dst_buff);
memcpy_private(comm->dst_buff, tmpbuff, buff_size, private_blocks);
void smpi_global_init()
{
- int i;
MPI_Group group;
- int smpirun=0;
if (!MC_is_active()) {
global_timer = xbt_os_timer_new();
}
}
#endif
+
+ int smpirun = 0;
+ msg_bar_t finalization_barrier = nullptr;
if (process_count == 0){
process_count = SIMIX_process_count();
smpirun=1;
+ finalization_barrier = MSG_barrier_init(process_count);
}
smpi_universe_size = process_count;
process_data = new simgrid::smpi::Process*[process_count];
- for (i = 0; i < process_count; i++) {
- process_data[i] = new simgrid::smpi::Process(i);
+ for (int i = 0; i < process_count; i++) {
+ process_data[i] = new simgrid::smpi::Process(i, finalization_barrier);
}
//if the process was launched through smpirun script we generate a global mpi_comm_world
//if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
- if(smpirun){
+ if (smpirun) {
group = new simgrid::smpi::Group(process_count);
MPI_COMM_WORLD = new simgrid::smpi::Comm(group, nullptr);
MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
- msg_bar_t bar = MSG_barrier_init(process_count);
- for (i = 0; i < process_count; i++) {
+ for (int i = 0; i < process_count; i++)
group->set_mapping(i, i);
- process_data[i]->set_finalization_barrier(bar);
- }
}
}
}
xbt_free(index_to_process_data);
- if(smpi_privatize_global_variables)
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
smpi_destroy_global_memory_segments();
smpi_free_static();
}
extern "C" {
-#ifndef WIN32
-
-void __attribute__ ((weak)) user_main_()
-{
- xbt_die("Should not be in this smpi_simulated_main");
-}
-
-int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
-{
- simgrid::smpi::Process::init(&argc, &argv);
- user_main_();
- return 0;
-}
-
-inline static int smpi_main_wrapper(int argc, char **argv){
- int ret = smpi_simulated_main_(argc,argv);
- if(ret !=0){
- XBT_WARN("SMPI process did not return 0. Return value : %d", ret);
- smpi_process()->set_return_value(ret);
- }
- return 0;
-}
-
-int __attribute__ ((weak)) main(int argc, char **argv)
-{
- return smpi_main(smpi_main_wrapper, argc, argv);
-}
-
-#endif
-
static void smpi_init_logs(){
/* Connect log categories. See xbt/log.c */
simgrid::smpi::Colls::smpi_coll_cleanup_callback=nullptr;
smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
- smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
+ const char* smpi_privatize_option = xbt_cfg_get_string("smpi/privatize-global-variables");
+ if (std::strcmp(smpi_privatize_option, "no") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_NONE;
+ else if (std::strcmp(smpi_privatize_option, "yes") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_DEFAULT;
+ else if (std::strcmp(smpi_privatize_option, "mmap") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_MMAP;
+ else if (std::strcmp(smpi_privatize_option, "dlopen") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_DLOPEN;
+
+ // Some compatibility stuff:
+ else if (std::strcmp(smpi_privatize_option, "1") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_DEFAULT;
+ else if (std::strcmp(smpi_privatize_option, "0") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_NONE;
+
+ else
+ xbt_die("Invalid value for smpi/privatize-global-variables: %s",
+ smpi_privatize_option);
+
if (smpi_cpu_threshold < 0)
smpi_cpu_threshold = DBL_MAX;
}
}
-int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
+/* Spawn the given NULL-terminated argv as a child process and wait for it.
+ * Returns the raw status from waitpid() on success, or 127 (the shell's
+ * "command not found" convention) if spawning or waiting fails. */
+static int execute_command(const char * const argv[])
+{
+  pid_t pid;
+  int status;
+  // posix_spawnp() searches PATH for argv[0]; environ passes our environment on
+  if (posix_spawnp(&pid, argv[0], nullptr, nullptr, (char* const*) argv, environ) != 0)
+    return 127;
+  if (waitpid(pid, &status, 0) != pid)
+    return 127;
+  return status;
+}
+
+typedef std::function<int(int argc, char *argv[])> smpi_entry_point_type;
+typedef int (* smpi_c_entry_point_type)(int argc, char **argv);
+typedef void (* smpi_fortran_entry_point_type)(void);
+
+/* Build a C-style, NULL-terminated argv from `args` and invoke `entry_point`
+ * with it. A non-zero return value from the simulated main is logged and
+ * recorded on the SMPI process, but this wrapper itself always returns 0. */
+static int smpi_run_entry_point(smpi_entry_point_type entry_point, std::vector<std::string> args)
+{
+  const int argc = args.size();
+  std::unique_ptr<char*[]> argv(new char*[argc + 1]);
+  for (int i = 0; i != argc; ++i)
+    // NOTE(review): an empty arg maps to a const_cast'ed string literal;
+    // writing through it would be undefined behavior — assumed read-only.
+    argv[i] = args[i].empty() ? const_cast<char*>(""): &args[i].front();
+  argv[argc] = nullptr; // argv is conventionally NULL-terminated
+
+  int res = entry_point(argc, argv.get());
+  if (res != 0){
+    XBT_WARN("SMPI process did not return 0. Return value : %d", res);
+    smpi_process()->set_return_value(res);
+  }
+  return 0;
+}
+
+// TODO, remove the number of functions involved here
+/* Locate the simulated application's entry point inside a dlopen()ed handle.
+ * Tries the Fortran symbol "user_main_" first (wrapping it so SMPI is
+ * initialized before the call), then the plain C "main". Returns an empty
+ * std::function (falsy) when neither symbol is found. */
+static smpi_entry_point_type smpi_resolve_function(void* handle)
+{
+  smpi_fortran_entry_point_type entry_point2 =
+    (smpi_fortran_entry_point_type) dlsym(handle, "user_main_");
+  if (entry_point2 != nullptr) {
+    // fprintf(stderr, "EP user_main_=%p\n", entry_point2);
+    // Fortran mains take no arguments, so argc/argv are consumed by the init
+    return [entry_point2](int argc, char** argv) {
+      smpi_process_init(&argc, &argv);
+      entry_point2();
+      return 0;
+    };
+  }
+
+  smpi_c_entry_point_type entry_point = (smpi_c_entry_point_type) dlsym(handle, "main");
+  if (entry_point != nullptr) {
+    // fprintf(stderr, "EP main=%p\n", entry_point);
+    return entry_point;
+  }
+
+  return smpi_entry_point_type();
+}
+
+int smpi_main(const char* executable, int argc, char *argv[])
{
srand(SMPI_RAND_SEED);
// parse the platform file: get the host list
SIMIX_create_environment(argv[1]);
- SIMIX_comm_set_copy_data_callback(smpi_comm_copy_data_callback);
- SIMIX_function_register_default(realmain);
+ SIMIX_comm_set_copy_data_callback(smpi_comm_copy_buffer_callback);
+
+ static std::size_t rank = 0;
+
+ if (smpi_privatize_global_variables == SMPI_PRIVATIZE_DLOPEN) {
+
+ std::string executable_copy = executable;
+ simix_global->default_function = [executable_copy](std::vector<std::string> args) {
+ return std::function<void()>([executable_copy, args] {
+
+ // Copy the dynamic library:
+ std::string target_executable = executable_copy
+ + "_" + std::to_string(getpid())
+ + "_" + std::to_string(rank++) + ".so";
+ // TODO, execute directly instead of relying on cp
+ const char* command1 [] = {
+ "cp", "--reflink=auto", "--", executable_copy.c_str(), target_executable.c_str(),
+ nullptr
+ };
+ const char* command2 [] = {
+ "cp", "--", executable_copy.c_str(), target_executable.c_str(),
+ nullptr
+ };
+ if (execute_command(command1) != 0 && execute_command(command2) != 0)
+ xbt_die("copy failed");
+
+ // Load the copy and resolve the entry point:
+ void* handle = dlopen(target_executable.c_str(), RTLD_LAZY | RTLD_LOCAL | RTLD_DEEPBIND);
+ unlink(target_executable.c_str());
+ if (handle == nullptr)
+ xbt_die("dlopen failed");
+ smpi_entry_point_type entry_point = smpi_resolve_function(handle);
+ if (!entry_point)
+ xbt_die("Could not resolve entry point");
+
+ smpi_run_entry_point(entry_point, args);
+ });
+ };
+
+ }
+ else {
+
+ // Load the dynamic library and resolve the entry point:
+ void* handle = dlopen(executable, RTLD_LAZY | RTLD_LOCAL | RTLD_DEEPBIND);
+ if (handle == nullptr)
+ xbt_die("dlopen failed for %s", executable);
+ smpi_entry_point_type entry_point = smpi_resolve_function(handle);
+ if (!entry_point)
+ xbt_die("main not found in %s", executable);
+ // TODO, register the executable for SMPI privatization
+
+ // Execute the same entry point for each simulated process:
+ simix_global->default_function = [entry_point](std::vector<std::string> args) {
+ return std::function<void()>([entry_point, args] {
+ smpi_run_entry_point(entry_point, args);
+ });
+ };
+
+ }
+
SIMIX_launch_application(argv[2]);
smpi_global_init();
smpi_check_options();
- if(smpi_privatize_global_variables)
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
smpi_initialize_global_memory_segments();
/* Clean IO before the run */
smpi_check_options();
if (TRACE_is_enabled() && TRACE_is_configured())
TRACE_smpi_alloc();
- if(smpi_privatize_global_variables)
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
smpi_initialize_global_memory_segments();
}
--- /dev/null
+/* Copyright (c) 2007-2015. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <smpi/smpi.h>
+
+/* smpirun launcher stub: argv[1] names the SMPI program to simulate
+ * (loaded via dlopen by smpi_main — see the dlopen privatization code);
+ * the remaining arguments are forwarded to the simulated program. */
+int main(int argc, char **argv)
+{
+  if (argc < 2) {
+    fprintf(stderr, "Missing SMPI program to launch\n");
+    exit(1);
+  }
+  // Shift argv by one so the launched program sees itself as argv[0]
+  return smpi_main(argv[1], argc - 1, argv + 1);
+}
int smpi_loaded_page = -1;
char* smpi_start_data_exe = nullptr;
int smpi_size_data_exe = 0;
-bool smpi_privatize_global_variables;
+int smpi_privatize_global_variables;
static const int PROT_RWX = (PROT_READ | PROT_WRITE | PROT_EXEC);
static const int PROT_RW = (PROT_READ | PROT_WRITE );
WRAPPED_PMPI_CALL(int,MPI_Comm_size,(MPI_Comm comm, int *size),(comm, size))
WRAPPED_PMPI_CALL(int,MPI_Comm_split,(MPI_Comm comm, int color, int key, MPI_Comm* comm_out),(comm, color, key, comm_out))
WRAPPED_PMPI_CALL(int,MPI_Comm_create_group,(MPI_Comm comm, MPI_Group group, int tag, MPI_Comm* comm_out),(comm, group, tag, comm_out))
+WRAPPED_PMPI_CALL(int,MPI_Compare_and_swap,(void *origin_addr, void *compare_addr,
+ void *result_addr, MPI_Datatype datatype, int target_rank, MPI_Aint target_disp, MPI_Win win), (origin_addr, compare_addr, result_addr, datatype, target_rank, target_disp, win));
WRAPPED_PMPI_CALL(int,MPI_Dims_create,(int nnodes, int ndims, int* dims) ,(nnodes, ndims, dims))
WRAPPED_PMPI_CALL(int,MPI_Error_class,(int errorcode, int* errorclass) ,(errorcode, errorclass))
WRAPPED_PMPI_CALL(int,MPI_Exscan,(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm),(sendbuf, recvbuf, count, datatype, op, comm))
WRAPPED_PMPI_CALL(int,MPI_Get_version ,(int *version,int *subversion),(version,subversion))
WRAPPED_PMPI_CALL(int,MPI_Get,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win),(origin_addr,origin_count, origin_datatype,target_rank, target_disp, target_count,target_datatype,win))
WRAPPED_PMPI_CALL(int,MPI_Get_accumulate, (void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win),(origin_addr, origin_count, origin_datatype, result_addr, result_count, result_datatype, target_rank, target_disp, target_count, target_datatype, op, win))
+WRAPPED_PMPI_CALL(int,MPI_Fetch_and_op, (void *origin_addr, void *result_addr, MPI_Datatype datatype, int target_rank, MPI_Aint target_disp, MPI_Op op, MPI_Win win),(origin_addr, result_addr, datatype, target_rank, target_disp, op, win))
WRAPPED_PMPI_CALL(int,MPI_Group_compare,(MPI_Group group1, MPI_Group group2, int *result),(group1, group2, result))
WRAPPED_PMPI_CALL(int,MPI_Group_difference,(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup),(group1, group2, newgroup))
WRAPPED_PMPI_CALL(int,MPI_Group_excl,(MPI_Group group, int n, int *ranks, MPI_Group * newgroup),(group, n, ranks, newgroup))
WRAPPED_PMPI_CALL(int,MPI_Probe,(int source, int tag, MPI_Comm comm, MPI_Status* status) ,(source, tag, comm, status))
WRAPPED_PMPI_CALL(int,MPI_Put,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win),(origin_addr,origin_count, origin_datatype,target_rank,target_disp, target_count,target_datatype, win))
WRAPPED_PMPI_CALL(int,MPI_Query_thread,(int *provided),(provided))
+WRAPPED_PMPI_CALL(int,MPI_Raccumulate,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request),( origin_addr,origin_count, origin_datatype,target_rank,target_disp, target_count,target_datatype,op, win, request))
WRAPPED_PMPI_CALL(int,MPI_Recv_init,(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request),(buf, count, datatype, src, tag, comm, request))
WRAPPED_PMPI_CALL(int,MPI_Recv,(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status),(buf, count, datatype, src, tag, comm, status))
WRAPPED_PMPI_CALL(int,MPI_Reduce_local,(void *inbuf, void *inoutbuf, int count, MPI_Datatype datatype, MPI_Op op),(inbuf, inoutbuf, count, datatype, op))
WRAPPED_PMPI_CALL(int,MPI_Reduce_scatter,(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm),(sendbuf, recvbuf, recvcounts, datatype, op, comm))
WRAPPED_PMPI_CALL(int,MPI_Reduce,(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm),(sendbuf, recvbuf, count, datatype, op, root, comm))
WRAPPED_PMPI_CALL(int,MPI_Request_free,(MPI_Request * request),(request))
+WRAPPED_PMPI_CALL(int,MPI_Rget,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request),(origin_addr,origin_count, origin_datatype,target_rank, target_disp, target_count,target_datatype,win, request))
+WRAPPED_PMPI_CALL(int,MPI_Rget_accumulate, (void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request),(origin_addr, origin_count, origin_datatype, result_addr, result_count, result_datatype, target_rank, target_disp, target_count, target_datatype, op, win, request))
+WRAPPED_PMPI_CALL(int,MPI_Rput,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request),(origin_addr,origin_count, origin_datatype,target_rank,target_disp, target_count,target_datatype, win, request))
WRAPPED_PMPI_CALL(int,MPI_Scan,(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm),(sendbuf, recvbuf, count, datatype, op, comm))
WRAPPED_PMPI_CALL(int,MPI_Scatter,(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype,int root, MPI_Comm comm),(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm))
WRAPPED_PMPI_CALL(int,MPI_Scatterv,(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype sendtype, void *recvbuf, int recvcount,MPI_Datatype recvtype, int root, MPI_Comm comm),(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm))
WRAPPED_PMPI_CALL(int,MPI_Win_wait,(MPI_Win win),(win))
WRAPPED_PMPI_CALL(int,MPI_Win_lock,(int lock_type, int rank, int assert, MPI_Win win) ,(lock_type, rank, assert, win))
WRAPPED_PMPI_CALL(int,MPI_Win_unlock,(int rank, MPI_Win win),(rank, win))
+WRAPPED_PMPI_CALL(int,MPI_Win_lock_all,(int assert, MPI_Win win) ,(assert, win))
+WRAPPED_PMPI_CALL(int,MPI_Win_unlock_all,(MPI_Win win),(win))
+WRAPPED_PMPI_CALL(int,MPI_Win_flush,(int rank, MPI_Win win),(rank, win))
+WRAPPED_PMPI_CALL(int,MPI_Win_flush_local,(int rank, MPI_Win win),(rank, win))
+WRAPPED_PMPI_CALL(int,MPI_Win_flush_all,(MPI_Win win),(win))
+WRAPPED_PMPI_CALL(int,MPI_Win_flush_local_all,(MPI_Win win),(win))
WRAPPED_PMPI_CALL(int,MPI_Win_get_attr, (MPI_Win type, int type_keyval, void *attribute_val, int* flag), (type, type_keyval, attribute_val, flag))
WRAPPED_PMPI_CALL(int,MPI_Win_set_attr, (MPI_Win type, int type_keyval, void *att), (type, type_keyval, att))
WRAPPED_PMPI_CALL(int,MPI_Win_delete_attr, (MPI_Win type, int comm_keyval), (type, comm_keyval))
#define BAND_OP(a, b) (b) &= (a)
#define BOR_OP(a, b) (b) |= (a)
#define BXOR_OP(a, b) (b) ^= (a)
-#define MAXLOC_OP(a, b) (b) = (a.value) < (b.value) ? (b) : (a)
-#define MINLOC_OP(a, b) (b) = (a.value) < (b.value) ? (a) : (b)
+// MAXLOC/MINLOC now break ties on equal values by keeping the smaller index,
+// as the MPI standard requires for MPI_MAXLOC/MPI_MINLOC reductions.
+#define MAXLOC_OP(a, b) (b) = (a.value) < (b.value) ? (b) : ((a.value) == (b.value) ? ((a.index) < (b.index) ? (a) : (b)) : (a))
+#define MINLOC_OP(a, b) (b) = (a.value) < (b.value) ? (a) : ((a.value) == (b.value) ? ((a.index) < (b.index) ? (a) : (b)) : (b))
#define APPLY_FUNC(a, b, length, type, func) \
{ \
smpi_switch_data_segment(smpi_process()->index());
}
- if(!smpi_process()->replaying()){
+ if(!smpi_process()->replaying() && *len > 0){
if(! is_fortran_op_)
this->func_(invec, inoutvec, len, &datatype);
else{
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
TRACE_smpi_finalize(smpi_process()->index());
- smpi_process()->destroy();
return MPI_SUCCESS;
}
int PMPI_Abort(MPI_Comm comm, int errorcode)
{
smpi_bench_end();
- smpi_process()->destroy();
// FIXME: should kill all processes in comm instead
simcall_process_kill(SIMIX_process_self());
return MPI_SUCCESS;
int retval = 0;
smpi_bench_end();
- if ((flag == nullptr) || (status == nullptr)) {
+ if (flag == nullptr) {
retval = MPI_ERR_ARG;
} else if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
return retval;
}
+//MPI-3 request-based get: same argument checks as MPI_Get, plus a request handle.
+//NOTE: 'request' must be validated before the MPI_PROC_NULL branch, because that
+//branch dereferences it (*request = MPI_REQUEST_NULL) — previous ordering could
+//dereference a null pointer.
+int PMPI_Rget( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((!origin_datatype->is_valid()) || (!target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                       target_datatype, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
int retval = 0;
return retval;
}
+//MPI-3 request-based put: same argument checks as MPI_Put, plus a request handle.
+//NOTE: 'request' must be validated before the MPI_PROC_NULL branch, because that
+//branch dereferences it (*request = MPI_REQUEST_NULL) — previous ordering could
+//dereference a null pointer.
+int PMPI_Rput( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((!origin_datatype->is_valid()) || (!target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int dst_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr);
+    TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size());
+
+    retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                       target_datatype, request);
+
+    TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
int retval = 0;
return retval;
}
+//MPI-3 request-based accumulate: same argument checks as MPI_Accumulate, plus a
+//request handle. NOTE: 'request' must be validated before the MPI_PROC_NULL
+//branch, because that branch dereferences it — previous ordering could
+//dereference a null pointer.
+int PMPI_Raccumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+              MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((!origin_datatype->is_valid()) ||
+             (!target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                              target_datatype, op, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Get_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
//in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
retval = MPI_ERR_ARG;
} else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
- (origin_addr==nullptr && origin_count > 0) ||
+ (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
(result_addr==nullptr && result_count > 0)){
retval = MPI_ERR_COUNT;
- } else if ((!origin_datatype->is_valid()) ||
+ } else if ((origin_datatype!=MPI_DATATYPE_NULL && !origin_datatype->is_valid()) ||
(!target_datatype->is_valid())||
(!result_datatype->is_valid())) {
retval = MPI_ERR_TYPE;
return retval;
}
+
+//MPI-3 request-based get_accumulate. With MPI_NO_OP the origin buffer is not
+//read, so origin_addr may legitimately be null and origin_datatype may be
+//MPI_DATATYPE_NULL. NOTE: 'request' must be validated before the MPI_PROC_NULL
+//branch, because that branch dereferences it — previous ordering could
+//dereference a null pointer.
+int PMPI_Rget_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
+    int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
+    MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
+             (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
+             (result_addr==nullptr && result_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((origin_datatype!=MPI_DATATYPE_NULL && !origin_datatype->is_valid()) ||
+             (!target_datatype->is_valid())||
+             (!result_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
+                                  result_count, result_datatype, target_rank, target_disp,
+                                  target_count, target_datatype, op, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+//MPI_Fetch_and_op is a single-element MPI_Get_accumulate; forward to it.
+//With MPI_NO_OP callers may pass origin_addr==nullptr, hence the 0/1 origin count.
+int PMPI_Fetch_and_op(void *origin_addr, void *result_addr, MPI_Datatype dtype, int target_rank, MPI_Aint target_disp, MPI_Op op, MPI_Win win){
+  return PMPI_Get_accumulate(origin_addr, origin_addr==nullptr?0:1, dtype, result_addr, 1, dtype, target_rank, target_disp, 1, dtype, op, win);
+}
+
+//Argument checking for MPI_Compare_and_swap; the actual operation is performed
+//by Win::compare_and_swap. MPI_PROC_NULL targets succeed as a no-op.
+int PMPI_Compare_and_swap(void *origin_addr, void *compare_addr,
+  void *result_addr, MPI_Datatype datatype, int target_rank,
+  MPI_Aint target_disp, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (target_rank == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if (origin_addr==nullptr || result_addr==nullptr || compare_addr==nullptr){
+    // all three buffers are mandatory for compare-and-swap
+    retval = MPI_ERR_COUNT;
+  } else if (!datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->compare_and_swap( origin_addr, compare_addr, result_addr, datatype,
+                                    target_rank, target_disp);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Win_post(MPI_Group group, int assert, MPI_Win win){
int retval = 0;
smpi_bench_end();
return retval;
}
+//MPI_Win_lock_all: delegate to Win::lock_all, which takes a shared lock on
+//every rank of the window's communicator.
+int PMPI_Win_lock_all(int assert, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->lock_all(assert);
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+//MPI_Win_unlock_all: delegate to Win::unlock_all, which releases the lock on
+//every rank of the window's communicator.
+int PMPI_Win_unlock_all(MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->unlock_all();
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+//MPI_Win_flush: complete pending RMA operations targeting 'rank' (Win::flush).
+//A MPI_PROC_NULL rank is a successful no-op.
+int PMPI_Win_flush(int rank, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (rank == MPI_PROC_NULL){
+    retval = MPI_SUCCESS;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->flush(rank);
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+//MPI_Win_flush_local: locally complete pending RMA operations targeting 'rank'
+//(Win::flush_local). A MPI_PROC_NULL rank is a successful no-op.
+int PMPI_Win_flush_local(int rank, MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if (rank == MPI_PROC_NULL){
+    retval = MPI_SUCCESS;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->flush_local(rank);
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+//MPI_Win_flush_all: complete pending RMA operations toward all ranks
+//(Win::flush_all).
+int PMPI_Win_flush_all(MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->flush_all();
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+//MPI_Win_flush_local_all: locally complete pending RMA operations toward all
+//ranks (Win::flush_local_all).
+int PMPI_Win_flush_local_all(MPI_Win win){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else {
+    int myrank = smpi_process()->index();
+    TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+    retval = win->flush_local_all();
+    TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr){
void *ptr = xbt_malloc(size);
if(ptr==nullptr)
namespace simgrid{
namespace smpi{
-Process::Process(int index)
+Process::Process(int index, msg_bar_t finalization_barrier)
+ : finalization_barrier_(finalization_barrier)
{
char name[MAILBOX_NAME_MAXLEN];
- index_ = MPI_UNDEFINED;
- argc_ = nullptr;
- argv_ = nullptr;
mailbox_ = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, index));
mailbox_small_ = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, index));
mailboxes_mutex_ = xbt_mutex_init();
timer_ = xbt_os_timer_new();
+ state_ = SMPI_UNINITIALIZED;
if (MC_is_active())
MC_ignore_heap(timer_, xbt_os_timer_size());
- comm_self_ = MPI_COMM_NULL;
- comm_intra_ = MPI_COMM_NULL;
- comm_world_ = nullptr;
- state_ = SMPI_UNINITIALIZED;
- sampling_ = 0;
- finalization_barrier_ = nullptr;
- return_value_ = 0;
#if HAVE_PAPI
if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
#endif
}
-void Process::set_data(int index, int *argc, char ***argv)
+void Process::set_data(int index, int* argc, char*** argv)
{
-
char* instance_id = (*argv)[1];
comm_world_ = smpi_deployment_comm_world(instance_id);
msg_bar_t bar = smpi_deployment_finalization_barrier(instance_id);
if (bar!=nullptr) // don't overwrite the default one
finalization_barrier_ = bar;
- index_ = index;
instance_id_ = instance_id;
- replaying_ = false;
+ index_ = index;
static_cast<simgrid::MsgActorExt*>(SIMIX_process_self()->data)->data = this;
argv_ = argv;
// set the process attached to the mailbox
mailbox_small_->setReceiver(simgrid::s4u::Actor::self());
- XBT_DEBUG("<%d> New process in the game: %p", index, SIMIX_process_self());
+ process_ = SIMIX_process_self();
+ XBT_DEBUG("<%d> New process in the game: %p", index_, SIMIX_process_self());
}
-void Process::destroy()
+/** @brief Prepares the current process for termination. */
+void Process::finalize()
{
- if(smpi_privatize_global_variables){
- smpi_switch_data_segment(index_);
- }
state_ = SMPI_FINALIZED;
XBT_DEBUG("<%d> Process left the game", index_);
-}
-/** @brief Prepares the current process for termination. */
-void Process::finalize()
-{
// This leads to an explosion of the search graph which cannot be reduced:
if(MC_is_active() || MC_record_replay_is_active())
return;
return false;
}
-void Process::set_user_data(void *data)
-{
- data_ = data;
-}
-
-void *Process::get_user_data()
-{
- return data_;
+//Underlying simix actor of this MPI process (stored by Process::set_data()).
+smx_actor_t Process::process(){
+  return process_;
+}
return sampling_;
}
-void Process::set_finalization_barrier(msg_bar_t bar){
- finalization_barrier_=bar;
-}
-
msg_bar_t Process::finalization_barrier(){
return finalization_barrier_;
}
int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
smpi_deployment_register_process(instance_id, rank, index);
- if(smpi_privatize_global_variables){
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
/* Now using segment index of the process */
index = proc->segment_index;
/* Done at the process's creation */
class Process {
private:
- double simulated_;
- int *argc_;
- char ***argv_;
+ double simulated_ = 0 /* Used to time with simulated_start/elapsed */;
+ int* argc_ = nullptr;
+ char*** argv_ = nullptr;
simgrid::s4u::MailboxPtr mailbox_;
simgrid::s4u::MailboxPtr mailbox_small_;
xbt_mutex_t mailboxes_mutex_;
xbt_os_timer_t timer_;
- MPI_Comm comm_self_;
- MPI_Comm comm_intra_;
- MPI_Comm* comm_world_;
- void *data_; /* user data */
- int index_;
+ MPI_Comm comm_self_ = MPI_COMM_NULL;
+ MPI_Comm comm_intra_ = MPI_COMM_NULL;
+ MPI_Comm* comm_world_ = nullptr;
+ int index_ = MPI_UNDEFINED;
char state_;
- int sampling_; /* inside an SMPI_SAMPLE_ block? */
- char* instance_id_;
- bool replaying_; /* is the process replaying a trace */
+ int sampling_ = 0; /* inside an SMPI_SAMPLE_ block? */
+ char* instance_id_ = nullptr;
+ bool replaying_ = false; /* is the process replaying a trace */
msg_bar_t finalization_barrier_;
- int return_value_;
+ int return_value_ = 0;
smpi_trace_call_location_t trace_call_loc_;
+ smx_actor_t process_ = nullptr;
#if HAVE_PAPI
/** Contains hardware data as read by PAPI **/
int papi_event_set_;
papi_counter_t papi_counter_data_;
#endif
public:
- explicit Process(int index);
- void destroy();
- void set_data(int index, int *argc, char ***argv);
+ explicit Process(int index, msg_bar_t barrier);
+ void set_data(int index, int* argc, char*** argv);
void finalize();
int finalized();
int initialized();
void mark_as_initialized();
void set_replaying(bool value);
bool replaying();
- void set_user_data(void *data);
- void *get_user_data();
smpi_trace_call_location_t* call_location();
int index();
MPI_Comm comm_world();
void set_sampling(int s);
int sampling();
msg_bar_t finalization_barrier();
- void set_finalization_barrier(msg_bar_t bar);
int return_value();
void set_return_value(int val);
static void init(int *argc, char ***argv);
+ smx_actor_t process();
};
TRACE_smpi_collective_out(rank, -1, operation);
TRACE_smpi_finalize(smpi_process()->index());
- smpi_process()->destroy();
xbt_free(operation);
}
if(((((flags & RECV) != 0) && ((flags & ACCUMULATE) !=0)) || (datatype->flags() & DT_FLAG_DERIVED))) { // && (!smpi_is_shared(buf_))){
// This part handles the problem of non-contiguous memory
old_buf = buf;
- buf_ = count==0 ? nullptr : xbt_malloc(count*datatype->size());
- if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & SEND) != 0)) {
- datatype->serialize(old_buf, buf_, count);
+ if (count==0){
+ buf_ = nullptr;
+ }else {
+ buf_ = xbt_malloc(count*datatype->size());
+ if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & SEND) != 0)) {
+ datatype->serialize(old_buf, buf_, count);
+ }
}
}
// This part handles the problem of non-contiguous memory (for the unserialisation at the reception)
if ((flags_ & RECV) != 0) {
this->print_request("New recv");
+ simgrid::smpi::Process* process = smpi_process_remote(dst_);
+
int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
- xbt_mutex_t mut = smpi_process()->mailboxes_mutex();
+ xbt_mutex_t mut = process->mailboxes_mutex();
if (async_small_thresh != 0 || (flags_ & RMA) != 0)
xbt_mutex_acquire(mut);
if (async_small_thresh == 0 && (flags_ & RMA) == 0 ) {
- mailbox = smpi_process()->mailbox();
+ mailbox = process->mailbox();
}
else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) {
//We have to check both mailboxes (because SSEND messages are sent to the large mbox).
//begin with the more appropriate one : the small one.
- mailbox = smpi_process()->mailbox_small();
+ mailbox = process->mailbox_small();
XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
smx_activity_t action = simcall_comm_iprobe(mailbox, 0, src_,tag_, &match_recv,
static_cast<void*>(this));
if (action == nullptr) {
- mailbox = smpi_process()->mailbox();
+ mailbox = process->mailbox();
XBT_DEBUG("No, nothing in the small mailbox test the other one : %p", mailbox);
action = simcall_comm_iprobe(mailbox, 0, src_,tag_, &match_recv, static_cast<void*>(this));
if (action == nullptr) {
XBT_DEBUG("Still nothing, switch back to the small mailbox : %p", mailbox);
- mailbox = smpi_process()->mailbox_small();
+ mailbox = process->mailbox_small();
}
} else {
XBT_DEBUG("yes there was something for us in the large mailbox");
}
} else {
- mailbox = smpi_process()->mailbox_small();
+ mailbox = process->mailbox_small();
XBT_DEBUG("Is there a corresponding send already posted the small mailbox?");
smx_activity_t action = simcall_comm_iprobe(mailbox, 0, src_,tag_, &match_recv, static_cast<void*>(this));
if (action == nullptr) {
XBT_DEBUG("No, nothing in the permanent receive mailbox");
- mailbox = smpi_process()->mailbox();
+ mailbox = process->mailbox();
} else {
XBT_DEBUG("yes there was something for us in the small mailbox");
}
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
real_size_=size_;
- action_ = simcall_comm_irecv(SIMIX_process_self(), mailbox, buf_, &real_size_, &match_recv,
- ! smpi_process()->replaying()? smpi_comm_copy_data_callback
+ action_ = simcall_comm_irecv(process->process(), mailbox, buf_, &real_size_, &match_recv,
+ ! process->replaying()? smpi_comm_copy_data_callback
: &smpi_comm_null_copy_buffer_callback, this, -1.0);
XBT_DEBUG("recv simcall posted");
if (async_small_thresh != 0 || (flags_ & RMA) != 0 )
xbt_mutex_release(mut);
} else { /* the RECV flag was not set, so this is a send */
- int receiver = dst_;
-
+ simgrid::smpi::Process* process = smpi_process_remote(dst_);
int rank = src_;
if (TRACE_smpi_view_internals()) {
- TRACE_smpi_send(rank, rank, receiver, tag_, size_);
+ TRACE_smpi_send(rank, rank, dst_, tag_, size_);
}
this->print_request("New send");
refcount_++;
if(!(old_type_->flags() & DT_FLAG_DERIVED)){
oldbuf = buf_;
- if (!smpi_process()->replaying() && oldbuf != nullptr && size_!=0){
- if((smpi_privatize_global_variables != 0)
+ if (!process->replaying() && oldbuf != nullptr && size_!=0){
+ if((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
&& (static_cast<char*>(buf_) >= smpi_start_data_exe)
&& (static_cast<char*>(buf_) < smpi_start_data_exe + smpi_size_data_exe )){
XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
- xbt_mutex_t mut=smpi_process_remote(receiver)->mailboxes_mutex();
+ xbt_mutex_t mut=process->mailboxes_mutex();
if (async_small_thresh != 0 || (flags_ & RMA) != 0)
xbt_mutex_acquire(mut);
if (!(async_small_thresh != 0 || (flags_ & RMA) !=0)) {
- mailbox = smpi_process_remote(receiver)->mailbox();
+ mailbox = process->mailbox();
} else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) { // eager mode
- mailbox = smpi_process_remote(receiver)->mailbox();
+ mailbox = process->mailbox();
XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
smx_activity_t action = simcall_comm_iprobe(mailbox, 1,dst_, tag_, &match_send,
static_cast<void*>(this));
if (action == nullptr) {
if ((flags_ & SSEND) == 0){
- mailbox = smpi_process_remote(receiver)->mailbox_small();
+ mailbox = process->mailbox_small();
XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
} else {
- mailbox = smpi_process_remote(receiver)->mailbox_small();
+ mailbox = process->mailbox_small();
XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
action = simcall_comm_iprobe(mailbox, 1,dst_, tag_, &match_send, static_cast<void*>(this));
if (action == nullptr) {
XBT_DEBUG("No, we are first, send to large mailbox");
- mailbox = smpi_process_remote(receiver)->mailbox();
+ mailbox = process->mailbox();
}
}
} else {
XBT_DEBUG("Yes there was something for us in the large mailbox");
}
} else {
- mailbox = smpi_process_remote(receiver)->mailbox();
+ mailbox = process->mailbox();
XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)",mailbox, this,buf_);
}
action_ = simcall_comm_isend(SIMIX_process_from_PID(src_+1), mailbox, size_, -1.0,
buf, real_size_, &match_send,
&xbt_free_f, // how to free the userdata if a detached send fails
- !smpi_process()->replaying() ? smpi_comm_copy_data_callback
+ !process->replaying() ? smpi_comm_copy_data_callback
: &smpi_comm_null_copy_buffer_callback, this,
// detach if msg size < eager/rdv switch limit
detached_);
if((((req->flags_ & ACCUMULATE) != 0) || (datatype->flags() & DT_FLAG_DERIVED))){// && (!smpi_is_shared(req->old_buf_))){
if (!smpi_process()->replaying()){
- if( smpi_privatize_global_variables != 0 && (static_cast<char*>(req->old_buf_) >= smpi_start_data_exe)
+ if( smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP && (static_cast<char*>(req->old_buf_) >= smpi_start_data_exe)
&& ((char*)req->old_buf_ < smpi_start_data_exe + smpi_size_data_exe )){
XBT_VERB("Privatization : We are unserializing to a zone in global memory Switch data segment ");
smpi_switch_data_segment(smpi_process()->index());
datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size() , req->op_);
xbt_free(req->buf_);
}else if(req->flags_ & RECV){//apply op on contiguous buffer for accumulate
- int n =req->real_size_/datatype->size();
- req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
+ if(datatype->size()!=0){
+ int n =req->real_size_/datatype->size();
+ req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
+ }
xbt_free(req->buf_);
}
}
requests_ = new std::vector<MPI_Request>();
mut_=xbt_mutex_init();
lock_mut_=xbt_mutex_init();
+ atomic_mut_=xbt_mutex_init();
connected_wins_ = new MPI_Win[comm_size];
connected_wins_[rank_] = this;
count_ = 0;
MSG_barrier_destroy(bar_);
xbt_mutex_destroy(mut_);
xbt_mutex_destroy(lock_mut_);
+ xbt_mutex_destroy(atomic_mut_);
if(allocated_ !=0)
xbt_free(base_);
int size = static_cast<int>(reqs->size());
// start all requests that have been prepared by another process
if (size > 0) {
- for (const auto& req : *reqs) {
- if (req && (req->flags() & PREPARED))
- req->start();
- }
-
MPI_Request* treqs = &(*reqs)[0];
-
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
}
count_=0;
xbt_mutex_release(mut_);
}
+
+ if(assert==MPI_MODE_NOSUCCEED)//there should be no ops after this one, tell we are closed.
+ opened_=0;
assert_ = assert;
MSG_barrier_wait(bar_);
}
int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype)
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get receiver pointer
MPI_Win recv_win = connected_wins_[target_rank];
MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, smpi_process()->index(),
comm_->group()->index(target_rank), SMPI_RMA_TAG+1, recv_win->comm_, MPI_OP_NULL);
+ //start send
+ sreq->start();
+
+ if(request!=nullptr){
+ *request=sreq;
+ }else{
+ xbt_mutex_acquire(mut_);
+ requests_->push_back(sreq);
+ xbt_mutex_release(mut_);
+ }
+
//push request to receiver's win
xbt_mutex_acquire(recv_win->mut_);
recv_win->requests_->push_back(rreq);
+ rreq->start();
xbt_mutex_release(recv_win->mut_);
- //start send
- sreq->start();
- //push request to sender's win
- xbt_mutex_acquire(mut_);
- requests_->push_back(sreq);
- xbt_mutex_release(mut_);
}else{
Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype);
+ if(request!=nullptr)
+ *request = MPI_REQUEST_NULL;
}
return MPI_SUCCESS;
}
int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype)
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get sender pointer
MPI_Win send_win = connected_wins_[target_rank];
//start recv
rreq->start();
- //push request to sender's win
- xbt_mutex_acquire(mut_);
- requests_->push_back(rreq);
- xbt_mutex_release(mut_);
+
+ if(request!=nullptr){
+ *request=rreq;
+ }else{
+ xbt_mutex_acquire(mut_);
+ requests_->push_back(rreq);
+ xbt_mutex_release(mut_);
+ }
+
}else{
Datatype::copy(send_addr, target_count, target_datatype, origin_addr, origin_count, origin_datatype);
+ if(request!=nullptr)
+ *request=MPI_REQUEST_NULL;
}
return MPI_SUCCESS;
int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op)
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request)
{
//get receiver pointer
smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG-3-count_, recv_win->comm_, op);
count_++;
+
+ //start send
+ sreq->start();
//push request to receiver's win
xbt_mutex_acquire(recv_win->mut_);
recv_win->requests_->push_back(rreq);
+ rreq->start();
xbt_mutex_release(recv_win->mut_);
- //start send
- sreq->start();
- //push request to sender's win
- xbt_mutex_acquire(mut_);
- requests_->push_back(sreq);
- xbt_mutex_release(mut_);
+ if(request!=nullptr){
+ *request=sreq;
+ }else{
+ xbt_mutex_acquire(mut_);
+ requests_->push_back(sreq);
+ xbt_mutex_release(mut_);
+ }
return MPI_SUCCESS;
}
int Win::get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
- MPI_Datatype target_datatype, MPI_Op op){
+ MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request){
//get sender pointer
MPI_Win send_win = connected_wins_[target_rank];
return MPI_ERR_ARG;
XBT_DEBUG("Entering MPI_Get_accumulate from %d", target_rank);
-
+ //need to be sure ops are correctly ordered, so finish request here ? slow.
+ MPI_Request req;
+ xbt_mutex_acquire(send_win->atomic_mut_);
get(result_addr, result_count, result_datatype, target_rank,
- target_disp, target_count, target_datatype);
- accumulate(origin_addr, origin_count, origin_datatype, target_rank,
- target_disp, target_count, target_datatype, op);
-
+ target_disp, target_count, target_datatype, &req);
+ if (req != MPI_REQUEST_NULL)
+ Request::wait(&req, MPI_STATUS_IGNORE);
+ if(op!=MPI_NO_OP)
+ accumulate(origin_addr, origin_count, origin_datatype, target_rank,
+ target_disp, target_count, target_datatype, op, &req);
+ if (req != MPI_REQUEST_NULL)
+ Request::wait(&req, MPI_STATUS_IGNORE);
+ xbt_mutex_release(send_win->atomic_mut_);
return MPI_SUCCESS;
}
+//One-sided compare-and-swap (MPI-3). Atomicity with respect to other atomic RMA
+//ops on the same target is provided by send_win->atomic_mut_: fetch the current
+//value into result_addr, and only if it equals *compare_addr write origin_addr
+//back with put().
+int Win::compare_and_swap(void *origin_addr, void *compare_addr,
+  void *result_addr, MPI_Datatype datatype, int target_rank,
+  MPI_Aint target_disp){
+  //get sender pointer
+  MPI_Win send_win = connected_wins_[target_rank];
+
+  if(opened_==0){//check that post/start has been done
+    // no fence or start .. lock ok ?
+    int locked=0;
+    for(auto it : send_win->lockers_)
+      if (it == comm_->rank())
+        locked = 1;
+    if(locked != 1)
+      return MPI_ERR_WIN;
+  }
+
+  XBT_DEBUG("Entering MPI_Compare_and_swap with %d", target_rank);
+  MPI_Request req;
+  xbt_mutex_acquire(send_win->atomic_mut_);
+  get(result_addr, 1, datatype, target_rank,
+    target_disp, 1, datatype, &req);
+  if (req != MPI_REQUEST_NULL)
+    Request::wait(&req, MPI_STATUS_IGNORE);
+  // compare the whole extent of the datatype; the put is issued without an
+  // explicit request, so it is tracked in the window's internal request list
+  if(! memcmp (result_addr, compare_addr, datatype->get_extent() )){
+    put(origin_addr, 1, datatype, target_rank,
+      target_disp, 1, datatype);
+  }
+  xbt_mutex_release(send_win->atomic_mut_);
+  return MPI_SUCCESS;
+}
+
int Win::start(MPI_Group group, int assert){
/* From MPI forum advices
The call to MPI_WIN_COMPLETE does not return until the put call has completed at the origin; and the target window
}
int Win::lock(int lock_type, int rank, int assert){
- if(opened_!=0)
- return MPI_ERR_WIN;
-
MPI_Win target_win = connected_wins_[rank];
if ((lock_type == MPI_LOCK_EXCLUSIVE && target_win->mode_ != MPI_LOCK_SHARED)|| target_win->mode_ == MPI_LOCK_EXCLUSIVE){
target_win->lockers_.push_back(comm_->rank());
- int finished = finish_comms();
+ int finished = finish_comms(rank);
XBT_DEBUG("Win_lock %d - Finished %d RMA calls", rank, finished);
-
+ finished = target_win->finish_comms(rank_);
+ XBT_DEBUG("Win_lock target %d - Finished %d RMA calls", rank, finished);
return MPI_SUCCESS;
}
-int Win::unlock(int rank){
- if(opened_!=0)
- return MPI_ERR_WIN;
+// MPI_Win_lock_all: take a SHARED lock on every rank's window of this group.
+// Returns MPI_SUCCESS, or the last non-success code returned by lock().
+int Win::lock_all(int assert){
+ int i=0;
+ int retval = MPI_SUCCESS;
+ for (i=0; i<comm_->size();i++){
+ int ret = this->lock(MPI_LOCK_SHARED, i, assert);
+ if(ret != MPI_SUCCESS)
+ retval = ret;
+ }
+ return retval;
+}
+int Win::unlock(int rank){
MPI_Win target_win = connected_wins_[rank];
int target_mode = target_win->mode_;
target_win->mode_= 0;
xbt_mutex_release(target_win->lock_mut_);
}
- int finished = finish_comms();
+ int finished = finish_comms(rank);
XBT_DEBUG("Win_unlock %d - Finished %d RMA calls", rank, finished);
+ finished = target_win->finish_comms(rank_);
+ XBT_DEBUG("Win_unlock target %d - Finished %d RMA calls", rank, finished);
+ return MPI_SUCCESS;
+}
+
+// MPI_Win_unlock_all: release the lock taken by lock_all() on every rank.
+// Returns MPI_SUCCESS, or the last non-success code returned by unlock().
+int Win::unlock_all(){
+ int i=0;
+ int retval = MPI_SUCCESS;
+ for (i=0; i<comm_->size();i++){
+ int ret = this->unlock(i);
+ if(ret != MPI_SUCCESS)
+ retval = ret;
+ }
+ return retval;
+}
+
+// MPI_Win_flush: complete all outstanding RMA operations between this rank
+// and the target, on both sides (local requests targeting `rank`, and the
+// target's requests involving us).
+int Win::flush(int rank){
+ MPI_Win target_win = connected_wins_[rank];
+ int finished = finish_comms(rank);
+ XBT_DEBUG("Win_flush on local %d - Finished %d RMA calls", rank_, finished);
+ finished = target_win->finish_comms(rank_);
+ XBT_DEBUG("Win_flush on remote %d - Finished %d RMA calls", rank, finished);
+ return MPI_SUCCESS;
+}
+
+// MPI_Win_flush_local: complete only the locally issued RMA operations that
+// involve the given rank; remote completion is not guaranteed.
+int Win::flush_local(int rank){
+ int finished = finish_comms(rank);
+ XBT_DEBUG("Win_flush_local for rank %d - Finished %d RMA calls", rank, finished);
+ return MPI_SUCCESS;
+}
+// MPI_Win_flush_all: complete all outstanding RMA operations of this rank,
+// locally and on every connected window of the group.
+int Win::flush_all(){
+ int i=0;
+ int finished = 0;
+ finished = finish_comms();
+ XBT_DEBUG("Win_flush_all on local - Finished %d RMA calls", finished);
+ for (i=0; i<comm_->size();i++){
+ // Also drain each remote window's requests that involve this rank.
+ finished = connected_wins_[i]->finish_comms(rank_);
+ XBT_DEBUG("Win_flush_all on %d - Finished %d RMA calls", i, finished);
+ }
+ return MPI_SUCCESS;
+}
+
+// MPI_Win_flush_local_all: complete all locally issued RMA operations of
+// this window, regardless of target rank.
+int Win::flush_local_all(){
+ int finished = finish_comms();
+ XBT_DEBUG("Win_flush_local_all - Finished %d RMA calls", finished);
 return MPI_SUCCESS;
}
std::vector<MPI_Request> *reqqs = requests_;
int size = static_cast<int>(reqqs->size());
if (size > 0) {
- // start all requests that have been prepared by another process
- for (const auto& req : *reqqs) {
- if (req && (req->flags() & PREPARED))
- req->start();
- }
-
MPI_Request* treqs = &(*reqqs)[0];
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
reqqs->clear();
return size;
}
+// Wait for completion of every pending RMA request of this window that
+// involves the given rank (as source or destination), under mut_. Requests
+// for other ranks are left pending. Returns the number of requests finished.
+int Win::finish_comms(int rank){
+ xbt_mutex_acquire(mut_);
+ //Finish own requests
+ std::vector<MPI_Request> *reqqs = requests_;
+ int size = static_cast<int>(reqqs->size());
+ if (size > 0) {
+ size = 0;
+ // Collect matching requests in an automatic vector. (A heap-allocated
+ // vector here would leak whenever no request matches the rank, since
+ // the delete would only run on the non-empty path.)
+ std::vector<MPI_Request> myreqqs;
+ std::vector<MPI_Request>::iterator iter = reqqs->begin();
+ while (iter != reqqs->end()){
+ if(((*iter)!=MPI_REQUEST_NULL) && (((*iter)->src() == rank) || ((*iter)->dst() == rank))){
+ myreqqs.push_back(*iter);
+ iter = reqqs->erase(iter);
+ size++;
+ } else {
+ ++iter;
+ }
+ }
+ if(size >0){
+ MPI_Request* treqs = &myreqqs[0];
+ Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
+ myreqqs.clear();
+ }
+ }
+ xbt_mutex_release(mut_);
+ return size;
+}
+
}
}
MPI_Group group_;
int count_; //for ordering the accs
xbt_mutex_t lock_mut_;
+ xbt_mutex_t atomic_mut_;
std::list<int> lockers_;
int rank_; // to identify owner for barriers in MPI_COMM_WORLD
int mode_; // exclusive or shared lock
int disp_unit();
int fence(int assert);
int put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype);
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request=nullptr);
int get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype);
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request=nullptr);
int accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op);
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request=nullptr);
int get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
- MPI_Datatype target_datatype, MPI_Op op);
+ MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request=nullptr);
+ int compare_and_swap(void *origin_addr, void *compare_addr,
+ void *result_addr, MPI_Datatype datatype, int target_rank,
+ MPI_Aint target_disp);
static Win* f2c(int id);
+
int lock(int lock_type, int rank, int assert);
int unlock(int rank);
+ int lock_all(int assert);
+ int unlock_all();
+ int flush(int rank);
+ int flush_local(int rank);
+ int flush_all();
+ int flush_local_all();
int finish_comms();
+ int finish_comms(int rank);
};
list_set CFLAGS
list_set LINKARGS
if [ "@WIN32@" != "1" ]; then
- list_add CFLAGS "-Dmain=smpi_simulated_main_"
- list_add LINKARGS "-lsimgrid"
+ # list_add CFLAGS "-Dmain=smpi_simulated_main_"
+ list_add CFLAGS "-fpic"
+ list_add LINKARGS "-shared" "-lsimgrid"
else
list_add CFLAGS "-include" "@includedir@/smpi/smpi_main.h"
list_add LINKARGS "@libdir@\libsimgrid.dll"
list_set CXXFLAGS
list_set LINKARGS
if [ "@WIN32@" != "1" ]; then
- list_add CXXFLAGS "-Dmain=smpi_simulated_main_"
- list_add LINKARGS "-lsimgrid"
+ # list_add CXXFLAGS "-Dmain=smpi_simulated_main_"
+ list_add CXXFLAGS "-fpic"
+ list_add LINKARGS "-shared" "-lsimgrid"
else
list_add CXXFLAGS "-include" "@includedir@/smpi/smpi_main.h"
list_add LINKARGS "@libdir@\libsimgrid.dll"
@SMPITOOLS_SH@
-list_set FFLAGS "-ff2c" "-fno-second-underscore"
-list_set LINKARGS "-lsimgrid" "-lm" "-lgfortran"
+list_set FFLAGS "-fpic" "-ff2c" "-fno-second-underscore"
+list_set LINKARGS "-shared" "-lsimgrid" "-lm" "-lgfortran"
list_set TMPFILES
main_name=main
@SMPITOOLS_SH@
-list_set FFLAGS "-ff2c" "-fno-second-underscore"
-list_set LINKARGS "-lsimgrid" "-lm" "-lgfortran"
+list_set FFLAGS "-fpic" "-ff2c" "-fno-second-underscore"
+list_set LINKARGS "-shared" "-lsimgrid" "-lm" "-lgfortran"
list_set TMPFILES
main_name=main
exit
fi
-if [ -n "$WRAPPER" ]; then
- EXEC="$WRAPPER $1"
-else
- EXEC="$1"
-fi
+EXEC="$1"
shift
# steel --cfg and --logs options
export SMPI_GLOBAL_SIZE=${NUMPROCS}
if [ -n "${KEEP}" ] ; then
- echo ${EXEC} ${TRACEOPTIONS} ${SIMOPTS} ${PRIVATIZE} ${PLATFORMTMP} ${APPLICATIONTMP}
+ echo ${EXEC} ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP}
if [ ${HOSTFILETMP} = 1 ] ; then
echo "Generated hostfile ${HOSTFILE} kept."
fi
# * The FD 3 is used to temporarily store FD 1. This is because the shell connects FD 1 to /dev/null when the command
# is launched in the background: this can be overriden in bash but not in standard bourne shell.
exec 3<&0
-${EXEC} ${TRACEOPTIONS} ${SIMOPTS} ${PRIVATIZE} ${PLATFORMTMP} ${APPLICATIONTMP} <&3 3>&- &
+${WRAPPER} "@SMPIMAIN@" ${EXEC} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP} <&3 3>&- &
pid=$!
exec 3>&-
wait $pid
#
if [ ${status} -ne 0 ] ; then
if [ -z ${KEEP} ]; then
- echo ${EXEC} ${TRACEOPTIONS} ${SIMOPTS} ${PRIVATIZE} ${PLATFORMTMP} ${APPLICATIONTMP}
+ echo ${EXEC} ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP}
if [ ${HOSTFILETMP} = 1 ] ; then
echo "Generated hostfile ${HOSTFILE} kept."
fi
double getCurrentLoad();
double getComputedFlops();
+ double getAverageLoad();
void update();
void reset();
private:
simgrid::s4u::Host* host = nullptr;
double last_updated = 0;
+ double last_reset = 0;
double current_flops = 0;
double computed_flops = 0;
};
simgrid::xbt::Extension<simgrid::s4u::Host, HostLoad> HostLoad::EXTENSION_ID;
HostLoad::HostLoad(simgrid::s4u::Host* ptr)
- : host(ptr), last_updated(surf_get_clock()), current_flops(lmm_constraint_get_usage(host->pimpl_cpu->constraint()))
+ : host(ptr)
+ , last_updated(surf_get_clock())
+ , last_reset(surf_get_clock())
+ , current_flops(lmm_constraint_get_usage(host->pimpl_cpu->constraint()))
{
}
return current_flops / (host->speed() * host->coreCount());
}
+// Average load since the last reset: fraction of the host's total capacity
+// (speed * core count) actually consumed over the elapsed simulated time.
+double HostLoad::getAverageLoad()
+{
+ double elapsed = surf_get_clock() - last_reset;
+ if (elapsed <= 0)
+ return 0.; // Queried at reset time: no history yet; avoid a 0/0 NaN.
+ return getComputedFlops() / (host->speed() * host->coreCount() * elapsed);
+}
+
double HostLoad::getComputedFlops()
{
update();
void HostLoad::reset()
{
last_updated = surf_get_clock();
+ last_reset = surf_get_clock();
computed_flops = 0;
}
}
xbt_dictelm_t current;
xbt_dictelm_t previous = nullptr;
+ xbt_assert(!free_ctn, "Cannot set an individual free function in homogeneous dicts.");
XBT_CDEBUG(xbt_dict, "ADD %.*s hash = %u, size = %d, & = %u", key_len, key, hash_code,
dict->table_size, hash_code & dict->table_size);
current = dict->table[hash_code & dict->table_size];
if (current == nullptr) {
/* this key doesn't exist yet */
- current = xbt_dictelm_new(dict, key, key_len, hash_code, data, free_ctn);
+ current = xbt_dictelm_new(key, key_len, hash_code, data);
dict->count++;
if (previous == nullptr) {
dict->table[hash_code & dict->table_size] = current;
/* dict - a generic dictionary, variation over hash table */
-/* Copyright (c) 2004-2014. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2004-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
xbt_mallocator_t dict_elm_mallocator = NULL;
-xbt_dictelm_t xbt_dictelm_new(xbt_dict_t dict, const char *key, int key_len, unsigned int hash_code, void *content,
- void_f_pvoid_t free_f)
+xbt_dictelm_t xbt_dictelm_new(const char* key, int key_len, unsigned int hash_code, void* content)
{
- xbt_dictelm_t element;
-
- xbt_assert(!free_f, "Cannot set an individual free function in homogeneous dicts.");
- element = xbt_mallocator_get(dict_elm_mallocator);
+ xbt_dictelm_t element = xbt_mallocator_get(dict_elm_mallocator);
element->key = xbt_new(char, key_len + 1);
memcpy(element->key, key, key_len);
element->key[key_len] = '\0';
#define dict_elm_mallocator_reset_f ((void_f_pvoid_t)NULL)
/*####[ Function prototypes ]################################################*/
-XBT_PRIVATE xbt_dictelm_t xbt_dictelm_new(xbt_dict_t dict, const char *key, int key_len,
- unsigned int hash_code, void *content, void_f_pvoid_t free_f);
+XBT_PRIVATE xbt_dictelm_t xbt_dictelm_new(const char* key, int key_len, unsigned int hash_code, void* content);
XBT_PRIVATE void xbt_dictelm_free(xbt_dict_t dict, xbt_dictelm_t element);
XBT_PRIVATE void xbt_dictelm_set_data(xbt_dict_t dict, xbt_dictelm_t element, void *data, void_f_pvoid_t free_ctn);
-$ ${bindir:=.}/../../../bin/smpirun -np 16 -platform ../../../examples/platforms/small_platform.xml -hostfile ../hostfile ${bindir:=.}/bug-17132 --cfg=smpi/simulate-computation:no --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -np 16 -platform ../../../examples/platforms/small_platform.xml -hostfile ../hostfile ${bindir:=.}/bug-17132 --cfg=smpi/simulate-computation:no --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> You requested to use 16 processes, but there is only 5 processes in your hostfile...
> Walltime = 0.468274
! output sort
p Test allgather
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
! output sort
p Test allgatherv
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgatherv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgatherv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
p Test allreduce
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --cfg=smpi/allreduce:automatic --cfg=smpi/async-small-thresh:65536 --cfg=smpi/send-is-detached-thresh:128000 --cfg=smpi/simulate-computation:no "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --cfg=smpi/allreduce:automatic --cfg=smpi/async-small-thresh:65536 --cfg=smpi/send-is-detached-thresh:128000 --cfg=smpi/simulate-computation:no "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
! timeout 20
p Test allreduce
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce 300000 --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce 300000 --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> [rank 2] -> Fafard
! output sort
p Test allreduce
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
! output sort
p Test classic - backbone
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test separate clusters
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/two_clusters.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/two_clusters.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test torus
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_torus.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_torus.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test fat tree
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test fat tree IB
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --cfg=network/model:IB --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --cfg=network/model:IB --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test Dragonfly
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_dragonfly.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_dragonfly.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test all to all
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
! output sort
p Test all to all
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoallv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoallv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
! output sort
p Test barrier
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-barrier --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-barrier --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> ... Barrier ....
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-bcast --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-bcast --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
! timeout 30
p Test all to all
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-gather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-gather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
! output sort
p Test reduce_scatter
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> No Errors
> [rank 0] -> Tremblay
> [rank 10] -> Fafard
! output sort
p Test allreduce
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [0] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
> [0] second sndbuf=[0 ]
> [0] sndbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 ]
! output sort
p Test scatter
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [0] ok.
> [10] ok.
> [11] ok.
foreach (test ${umpire_tests_passing})
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! timeout 30")
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! output display" APPEND)
- write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1e9 --cfg=smpi/coll_selector:mpich \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND)
+ write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../smpi_script/bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1e9 --cfg=smpi/coll_selector:mpich \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND)
endforeach()
foreach (test ${umpire_tests_deadlock} ${umpire_tests_problematic} )
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! timeout 30" )
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! expect return 3" APPEND)
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! output display" APPEND)
- write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1e9 --cfg=smpi/coll_selector:mpich \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND)
+ write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../smpi_script/bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1e9 --cfg=smpi/coll_selector:mpich \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND)
endforeach()
endif()
p Test compute and bench
! output sort
! timeout 45
-$ ${bindir:=.}/../../../bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform_with_routers.xml -np 3 --log=root.thres:warning ${bindir:=.}/macro-sample quiet --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform_with_routers.xml -np 3 --log=root.thres:warning ${bindir:=.}/macro-sample quiet --log=smpi_kernel.thres:warning
> (0) Run the first computation. It's globally benched, and I want no more than 4 benchmarks (thres<0)
> (0) Run the first computation. It's globally benched, and I want no more than 4 benchmarks (thres<0)
> (0) Run the first computation. It's globally benched, and I want no more than 4 benchmarks (thres<0)
! setenv LD_LIBRARY_PATH=../../lib
! output sort
! timeout 5
-$ ${bindir:=.}/../../../bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/macro-shared --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/macro-shared --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [0] After change, the value in the shared buffer is: 16053117601147974045
> [0] The value in the shared buffer is: 4
> [1] After change, the value in the shared buffer is: 16053117601147974045
include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
- foreach(file anyall bottom eagerdt inactivereq isendself isendirecv isendselfprobe issendselfcancel cancelanysrc pingping probenull
+ foreach(file anyall bottom eagerdt huge_anysrc huge_underflow inactivereq isendself isendirecv isendselfprobe issendselfcancel cancelanysrc pingping probenull
dtype_send probe-unexp sendall sendflood sendrecv1 sendrecv2 sendrecv3 waitany-null waittestnull many_isend manylmt recv_any)
# not compiled files: big_count_status bsend1 bsend2 bsend3 bsend4 bsend5 bsendalign bsendfrag bsendpending mprobe
# cancelrecv greq1 icsend large_message pscancel rcancel rqfreeb rqstatus scancel2 scancel sendself scancel_unmatch
SET_TESTS_PROPERTIES(test-smpi-mpich3-pt2pt-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
endif()
-foreach(file anyall bottom eagerdt inactivereq isendself isendirecv isendselfprobe issendselfcancel pingping probenull
+foreach(file anyall bottom eagerdt huge_anysrc huge_underflow inactivereq isendself isendirecv isendselfprobe issendselfcancel pingping probenull
probe-unexp sendall sendflood sendrecv1 sendrecv2 sendrecv3 waitany-null waittestnull
big_count_status bsend1 bsend2 bsend3 bsend4 bsend5 bsendalign bsendfrag bsendpending
cancelrecv cancelanysrc dtype_send greq1 icsend large_message pscancel rcancel rqfreeb rqstatus scancel2 scancel sendself many_isend manylmt mprobe recv_any scancel_unmatch
--- /dev/null
+/*
+ * (C) 2017 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ *
+ * Portions of this code were written by Intel Corporation.
+ * Copyright (C) 2011-2017 Intel Corporation. Intel provides this material
+ * to Argonne National Laboratory subject to Software Grant and Corporate
+ * Contributor License Agreement dated February 8, 2012.
+ *
+ * This program checks if MPICH can correctly handle many outstanding large
+ * message transfers which use wildcard receives.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <mpi.h>
+
+#define N_TRY 16
+#define BLKSIZE (10*1024*1024)
+
+/* Stress test: rank 0 streams N_TRY large (BLKSIZE) nonblocking sends to the
+ * last rank, which matches them with MPI_ANY_SOURCE nonblocking receives. */
+int main(int argc, char *argv[])
+{
+ int size, rank;
+ int dest;
+ int i;
+ char *buff;
+ MPI_Request reqs[N_TRY];
+
+ MPI_Init(&argc, &argv);
+
+ /* One contiguous buffer holding all N_TRY blocks (160 MiB total). */
+ buff = malloc(N_TRY * BLKSIZE);
+ memset(buff, -1, N_TRY * BLKSIZE);
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ dest = size - 1;
+
+ if (rank == 0) {
+ for (i = 0; i < N_TRY; i++)
+ MPI_Isend(buff + BLKSIZE*i, BLKSIZE, MPI_BYTE, dest, 0, MPI_COMM_WORLD, &reqs[i]);
+ MPI_Waitall(N_TRY, reqs, MPI_STATUSES_IGNORE);
+ }
+ else if (rank == dest) {
+ /* Wildcard receives: message ordering must still be handled correctly. */
+ for (i = 0; i < N_TRY; i++)
+ MPI_Irecv(buff + BLKSIZE*i, BLKSIZE, MPI_BYTE, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[i]);
+ MPI_Waitall(N_TRY, reqs, MPI_STATUSES_IGNORE);
+ }
+
+ free(buff);
+
+ if (rank == 0)
+ puts(" No Errors");
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+
+/*
+ * (C) 2017 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ *
+ * Portions of this code were written by Intel Corporation.
+ * Copyright (C) 2011-2017 Intel Corporation. Intel provides this material
+ * to Argonne National Laboratory subject to Software Grant and Corporate
+ * Contributor License Agreement dated February 8, 2012.
+ *
+ * This program checks if MPICH can correctly handle a huge message receive
+ * when the sender underflows by sending a much smaller message
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <mpi.h>
+
+#define HUGE_SIZE (10*1024*1024)
+
+/* Underflow test: the receiver posts a huge (HUGE_SIZE) receive while the
+ * sender transmits only a single byte; both calls must still complete. */
+int main(int argc, char *argv[])
+{
+ int size, rank;
+ int dest;
+ char *buff;
+
+ MPI_Init(&argc, &argv);
+
+ buff = malloc(HUGE_SIZE);
+ buff[0] = 0;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ dest = size - 1;
+
+ /* Try testing underflow to make sure things work if we try to send 1 byte
+ * when receiving a huge message */
+ if (rank == 0) {
+ MPI_Send(buff, 1, MPI_BYTE, dest, 0, MPI_COMM_WORLD);
+ } else if (rank == dest) {
+ MPI_Recv(buff, HUGE_SIZE, MPI_BYTE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+ }
+
+ free(buff);
+
+ /* Keep non-participating ranks alive until the transfer is done. */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (rank == 0)
+ puts(" No Errors");
+
+ MPI_Finalize();
+
+ return 0;
+}
big_count_status 1 mpiversion=3.0
many_isend 3
manylmt 2
+huge_anysrc 2
+huge_underflow 2
#Needs MPI_Irsend
#dtype_send 2
recv_any 2
foreach(file accfence1 accfence2_am accfence2 accpscw1 allocmem epochtest getfence1 getgroup manyrma3 nullpscw lockcontention lockopts contig_displ
putfence1 putfidx putpscw1 test1_am test1 test1_dt test2_am test2 test3 test3_am test4 test4_am test5_am test5 transpose1 transpose2 lockcontention2
transpose3 transpose4 transpose5 transpose6 transpose7 window_creation selfrma locknull win_info
- at_complete acc-pairtype manyget large-small-acc lock_dt win_dynamic_acc
- lock_nested winname attrorderwin baseattrwin fkeyvalwin strided_acc_indexed strided_getacc_indexed
- strided_acc_onelock strided_get_indexed strided_putget_indexed contention_put contention_putget)
- # not compiled files: acc-loc adlb_mimic1 badrma compare_and_swap contention_put
- # contention_putget contig_displ fence_shm fetchandadd_am fetchandadd fetchandadd_tree_am fetchandadd_tree
- # fetch_and_op fkeyvalwin flush get_acc_local get_accumulate linked_list_bench_lock_all
- # linked_list_bench_lock_excl linked_list_bench_lock_shr linked_list linked_list_fop linked_list_lockall
- # manyrma2 mcs-mutex mixedsync mutex_bench lockcontention3
- # pscw_ordering put_base put_bottom req_example reqops rmanull rmazero rma-contig selfrma
- # strided_getacc_indexed_shared
+ at_complete acc-pairtype manyget large-small-acc lock_dt win_dynamic_acc fetch_and_op flush req_example rmanull rmazero badrma
+ lock_nested winname attrorderwin baseattrwin fkeyvalwin strided_acc_indexed strided_getacc_indexed compare_and_swap
+ lockall_dt lockall_dt_flushall lock_dt_flush lockall_dt_flush lockall_dt_flushlocalall lockall_dt_flushlocal lock_dt_flushlocal
+ strided_acc_onelock strided_get_indexed strided_putget_indexed contention_put contention_putget
+ adlb_mimic1 lock_contention_dt acc-loc get_acc_local get_accumulate put_base put_bottom
+ linked_list_bench_lock_all linked_list_bench_lock_excl manyrma2 pscw_ordering rma-contig get-struct
+ rput_local_comp racc_local_comp)
+ # fence_shm fetchandadd_am fetchandadd fetchandadd_tree_am fetchandadd_tree
+ # linked_list_bench_lock_shr linked_list linked_list_fop linked_list_lockall
+ # mcs-mutex mixedsync mutex_bench lockcontention3 reqops
+ # strided_getacc_indexed_shared rget-unlock
# win_flavors win_shared win_shared_noncontig win_shared_noncontig_put
- # win_large_shm win_zero wintest get-struct atomic_rmw_fop atomic_rmw_gacc rget-unlock atomic_get atomic_rmw_cas
- # win_shared_zerobyte aint derived-acc-flush_local large-acc-flush_local lockall_dt lockall_dt_flushall
- # lockall_dt_flushlocalall lockall_dt_flushlocal lock_contention_dt lock_dt_flush lock_dt_flushlocal
- #racc_local_comp rput_local_comp win_shared_create win_shared_put_flush_get win_shared_rma_flush_load
+ # win_large_shm win_zero wintest atomic_rmw_fop atomic_rmw_gacc atomic_get atomic_rmw_cas
+ # win_shared_zerobyte aint derived-acc-flush_local large-acc-flush_local
+ # win_shared_create win_shared_put_flush_get win_shared_rma_flush_load
# wrma_flush_get
add_executable(${file} ${file}.c)
target_link_libraries(${file} simgrid mtest_c)
#define SIZE 100
-MPI_Win win;
-int win_buf[SIZE], origin_buf[SIZE], result_buf[SIZE];
int do_test(int origin_count, MPI_Datatype origin_type, int result_count,
- MPI_Datatype result_type, int target_count, MPI_Datatype target_type)
+ MPI_Datatype result_type, int target_count, MPI_Datatype target_type, MPI_Win win, int* win_buf, int* origin_buf, int* result_buf);
+int do_test(int origin_count, MPI_Datatype origin_type, int result_count,
+ MPI_Datatype result_type, int target_count, MPI_Datatype target_type, MPI_Win win, int* win_buf, int* origin_buf, int* result_buf)
{
int errs = 0, ret, origin_type_size, result_type_size;
int main(int argc, char *argv[])
{
+ MPI_Win win;
+ int win_buf[SIZE], origin_buf[SIZE], result_buf[SIZE];
int rank, nprocs, i, j, k;
int errs = 0;
MPI_Datatype types[4];
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
for (k = 0; k < 4; k++)
- do_test(0, types[i], 0, types[j], 0, types[k]);
+ do_test(0, types[i], 0, types[j], 0, types[k], win, win_buf, origin_buf, result_buf);
/* single zero-size datatype, but non-zero count */
for (i = 1; i < 4; i++) {
for (j = 1; j < 4; j++) {
- do_test(1, types[0], 0, types[i], 0, types[j]);
- do_test(0, types[i], 1, types[0], 0, types[j]);
- do_test(0, types[i], 0, types[j], 1, types[0]);
+ do_test(1, types[0], 0, types[i], 0, types[j], win, win_buf, origin_buf, result_buf);
+ do_test(0, types[i], 1, types[0], 0, types[j], win, win_buf, origin_buf, result_buf);
+ do_test(0, types[i], 0, types[j], 1, types[0], win, win_buf, origin_buf, result_buf);
}
}
/* two zero-size datatypes, but non-zero count */
for (i = 1; i < 4; i++) {
- do_test(1, types[0], 1, types[0], 0, types[i]);
- do_test(1, types[0], 0, types[i], 1, types[0]);
- do_test(0, types[i], 1, types[0], 1, types[0]);
+ do_test(1, types[0], 1, types[0], 0, types[i], win, win_buf, origin_buf, result_buf);
+ do_test(1, types[0], 0, types[i], 1, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(0, types[i], 1, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
- do_test(1, types[0], 2, types[0], 0, types[i]);
- do_test(2, types[0], 1, types[0], 0, types[i]);
+ do_test(1, types[0], 2, types[0], 0, types[i], win, win_buf, origin_buf, result_buf);
+ do_test(2, types[0], 1, types[0], 0, types[i], win, win_buf, origin_buf, result_buf);
- do_test(1, types[0], 0, types[i], 2, types[0]);
- do_test(2, types[0], 0, types[i], 1, types[0]);
+ do_test(1, types[0], 0, types[i], 2, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(2, types[0], 0, types[i], 1, types[0], win, win_buf, origin_buf, result_buf);
- do_test(0, types[i], 1, types[0], 2, types[0]);
- do_test(0, types[i], 2, types[0], 1, types[0]);
+ do_test(0, types[i], 1, types[0], 2, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(0, types[i], 2, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
}
/* three zero-size datatypes, but non-zero count */
- do_test(1, types[0], 1, types[0], 1, types[0]);
- do_test(1, types[0], 1, types[0], 2, types[0]);
- do_test(1, types[0], 2, types[0], 1, types[0]);
- do_test(1, types[0], 2, types[0], 2, types[0]);
- do_test(2, types[0], 1, types[0], 1, types[0]);
- do_test(2, types[0], 1, types[0], 2, types[0]);
- do_test(2, types[0], 2, types[0], 1, types[0]);
+ do_test(1, types[0], 1, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(1, types[0], 1, types[0], 2, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(1, types[0], 2, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(1, types[0], 2, types[0], 2, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(2, types[0], 1, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(2, types[0], 1, types[0], 2, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(2, types[0], 2, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
}
MPI_Win_fence(0, win);
#endif
#define CMP(x, y) ((x - ((TYPE_C) (y))) > 1.0e-9)
-
+void reset_vars(TYPE_C * val_ptr, TYPE_C * res_ptr, MPI_Win win);
void reset_vars(TYPE_C * val_ptr, TYPE_C * res_ptr, MPI_Win win)
{
int i, rank, nproc;
for (i = 0; i < NTIMES; i++) {
Get_nextval(win, val_array, get_type, rank, nprocs, counter_vals + i);
- /* printf("Rank %d, counter %d\n", rank, value); */
+ /* printf("Rank %d, counter %d\n", rank, localvalue); */
}
MPI_Win_free(&win);
int errors = 0;
const int NITER = 1000;
-const int acc_val = 3;
+
int main(int argc, char **argv)
{
int rank, nproc;
int out_val, i, counter = 0;
MPI_Win win;
-
+ int acc_val = 3;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
static const int verbose = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
int procid, nproc, i;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(-1, llist_win);
+ head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
#define MIN_NPROBE 1
#define ELEM_PER_ROW 16
-#define MIN(X,Y) ((X < Y) ? (X) : (Y))
-#define MAX(X,Y) ((X > Y) ? (X) : (Y))
+#define MYMIN(X,Y) ((X < Y) ? (X) : (Y))
+#define MYMAX(X,Y) ((X > Y) ? (X) : (Y))
/* Linked list pointer */
typedef struct {
static const int verbose = 0;
static const int print_perf = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &procid);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(procid, llist_win);
+ head_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
printf("%d: Chasing to <%d, %p>\n", procid, next_tail_ptr.rank,
(void *) next_tail_ptr.disp);
tail_ptr = next_tail_ptr;
- pollint = MAX(MIN_NPROBE, pollint / 2);
+ pollint = MYMAX(MIN_NPROBE, pollint / 2);
}
else {
for (j = 0; j < pollint; j++)
MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag,
MPI_STATUS_IGNORE);
- pollint = MIN(MAX_NPROBE, pollint * 2);
+ pollint = MYMIN(MAX_NPROBE, pollint * 2);
}
}
} while (!success);
#include <unistd.h>
#endif
-#define NUM_ELEMS 1000
+#define NUM_ELEMS 100
#define MAX_NPROBE nproc
#define MIN_NPROBE 1
#define ELEM_PER_ROW 16
-#define MIN(X,Y) ((X < Y) ? (X) : (Y))
-#define MAX(X,Y) ((X > Y) ? (X) : (Y))
+#define MYMIN(X,Y) ((X < Y) ? (X) : (Y))
+#define MYMAX(X,Y) ((X > Y) ? (X) : (Y))
/* Linked list pointer */
typedef struct {
static const int verbose = 0;
static const int print_perf = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
double time;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(procid, llist_win);
+ head_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
printf("%d: Chasing to <%d, %p>\n", procid, next_tail_ptr.rank,
(void *) next_tail_ptr.disp);
tail_ptr = next_tail_ptr;
- pollint = MAX(MIN_NPROBE, pollint / 2);
+ pollint = MYMAX(MIN_NPROBE, pollint / 2);
}
else {
for (j = 0; j < pollint; j++)
MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag,
MPI_STATUS_IGNORE);
- pollint = MIN(MAX_NPROBE, pollint * 2);
+ pollint = MYMIN(MAX_NPROBE, pollint * 2);
}
}
} while (!success);
static const int verbose = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
int procid, nproc, i;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(-1, llist_win);
+ head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
static const int verbose = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
int procid, nproc, i;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(-1, llist_win);
+ head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
case 6: /* a few small puts (like strided put, but 1 word at a time) */
/* FIXME: The conditional and increment are reversed below. This looks
* like a bug, and currently prevents the following test from running. */
- for (j = 0; j++; j < veccount) {
+ for (j = 0; j < veccount; j++) {
if (buf[j * stride] != PUT_VAL + j) {
errs++;
printf("case %d: value is %d should be %d\n", i, buf[j * stride], PUT_VAL + j);
case 7: /* a few small accumulates (like strided acc, but 1 word at a time) */
/* FIXME: The conditional and increment are reversed below. This looks
* like a bug, and currently prevents the following test from running. */
- for (j = 0; j++; j < veccount) {
+ for (j = 0; j < veccount; j++) {
if (buf[j * stride] != ACC_VAL + j + OFFSET_2 + j * stride) {
errs++;
printf("case %d: value is %d should be %d\n", i,
#include <stdlib.h>
#include <string.h>
-#define MAX_COUNT 65536*4/16
-#define MAX_RMA_SIZE 2 /* 16 in manyrma performance test */
+#define MAX_COUNT 512
+#define MAX_RMA_SIZE 1 /* 16 in manyrma performance test */
#define MAX_RUNS 8
#define MAX_ITER_TIME 5.0 /* seconds */
#else
#ifdef USE_WIN_ALLOC_SHM
MPI_Info_create(&hdl->win_info);
- MPI_Info_set(hdl->win_info, "alloc_shm", "true");
+ MPI_Info_set(hdl->win_info, (char*)"alloc_shm", (char*)"true");
#else
MPI_Info_create(&hdl->win_info);
- MPI_Info_set(hdl->win_info, "alloc_shm", "false");
+ MPI_Info_set(hdl->win_info, (char*)"alloc_shm", (char*)"false");
#endif
MPI_Win_allocate(2 * sizeof(int), sizeof(int), hdl->win_info, hdl->comm,
&hdl->base, &hdl->window);
/* Use a global variable to inhibit compiler optimizations in the compute
* function. */
double junk = 0.0;
-
+void compute(int step, double *data);
void compute(int step, double *data)
{
int i;
MPI_Info_create(&win_info);
#ifdef USE_WIN_ALLOC_SHM
- MPI_Info_set(win_info, "alloc_shm", "true");
+ MPI_Info_set(win_info, (char*)"alloc_shm", (char*)"true");
#else
- MPI_Info_set(win_info, "alloc_shm", "false");
+ MPI_Info_set(win_info, (char*)"alloc_shm", (char*)"false");
#endif
MPI_Win_allocate(NSTEPS * N * sizeof(double), sizeof(double), win_info,
#include <string.h>
#include <mpi.h>
-#define MAX_DATA_SIZE (1024*128*16)
-#define MAX_NUM_ITERATIONS (8192*4)
+#define MAX_DATA_SIZE (1024)
+#define MAX_NUM_ITERATIONS (1024)
#define MIN_NUM_ITERATIONS 8
#define NUM_WARMUP_ITER 1
const int verbose = 0;
static int rank;
-void run_test(int lock_mode, int lock_assert)
+static void run_test(int lock_mode, int lock_assert)
{
int nproc, test_iter, target_rank, data_size;
- int *buf, *win_buf;
+ char *buf, *win_buf;
MPI_Win win;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
t_acc = MPI_Wtime();
MPI_Win_lock(lock_mode, target_rank, lock_assert, win);
- MPI_Accumulate(buf, data_size / sizeof(int), MPI_INT, target_rank,
- 0, data_size / sizeof(int), MPI_INT, MPI_SUM, win);
+ MPI_Accumulate(buf, data_size, MPI_BYTE, target_rank,
+ 0, data_size, MPI_BYTE, MPI_SUM, win);
MPI_Win_unlock(target_rank, win);
}
t_acc = (MPI_Wtime() - t_acc) / num_iter;
putfidx 4
getfence1 4
accfence1 4
-#Needs lock, unlock
-#adlb_mimic1 3
+adlb_mimic1 3
accfence2 4
putpscw1 4
accpscw1 4
transpose1 2
transpose2 2
transpose3 2
-#Needs MPI_Win_allocate
transpose3_shm 2
transpose5 2
transpose6 1
test3 2
test4 2
test5 2
-#Needs lock, unlock
lockcontention 3
lockcontention2 4
lockcontention2 8
#Buggy one.
#lockcontention3 8
lockopts 2
-#needs get_accumulate
lock_dt 2
-#lock_dt_flush 2
-#lock_dt_flushlocal 2
-#lockall_dt 4 timeLimit=240
-#lockall_dt_flush 4 timeLimit=240
-#lockall_dt_flushall 4 timeLimit=240
-#lockall_dt_flushlocal 4 timeLimit=240
-#lockall_dt_flushlocalall 4 timeLimit=240
-#lock_contention_dt 4 timeLimit=240
+lock_dt_flush 2
+lock_dt_flushlocal 2
+lockall_dt 4 timeLimit=240
+lockall_dt_flush 4 timeLimit=240
+lockall_dt_flushall 4 timeLimit=240
+lockall_dt_flushlocal 4 timeLimit=240
+lockall_dt_flushlocalall 4 timeLimit=240
+lock_contention_dt 4 timeLimit=240
transpose4 2
#fetchandadd 7
#fetchandadd_tree 7
#mixedsync 4
epochtest 3
locknull 2
-#Needs MPI_Rput, rget, racumulate, MPI_Fetch_and_op, MPI_Compare_and_swap
-#rmanull 2
-#rmazero 2
+rmanull 2
+rmazero 2
strided_acc_indexed 2
strided_acc_onelock 2
#needs MPI_Type_create_subarray
window_creation 2
contention_put 4
contention_putget 4
-#put_base 2
-#put_bottom 2
+put_base 2
+put_bottom 2
#win_flavors 4 mpiversion=3.0
#win_flavors 3 mpiversion=3.0
-#manyrma2 2 timeLimit=500
+manyrma2 2 timeLimit=500
manyrma3 2
#win_shared 4 mpiversion=3.0
#win_shared_create_allocshm 4 mpiversion=3.0
#win_shared_noncontig_put 4 mpiversion=3.0
#win_zero 4 mpiversion=3.0
win_dynamic_acc 4
-#get_acc_local 1 mpiversion=3.0
+get_acc_local 1
+# Disabled due to issues with concurrent updates.
#linked_list 4 mpiversion=3.0
#linked_list_fop 4 mpiversion=3.0
-#compare_and_swap 4 mpiversion=3.0
+compare_and_swap 4
+fetch_and_op 4
#fetch_and_op_char 4 mpiversion=3.0
#fetch_and_op_short 4 mpiversion=3.0
#fetch_and_op_int 4 mpiversion=3.0
#fetch_and_op_long_double 4 mpiversion=3.0
#get_accumulate_double 4 mpiversion=3.0
#get_accumulate_double_derived 4 mpiversion=3.0
-#get_accumulate_int 4 mpiversion=3.0
+get_accumulate 4
#get_accumulate_int_derived 4 mpiversion=3.0
#get_accumulate_long 4 mpiversion=3.0
#get_accumulate_long_derived 4 mpiversion=3.0
#get_accumulate_short 4 mpiversion=3.0
#get_accumulate_short_derived 4 mpiversion=3.0
-#flush 4 mpiversion=3.0
-#reqops 4 mpiversion=3.0
-#req_example 4 mpiversion=3.0
+flush 4
+#reqops 4
+req_example 4
+rput_local_comp 2 mpiversion=3.0
+racc_local_comp 2 mpiversion=3.0
win_info 4
+# Disabled due to issues with concurrent updates.
#linked_list_lockall 4 mpiversion=3.0
-#pscw_ordering 4 mpiversion=3.0
-#linked_list_bench_lock_all 4 mpiversion=3.0
-#linked_list_bench_lock_excl 4 mpiversion=3.0
+pscw_ordering 4
+linked_list_bench_lock_all 4
+linked_list_bench_lock_excl 4 mpiversion=3.0
#linked_list_bench_lock_shr 4 mpiversion=3.0
#linked_list_bench_lock_shr_nocheck 4 mpiversion=3.0
#mutex_bench_shm 4 mpiversion=3.0
#mutex_bench_shm_ordered 4 mpiversion=3.0
-#rma-contig 2 mpiversion=3.0 timeLimit=720
-#badrma 2 mpiversion=3.0
-#acc-loc 4
+rma-contig 2 timeLimit=720
+badrma 2
+acc-loc 4
#fence_shm 2 mpiversion=3.0
#mutex_bench 4 mpiversion=3.0
#mutex_bench_shared 4 mpiversion=3.0
win_shared_zerobyte 4 mpiversion=3.0
win_shared_put_flush_get 4 mpiversion=3.0
-#get-struct 2
+get-struct 2
at_complete 2
#atomic_rmw_fop 3
#atomic_rmw_cas 3
#gacc_flush_get 3 mpiversion=3.0
#fop_flush_get 3 mpiversion=3.0
#cas_flush_get 3 mpiversion=3.0
-#rget-unlock 2 mpiversion=3.0
+# Known issue: unlock should complete outstanding R* calls, but enabling this currently causes failures.
+#rget-unlock 2
#overlap_wins_put 3
#overlap_wins_acc 3
#overlap_wins_gacc 3
p Test dsend
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/pt2pt-dsend -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/pt2pt-dsend -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Jupiter:1:(2) 0.000000] [dsend/INFO] rank 1: data exchanged
> [Tremblay:0:(1) 0.005890] [dsend/INFO] rank 0: data exchanged
> [rank 0] -> Tremblay
p process 2 will finish at 0.5+2*4 (time before first send) + 2*(1+0.5*4) (recv+irecv) + 0.005890 (network time, same as before) = 14.505890s
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/pt2pt-dsend -q --log=smpi_kernel.thres:warning --cfg=smpi/or:0:1:0.5 --cfg=smpi/os:0:0.5:2 --cfg=smpi/ois:0:1:0.1 --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/pt2pt-dsend -q --log=smpi_kernel.thres:warning --cfg=smpi/or:0:1:0.5 --cfg=smpi/os:0:0.5:2 --cfg=smpi/ois:0:1:0.1 --log=xbt_cfg.thres:warning
> [Jupiter:1:(2) 9.900000] [dsend/INFO] rank 1: data exchanged
> [Tremblay:0:(1) 14.505890] [dsend/INFO] rank 0: data exchanged
> [rank 0] -> Tremblay
p generate a trace with pingpong, and replay itself, then check that output trace of the second run is the same as in the first (once sorted)
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -trace-ti --cfg=tracing/filename:out_in_ti.txt --cfg=smpi/simulate-computation:no -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace-ti --cfg=tracing/filename:out_in_ti.txt --cfg=smpi/simulate-computation:no -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
>
>
>
> [rank 2] -> Fafard
> [rank 3] -> Ginette
-$ ${bindir:=.}/../../../bin/smpirun -ext smpi_replay --log=replay.:critical -trace-ti --cfg=tracing/filename:out_ti.txt --cfg=smpi/simulate-computation:no -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/../../../examples/smpi/replay/smpi_replay ./out_in_ti.txt --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.:critical -trace-ti --cfg=tracing/filename:out_ti.txt --cfg=smpi/simulate-computation:no -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/../../../examples/smpi/replay/smpi_replay ./out_in_ti.txt --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> [rank 2] -> Fafard
p Same test, but only using one output file for all processes
p generate a trace with pingpong, and replay itself, then check that output trace of the second run is the same as in the first (once sorted)
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -trace-ti --cfg=tracing/filename:out_in_ti.txt --cfg=tracing/smpi/format/ti-one-file:yes -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace-ti --cfg=tracing/filename:out_in_ti.txt --cfg=tracing/smpi/format/ti-one-file:yes -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
>
>
>
> [rank 2] -> Fafard
> [rank 3] -> Ginette
-$ ${bindir:=.}/../../../bin/smpirun -ext smpi_replay --log=replay.:critical -trace-ti --cfg=tracing/filename:out_ti.txt --cfg=tracing/smpi/format/ti-one-file:yes -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/../../../examples/smpi/replay/smpi_replay ./out_in_ti.txt --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.:critical -trace-ti --cfg=tracing/filename:out_ti.txt --cfg=tracing/smpi/format/ti-one-file:yes -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/../../../examples/smpi/replay/smpi_replay ./out_in_ti.txt --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> [rank 2] -> Fafard
! setenv LD_LIBRARY_PATH=../../lib
! expect return 1
-$ ${bindir:=.}/../../../bin/smpirun -hostfile ../hostfile_empty -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ../hostfile_empty -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning
> [smpirun] ** error: the hostfile '../hostfile_empty' is empty. Aborting.
! expect return 1
-$ ${bindir:=.}/../../../bin/smpirun -hostfile hostfile-does-not-exist.txt -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile hostfile-does-not-exist.txt -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning
> [smpirun] ** error: the file 'hostfile-does-not-exist.txt' does not exist. Aborting.
p Test pingpong
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> *** Ping-pong test (MPI_Send/MPI_Recv) ***
>
>
p Test timers
! setenv LD_LIBRARY_PATH=../../lib
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 1 ${bindir:=.}/timers -q --log=smpi_kernel.thres:warning --cfg=smpi/simulate-computation:no --cfg=smpi/host-speed:100000 --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 1 ${bindir:=.}/timers -q --log=smpi_kernel.thres:warning --cfg=smpi/simulate-computation:no --cfg=smpi/host-speed:100000 --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
p Test hvector
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-hvector -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-hvector -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> rank= 0, a[0][0]=0.000000
p Test indexed
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-indexed -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-indexed -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> buffer[0] = 0
p Test struct
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-struct -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-struct -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> Process 0 got -2 (-2?) and 8.000000 (8.0?), tab (should be all 0): 0 0 0 0 0 0
p Test vector
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ${srcdir:=.}/../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-vector -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ${srcdir:=.}/../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-vector -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> rank= 0, a[0][0]=0.000000
endif()
if(NOT enable_memcheck)
- set(DART_TESTING_TIMEOUT "300") #TIMEOUT FOR EACH TEST
+ set(DART_TESTING_TIMEOUT "500") #TIMEOUT FOR EACH TEST
else()
set(DART_TESTING_TIMEOUT "3000") #TIMEOUT FOR EACH TEST
endif()
src/smpi/smpi_keyvals.hpp
src/smpi/smpi_datatype_derived.cpp
src/smpi/smpi_datatype_derived.hpp
+ src/smpi/smpi_main.c
src/smpi/smpi_op.cpp
src/smpi/smpi_op.hpp
src/smpi/smpi_process.cpp
${CMAKE_BINARY_DIR}/bin/smpicc
${CMAKE_BINARY_DIR}/bin/smpicxx
${CMAKE_BINARY_DIR}/bin/smpirun
+ ${CMAKE_BINARY_DIR}/bin/smpimain
DESTINATION $ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/)
if(SMPI_FORTRAN)
install(PROGRAMS
endif()
endif()
+if(enable_model-checking)
+ install(
+ PROGRAMS ${CMAKE_BINARY_DIR}/bin/simgrid-mc
+ DESTINATION $ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/)
+endif()
+
install(PROGRAMS ${CMAKE_BINARY_DIR}/bin/tesh DESTINATION $ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/)
install(PROGRAMS ${CMAKE_BINARY_DIR}/bin/graphicator DESTINATION $ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/)
endif()
endif()
-if(HAVE_MC AND HAVE_GNU_LD)
+if(HAVE_MC AND HAVE_GNU_LD AND NOT ${DL_LIBRARY} STREQUAL "")
SET(SIMGRID_DEP "${SIMGRID_DEP} ${DL_LIBRARY}")
endif()
# Compute the dependencies of SMPI
##################################
+
+if(enable_smpi)
+ if(NOT ${DL_LIBRARY} STREQUAL "")
+ set(SIMGRID_DEP "${SIMGRID_DEP} ${DL_LIBRARY}") # for privatization
+ endif()
+ add_executable(smpimain src/smpi/smpi_main.c)
+ target_link_libraries(smpimain simgrid)
+ set_target_properties(smpimain
+ PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+endif()
+
if(enable_smpi AND APPLE)
set(SIMGRID_DEP "${SIMGRID_DEP} -Wl,-U -Wl,_smpi_simulated_main")
endif()