- Mailbox: Mailbox.by_name()
- Added the following bindings:
- this_actor.warning()
- - Mailbox.put_init() [example: examples/python/comm-waitallfor]
- - Comm.detach() [example: examples/python/comm-waitallfor]
+ - Mailbox.put_init() [example: examples/python/comm-waitallfor/]
+ - Comm.detach() [example: examples/python/comm-waitallfor/]
- Comm.wait_for() [example: examples/python/comm-waitfor/]
- Comm.wait_any_for()
- - Comm.wait_all_for() [example: examples/python/comm-waitallfor]
+ - Comm.wait_all_for() [example: examples/python/comm-waitallfor/]
+ - Mutex [example: examples/python/synchro-mutex/]
Build System:
- Remove target "make uninstall" which was incomplete and no longer maintained.
Mutex
==============
-.. doxygenclass:: simgrid::s4u::Mutex
+.. tabs::
+
+ .. group-tab:: C++
+
+ .. doxygenclass:: simgrid::s4u::Mutex
+
+ .. group-tab:: Python
+
+ .. autoclass:: simgrid.Mutex
Basic management
----------------
.. doxygenfunction:: simgrid::s4u::Mutex::create()
+ .. group-tab:: Python
+
+ .. code-block:: Python
+
+ from simgrid import Mutex
+ mutex = Mutex()
+
+ # Use a context manager to acquire and automatically release the mutex
+ # when leaving the scope.
+ with mutex:
+ # Access shared resource ...
+ pass
+
.. group-tab:: C
.. code-block:: C
.. doxygenfunction:: simgrid::s4u::Mutex::try_lock()
.. doxygenfunction:: simgrid::s4u::Mutex::unlock()
+ .. group-tab:: Python
+
+ .. automethod:: simgrid.Mutex.lock()
+ .. automethod:: simgrid.Mutex.try_lock()
+ .. automethod:: simgrid.Mutex.unlock()
+
.. group-tab:: C
.. doxygenfunction:: sg_mutex_lock(sg_mutex_t mutex)
.. example-tab:: examples/cpp/synchro-mutex/s4u-synchro-mutex.cpp
+ .. example-tab:: examples/python/synchro-mutex/synchro-mutex.py
+
Semaphore
^^^^^^^^^
/* Create the requested amount of actors pairs. Each pair has a specific mutex and cell in `result`. */
int result[cfg_actor_count.get()];
+ sg4::MutexPtr mutex = sg4::Mutex::create();
+ // NOTE(review): this newly added outer `mutex` is shadowed by the per-iteration declaration kept
+ // inside the loop below, so it appears unused in the visible code. Either this line or the
+ // in-loop `sg4::MutexPtr mutex = ...` declaration looks redundant -- confirm the intended one.
for (int i = 0; i < cfg_actor_count; i++) {
result[i] = 0;
sg4::MutexPtr mutex = sg4::Mutex::create();
comm-wait comm-waitall comm-waitallfor comm-waitany comm-waitfor
exec-async exec-basic exec-dvfs exec-remote
platform-profile platform-failures
- network-nonlinear clusters-multicpu io-degradation exec-cpu-nonlinear)
+ network-nonlinear clusters-multicpu io-degradation exec-cpu-nonlinear
+ synchro-mutex)
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${example}/${example}.tesh)
set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/${example}/${example}.py)
--- /dev/null
+from argparse import ArgumentParser
+from dataclasses import dataclass
+import sys
+
+from simgrid import Actor, Engine, Host, Mutex, this_actor
+
+
+def create_parser() -> ArgumentParser:
+    """Build the command-line parser.
+
+    Options:
+      --platform: path to the platform description file (required).
+      --actors:   number of worker pairs to start (default: 6).
+    """
+    parser = ArgumentParser()
+    parser.add_argument(
+        '--platform',
+        type=str,
+        required=True,
+        help='path to the platform description'
+    )
+    parser.add_argument(
+        '--actors',
+        type=int,
+        default=6,
+        help='how many pairs of actors should be started'
+    )
+    return parser
+
+
+@dataclass
+class ResultHolder:
+    """Mutable wrapper around an int, letting the two workers of a pair update one shared cell."""
+    # Incremented under the pair's mutex by both workers; read by the master afterwards.
+    value: int
+
+
+def worker_context_manager(mutex: Mutex, result: ResultHolder):
+    """Increment `result.value` while holding `mutex`, acquired through a `with` context manager."""
+    # NOTE(review): the f-strings below contain no placeholders; plain string literals would do.
+    with mutex:
+        this_actor.info(f"Hello simgrid, I'm ready to compute after acquiring the mutex from a context manager")
+        result.value += 1
+    this_actor.info(f"I'm done, good bye")
+
+
+def worker(mutex: Mutex, result: ResultHolder):
+    """Increment `result.value` while holding `mutex`, using explicit lock()/unlock() calls."""
+    mutex.lock()
+    this_actor.info("Hello simgrid, I'm ready to compute after a regular lock")
+    result.value += 1
+    mutex.unlock()
+    this_actor.info("I'm done, good bye")
+
+
+def master(settings):
+    """Start `settings.actors` pairs of workers, then report each pair's result.
+
+    Each pair shares one dedicated mutex and one ResultHolder cell; one worker of the
+    pair uses the context-manager style, the other the explicit lock/unlock style.
+    """
+    results = [ResultHolder(value=0) for _ in range(settings.actors)]
+    for i in range(settings.actors):
+        # One mutex per pair: both workers of pair i contend on it.
+        mutex = Mutex()
+        Actor.create(f"worker-{i}(mgr)", Host.by_name("Jupiter"), worker_context_manager, mutex, results[i])
+        Actor.create(f"worker-{i}", Host.by_name("Tremblay"), worker, mutex, results[i])
+    # assumes every worker finishes within 10 simulated seconds -- TODO confirm (no join on the actors)
+    this_actor.sleep_for(10)
+    for i in range(settings.actors):
+        this_actor.info(f"Result[{i}] -> {results[i].value}")
+    this_actor.info("I'm done, good bye")
+
+
+def main():
+    """Entry point: parse the example's own flags, set up the engine, spawn the master, run."""
+    # parse_known_args() ignores unrecognized flags (e.g. --log=...), which still reach
+    # the simulator through Engine(sys.argv) below.
+    settings = create_parser().parse_known_args()[0]
+    e = Engine(sys.argv)
+    e.load_platform(settings.platform)
+    Actor.create("master", Host.by_name("Tremblay"), master, settings)
+    e.run()
+
+
+if __name__ == "__main__":
+    main()
--- /dev/null
+#!/usr/bin/env tesh
+
+p Testing Mutex
+
+$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/synchro-mutex.py --platform ${platfdir}/two_hosts.xml --actors 0 "--log=root.fmt:[%10.6r]%e(%i:%a@%h)%e%m%n"
+>[ 10.000000] (1:master@Tremblay) I'm done, good bye
+
+$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/synchro-mutex.py --platform ${platfdir}/two_hosts.xml --actors 1 "--log=root.fmt:[%10.6r]%e(%i:%a@%h)%e%m%n"
+>[ 0.000000] (2:worker-0(mgr)@Jupiter) Hello simgrid, I'm ready to compute after acquiring the mutex from a context manager
+>[ 0.000000] (2:worker-0(mgr)@Jupiter) I'm done, good bye
+>[ 0.000000] (3:worker-0@Tremblay) Hello simgrid, I'm ready to compute after a regular lock
+>[ 0.000000] (3:worker-0@Tremblay) I'm done, good bye
+>[ 10.000000] (1:master@Tremblay) Result[0] -> 2
+>[ 10.000000] (1:master@Tremblay) I'm done, good bye
+
+$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/synchro-mutex.py --platform ${platfdir}/two_hosts.xml --actors 5 "--log=root.fmt:[%10.6r]%e(%i:%a@%h)%e%m%n"
+>[ 0.000000] (2:worker-0(mgr)@Jupiter) Hello simgrid, I'm ready to compute after acquiring the mutex from a context manager
+>[ 0.000000] (2:worker-0(mgr)@Jupiter) I'm done, good bye
+>[ 0.000000] (3:worker-0@Tremblay) Hello simgrid, I'm ready to compute after a regular lock
+>[ 0.000000] (3:worker-0@Tremblay) I'm done, good bye
+>[ 0.000000] (4:worker-1(mgr)@Jupiter) Hello simgrid, I'm ready to compute after acquiring the mutex from a context manager
+>[ 0.000000] (4:worker-1(mgr)@Jupiter) I'm done, good bye
+>[ 0.000000] (5:worker-1@Tremblay) Hello simgrid, I'm ready to compute after a regular lock
+>[ 0.000000] (5:worker-1@Tremblay) I'm done, good bye
+>[ 0.000000] (6:worker-2(mgr)@Jupiter) Hello simgrid, I'm ready to compute after acquiring the mutex from a context manager
+>[ 0.000000] (6:worker-2(mgr)@Jupiter) I'm done, good bye
+>[ 0.000000] (7:worker-2@Tremblay) Hello simgrid, I'm ready to compute after a regular lock
+>[ 0.000000] (7:worker-2@Tremblay) I'm done, good bye
+>[ 0.000000] (8:worker-3(mgr)@Jupiter) Hello simgrid, I'm ready to compute after acquiring the mutex from a context manager
+>[ 0.000000] (8:worker-3(mgr)@Jupiter) I'm done, good bye
+>[ 0.000000] (9:worker-3@Tremblay) Hello simgrid, I'm ready to compute after a regular lock
+>[ 0.000000] (9:worker-3@Tremblay) I'm done, good bye
+>[ 0.000000] (10:worker-4(mgr)@Jupiter) Hello simgrid, I'm ready to compute after acquiring the mutex from a context manager
+>[ 0.000000] (10:worker-4(mgr)@Jupiter) I'm done, good bye
+>[ 0.000000] (11:worker-4@Tremblay) Hello simgrid, I'm ready to compute after a regular lock
+>[ 0.000000] (11:worker-4@Tremblay) I'm done, good bye
+>[ 10.000000] (1:master@Tremblay) Result[0] -> 2
+>[ 10.000000] (1:master@Tremblay) Result[1] -> 2
+>[ 10.000000] (1:master@Tremblay) Result[2] -> 2
+>[ 10.000000] (1:master@Tremblay) Result[3] -> 2
+>[ 10.000000] (1:master@Tremblay) Result[4] -> 2
+>[ 10.000000] (1:master@Tremblay) I'm done, good bye
#include <simgrid/s4u/Host.hpp>
#include <simgrid/s4u/Link.hpp>
#include <simgrid/s4u/Mailbox.hpp>
+#include <simgrid/s4u/Mutex.hpp>
#include <simgrid/s4u/NetZone.hpp>
#include <simgrid/version.h>
using simgrid::s4u::Host;
using simgrid::s4u::Link;
using simgrid::s4u::Mailbox;
+using simgrid::s4u::Mutex;
+using simgrid::s4u::MutexPtr;
XBT_LOG_NEW_DEFAULT_CATEGORY(python, "python");
.def("wait", &simgrid::s4u::Exec::wait, py::call_guard<py::gil_scoped_release>(),
"Block until the completion of that execution.");
+ /* Class Mutex */
+ // Expose simgrid::s4u::Mutex to Python with its MutexPtr intrusive holder.
+ // Fix: the two adjacent string literals used to concatenate without a separating
+ // space, rendering as "...simulation world.See the C++ documentation..." in help().
+ py::class_<Mutex, MutexPtr>(m, "Mutex",
+ "A classical mutex, but blocking in the simulation world. "
+ "See the C++ documentation for details.")
+ .def(py::init<>(&Mutex::create))
+ .def("lock", &Mutex::lock, py::call_guard<py::gil_scoped_release>(), "Block until the mutex is acquired.")
+ .def("try_lock", &Mutex::try_lock, py::call_guard<py::gil_scoped_release>(),
+ "Try to acquire the mutex. Return true if the mutex was acquired, false otherwise.")
+ .def("unlock", &Mutex::unlock, py::call_guard<py::gil_scoped_release>(), "Release the mutex")
+ // Allow mutexes to be automatically acquired/released with a context manager: `with mutex: ...`
+ // NOTE(review): __enter__ returns void (None in Python), so `with mutex as m:` binds m to None;
+ // returning the mutex itself would match the usual Python convention -- confirm intent.
+ .def("__enter__", [](Mutex* self){ self->lock(); }, py::call_guard<py::gil_scoped_release>())
+ // The three ignored __exit__ parameters are (exc_type, exc_value, traceback): the mutex is
+ // released whether or not the `with` body raised.
+ .def("__exit__", [](Mutex* self, py::object&, py::object&, py::object&){ self->unlock(); },
+ py::call_guard<py::gil_scoped_release>());
+
/* Class Actor */
py::class_<simgrid::s4u::Actor, ActorPtr>(m, "Actor",
"An actor is an independent stream of execution in your distributed "
"Returns True if that actor is a daemon and will be terminated automatically when the last non-daemon actor "
"terminates.")
.def("join", py::overload_cast<double>(&Actor::join, py::const_), py::call_guard<py::gil_scoped_release>(),
- "Wait for the actor to finish (more info in the C++ documentation).", py::arg("timeout"))
+ "Wait for the actor to finish (more info in the C++ documentation).", py::arg("timeout") = -1)
.def("kill", &Actor::kill, py::call_guard<py::gil_scoped_release>(), "Kill that actor")
.def("self", &Actor::self, "Retrieves the current actor.")
.def("is_suspended", &Actor::is_suspended, "Returns True if that actor is currently suspended.")