include examples/cpp/synchro-semaphore/s4u-mc-synchro-semaphore.tesh
include examples/cpp/synchro-semaphore/s4u-synchro-semaphore.cpp
include examples/cpp/synchro-semaphore/s4u-synchro-semaphore.tesh
+include examples/cpp/task-dispatch/s4u-task-dispatch.cpp
+include examples/cpp/task-dispatch/s4u-task-dispatch.tesh
include examples/cpp/task-io/s4u-task-io.cpp
include examples/cpp/task-io/s4u-task-io.tesh
include examples/cpp/task-microservice/s4u-task-microservice.cpp
In order to simulate the execution of Dataflow applications, we introduced the
concept of |API_s4u_Tasks|, that can be seen as repeatable activities. A Dataflow
-is defined as a graph of |API_s4u_Tasks| through which circulate Tokens. Tokens
-can carry any user-defined data, using the same internal mechanisms as for the
-other simulated objects. Each Task has to receive a token from each of its
-predecessor to fire a new instance of a :ref:`Communication <API_s4u_Comm>`,
-:ref:`Execution <API_s4u_Exec>`, or :ref:`I/O <API_s4u_Io>` activity.
-On completion of this activity, the Task propagates tokens
-to its successors, and waits for the next set of tokens to arrive.
-Multiple instances of the same Task can run in parallel by adjusting its
-horizontal scaling with
-:cpp:func:`s4u::Task::set_parallelism_degree() <simgrid::s4u::Task::set_parallelism_degree>`.
+is defined as a graph of |API_s4u_Tasks|, where each |API_s4u_Tasks| has a set of
+successors and predecessors. When a |API_s4u_Tasks| ends, it sends a token to each
+of its successors. Each |API_s4u_Tasks| has to receive a token from each of its
+predecessors to start. Tokens can carry any user-defined data.
-:ref:`Communications <API_s4u_Comm>` (started on Mailboxes and consuming links),
-:ref:`Executions <API_s4u_Exec>` (started on Host and consuming CPU resources)
-:ref:`I/O <API_s4u_Io>` (started on and consuming disks).
+|API_s4u_Tasks| are composed of several instances: a dispatcher, a collector, and
+instance_0 to instance_n. The dispatcher relies on a load balancing function to select
+the next instance to fire. Once this instance finishes, it fires the collector.
+
+Each instance of an |API_s4u_ExecTask| can be placed on a different host.
+|API_s4u_Comm| activities are automatically created when an instance triggers
+another instance on a different host. Each instance has its own parallelism degree
+to scale horizontally on several cores.
To initiate the execution of a Dataflow, it is possible to some make
|API_s4u_Tasks| fire one or more activities without waiting for any token with the
:cpp:func:`s4u::Task::enqueue_firings() <simgrid::s4u::Task::enqueue_firings>`
function.
-The parameters and successors of a Task can be redefined at runtime by attaching
+The parameters of Tasks can be redefined at runtime by attaching
callbacks to the
:cpp:func:`s4u::Task::on_this_start <simgrid::s4u::Task::on_this_start>`
and
:cpp:func:`s4u::Task::on_this_completion <simgrid::s4u::Task::on_this_completion>`
-signals.
+signals. The former is triggered by instances other than the dispatcher and the collector,
+and the latter is triggered by the collector.
+
.. _s4u_mailbox:
mc-bugged1 mc-bugged1-liveness mc-bugged2 mc-bugged2-liveness mc-centralized-mutex mc-electric-fence mc-failing-assert
network-ns3 network-ns3-wifi network-wifi
io-async io-priority io-degradation io-file-system io-file-remote io-disk-raw io-dependent
- task-io task-microservice task-parallelism task-simple task-storm task-switch-host task-variable-load
+ task-dispatch task-io task-microservice task-parallelism task-simple task-storm task-switch-host task-variable-load
solar-panel-simple
platform-comm-serialize platform-failures platform-profile platform-properties
plugin-host-load plugin-jbod plugin-link-load plugin-prodcons
--- /dev/null
+/* Copyright (c) 2017-2023. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(task_dispatch, "Messages specific for this s4u example");
+namespace sg4 = simgrid::s4u;
+
+/* Manager actor: drives the demo by repeatedly enqueueing firings on Task t and
+ * reconfiguring the Task while those firings execute. Each phase exercises one
+ * runtime-reconfiguration API: set_flops, set_parallelism_degree, set_host (on
+ * the dispatcher, instance_0 and the collector), add_instances/remove_instances.
+ * NOTE(review): the sleep durations are tuned to the firing times observed in
+ * the matching .tesh output — confirm if the platform file changes. */
+static void manager(sg4::ExecTaskPtr t)
+{
+  auto PM0 = sg4::Engine::get_instance()->host_by_name("PM0");
+  auto PM1 = sg4::Engine::get_instance()->host_by_name("PM1");
+
+  // Phase 1: change the amount of work of instance_0 while a firing is running.
+  XBT_INFO("Test set_flops");
+  t->enqueue_firings(2);
+  sg4::this_actor::sleep_for(50);
+  XBT_INFO("Set instance_0 flops to 50.");
+  t->set_flops(50 * PM0->get_speed());
+  sg4::this_actor::sleep_for(250);
+  t->set_flops(100 * PM0->get_speed());
+
+  // Phase 2: raise the parallelism degree so two firings run concurrently,
+  // then restore it to 1 (running instances are not stopped by the decrease).
+  XBT_INFO("Test set_parallelism degree");
+  t->enqueue_firings(3);
+  sg4::this_actor::sleep_for(50);
+  XBT_INFO("Set Task parallelism degree to 2.");
+  t->set_parallelism_degree(2);
+  sg4::this_actor::sleep_for(250);
+  t->set_parallelism_degree(1);
+
+  // Phase 3: move the dispatcher instance to another host; internal bytes are
+  // then sent between the dispatcher and instance_0 (hosts now differ).
+  XBT_INFO("Test set_host dispatcher");
+  t->enqueue_firings(2);
+  sg4::this_actor::sleep_for(50);
+  XBT_INFO("Move dispatcher to PM1");
+  t->set_host(PM1, "dispatcher");
+  t->set_internal_bytes(1e6, "dispatcher");
+  sg4::this_actor::sleep_for(250);
+  t->set_host(PM0, "dispatcher");
+
+  // Phase 4: move instance_0 itself, adjusting its flops to the new host speed.
+  XBT_INFO("Test set_host instance_0");
+  t->enqueue_firings(2);
+  sg4::this_actor::sleep_for(50);
+  XBT_INFO("Move instance_0 to PM1");
+  t->set_host(PM1, "instance_0");
+  t->set_flops(100 * PM1->get_speed());
+  t->set_internal_bytes(1e6, "instance_0");
+  sg4::this_actor::sleep_for(250);
+  t->set_host(PM0, "instance_0");
+  t->set_flops(100 * PM0->get_speed());
+
+  // Phase 5: move the collector instance to another host.
+  XBT_INFO("Test set_host collector");
+  t->enqueue_firings(2);
+  sg4::this_actor::sleep_for(50);
+  XBT_INFO("Move collector to PM1");
+  t->set_host(PM1, "collector");
+  sg4::this_actor::sleep_for(250);
+  t->set_host(PM0, "collector");
+
+  // Phase 6: grow to two instances and dispatch to them round-robin.
+  XBT_INFO("Test add_instances");
+  t->enqueue_firings(1);
+  sg4::this_actor::sleep_for(50);
+  XBT_INFO("Add 1 instance and update load balancing function");
+  t->add_instances(1);
+  t->set_load_balancing_function([]() {
+    static int round_robin_counter = 0;
+    int ret = round_robin_counter;
+    // Alternate between instance_0 and instance_1.
+    round_robin_counter = round_robin_counter == 1 ? 0 : round_robin_counter + 1;
+    return "instance_" + std::to_string(ret);
+  });
+  t->enqueue_firings(2);
+  sg4::this_actor::sleep_for(250);
+
+  // Phase 7: shrink back to one instance; the load balancer must no longer
+  // select the removed instance, hence the updated function.
+  XBT_INFO("Test remove_instances");
+  XBT_INFO("Remove 1 instance and update load balancing function");
+  t->remove_instances(1);
+  t->set_load_balancing_function([]() { return "instance_0"; });
+  t->enqueue_firings(2);
+  sg4::this_actor::sleep_for(300);
+}
+
+/* Entry point: loads the platform given on the command line, builds a small
+ * dataflow A -> B, registers logging callbacks on every Task start/completion,
+ * and spawns the manager actor on PM0 to reconfigure Task A at runtime.
+ * NOTE(review): CommTask C is created but never connected to the graph —
+ * presumably intentional for the example; confirm. */
+int main(int argc, char* argv[])
+{
+  sg4::Engine e(&argc, argv);
+  e.load_platform(argv[1]);
+  auto PM0 = e.host_by_name("PM0");
+  auto PM1 = sg4::Engine::get_instance()->host_by_name("PM1");
+
+  // Amounts are expressed in multiples of the host speed so firings last a
+  // predictable number of simulated seconds (100s for A, 50s for B).
+  auto a = sg4::ExecTask::init("A", 100 * PM0->get_speed(), PM0);
+  auto b = sg4::ExecTask::init("B", 50 * PM0->get_speed(), PM0);
+  auto c = sg4::CommTask::init("C", 1e6, PM1, PM0);
+
+  // B fires each time A completes (A's collector sends B a token).
+  a->add_successor(b);
+
+  // Log every Task start and completion for the tesh output.
+  sg4::Task::on_completion_cb(
+      [](const sg4::Task* t) { XBT_INFO("Task %s finished (%d)", t->get_name().c_str(), t->get_count()); });
+  sg4::Task::on_start_cb([](const sg4::Task* t) { XBT_INFO("Task %s start", t->get_name().c_str()); });
+
+  sg4::Actor::create("manager", PM0, manager, a);
+
+  e.run();
+  return 0;
+}
--- /dev/null
+#!/usr/bin/env tesh
+
+$ ${bindir:=.}/s4u-task-dispatch ${platfdir}/three_multicore_hosts.xml
+> [PM0:manager:(1) 0.000000] [task_dispatch/INFO] Test set_flops
+> [0.000000] [task_dispatch/INFO] Task A start
+> [0.000000] [task_dispatch/INFO] Task A start
+> [PM0:manager:(1) 50.000000] [task_dispatch/INFO] Set instance_0 flops to 50.
+> [100.000000] [task_dispatch/INFO] Task A finished (1)
+> [100.000000] [task_dispatch/INFO] Task B start
+> [150.000000] [task_dispatch/INFO] Task A finished (2)
+> [150.000000] [task_dispatch/INFO] Task B start
+> [150.000000] [task_dispatch/INFO] Task B finished (1)
+> [200.000000] [task_dispatch/INFO] Task B finished (2)
+> [PM0:manager:(1) 300.000000] [task_dispatch/INFO] Test set_parallelism degree
+> [300.000000] [task_dispatch/INFO] Task A start
+> [300.000000] [task_dispatch/INFO] Task A start
+> [300.000000] [task_dispatch/INFO] Task A start
+> [PM0:manager:(1) 350.000000] [task_dispatch/INFO] Set Task parallelism degree to 2.
+> [400.000000] [task_dispatch/INFO] Task A finished (3)
+> [400.000000] [task_dispatch/INFO] Task B start
+> [450.000000] [task_dispatch/INFO] Task A finished (4)
+> [450.000000] [task_dispatch/INFO] Task B start
+> [450.000000] [task_dispatch/INFO] Task B finished (3)
+> [500.000000] [task_dispatch/INFO] Task A finished (5)
+> [500.000000] [task_dispatch/INFO] Task B start
+> [500.000000] [task_dispatch/INFO] Task B finished (4)
+> [550.000000] [task_dispatch/INFO] Task B finished (5)
+> [PM0:manager:(1) 600.000000] [task_dispatch/INFO] Test set_host dispatcher
+> [600.000000] [task_dispatch/INFO] Task A start
+> [600.000000] [task_dispatch/INFO] Task A start
+> [PM0:manager:(1) 650.000000] [task_dispatch/INFO] Move dispatcher to PM1
+> [700.000000] [task_dispatch/INFO] Task A finished (6)
+> [700.000000] [task_dispatch/INFO] Task B start
+> [750.000000] [task_dispatch/INFO] Task B finished (6)
+> [800.009961] [task_dispatch/INFO] Task A finished (7)
+> [800.009961] [task_dispatch/INFO] Task B start
+> [850.009961] [task_dispatch/INFO] Task B finished (7)
+> [PM0:manager:(1) 900.000000] [task_dispatch/INFO] Test set_host instance_0
+> [900.000000] [task_dispatch/INFO] Task A start
+> [900.000000] [task_dispatch/INFO] Task A start
+> [PM0:manager:(1) 950.000000] [task_dispatch/INFO] Move instance_0 to PM1
+> [1000.000000] [task_dispatch/INFO] Task A finished (8)
+> [1000.000000] [task_dispatch/INFO] Task B start
+> [1050.000000] [task_dispatch/INFO] Task B finished (8)
+> [1100.019922] [task_dispatch/INFO] Task A finished (9)
+> [1100.019922] [task_dispatch/INFO] Task B start
+> [1150.019922] [task_dispatch/INFO] Task B finished (9)
+> [PM0:manager:(1) 1200.000000] [task_dispatch/INFO] Test set_host collector
+> [1200.000000] [task_dispatch/INFO] Task A start
+> [1200.000000] [task_dispatch/INFO] Task A start
+> [PM0:manager:(1) 1250.000000] [task_dispatch/INFO] Move collector to PM1
+> [1300.000000] [task_dispatch/INFO] Task A finished (10)
+> [1300.000000] [task_dispatch/INFO] Task B start
+> [1350.000000] [task_dispatch/INFO] Task B finished (10)
+> [1400.009961] [task_dispatch/INFO] Task A finished (11)
+> [1400.009961] [task_dispatch/INFO] Task B start
+> [1450.009961] [task_dispatch/INFO] Task B finished (11)
+> [PM0:manager:(1) 1500.000000] [task_dispatch/INFO] Test add_instances
+> [1500.000000] [task_dispatch/INFO] Task A start
+> [PM0:manager:(1) 1550.000000] [task_dispatch/INFO] Add 1 instance and update load balancing function
+> [1550.000000] [task_dispatch/INFO] Task A start
+> [1550.000000] [task_dispatch/INFO] Task A start
+> [1600.000000] [task_dispatch/INFO] Task A finished (12)
+> [1600.000000] [task_dispatch/INFO] Task B start
+> [1650.000000] [task_dispatch/INFO] Task A finished (13)
+> [1650.000000] [task_dispatch/INFO] Task B start
+> [1650.000000] [task_dispatch/INFO] Task B finished (12)
+> [1700.000000] [task_dispatch/INFO] Task A finished (14)
+> [1700.000000] [task_dispatch/INFO] Task B start
+> [1700.000000] [task_dispatch/INFO] Task B finished (13)
+> [1750.000000] [task_dispatch/INFO] Task B finished (14)
+> [PM0:manager:(1) 1800.000000] [task_dispatch/INFO] Test remove_instances
+> [PM0:manager:(1) 1800.000000] [task_dispatch/INFO] Remove 1 instance and update load balancing function
+> [1800.000000] [task_dispatch/INFO] Task A start
+> [1800.000000] [task_dispatch/INFO] Task A start
+> [1900.000000] [task_dispatch/INFO] Task A finished (15)
+> [1900.000000] [task_dispatch/INFO] Task B start
+> [1950.000000] [task_dispatch/INFO] Task B finished (15)
+> [2000.000000] [task_dispatch/INFO] Task A finished (16)
+> [2000.000000] [task_dispatch/INFO] Task B start
+> [2050.000000] [task_dispatch/INFO] Task B finished (16)
+>
\ No newline at end of file
$ ${bindir:=.}/s4u-task-parallelism ${platfdir}/three_multicore_hosts.xml
> [0.000000] [task_parallelism/INFO] Task exec_A start
-> [100.000000] [task_parallelism/INFO] Task exec_A finished (1)
> [100.000000] [task_parallelism/INFO] Task exec_A start
+> [100.000000] [task_parallelism/INFO] Task exec_A finished (1)
> [200.000000] [task_parallelism/INFO] Task exec_A finished (2)
> [300.000000] [task_parallelism/INFO] Task exec_A start
> [300.000000] [task_parallelism/INFO] Task exec_A start
-> [400.000000] [task_parallelism/INFO] Task exec_A finished (3)
> [400.000000] [task_parallelism/INFO] Task exec_A start
-> [400.000000] [task_parallelism/INFO] Task exec_A finished (4)
> [400.000000] [task_parallelism/INFO] Task exec_A start
+> [400.000000] [task_parallelism/INFO] Task exec_A finished (3)
+> [400.000000] [task_parallelism/INFO] Task exec_A finished (4)
> [500.000000] [task_parallelism/INFO] Task exec_A finished (5)
> [500.000000] [task_parallelism/INFO] Task exec_A finished (6)
> [600.000000] [task_parallelism/INFO] Task exec_A start
-> [700.000000] [task_parallelism/INFO] Task exec_A finished (7)
> [700.000000] [task_parallelism/INFO] Task exec_A start
+> [700.000000] [task_parallelism/INFO] Task exec_A finished (7)
> [800.000000] [task_parallelism/INFO] Task exec_A finished (8)
> [900.000000] [task_parallelism/INFO] Task exec_A start
> [900.000000] [task_parallelism/INFO] Task exec_A start
-> [1000.000000] [task_parallelism/INFO] Task exec_A finished (9)
> [1000.000000] [task_parallelism/INFO] Task exec_A start
-> [1000.000000] [task_parallelism/INFO] Task exec_A finished (10)
> [1000.000000] [task_parallelism/INFO] Task exec_A start
+> [1000.000000] [task_parallelism/INFO] Task exec_A finished (9)
+> [1000.000000] [task_parallelism/INFO] Task exec_A finished (10)
+> [1100.000000] [task_parallelism/INFO] Task exec_A start
> [1100.000000] [task_parallelism/INFO] Task exec_A finished (11)
> [1100.000000] [task_parallelism/INFO] Task exec_A finished (12)
-> [1100.000000] [task_parallelism/INFO] Task exec_A start
-> [1200.000000] [task_parallelism/INFO] Task exec_A finished (13)
> [1200.000000] [task_parallelism/INFO] Task exec_A start
+> [1200.000000] [task_parallelism/INFO] Task exec_A finished (13)
> [1250.000000] [task_parallelism/INFO] Task exec_A start
> [1250.000000] [task_parallelism/INFO] Task exec_A start
-> [1300.000000] [task_parallelism/INFO] Task exec_A finished (14)
> [1300.000000] [task_parallelism/INFO] Task exec_A start
-> [1350.000000] [task_parallelism/INFO] Task exec_A finished (15)
+> [1300.000000] [task_parallelism/INFO] Task exec_A finished (14)
> [1350.000000] [task_parallelism/INFO] Task exec_A start
-> [1350.000000] [task_parallelism/INFO] Task exec_A finished (16)
> [1350.000000] [task_parallelism/INFO] Task exec_A start
+> [1350.000000] [task_parallelism/INFO] Task exec_A finished (15)
+> [1350.000000] [task_parallelism/INFO] Task exec_A finished (16)
> [1400.000000] [task_parallelism/INFO] Task exec_A finished (17)
> [1450.000000] [task_parallelism/INFO] Task exec_A finished (18)
> [1450.000000] [task_parallelism/INFO] Task exec_A finished (19)
\ No newline at end of file
Alternatively we: remove/add the link between SA and SA_to_B2
add/remove the link between SA and SA_to_B1
*/
- SA->on_this_start_cb([SA_to_B1, SA_to_B2](sg4::Task* t) {
+ SA->on_this_completion_cb([&SA_to_B1, &SA_to_B2](sg4::Task* t) {
int count = t->get_count();
sg4::CommTaskPtr comm;
- if (count % 2 == 0) {
+ if (count % 2 == 1) {
t->remove_successor(SA_to_B2);
t->add_successor(SA_to_B1);
comm = SA_to_B1;
t->add_successor(SA_to_B2);
comm = SA_to_B2;
}
- std::vector<double> amount = {1e3, 1e6, 1e9};
+ std::vector<double> amount = {1e9, 1e3, 1e6};
+ // XBT_INFO("Comm %f", amount[count % 3]);
comm->set_amount(amount[count % 3]);
auto token = std::make_shared<sg4::Token>();
token->set_data(new double(amount[count % 3]));
});
// The token sent by SA is forwarded by both communication tasks
- SA_to_B1->on_this_start_cb([&SA](sg4::Task* t) { t->set_token(t->get_next_token_from(SA)); });
- SA_to_B2->on_this_start_cb([&SA](sg4::Task* t) { t->set_token(t->get_next_token_from(SA)); });
+ SA_to_B1->on_this_completion_cb([&SA](sg4::Task* t) {
+ t->set_token(t->get_token_from(SA));
+ t->deque_token_from(SA);
+ });
+ SA_to_B2->on_this_completion_cb([&SA](sg4::Task* t) {
+ t->set_token(t->get_token_from(SA));
+ t->deque_token_from(SA);
+ });
/* B1 and B2 read the value of the token received by their predecessors
and use it to adapt their amount of work to do.
*/
- B1->on_this_start_cb([SA_to_B1](sg4::Task* t) {
- auto data = t->get_next_token_from(SA_to_B1)->get_unique_data<double>();
+ B1->on_this_start_cb([&SA_to_B1](sg4::Task* t) {
+ auto data = t->get_token_from(SA_to_B1)->get_data<double>();
+ t->deque_token_from(SA_to_B1);
t->set_amount(*data * 10);
});
- B2->on_this_start_cb([SA_to_B2](sg4::Task* t) {
- auto data = t->get_next_token_from(SA_to_B2)->get_unique_data<double>();
+ B2->on_this_start_cb([&SA_to_B2](sg4::Task* t) {
+ auto data = t->get_token_from(SA_to_B2)->get_data<double>();
+ t->deque_token_from(SA_to_B2);
t->set_amount(*data * 10);
});
> [1.798442] [task_storm/INFO] Task SB_to_B3 finished (5)
> [2.619232] [task_storm/INFO] Task B3 finished (1)
> [6.743624] [task_storm/INFO] Task B3 finished (2)
-> [10.868015] [task_storm/INFO] Task B3 finished (3)
> [10.868015] [task_storm/INFO] Task B4 finished (1)
+> [10.868015] [task_storm/INFO] Task B3 finished (3)
> [14.992407] [task_storm/INFO] Task B3 finished (4)
-> [19.116799] [task_storm/INFO] Task B3 finished (5)
> [19.116799] [task_storm/INFO] Task B4 finished (2)
+> [19.116799] [task_storm/INFO] Task B3 finished (5)
> [23.241190] [task_storm/INFO] Task B4 finished (3)
> [27.365582] [task_storm/INFO] Task B4 finished (4)
> [31.489974] [task_storm/INFO] Task B4 finished (5)
// successors to comm0
comm0->on_this_start_cb([&comm0, exec1, exec2, jupiter, fafard](const sg4::Task*) {
static int count = 0;
- if (count % 2 == 0) {
+ if (count % 2 == 0)
comm0->set_destination(jupiter);
+ else
+ comm0->set_destination(fafard);
+ count++;
+ });
+
+ comm0->on_this_completion_cb([&comm0, exec1, exec2](const sg4::Task*) {
+ static int count = 0;
+ if (count % 2 == 0) {
comm0->add_successor(exec1);
comm0->remove_successor(exec2);
} else {
- comm0->set_destination(fafard);
comm0->add_successor(exec2);
comm0->remove_successor(exec1);
}
return parser.parse_args()
def callback(t):
- print(f'[{Engine.clock}] {t} finished ({t.count})')
+ print(f'[{Engine.clock}] {t} finished ({t.get_count()})')
if __name__ == '__main__':
args = parse()
return parser.parse_args()
def callback(t):
- print(f'[{Engine.clock}] {t} finished ({t.count})')
+ print(f'[{Engine.clock}] {t} finished ({t.get_count()})')
if __name__ == '__main__':
args = parse()
> [11.714617112501687] CommTask(comm) finished (1)
> [20.388399000968448] ExecTask(exec1) finished (2)
> [21.90881661298591] CommTask(comm) finished (2)
-> [24.82146412938331] ExecTask(exec2) finished (1)
-> [37.92831114626493] ExecTask(exec2) finished (2)
+> [24.821464129383305] ExecTask(exec2) finished (1)
+> [37.928311146264925] ExecTask(exec2) finished (2)
return parser.parse_args()
def callback(t):
- print(f'[{Engine.clock}] {t} finished ({t.count})')
+ print(f'[{Engine.clock}] {t} finished ({t.get_count()})')
-def switch(t, hosts, execs):
- comm0.destination = hosts[t.count % 2]
- comm0.remove_successor(execs[t.count % 2 - 1])
- comm0.add_successor(execs[t.count % 2])
+def switch_destination(t, hosts):
+ t.destination = hosts[switch_destination.count % 2]
+ switch_destination.count += 1
+switch_destination.count = 0
+
+def switch_successor(t, execs):
+ t.remove_successor(execs[t.get_count() % 2])
+ t.add_successor(execs[t.get_count() % 2 - 1])
if __name__ == '__main__':
args = parse()
exec1.add_successor(comm1)
exec2.add_successor(comm2)
- # Add a function to be called when tasks end for log purpose
+ # Add a callback when tasks end for log purpose
Task.on_completion_cb(callback)
- # Add a function to be called before each firing of comm0
- # This function modifies the graph of tasks by adding or removing
- # successors to comm0
- comm0.on_this_start_cb(lambda t: switch(t, [jupiter, fafard], [exec1,exec2]))
+ # Add a callback before each firing of comm0
+ # It switches the destination of comm0
+ comm0.on_this_start_cb(lambda t: switch_destination(t, [jupiter, fafard]))
+
+ # Add a callback before comm0 send tokens to successors
+ # It switches the successor of comm0
+ comm0.on_this_completion_cb(lambda t: switch_successor(t, [exec1,exec2]))
# Enqueue two firings for task exec1
comm0.enqueue_firings(4)
return parser.parse_args()
def callback(t):
- print(f'[{Engine.clock}] {t} finished ({t.count})')
+ print(f'[{Engine.clock}] {t} finished ({t.get_count()})')
def variable_load(t):
print('--- Small load ---')
#include <map>
#include <memory>
#include <set>
+#include <xbt/asserts.h>
namespace simgrid::s4u {
class XBT_PUBLIC Token : public xbt::Extendable<Token> {};
class Task {
+
std::string name_;
- double amount_;
- int queued_firings_ = 0;
- int count_ = 0;
- int running_instances_ = 0;
- int parallelism_degree_ = 1;
+
+ std::map<std::string, double> amount_ = {{"instance_0", 0}, {"dispatcher", 0}, {"collector", 0}};
+ std::map<std::string, int> queued_firings_ = {{"instance_0", 0}, {"dispatcher", 0}, {"collector", 0}};
+ std::map<std::string, int> running_instances_ = {{"instance_0", 0}, {"dispatcher", 0}, {"collector", 0}};
+ std::map<std::string, int> count_ = {{"instance_0", 0}, {"dispatcher", 0}, {"collector", 0}};
+ std::map<std::string, int> parallelism_degree_ = {{"instance_0", 1}, {"dispatcher", 1}, {"collector", 1}};
+ std::map<std::string, int> internal_bytes_to_send_ = {{"instance_0", 0}, {"dispatcher", 0}};
+
+ std::function<std::string()> load_balancing_function_;
std::set<Task*> successors_ = {};
std::map<Task*, unsigned int> predecessors_ = {};
std::atomic_int_fast32_t refcount_{0};
- bool ready_to_run() const;
+ bool ready_to_run(std::string instance);
void receive(Task* source);
std::shared_ptr<Token> token_ = nullptr;
- std::deque<std::map<TaskPtr, std::shared_ptr<Token>>> tokens_received_;
- std::deque<ActivityPtr> current_activities_;
+ std::map<TaskPtr, std::deque<std::shared_ptr<Token>>> tokens_received_;
+ std::map<std::string, std::deque<ActivityPtr>> current_activities_ = {
+ {"instance_0", {}}, {"dispatcher", {}}, {"collector", {}}};
inline static xbt::signal<void(Task*)> on_start;
xbt::signal<void(Task*)> on_this_start;
explicit Task(const std::string& name);
virtual ~Task() = default;
- virtual void fire();
- void complete();
+ virtual void fire(std::string instance);
+ void complete(std::string instance);
- void store_activity(ActivityPtr a) { current_activities_.push_back(a); }
+ void store_activity(ActivityPtr a, std::string instance) { current_activities_[instance].push_back(a); }
+
+ virtual void add_instances(int n);
+ virtual void remove_instances(int n);
public:
void set_name(std::string name);
const std::string& get_name() const { return name_; }
const char* get_cname() const { return name_.c_str(); }
- void set_amount(double amount);
- double get_amount() const { return amount_; }
- int get_count() const { return count_; }
- void set_parallelism_degree(int n);
- int get_parallelism_degree() const { return parallelism_degree_; }
+ void set_amount(double amount, std::string instance = "instance_0");
+ double get_amount(std::string instance = "instance_0") const { return amount_.at(instance); }
+ int get_queued_firings(std::string instance = "instance_0") { return queued_firings_.at(instance); }
+ int get_running_count(std::string instance = "instance_0") { return running_instances_.at(instance); }
+ int get_count(std::string instance = "collector") const { return count_.at(instance); }
+ void set_parallelism_degree(int n, std::string instance = "all");
+ int get_parallelism_degree(std::string instance = "instance_0") const { return parallelism_degree_.at(instance); }
+ void set_internal_bytes(int bytes, std::string instance = "instance_0");
+ double get_internal_bytes(std::string instance = "instance_0") const { return internal_bytes_to_send_.at(instance); }
+ void set_load_balancing_function(std::function<std::string()> func);
void set_token(std::shared_ptr<Token> token);
- std::shared_ptr<Token> get_next_token_from(TaskPtr t);
+ std::shared_ptr<Token> get_token_from(TaskPtr t) const { return tokens_received_.at(t).front(); }
+ std::deque<std::shared_ptr<Token>> get_tokens_from(TaskPtr t) const { return tokens_received_.at(t); }
+ void deque_token_from(TaskPtr t);
void add_successor(TaskPtr t);
void remove_successor(TaskPtr t);
Host* destination_;
explicit CommTask(const std::string& name);
- void fire() override;
+ void fire(std::string instance) override;
public:
static CommTaskPtr init(const std::string& name);
CommTaskPtr set_destination(Host* destination);
Host* get_destination() const { return destination_; }
CommTaskPtr set_bytes(double bytes);
- double get_bytes() const { return get_amount(); }
+ double get_bytes() const { return get_amount("instance_0"); }
};
class ExecTask : public Task {
- Host* host_;
+ std::map<std::string, Host*> host_ = {{"instance_0", nullptr}, {"dispatcher", nullptr}, {"collector", nullptr}};
explicit ExecTask(const std::string& name);
- void fire() override;
+ void fire(std::string instance) override;
public:
static ExecTaskPtr init(const std::string& name);
static ExecTaskPtr init(const std::string& name, double flops, Host* host);
- ExecTaskPtr set_host(Host* host);
- Host* get_host() const { return host_; }
- ExecTaskPtr set_flops(double flops);
- double get_flops() const { return get_amount(); }
+ ExecTaskPtr set_host(Host* host, std::string instance = "all");
+ Host* get_host(std::string instance = "instance_0") const { return host_.at(instance); }
+ ExecTaskPtr set_flops(double flops, std::string instance = "instance_0");
+ double get_flops(std::string instance = "instance_0") const { return get_amount(instance); }
+
+ void add_instances(int n) override;
+ void remove_instances(int n) override;
};
class IoTask : public Task {
Disk* disk_;
Io::OpType type_;
explicit IoTask(const std::string& name);
- void fire() override;
+ void fire(std::string instance) override;
public:
static IoTaskPtr init(const std::string& name);
IoTaskPtr set_disk(Disk* disk);
Disk* get_disk() const { return disk_; }
IoTaskPtr set_bytes(double bytes);
- double get_bytes() const { return get_amount(); }
+ double get_bytes() const { return get_amount("instance_0"); }
IoTaskPtr set_op_type(Io::OpType type);
Io::OpType get_op_type() const { return type_; }
};
},
"Add a callback called when each task ends.")
.def_property_readonly("name", &Task::get_name, "The name of this task (read-only).")
- .def_property_readonly("count", &Task::get_count, "The execution count of this task (read-only).")
.def_property_readonly("successors", &Task::get_successors, "The successors of this task (read-only).")
.def_property("amount", &Task::get_amount, &Task::set_amount, "The amount of work to do for this task.")
+ .def(
+ "get_count", [](const TaskPtr t) { return t->get_count("instance_0"); },
+ "The execution count of this task instance_0.")
+ .def(
+ "get_count", [](const TaskPtr t, const std::string& instance) { return t->get_count(instance); },
+ "The execution count of this task instance.")
.def("enqueue_firings", py::overload_cast<int>(&Task::enqueue_firings), py::call_guard<py::gil_scoped_release>(),
py::arg("n"), "Enqueue firings for this task.")
.def("add_successor", py::overload_cast<TaskPtr>(&Task::add_successor), py::call_guard<py::gil_scoped_release>(),
+#include <cstddef>
#include <memory>
#include <simgrid/Exception.hpp>
+#include <simgrid/s4u/Activity.hpp>
#include <simgrid/s4u/Comm.hpp>
+#include <simgrid/s4u/Disk.hpp>
#include <simgrid/s4u/Exec.hpp>
#include <simgrid/s4u/Io.hpp>
#include <simgrid/s4u/Task.hpp>
#include <simgrid/simix.hpp>
+#include <string>
+#include <xbt/asserts.h>
#include "src/simgrid/module.hpp"
SIMGRID_REGISTER_PLUGIN(task, "Battery management", nullptr)
-/**
- @beginrst
-
-
-Tasks are designed to represent dataflows, i.e, graphs of Tasks.
-Tasks can only be instancied using either
-:cpp:func:`simgrid::s4u::ExecTask::init` or :cpp:func:`simgrid::s4u::CommTask::init`
-An ExecTask is an Execution Task. Its underlying Activity is an :ref:`Exec <API_s4u_Exec>`.
-A CommTask is a Communication Task. Its underlying Activity is a :ref:`Comm <API_s4u_Comm>`.
-
-
-
- @endrst
- */
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(Task, kernel, "Logging specific to the task plugin");
namespace simgrid::s4u {
Task::Task(const std::string& name) : name_(name) {}
-/**
- * @brief Return True if the Task can start a new Activity.
- * @note The Task is ready if not already doing something and there is at least one execution waiting in queue.
+/** @param instance The Task instance to check.
+ * @brief Return True if this Task instance can start.
*/
-bool Task::ready_to_run() const
+bool Task::ready_to_run(std::string instance)
{
- return running_instances_ < parallelism_degree_ && queued_firings_ > 0;
+ return running_instances_[instance] < parallelism_degree_[instance] && queued_firings_[instance] > 0;
}
-/**
- * @param source The sender.
+/** @param source The sender.
* @brief Receive a token from another Task.
* @note Check upon reception if the Task has received a token from each of its predecessors,
* and in this case consumes those tokens and enqueue an execution.
void Task::receive(Task* source)
{
XBT_DEBUG("Task %s received a token from %s", name_.c_str(), source->name_.c_str());
- auto source_count = predecessors_[source];
predecessors_[source]++;
- if (tokens_received_.size() <= queued_firings_ + source_count)
- tokens_received_.emplace_back();
- tokens_received_[queued_firings_ + source_count][source] = source->token_;
- bool enough_tokens = true;
+ if (source->token_ != nullptr)
+ tokens_received_[source].push_back(source->token_);
+ bool enough_tokens = true;
for (auto const& [key, val] : predecessors_)
if (val < 1) {
enough_tokens = false;
}
}
-/**
- * @brief Task routine when finishing an execution.
- * @note Set its working status as false.
- * Add 1 to its count of finished executions.
- * Call the on_this_end func.
- * Fire on_end callback.
- * Send a token to each of its successors.
- * Start a new execution if possible.
+/** @param instance The Task instance to complete.
+ * @brief Task instance routine when finishing an execution of an instance.
+ * @note The dispatcher instance enqueues a firing for the next instance.
+ * The collector instance triggers the on_completion signals and sends tokens to successors.
+ * Other instances enqueue a firing of the collector instance.
*/
-void Task::complete()
+void Task::complete(std::string instance)
{
xbt_assert(Actor::is_maestro());
- running_instances_--;
- count_++;
- on_this_completion(this);
- on_completion(this);
- for (auto const& t : successors_)
- t->receive(this);
- if (ready_to_run())
- fire();
+ running_instances_[instance]--;
+ count_[instance]++;
+ if (instance == "collector") {
+ on_this_completion(this);
+ on_completion(this);
+ for (auto const& t : successors_)
+ t->receive(this);
+ } else if (instance == "dispatcher") {
+ auto next_instance = load_balancing_function_();
+ xbt_assert(next_instance != "dispatcher" and next_instance != "collector", "Invalid instance selected: %s",
+ next_instance.c_str());
+ queued_firings_[next_instance] = queued_firings_.at(next_instance) + 1;
+ while (ready_to_run(next_instance))
+ fire(next_instance);
+ } else {
+ queued_firings_["collector"]++;
+ while (ready_to_run("collector"))
+ fire("collector");
+ }
+ if (ready_to_run(instance))
+ fire(instance);
}
-/** @param n The new parallelism degree of the Task.
- * @brief Set the parallelism degree of the Task to inscrease or decrease horizontal scaling.
- * @note When increasing the degree the function starts new instances if there is queued firings.
- * When decreasing the degree the function does NOT stop running instances.
-
+/** @param n The new parallelism degree of the Task instance.
+ * @param instance The Task instance to modify.
+ * @note You can use instance "all" to modify the parallelism degree of all instances of this Task.
+ * When increasing the degree, new executions are started if there are queued firings.
+ * When decreasing the degree, instances already running are NOT stopped.
*/
-void Task::set_parallelism_degree(int n)
+void Task::set_parallelism_degree(int n, std::string instance)
{
- xbt_assert(n > 0, "Parallelism degree of Tasks must be above 0.");
- simgrid::kernel::actor::simcall_answered([this, n] {
- parallelism_degree_ = n;
- while (ready_to_run())
- fire();
+ xbt_assert(n > 0, "Parallelism degree must be above 0.");
+ simgrid::kernel::actor::simcall_answered([this, n, &instance] {
+ if (instance == "all") {
+ for (auto& [key, value] : parallelism_degree_) {
+ parallelism_degree_[key] = n;
+ while (ready_to_run(key))
+ fire(key);
+ }
+ } else {
+ parallelism_degree_[instance] = n;
+ while (ready_to_run(instance))
+ fire(instance);
+ }
});
}
+/** @param bytes The internal bytes of the Task instance.
+ * @param instance The Task instance to modify.
+ * @note Internal bytes are used for Comms between the dispatcher and instance_n,
+ * and between instance_n and the collector if they are not on the same host.
+ */
+void Task::set_internal_bytes(int bytes, std::string instance)
+{
+ simgrid::kernel::actor::simcall_answered([this, bytes, &instance] { internal_bytes_to_send_[instance] = bytes; });
+}
+
+/** @param func The load balancing function.
+ * @note The dispatcher uses this function to determine which instance to trigger next.
+ */
+void Task::set_load_balancing_function(std::function<std::string()> func)
+{
+ simgrid::kernel::actor::simcall_answered([this, func] { load_balancing_function_ = func; });
+}
+
/** @param n The number of firings to enqueue.
- * @brief Enqueue firing.
- * @note Immediatly fire an activity if possible.
*/
void Task::enqueue_firings(int n)
{
simgrid::kernel::actor::simcall_answered([this, n] {
- queued_firings_ += n;
- while (ready_to_run())
- fire();
+ queued_firings_["dispatcher"] += n;
+ while (ready_to_run("dispatcher"))
+ fire("dispatcher");
});
}
}
/** @param amount The amount to set.
- * @brief Set the amout of work to do.
+ * @param instance The Task instance to modify.
* @note Amount in flop for ExecTask and in bytes for CommTask.
*/
-void Task::set_amount(double amount)
+void Task::set_amount(double amount, std::string instance)
{
- simgrid::kernel::actor::simcall_answered([this, amount] { amount_ = amount; });
+ simgrid::kernel::actor::simcall_answered([this, amount, &instance] { amount_[instance] = amount; });
}
/** @param token The token to set.
* @brief Set the token to send to successors.
- * @note The token is passed to each successor after the task end, i.e., after the on_end callback.
+ * @note The token is passed to each successor after the Task instance collector end, i.e., after the on_completion
+ * callback.
*/
void Task::set_token(std::shared_ptr<Token> token)
{
simgrid::kernel::actor::simcall_answered([this, token] { token_ = token; });
}
-/** @return Map of tokens received for the next execution.
- * @note If there is no queued execution for this task the map might not exist or be partially empty.
+/** @param t The Task to dequeue a token from.
*/
-std::shared_ptr<Token> Task::get_next_token_from(TaskPtr t)
+void Task::deque_token_from(TaskPtr t)
{
- return tokens_received_.front()[t];
+ simgrid::kernel::actor::simcall_answered([this, &t] { tokens_received_[t].pop_front(); });
}
-void Task::fire()
+void Task::fire(std::string instance)
{
- if ((int)current_activities_.size() > parallelism_degree_) {
- current_activities_.pop_front();
+ if ((int)current_activities_[instance].size() > parallelism_degree_[instance]) {
+ current_activities_[instance].pop_front();
+ }
+ if (instance != "dispatcher" and instance != "collector") {
+ on_this_start(this);
+ on_start(this);
}
- on_this_start(this);
- on_start(this);
- running_instances_++;
- queued_firings_ = std::max(queued_firings_ - 1, 0);
- if (not tokens_received_.empty())
- tokens_received_.pop_front();
+ running_instances_[instance]++;
+ queued_firings_[instance] = std::max(queued_firings_[instance] - 1, 0);
}
-/** @param successor The Task to add.
- * @brief Add a successor to this Task.
+/** @param successor The Task to add as a successor.
* @note It also adds this as a predecessor of successor.
*/
void Task::add_successor(TaskPtr successor)
});
}
-/** @param successor The Task to remove.
- * @brief Remove a successor from this Task.
+/** @param successor The Task to remove from the successors of this Task.
* @note It also remove this from the predecessors of successor.
*/
void Task::remove_successor(TaskPtr successor)
});
}
+/** @brief Remove all successors from this Task.
+ */
void Task::remove_all_successors()
{
simgrid::kernel::actor::simcall_answered([this] {
});
}
+/** @param n The number of instances to add to this Task (>=0).
+ * @note Instance names always go from instance_0 to instance_x,
+ *       where x is the current number of instances.
+ */
+void Task::add_instances(int n)
+{
+ xbt_assert(n >= 0, "Cannot add a negative number of instances (provided: %d)", n);
+ int instance_count = (int)amount_.size() - 2;
+ for (int i = instance_count; i < n + instance_count; i++) {
+ amount_["instance_" + std::to_string(i)] = amount_.at("instance_0");
+ queued_firings_["instance_" + std::to_string(i)] = 0;
+ running_instances_["instance_" + std::to_string(i)] = 0;
+ count_["instance_" + std::to_string(i)] = 0;
+ parallelism_degree_["instance_" + std::to_string(i)] = parallelism_degree_.at("instance_0");
+ current_activities_["instance_" + std::to_string(i)] = {};
+ internal_bytes_to_send_["instance_" + std::to_string(i)] = internal_bytes_to_send_.at("instance_0");
+ ;
+ }
+}
+
+/** @param n The number of instances to remove from this Task (>=0).
+ * @note Instance names always go from instance_0 to instance_x,
+ *       where x is the current number of instances.
+ *       Running instances cannot be removed.
+ */
+void Task::remove_instances(int n)
+{
+ int instance_count = (int)amount_.size() - 2;
+ xbt_assert(n >= 0, "Cannot remove a negative number of instances (provided: %d)", n);
+ xbt_assert(instance_count - n > 0, "The number of instances must be above 0 (instances: %d, provided: %d)",
+ instance_count, n);
+ for (int i = instance_count - 1; i >= instance_count - n; i--) {
+ xbt_assert(running_instances_.at("instance_" + std::to_string(i)) == 0,
+ "Cannot remove a running instance (instances: %d)", i);
+ amount_.erase("instance_" + std::to_string(i));
+ queued_firings_.erase("instance_" + std::to_string(i));
+ running_instances_.erase("instance_" + std::to_string(i));
+ count_.erase("instance_" + std::to_string(i));
+ parallelism_degree_.erase("instance_" + std::to_string(i));
+ current_activities_.erase("instance_" + std::to_string(i));
+ }
+}
+
/**
* @brief Default constructor.
*/
-ExecTask::ExecTask(const std::string& name) : Task(name) {}
+ExecTask::ExecTask(const std::string& name) : Task(name)
+{
+ set_load_balancing_function([]() { return "instance_0"; });
+}
-/** @ingroup plugin_task
+/**
* @brief Smart Constructor.
*/
ExecTaskPtr ExecTask::init(const std::string& name)
return ExecTaskPtr(new ExecTask(name));
}
-/** @ingroup plugin_task
+/**
* @brief Smart Constructor.
*/
ExecTaskPtr ExecTask::init(const std::string& name, double flops, Host* host)
return init(name)->set_flops(flops)->set_host(host);
}
-/**
- * @brief Do one execution of the Task.
- * @note Call the on_this_start() func.
- * Init and start the underlying Activity.
+/** @param instance The Task instance to fire.
+ * @note Only regular instances (instance_n) trigger the on_start signal; the dispatcher and collector do not.
+ * Comms are created if hosts differ between dispatcher and the instance to fire,
+ * or between the instance and the collector.
*/
-void ExecTask::fire()
+void ExecTask::fire(std::string instance)
{
- Task::fire();
- auto exec = Exec::init()->set_name(get_name())->set_flops_amount(get_amount())->set_host(host_);
- exec->start();
- exec->on_this_completion_cb([this](Exec const&) { complete(); });
- store_activity(exec);
+ Task::fire(instance);
+ if (instance == "dispatcher" or instance == "collector") {
+ auto exec = Exec::init()
+ ->set_name(get_name() + "_" + instance)
+ ->set_flops_amount(get_amount(instance))
+ ->set_host(host_[instance]);
+ exec->start();
+ exec->on_this_completion_cb([this, instance](Exec const&) { complete(instance); });
+ store_activity(exec, instance);
+ } else {
+ auto exec = Exec::init()->set_name(get_name())->set_flops_amount(get_amount())->set_host(host_[instance]);
+ if (host_["dispatcher"] == host_[instance]) {
+ exec->start();
+ store_activity(exec, instance);
+ } else {
+ auto comm = Comm::sendto_init(host_["dispatcher"], host_[instance])
+ ->set_name(get_name() + "_dispatcher_to_" + instance)
+ ->set_payload_size(get_internal_bytes("dispatcher"));
+ comm->add_successor(exec);
+ comm->start();
+ store_activity(comm, instance);
+ }
+ if (host_[instance] == host_["collector"]) {
+ exec->on_this_completion_cb([this, instance](Exec const&) { complete(instance); });
+ if (host_["dispatcher"] != host_[instance])
+ store_activity(exec, instance);
+ } else {
+ auto comm = Comm::sendto_init(host_[instance], host_["collector"])
+ ->set_name(get_name() + instance + "_to_collector")
+ ->set_payload_size(get_internal_bytes(instance));
+ exec->add_successor(comm);
+ comm->on_this_completion_cb([this, instance](Comm const&) { complete(instance); });
+ comm.detach();
+ }
+ }
}
-/** @ingroup plugin_task
- * @param host The host to set.
+/** @param host The host to set.
+ * @param instance The Task instance to modify.
* @brief Set a new host.
*/
-ExecTaskPtr ExecTask::set_host(Host* host)
+ExecTaskPtr ExecTask::set_host(Host* host, std::string instance)
{
- kernel::actor::simcall_answered([this, host] { host_ = host; });
+ kernel::actor::simcall_answered([this, host, &instance] {
+ if (instance == "all")
+ for (auto& [key, value] : host_)
+ host_[key] = host;
+ else
+ host_[instance] = host;
+ });
return this;
}
-/** @ingroup plugin_task
- * @param flops The amount of flops to set.
+/** @param flops The amount of flops to set.
+ * @param instance The Task instance to modify.
*/
-ExecTaskPtr ExecTask::set_flops(double flops)
+ExecTaskPtr ExecTask::set_flops(double flops, std::string instance)
{
- kernel::actor::simcall_answered([this, flops] { set_amount(flops); });
+ kernel::actor::simcall_answered([this, flops, &instance] { set_amount(flops, instance); });
return this;
}
+/** @param n The number of instances to add to this Task (>=0).
+ * @note Instance names always go from instance_0 to instance_x,
+ *       where x is the current number of instances.
+ */
+void ExecTask::add_instances(int n)
+{
+ Task::add_instances(n);
+ int instance_count = (int)host_.size() - 2;
+ for (int i = instance_count; i < n + instance_count; i++)
+ host_["instance_" + std::to_string(i)] = host_.at("instance_0");
+}
+
+/** @param n The number of instances to remove from this Task (>=0).
+ * @note Instance names always go from instance_0 to instance_x,
+ *       where x is the current number of instances.
+ *       Running instances cannot be removed.
+ */
+void ExecTask::remove_instances(int n)
+{
+ Task::remove_instances(n);
+ int instance_count = (int)host_.size() - 2;
+ for (int i = instance_count - 1; i >= instance_count - n; i--)
+ host_.erase("instance_" + std::to_string(i));
+}
+
/**
* @brief Default constructor.
*/
-CommTask::CommTask(const std::string& name) : Task(name) {}
+CommTask::CommTask(const std::string& name) : Task(name)
+{
+ set_load_balancing_function([]() { return "instance_0"; });
+}
-/** @ingroup plugin_task
+/**
* @brief Smart constructor.
*/
CommTaskPtr CommTask::init(const std::string& name)
return CommTaskPtr(new CommTask(name));
}
-/** @ingroup plugin_task
+/**
* @brief Smart constructor.
*/
CommTaskPtr CommTask::init(const std::string& name, double bytes, Host* source, Host* destination)
return init(name)->set_bytes(bytes)->set_source(source)->set_destination(destination);
}
-/**
- * @brief Do one execution of the Task.
- * @note Call the on_this_start() func.
- * Init and start the underlying Activity.
+/** @param instance The Task instance to fire.
+ * @note Only regular instances (instance_n) trigger the on_start signal; the dispatcher and collector do not.
*/
-void CommTask::fire()
+void CommTask::fire(std::string instance)
{
- Task::fire();
- auto comm = Comm::sendto_init(source_, destination_)->set_name(get_name())->set_payload_size(get_amount());
- comm->start();
- comm->on_this_completion_cb([this](Comm const&) { complete(); });
- store_activity(comm);
+ Task::fire(instance);
+ if (instance == "dispatcher" or instance == "collector") {
+ auto exec = Exec::init()
+ ->set_name(get_name() + "_" + instance)
+ ->set_flops_amount(get_amount(instance))
+ ->set_host(instance == "dispatcher" ? source_ : destination_);
+ exec->start();
+ exec->on_this_completion_cb([this, instance](Exec const&) { complete(instance); });
+ store_activity(exec, instance);
+ } else {
+ auto comm = Comm::sendto_init(source_, destination_)->set_name(get_name())->set_payload_size(get_amount());
+ comm->start();
+ comm->on_this_completion_cb([this, instance](Comm const&) { complete(instance); });
+ store_activity(comm, instance);
+ }
}
-/** @ingroup plugin_task
+/**
* @param source The host to set.
* @brief Set a new source host.
*/
return this;
}
-/** @ingroup plugin_task
+/**
* @param destination The host to set.
* @brief Set a new destination host.
*/
return this;
}
-/** @ingroup plugin_task
+/**
* @param bytes The amount of bytes to set.
*/
CommTaskPtr CommTask::set_bytes(double bytes)
/**
* @brief Default constructor.
*/
-IoTask::IoTask(const std::string& name) : Task(name) {}
+IoTask::IoTask(const std::string& name) : Task(name)
+{
+ set_load_balancing_function([]() { return "instance_0"; });
+}
-/** @ingroup plugin_task
+/**
* @brief Smart Constructor.
*/
IoTaskPtr IoTask::init(const std::string& name)
return IoTaskPtr(new IoTask(name));
}
-/** @ingroup plugin_task
+/**
* @brief Smart Constructor.
*/
IoTaskPtr IoTask::init(const std::string& name, double bytes, Disk* disk, Io::OpType type)
return init(name)->set_bytes(bytes)->set_disk(disk)->set_op_type(type);
}
-/** @ingroup plugin_task
+/**
* @param disk The disk to set.
- * @brief Set a new disk.
*/
IoTaskPtr IoTask::set_disk(Disk* disk)
{
return this;
}
-/** @ingroup plugin_task
+/**
* @param bytes The amount of bytes to set.
*/
IoTaskPtr IoTask::set_bytes(double bytes)
return this;
}
-/** @ingroup plugin_task */
+/**
+ * @param type The op type to set.
+ */
IoTaskPtr IoTask::set_op_type(Io::OpType type)
{
kernel::actor::simcall_answered([this, type] { type_ = type; });
return this;
}
-void IoTask::fire()
+/** @param instance The Task instance to fire.
+ * @note Only regular instances (instance_n) trigger the on_start signal; the dispatcher and collector do not.
+ */
+void IoTask::fire(std::string instance)
{
- Task::fire();
- auto io = Io::init()->set_name(get_name())->set_size(get_amount())->set_disk(disk_)->set_op_type(type_);
- io->start();
- io->on_this_completion_cb([this](Io const&) { complete(); });
- store_activity(io);
+ Task::fire(instance);
+ if (instance == "dispatcher" or instance == "collector") {
+ auto exec = Exec::init()
+ ->set_name(get_name() + "_" + instance)
+ ->set_flops_amount(get_amount(instance))
+ ->set_host(disk_->get_host());
+ exec->start();
+ exec->on_this_completion_cb([this, instance](Exec const&) { complete(instance); });
+ store_activity(exec, instance);
+ } else {
+ auto io = Io::init()->set_name(get_name())->set_size(get_amount())->set_disk(disk_)->set_op_type(type_);
+ io->start();
+ io->on_this_completion_cb([this, instance](Io const&) { complete(instance); });
+ store_activity(io, instance);
+ }
}
-
} // namespace simgrid::s4u