include examples/cpp/io-file-remote/s4u-io-file-remote_d.xml
include examples/cpp/io-file-system/s4u-io-file-system.cpp
include examples/cpp/io-file-system/s4u-io-file-system.tesh
+include examples/cpp/io-jbod-raw/s4u-io-jbod-raw.cpp
+include examples/cpp/io-jbod-raw/s4u-io-jbod-raw.tesh
include examples/cpp/io-priority/s4u-io-priority.cpp
include examples/cpp/io-priority/s4u-io-priority.tesh
include examples/cpp/maestro-set/s4u-maestro-set.cpp
include include/simgrid/plugins/dvfs.h
include include/simgrid/plugins/energy.h
include include/simgrid/plugins/file_system.h
+include include/simgrid/plugins/jbod.hpp
include include/simgrid/plugins/live_migration.h
include include/simgrid/plugins/load.h
include include/simgrid/plugins/ns3.hpp
include src/plugins/host_dvfs.cpp
include src/plugins/host_energy.cpp
include src/plugins/host_load.cpp
+include src/plugins/jbod.cpp
include src/plugins/link_energy.cpp
include src/plugins/link_energy_wifi.cpp
include src/plugins/link_load.cpp
maestro-set
mc-bugged1 mc-bugged1-liveness mc-bugged2 mc-bugged2-liveness mc-centralized-mutex mc-electric-fence mc-failing-assert
network-ns3 network-ns3-wifi network-wifi
- io-async io-priority io-degradation io-file-system io-file-remote io-disk-raw io-dependent
+ io-async io-priority io-degradation io-file-system io-file-remote io-disk-raw io-jbod-raw io-dependent
task-io task-simple task-variable-load task-storm task-switch-host
photovoltaic-simple
platform-comm-serialize platform-failures platform-profile platform-properties
--- /dev/null
+/* Copyright (c) 2017-2023. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+#include "simgrid/plugins/jbod.hpp"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(jbod_test, "Messages specific for this simulation");
+namespace sg4 = simgrid::s4u;
+
+/* Actor code: stress one JBOD with every flavor of I/O (asynchronous and synchronous,
+ * reads and writes). Every operation moves 1e7 bytes, and the actor blocks on each
+ * operation before issuing the next one, so each log timestamp measures one I/O.
+ * NOTE(review): the log messages below (typos such as "synchonous" included) are matched
+ * verbatim by the accompanying .tesh file; do not change them without updating it too. */
+static void write_then_read(simgrid::plugin::Jbod* jbod)
+{
+  simgrid::plugin::JbodIoPtr io = jbod->write_async(1e7);
+  XBT_INFO("asynchronous write posted, wait for it");
+  io->wait();
+  XBT_INFO("asynchronous write done");
+  jbod->read(1e7);
+  XBT_INFO("synchonous read done");
+  jbod->write(1e7);
+  XBT_INFO("synchonous write done");
+  io = jbod->read_async(1e7);
+  XBT_INFO("asynchronous read posted, wait for it");
+  io->wait();
+  XBT_INFO("asynchonous read done");
+  jbod->write(1e7);
+  XBT_INFO("synchonous write done");
+  jbod->read(1e7);
+  XBT_INFO("synchonous read done");
+  jbod->read(1e7);
+  XBT_INFO("synchonous read done");
+}
+
+/* Build a platform with one host and five 4-disk JBODs (one per supported RAID level),
+ * then run the same write/read scenario on each of them in turn. Each e.run() completes
+ * once the corresponding actor is done, so the RAID levels execute sequentially. */
+int main(int argc, char** argv)
+{
+  sg4::Engine e(&argc, argv);
+  auto* zone = sg4::create_full_zone("zone");
+  auto* host = zone->create_host("host", "1Gf");
+  // set up link so that data transfer from host to JBOD takes exactly 1 second (without crosstraffic)
+  auto* link = zone->create_link("link", 1e7/0.97)->set_latency(0);
+
+  // Each JBOD: 1 Gf of computing speed (for parity blocks), 4 disks,
+  // 1e7 B/s read and 5e6 B/s write bandwidth per disk
+  auto* jbod_raid0 =
+      simgrid::plugin::Jbod::create_jbod(zone, "jbod_raid0", 1e9, 4, simgrid::plugin::Jbod::RAID::RAID0, 1e7, 5e6);
+  zone->add_route(host->get_netpoint(), jbod_raid0->get_netpoint(), nullptr, nullptr, {sg4::LinkInRoute(link)});
+
+  auto* jbod_raid1 =
+      simgrid::plugin::Jbod::create_jbod(zone, "jbod_raid1", 1e9, 4, simgrid::plugin::Jbod::RAID::RAID1, 1e7, 5e6);
+  zone->add_route(host->get_netpoint(), jbod_raid1->get_netpoint(), nullptr, nullptr, {sg4::LinkInRoute(link)});
+
+  auto* jbod_raid4 =
+      simgrid::plugin::Jbod::create_jbod(zone, "jbod_raid4", 1e9, 4, simgrid::plugin::Jbod::RAID::RAID4, 1e7, 5e6);
+  zone->add_route(host->get_netpoint(), jbod_raid4->get_netpoint(), nullptr, nullptr, {sg4::LinkInRoute(link)});
+
+  auto* jbod_raid5 =
+      simgrid::plugin::Jbod::create_jbod(zone, "jbod_raid5", 1e9, 4, simgrid::plugin::Jbod::RAID::RAID5, 1e7, 5e6);
+  zone->add_route(host->get_netpoint(), jbod_raid5->get_netpoint(), nullptr, nullptr, {sg4::LinkInRoute(link)});
+
+  auto* jbod_raid6 =
+      simgrid::plugin::Jbod::create_jbod(zone, "jbod_raid6", 1e9, 4, simgrid::plugin::Jbod::RAID::RAID6, 1e7, 5e6);
+  zone->add_route(host->get_netpoint(), jbod_raid6->get_netpoint(), nullptr, nullptr, {sg4::LinkInRoute(link)});
+
+  XBT_INFO("XXXXXXXXXXXXXXX RAID 0 XXXXXXXXXXXXXXXX");
+  sg4::Actor::create("", host, write_then_read, jbod_raid0);
+  e.run();
+
+  XBT_INFO("XXXXXXXXXXXXXXX RAID 1 XXXXXXXXXXXXXXXX");
+  sg4::Actor::create("", host, write_then_read, jbod_raid1);
+  e.run();
+
+  XBT_INFO("XXXXXXXXXXXXXXX RAID 4 XXXXXXXXXXXXXXXX");
+  sg4::Actor::create("", host, write_then_read, jbod_raid4);
+  e.run();
+
+  XBT_INFO("XXXXXXXXXXXXXXX RAID 5 XXXXXXXXXXXXXXXX");
+  sg4::Actor::create("", host, write_then_read, jbod_raid5);
+  e.run();
+
+  XBT_INFO("XXXXXXXXXXXXXXX RAID 6 XXXXXXXXXXXXXXXX");
+  sg4::Actor::create("", host, write_then_read, jbod_raid6);
+  e.run();
+
+  XBT_INFO("Simulated time: %g", sg4::Engine::get_clock());
+
+  return 0;
+}
--- /dev/null
+#!/usr/bin/env tesh
+
+$ ${bindir}/s4u-io-jbod-raw --cfg=network/crosstraffic:0 "--log=root.fmt:[%10.6r]%e%m%n"
+> [ 0.000000] Configuration change: Set 'network/crosstraffic' to '0'
+> [ 0.000000] XXXXXXXXXXXXXXX RAID 0 XXXXXXXXXXXXXXXX
+> [ 0.000000] asynchronous write posted, wait for it
+> [ 1.500000] asynchronous write done
+> [ 2.750000] synchonous read done
+> [ 4.250000] synchonous write done
+> [ 4.250000] asynchronous read posted, wait for it
+> [ 5.500000] asynchonous read done
+> [ 7.000000] synchonous write done
+> [ 8.250000] synchonous read done
+> [ 9.500000] synchonous read done
+> [ 9.500000] XXXXXXXXXXXXXXX RAID 1 XXXXXXXXXXXXXXXX
+> [ 9.500000] asynchronous write posted, wait for it
+> [ 12.500000] asynchronous write done
+> [ 14.500000] synchonous read done
+> [ 17.500000] synchonous write done
+> [ 17.500000] asynchronous read posted, wait for it
+> [ 19.500000] asynchonous read done
+> [ 22.500000] synchonous write done
+> [ 24.500000] synchonous read done
+> [ 26.500000] synchonous read done
+> [ 26.500000] XXXXXXXXXXXXXXX RAID 4 XXXXXXXXXXXXXXXX
+> [ 26.500000] asynchronous write posted, wait for it
+> [ 28.170000] asynchronous write done
+> [ 29.503333] synchonous read done
+> [ 31.173333] synchonous write done
+> [ 31.173333] asynchronous read posted, wait for it
+> [ 32.506666] asynchonous read done
+> [ 34.176666] synchonous write done
+> [ 35.510000] synchonous read done
+> [ 36.843333] synchonous read done
+> [ 36.843333] XXXXXXXXXXXXXXX RAID 5 XXXXXXXXXXXXXXXX
+> [ 36.843333] asynchronous write posted, wait for it
+> [ 38.513333] asynchronous write done
+> [ 39.846666] synchonous read done
+> [ 41.516666] synchonous write done
+> [ 41.516666] asynchronous read posted, wait for it
+> [ 42.849999] asynchonous read done
+> [ 44.519999] synchonous write done
+> [ 45.853333] synchonous read done
+> [ 47.186666] synchonous read done
+> [ 47.186666] XXXXXXXXXXXXXXX RAID 6 XXXXXXXXXXXXXXXX
+> [ 47.186666] asynchronous write posted, wait for it
+> [ 50.186666] asynchronous write done
+> [ 51.686666] synchonous read done
+> [ 54.686666] synchonous write done
+> [ 54.686666] asynchronous read posted, wait for it
+> [ 56.186666] asynchonous read done
+> [ 59.186666] synchonous write done
+> [ 60.686666] synchonous read done
+> [ 62.186666] synchonous read done
+> [ 62.186666] Simulated time: 62.1867
\ No newline at end of file
--- /dev/null
+/* Copyright (c) 2023. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#ifndef SIMGRID_PLUGIN_JBOD_HPP
+#define SIMGRID_PLUGIN_JBOD_HPP
+#include <simgrid/s4u/Host.hpp>
+#include <simgrid/s4u/Io.hpp>
+
+namespace simgrid::plugin {
+
+class JbodIo;
+/** Smart pointer to a simgrid::plugin::JbodIo (reference-counted via intrusive_ptr_add_ref/release) */
+using JbodIoPtr = boost::intrusive_ptr<JbodIo>;
+// NOTE(review): these declarations take 'const JbodIo*' while the friend definitions inside
+// JbodIo take 'JbodIo*'; the const overloads do not appear to be defined anywhere -- confirm intended.
+XBT_PUBLIC void intrusive_ptr_release(const JbodIo* io);
+XBT_PUBLIC void intrusive_ptr_add_ref(const JbodIo* io);
+
+/** A JBOD (Just a Bunch Of Disks) modeled as a Host with several attached disks.
+ *  Data is spread over the disks according to one of the classical RAID schemes. */
+class Jbod : public s4u::Host {
+public:
+  enum class RAID { RAID0 = 0, RAID1 = 1, RAID4 = 4, RAID5 = 5, RAID6 = 6 };
+  /** Index of the disk currently recorded as holding the parity block (RAID 4/5/6) */
+  int get_parity_disk_idx() { return parity_disk_idx_; }
+  /** Rotate the parity disk backwards (RAID 5/6).
+   *  num_disks_ is added before taking the modulo: parity_disk_idx_ is unsigned, so the
+   *  previous '(parity_disk_idx_ - 1) % num_disks_' wrapped to UINT_MAX when the index was 0
+   *  and produced a wrong disk index whenever num_disks_ is not a power of two. */
+  void update_parity_disk_idx() { parity_disk_idx_ = (parity_disk_idx_ + num_disks_ - 1) % num_disks_; }
+
+  /** Round-robin selection of the disk to read from (RAID 1).
+   *  The wrapped value is stored back so that read_disk_idx_ cannot grow without
+   *  bound and eventually overflow (signed overflow is UB). The returned sequence
+   *  (0, 1, ..., num_disks_-1, 0, ...) is unchanged. */
+  int get_next_read_disk_idx()
+  {
+    read_disk_idx_ = (read_disk_idx_ + 1) % static_cast<int>(num_disks_);
+    return read_disk_idx_;
+  }
+
+  JbodIoPtr read_async(sg_size_t size);
+  sg_size_t read(sg_size_t size);
+
+  JbodIoPtr write_async(sg_size_t size);
+  sg_size_t write(sg_size_t size);
+
+  static Jbod* create_jbod(s4u::NetZone* zone, const std::string& name, double speed, unsigned int num_disks,
+                           RAID raid_level, double read_bandwidth, double write_bandwidth);
+
+protected:
+  void set_num_disks(unsigned int num_disks) { num_disks_ = num_disks; }
+  void set_parity_disk_idx(unsigned int index) { parity_disk_idx_ = index; }
+  void set_read_disk_idx(int index) { read_disk_idx_ = index; }
+  void set_raid_level(RAID raid_level) { raid_level_ = raid_level; }
+
+private:
+  unsigned int num_disks_      = 0;
+  RAID raid_level_             = RAID::RAID0;
+  unsigned int parity_disk_idx_ = 0;
+  int read_disk_idx_           = -1;
+};
+
+/** Activity-like handle on an on-going JBOD operation.
+ *  It bundles the host<->JBOD data transfer, the optional parity computation and the
+ *  per-disk I/Os, and is reference-counted through boost::intrusive_ptr (JbodIoPtr). */
+class JbodIo {
+  const Jbod* jbod_;
+  s4u::CommPtr transfer_;            // data transfer between the JBOD and the remote host
+  s4u::ExecPtr parity_block_comp_;   // parity computation; nullptr unless RAID 4/5/6 write
+  std::vector<s4u::IoPtr> pending_ios_; // one I/O activity per involved disk
+  s4u::Io::OpType type_;             // READ or WRITE: drives the ordering enforced in wait()
+  std::atomic_int_fast32_t refcount_{0};
+public:
+
+  explicit JbodIo(const Jbod* jbod, const s4u::CommPtr transfer, const s4u::ExecPtr parity_block_comp,
+                  const std::vector<s4u::IoPtr>& pending_ios, s4u::Io::OpType type)
+    : jbod_(jbod), transfer_(transfer), parity_block_comp_(parity_block_comp), pending_ios_(pending_ios), type_(type)
+  {}
+
+  /** Block until the whole JBOD operation (transfer + parity + disk I/Os) is done */
+  void wait();
+
+#ifndef DOXYGEN
+  // Standard intrusive refcounting: the release/acquire fence pair makes the delete safe
+  // when the last reference is dropped from another thread.
+  friend void intrusive_ptr_release(JbodIo* io)
+  {
+    if (io->refcount_.fetch_sub(1, std::memory_order_release) == 1) {
+      std::atomic_thread_fence(std::memory_order_acquire);
+      delete io;
+    }
+  }
+  friend void intrusive_ptr_add_ref(JbodIo* io) { io->refcount_.fetch_add(1, std::memory_order_relaxed); }
+#endif
+};
+
+} // namespace simgrid::plugin
+#endif
\ No newline at end of file
--- /dev/null
+/* Copyright (c) 2023. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <simgrid/plugins/jbod.hpp>
+#include <simgrid/s4u/Comm.hpp>
+#include <simgrid/s4u/Disk.hpp>
+#include <simgrid/s4u/Exec.hpp>
+#include <simgrid/s4u/NetZone.hpp>
+
+
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(s4u_jbod, s4u, "Logging specific to the JBOD implmentation");
+
+namespace simgrid::plugin {
+
+/** Factory method: create a JBOD named 'name' in 'zone', backed by 'num_disks' identical disks.
+ *  @param speed            computing speed of the JBOD host (used for the parity computations)
+ *  @param raid_level       RAID scheme (0, 1, 4, 5 or 6) applied over the disks
+ *  @param read_bandwidth   per-disk read bandwidth
+ *  @param write_bandwidth  per-disk write bandwidth
+ */
+Jbod* Jbod::create_jbod(s4u::NetZone* zone, const std::string& name, double speed, unsigned int num_disks,
+                         RAID raid_level, double read_bandwidth, double write_bandwidth)
+{
+  xbt_assert(not ((raid_level == RAID::RAID4 || raid_level == RAID::RAID5) && num_disks < 3),
+             "RAID%d requires at least 3 disks", (int) raid_level);
+  xbt_assert(not (raid_level == RAID::RAID6 && num_disks < 4), "RAID6 requires at least 4 disks");
+
+  // NOTE(review): create_host() builds a plain Host which is downcast to Jbod so that the
+  // Jbod-specific fields can be set through it. This relies on engine implementation details
+  // (storage actually backing the extra fields) -- confirm this is safe with the allocator used.
+  auto* jbod = static_cast<Jbod*>(zone->create_host(name, speed));
+  jbod->set_num_disks(num_disks);
+  jbod->set_parity_disk_idx(num_disks -1 );
+  jbod->set_read_disk_idx(-1);
+  jbod->set_raid_level(raid_level);
+  for (unsigned int i = 0; i < num_disks; i++)
+    jbod->create_disk(name + "_disk_" + std::to_string(i), read_bandwidth, write_bandwidth);
+
+  return jbod;
+}
+
+/** Post an asynchronous read of 'size' bytes, spread over the disks according to the RAID level.
+ *  The per-disk I/Os are started right away; the communication carrying the data back to the
+ *  reading host is only initialized here (its destination is set in JbodIo::wait()). */
+JbodIoPtr Jbod::read_async(sg_size_t size)
+{
+  // 'this' is already non-const in this non-const member function: no const_cast needed
+  auto comm = s4u::Comm::sendto_init()->set_source(this)->set_payload_size(size);
+  std::vector<s4u::IoPtr> pending_ios;
+  sg_size_t read_size = 0;
+  std::vector<s4u::Disk*> targets;
+  switch(raid_level_) {
+    case RAID::RAID0: // striping: every disk holds 1/N of the data
+      read_size = size / num_disks_;
+      targets = get_disks();
+      break;
+    case RAID::RAID1: // mirroring: read everything from a single disk, round-robin
+      read_size = size;
+      targets.push_back(get_disks().at(get_next_read_disk_idx()));
+      break;
+    case RAID::RAID4: // dedicated parity disk (the last one): read from the others
+      read_size = size / (num_disks_ - 1);
+      targets = get_disks();
+      targets.pop_back();
+      break;
+    case RAID::RAID5: // rotating parity: skip the disk right after the stored parity index.
+      read_size = size / (num_disks_ - 1);
+      targets = get_disks();
+      // '(idx + 1)' must be parenthesized: '%' binds tighter than '+', so the previous
+      // 'idx + 1 % num_disks_' never applied the modulo and could erase past end() (UB)
+      // once the parity index wrapped around.
+      targets.erase(targets.begin() + (get_parity_disk_idx() + 1) % num_disks_);
+      break;
+    case RAID::RAID6: // double rotating parity: skip the two disks after the stored parity index
+      read_size = size / (num_disks_ - 2);
+      targets = get_disks();
+      // Same precedence fix as above: without the parentheses this branch was never taken
+      // and the range-erase below could run past the end of 'targets'.
+      if ((get_parity_disk_idx() + 2) % num_disks_ == 0) {
+        // parity disks are the last and the first ones
+        targets.pop_back();
+        targets.erase(targets.begin());
+      } else if (get_parity_disk_idx() + 1 == static_cast<int>(num_disks_)) {
+        // parity disks are the two last ones
+        targets.pop_back();
+        targets.pop_back();
+      } else {
+        // parity disks are two adjacent disks in the middle
+        targets.erase(targets.begin() + (get_parity_disk_idx() + 1) % num_disks_,
+                      targets.begin() + get_parity_disk_idx() + 3);
+      }
+      break;
+    default:
+      xbt_die("Unsupported RAID level. Supported level are: 0, 1, 4, 5, and 6");
+  }
+  // Start one read per target disk immediately; JbodIo::wait() will wait for them
+  for (const auto* disk : targets) {
+    auto io = s4u::IoPtr(disk->io_init(read_size, s4u::Io::OpType::READ));
+    io->set_name(disk->get_name())->start();
+    pending_ios.push_back(io);
+  }
+
+  return JbodIoPtr(new JbodIo(this, comm, nullptr, pending_ios, s4u::Io::OpType::READ));
+}
+
+/** Synchronous read: post the asynchronous read and block until it completes.
+ *  @return the number of bytes read (i.e., 'size') */
+sg_size_t Jbod::read(sg_size_t size)
+{
+  JbodIoPtr pending = read_async(size);
+  pending->wait();
+  return size;
+}
+
+/** Post an asynchronous write of 'size' bytes. The data first flows to the JBOD host, then
+ *  the parity block is computed (RAID 4/5/6), and only then do the disk writes start. That
+ *  ordering is enforced by JbodIo::wait(), which starts the pending disk I/Os. */
+JbodIoPtr Jbod::write_async(sg_size_t size)
+{
+  // 'this' is already a plain Jbod* in this non-const member function: no const_cast needed
+  auto comm = s4u::Comm::sendto_init(s4u::Host::current(), this);
+  std::vector<s4u::IoPtr> pending_ios;
+  sg_size_t write_size = 0;
+  switch(raid_level_) {
+    case RAID::RAID0: // striping: each disk stores 1/N of the data
+      write_size = size / num_disks_;
+      break;
+    case RAID::RAID1: // mirroring: each disk stores a full copy
+      write_size = size;
+      break;
+    case RAID::RAID4: // fixed parity disk: N-1 data chunks
+      write_size = size / (num_disks_ - 1);
+      break;
+    case RAID::RAID5: // rotate the parity disk before writing
+      update_parity_disk_idx();
+      write_size = size / (num_disks_ - 1);
+      break;
+    case RAID::RAID6: // two parity disks to rotate
+      update_parity_disk_idx();
+      update_parity_disk_idx();
+      write_size = size / (num_disks_ - 2);
+      break;
+    default:
+      xbt_die("Unsupported RAID level. Supported level are: 0, 1, 4, 5, and 6");
+  }
+  // Initialize an I/O on every disk (parity included) but do NOT start them yet:
+  // JbodIo::wait() fires them once the data transfer (and parity computation) completes.
+  for (const auto* disk : get_disks()) {
+    auto io = s4u::IoPtr(disk->io_init(write_size, s4u::Io::OpType::WRITE));
+    io->set_name(disk->get_name());
+    pending_ios.push_back(io);
+  }
+
+  s4u::ExecPtr parity_block_comp = nullptr;
+  if (raid_level_ == RAID::RAID4 || raid_level_ == RAID::RAID5 || raid_level_ == RAID::RAID6) {
+    // Parity cost model: 1 flop per byte of the parity block for RAID4/5, and 200 flops per
+    // byte for the double-parity RAID6 (these factors match the expected .tesh timings).
+    // Do not assign the Exec to a host yet: that is done after the completion of the CommPtr.
+    if (raid_level_ == RAID::RAID6)
+      parity_block_comp = s4u::Exec::init()->set_flops_amount(200 * write_size);
+    else
+      parity_block_comp = s4u::Exec::init()->set_flops_amount(write_size);
+  }
+
+  comm->set_payload_size(size)->start();
+  return JbodIoPtr(new JbodIo(this, comm, parity_block_comp, pending_ios, s4u::Io::OpType::WRITE));
+}
+
+/** Synchronous write: post the asynchronous write and block until it completes.
+ *  @return the number of bytes written (i.e., 'size') */
+sg_size_t Jbod::write(sg_size_t size)
+{
+  JbodIoPtr pending = write_async(size);
+  pending->wait();
+  return size;
+}
+
+/** Block until the whole JBOD operation completes.
+ *  Writes follow a strict sequence: (1) wait for the data transfer to reach the JBOD,
+ *  (2) compute the parity block on the JBOD host (RAID 4/5/6 only), (3) start and wait
+ *  for the per-disk writes. Reads already started their disk I/Os in read_async(); here
+ *  we wait for them and then ship the data back to the waiting actor's host. */
+void JbodIo::wait()
+{
+  if (type_ == s4u::Io::OpType::WRITE) {
+    transfer_->wait();
+    XBT_DEBUG("Data received on JBOD");
+    if (parity_block_comp_) {
+      // jbod_ is stored const, but set_host() requires a mutable Host, hence the const_cast
+      parity_block_comp_->set_host(const_cast<Jbod*>(jbod_))->wait();
+      XBT_DEBUG("Parity block computed");
+    }
+    XBT_DEBUG("Start writing");
+    for (const auto& io : pending_ios_)
+      io->start();
+  }
+
+  for (const auto& io : pending_ios_) {
+    XBT_DEBUG("Wait for I/O on %s", io->get_cname());
+    io->wait();
+  }
+
+  if (type_ == s4u::Io::OpType::READ) {
+    // The destination is only known now: it is the host of the actor doing the wait()
+    XBT_DEBUG("Data read on JBOD, send it to %s", s4u::Host::current()->get_cname());
+    transfer_->set_destination(s4u::Host::current())->wait();
+  }
+}
+} // namespace simgrid::plugin
src/plugins/host_dvfs.cpp
src/plugins/host_energy.cpp
src/plugins/host_load.cpp
+ src/plugins/jbod.cpp
src/plugins/link_energy.cpp
src/plugins/link_energy_wifi.cpp
src/plugins/link_load.cpp
include/simgrid/plugins/dvfs.h
include/simgrid/plugins/energy.h
include/simgrid/plugins/file_system.h
+ include/simgrid/plugins/jbod.hpp
include/simgrid/plugins/live_migration.h
include/simgrid/plugins/load.h
include/simgrid/plugins/photovoltaic.hpp