From ef753f4a6180065885bb97ce20ea62f4cc6bcc1c Mon Sep 17 00:00:00 2001
From: Martin Quinson
Date: Fri, 20 Jan 2023 16:57:53 +0100
Subject: [PATCH] Allow disabling the TCP windowing model by setting
 network/TCP-gamma to 0

---
 ChangeLog                           |  1 +
 docs/source/Configuring_SimGrid.rst | 13 +++++++------
 docs/source/Models.rst              | 17 ++++++++++-------
 src/surf/network_cm02.cpp           | 22 ++++++++++++----------
 4 files changed, 30 insertions(+), 23 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 6e8287fb63..313343a98a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -29,6 +29,7 @@ Models:
 - Merge parameters network/bandwidth-factor and smpi/bw-factor that serve the same purpose.
   - Same for the latency
   - Rewrite the corresponding documentation.
+ - Allow disabling the TCP windowing model by setting network/TCP-gamma to 0.
 
 sthread:
  - Implement pthread_join in MC mode.

diff --git a/docs/source/Configuring_SimGrid.rst b/docs/source/Configuring_SimGrid.rst
index 05db361b2c..bce26b17ec 100644
--- a/docs/source/Configuring_SimGrid.rst
+++ b/docs/source/Configuring_SimGrid.rst
@@ -373,16 +373,17 @@ Maximal TCP Window Size
 
 **Option** ``network/TCP-gamma`` **Default:** 4194304
 
-The analytical models need to know the maximal TCP window size to take
-the TCP congestion mechanism into account. On Linux, this value can
-be retrieved using the following commands. Both give a set of values,
-and you should use the last one, which is the maximal size.
+The analytical models need to know the maximal TCP window size to take the TCP congestion mechanism into account (see
+:ref:`this page <understanding_cm02>` for details). On Linux, this value can be retrieved using the following commands.
+Both give a set of values, and you should use the last one, which is the maximal size.
 
 .. code-block:: console
 
-   $ cat /proc/sys/net/ipv4/tcp_rmem # gives the sender window
-   $ cat /proc/sys/net/ipv4/tcp_wmem # gives the receiver window
+   $ cat /proc/sys/net/ipv4/tcp_rmem # gives the receiver window
+   $ cat /proc/sys/net/ipv4/tcp_wmem # gives the sender window
 
+If you want to disable the TCP windowing mechanism, set this parameter to 0.
+
 .. _cfg=network/bandwidth-factor:
 .. _cfg=network/latency-factor:
 .. _cfg=network/weight-S:

diff --git a/docs/source/Models.rst b/docs/source/Models.rst
index 0b8159df75..a717f023fe 100644
--- a/docs/source/Models.rst
+++ b/docs/source/Models.rst
@@ -118,6 +118,8 @@
 the speed, both simulators are linear in the size of their input, but ns-3 has a better complexity for simulations with
 steady communications. On the other hand, the SimGrid models must be carefully :ref:`calibrated ` if accuracy is really
 important to your study, while ns-3 models are less demanding with that regard.
 
+.. _understanding_cm02:
+
 CM02
 ====
@@ -125,20 +127,21 @@
 This is a simple model of TCP performance, where the sender stops sending packets when the TCP window is full. If the
 acknowledgment packets are returned in time to the sender, the TCP window has no impact on the performance that then is
 only limited by the link bandwidth. Otherwise, late acknowledgments will reduce the bandwidth.
 
-SimGrid models this mechanism as follows: :math:`realBW = min(physicalBW, \frac{TCP_GAMMA}{2\times latency})` The used
+SimGrid models this mechanism as follows: :math:`real\_BW = \min(physical\_BW, \frac{TCP\_GAMMA}{2\times latency})`. The used
 bandwidth is either the physical bandwidth that is configured in the platform, or a value representing the bandwidth
 limit due to late acknowledgments. This value is the maximal TCP window size (noted TCP Gamma in SimGrid) over the
 round-trip time (i.e. twice the one-way latency). The default value of TCP Gamma is 4194304. This can be changed with
 the :ref:`network/TCP-gamma <cfg=network/TCP-gamma>` configuration item.
 
-Let's compute the time it takes to send 10 Gb of data over a 10 Gb/s link that is otherwise unused. This is always given
-by :math:`latency + size / bandwidth`, but the bandwidth to use may be the physical one (10Gb/s) or the one induced by
-the TCP window, depending on the latency.
+If you want to disable this mechanism altogether (to model, e.g., UDP or memory movements), set TCP-gamma
+to 0. Otherwise, the time to send 10 Gb of data over an otherwise idle 10 Gb/s link is computed as
+follows. It is always given by :math:`latency + \frac{size}{bandwidth}`, but the bandwidth to use may be the physical
+one (10Gb/s) or the one induced by the TCP window, depending on the latency.
 
- - If the link latency is 0, it obviously takes one second.
- - If the link latency is 0.00001s, :math:`gamma/2\times lat=209,715,200,000 \approx 209Gb/s` which is larger than the
+ - If the link latency is 0, the communication obviously takes one second.
+ - If the link latency is 0.00001s, :math:`\frac{gamma}{2\times lat}=209,715,200,000 \approx 209Gb/s`, which is larger than the
   physical bandwidth. So the physical bandwidth is used (you fully use the link) and the communication takes 1.00001s
- - If the link latency is 0.001s, :math:`gamma/2\times lat=2,097,152,000 \approx 2Gb/s`, which is smalled than the
+ - If the link latency is 0.001s, :math:`\frac{gamma}{2\times lat}=2,097,152,000 \approx 2Gb/s`, which is smaller than the
   physical bandwidth. The communication thus fails to fully use the link, and takes 5.12s.
 - With a link latency of 0.1s, :math:`gamma/2\times lat \approx 21Mb/s`, so the communication takes 512 seconds!

diff --git a/src/surf/network_cm02.cpp b/src/surf/network_cm02.cpp
index fe69c812a2..5cc26f4585 100644
--- a/src/surf/network_cm02.cpp
+++ b/src/surf/network_cm02.cpp
@@ -397,10 +397,11 @@ void NetworkCm02Model::comm_action_set_variable(NetworkCm02Action* action, const
   /* after setting the variable, update the bounds depending on user configuration */
   if (action->get_user_bound() < 0) {
     get_maxmin_system()->update_variable_bound(
-        action->get_variable(), (action->lat_current_ > 0) ? cfg_tcp_gamma / (2.0 * action->lat_current_) : -1.0);
+        action->get_variable(),
+        (action->lat_current_ > 0 && cfg_tcp_gamma > 0) ? cfg_tcp_gamma / (2.0 * action->lat_current_) : -1.0);
   } else {
     get_maxmin_system()->update_variable_bound(
-        action->get_variable(), (action->lat_current_ > 0)
+        action->get_variable(), (action->lat_current_ > 0 && cfg_tcp_gamma > 0)
                                     ? std::min(action->get_user_bound(), cfg_tcp_gamma / (2.0 * action->lat_current_))
                                     : action->get_user_bound());
   }
@@ -518,20 +519,21 @@ void NetworkCm02Link::set_latency(double value)
     auto* action = static_cast<NetworkCm02Action*>(var->get_id());
     action->lat_current_ += delta;
     action->sharing_penalty_ += delta;
-    if (action->get_user_bound() < 0)
+    if (action->get_user_bound() < 0 && NetworkModel::cfg_tcp_gamma > 0)
       get_model()->get_maxmin_system()->update_variable_bound(action->get_variable(),
                                                               NetworkModel::cfg_tcp_gamma / (2.0 * action->lat_current_));
-    else {
+    else if (NetworkModel::cfg_tcp_gamma > 0) {
       get_model()->get_maxmin_system()->update_variable_bound(
           action->get_variable(),
           std::min(action->get_user_bound(), NetworkModel::cfg_tcp_gamma / (2.0 * action->lat_current_)));
-
-      if (action->get_user_bound() < NetworkModel::cfg_tcp_gamma / (2.0 * action->lat_current_)) {
-        XBT_DEBUG("Flow is limited BYBANDWIDTH");
-      } else {
-        XBT_DEBUG("Flow is limited BYLATENCY, latency of flow is %f", action->lat_current_);
-      }
     }
+    if (NetworkModel::cfg_tcp_gamma == 0 ||
+        action->get_user_bound() < NetworkModel::cfg_tcp_gamma / (2.0 * action->lat_current_)) {
+      XBT_DEBUG("Flow is limited BYBANDWIDTH");
+    } else {
+      XBT_DEBUG("Flow is limited BYLATENCY, latency of flow is %f", action->lat_current_);
+    }
+
     if (not action->is_suspended())
       get_model()->get_maxmin_system()->update_variable_penalty(action->get_variable(), action->sharing_penalty_);
   }
-- 
2.20.1
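
As a sanity check for the worked examples added to docs/source/Models.rst above, here is a minimal
standalone sketch. It is not SimGrid code: the effective_bw helper is made up for illustration, and
the 2^30-based Gb unit is an assumption inferred from the docs' arithmetic. It reproduces the 1s,
1.00001s, 5.12s and 512s figures, and a TCP-gamma of 0 exercises the new "windowing disabled" case.

#include <algorithm>
#include <cstdio>

// Effective bandwidth under CM02: the physical bandwidth, capped by the TCP
// window over the round-trip time. A gamma of 0 disables the cap entirely.
static double effective_bw(double physical_bw, double latency, double tcp_gamma)
{
  if (tcp_gamma <= 0.0 || latency <= 0.0) // windowing disabled, or no RTT to wait on
    return physical_bw;
  return std::min(physical_bw, tcp_gamma / (2.0 * latency));
}

int main()
{
  const double gb          = 1 << 30;    // the docs count Gb in powers of two
  const double size        = 10.0 * gb;  // 10 Gb of data...
  const double physical_bw = 10.0 * gb;  // ...over a 10 Gb/s link
  const double gamma       = 4194304.0;  // default network/TCP-gamma

  const double latencies[] = {0.0, 0.00001, 0.001, 0.1};
  for (double latency : latencies) {
    // the docs' latency + size/bandwidth rule
    double time = latency + size / effective_bw(physical_bw, latency, gamma);
    std::printf("latency=%-8gs -> transfer takes %gs\n", latency, time);
  }
}

Compiled with any C++11 compiler, this prints one transfer time per latency, matching the bullet
list in the documentation (1s, 1.00001s, ~5.12s, ~512s).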
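
Similarly, here is a condensed paraphrase of the bound computation that the patch installs in
comm_action_set_variable(), with the SimGrid plumbing stripped away. The names lat_current,
user_bound and tcp_gamma stand in for action->lat_current_, action->get_user_bound() and
cfg_tcp_gamma; a negative return means the max-min variable is left unbounded.

#include <algorithm>

double new_variable_bound(double lat_current, double user_bound, double tcp_gamma)
{
  // The TCP-window cap only applies when there is a latency to wait on and the
  // windowing model was not disabled by setting network/TCP-gamma to 0.
  bool windowed = lat_current > 0 && tcp_gamma > 0;
  if (user_bound < 0) // no user-provided bound: window cap, or -1 for "unbounded"
    return windowed ? tcp_gamma / (2.0 * lat_current) : -1.0;
  // user-provided bound: further capped by the TCP window unless it is disabled
  return windowed ? std::min(user_bound, tcp_gamma / (2.0 * lat_current)) : user_bound;
}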