Stop locking the mutex on data_receive.
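
The idea of the change, sketched below with plain std::thread/std::mutex rather than the SimGrid/xbt primitives the simulator actually uses, is to let DATA messages accumulate into a variable owned by the receiving side (received_load) and to fold that total into the shared real_load only while the mutex is held, so the possibly blocking data_receive() no longer runs with the lock taken. Names such as worker, on_data and compute_iteration are illustrative only, not part of the loba code.

    // Minimal sketch of the locking pattern only, not the loba code: incoming
    // amounts go into received_load (written only by the compute thread) and
    // are merged into the shared real_load under the mutex afterwards.
    #include <cstdio>
    #include <mutex>
    #include <thread>

    struct worker {                  // hypothetical stand-in for process
        double real_load = 1.0;      // shared with the load-balancing thread
        double received_load = 0.0;  // touched only by the compute thread
        std::mutex mtx;

        void on_data(double amount) {    // stands in for handle_message(DATA)
            received_load += amount;     // no lock needed here any more
        }

        void compute_iteration() {
            on_data(0.25);               // stands in for data_receive()
            std::lock_guard<std::mutex> lock(mtx);
            real_load += received_load;  // merge under the mutex
            received_load = 0.0;
        }
    };

    int main() {
        worker w;
        std::thread t([&w] { for (int i = 0; i < 4; ++i) w.compute_iteration(); });
        t.join();
        std::printf("final load: %g\n", w.real_load);
        return 0;
    }

Draining received_load back into real_load at every merge point is also what lets the new xbt_assert in ~process check that received_load is 0.0 at exit.
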
diff --git a/process.cpp b/process.cpp
index 11129e44bb3bb0f3ec413249493df4be51431f50..2de3016588dd2dd95e66b073eba4b59eb1ab7e18 100644
--- a/process.cpp
+++ b/process.cpp
@@ -50,6 +50,7 @@ process::process(int argc, char* argv[])
     expected_load = real_load;
     total_load_running += real_load;
     total_load_init += real_load;
+    received_load = 0.0;
 
     ctrl_close_pending = data_close_pending = neigh.size();
     close_received = false;
@@ -80,6 +81,8 @@ process::~process()
 {
     delete lb_thread;
     total_load_exit += real_load;
+    xbt_assert(received_load == 0.0,
+               "received_load is %g, but should be 0.0 !", received_load);
     if (opt::log_rate < 0)
         return;
     XBT_INFO("Final load after %d:%d iterations: %g",
@@ -157,7 +160,7 @@ void process::load_balance_loop()
                   std::bind(&process::ctrl_close, this, _1));
     while (ctrl_close_pending) {
         comm.ctrl_flush(false);
-        XBT_DEBUG("waiting for %d CTRL CLOSE", ctrl_close_pending);
+        XBT_DEBUG("waiting for %d CTRL_CLOSE", ctrl_close_pending);
         ctrl_receive(-1.0);
     }
     comm.ctrl_flush(true);
@@ -169,17 +172,14 @@ void process::compute_loop()
 
     double next_iter_after_date = MSG_get_clock() + opt::min_comp_iter_duration;
     while (still_running()) {
-        // receive
-        mutex.acquire();
-        if (real_load > 0.0)
-            data_receive(0.0);
-        else
-            data_receive(opt::min_comp_iter_duration);
-        mutex.release();
+        // receive (do not block if there is something to compute)
+        data_receive(real_load > 0.0 ? 0.0 : opt::min_comp_iter_duration);
 
         // send
         comm.data_flush(false);
         mutex.acquire();
+        real_load += received_load;
+        received_load = 0.0;
         std::for_each(neigh.begin(), neigh.end(),
                       std::bind(&process::data_send, this, _1));
         mutex.release();
@@ -201,20 +201,19 @@ void process::compute_loop()
     }
 
     XBT_VERB("Going to finalize for %s...", __func__);
-    // last send, for not losing load scheduled to be sent
-    std::for_each(neigh.begin(), neigh.end(),
-                  std::bind(&process::data_send, this, _1));
     finalizing = true;
-    total_load_running -= real_load;
     XBT_DEBUG("send DATA_CLOSE to %zu neighbor%s",
               neigh.size(), ESSE(neigh.size()));
     std::for_each(neigh.begin(), neigh.end(),
                   std::bind(&process::data_close, this, _1));
     while (data_close_pending) {
         comm.data_flush(false);
-        XBT_DEBUG("waiting for %d DATA CLOSE", data_close_pending);
+        XBT_DEBUG("waiting for %d DATA_CLOSE", data_close_pending);
         data_receive(-1.0);
     }
+    real_load += received_load;
+    received_load = 0.0;
+    total_load_running -= real_load;
     comm.data_flush(true);
 }
 
@@ -385,9 +384,7 @@ void process::handle_message(message* msg, m_host_t from)
     }
     case message::DATA: {
         double ld = msg->get_amount();
-        real_load += ld;
-        if (finalizing)
-            total_load_running -= ld;
+        received_load += ld;
         break;
     }
     case message::CTRL_CLOSE: