Missing loba_lln.h and loba_lln.cpp added. ALGORITHMS updated to include bulk and...
[loba.git] / process.h
index 4e6ab240f8d73b6f3b15c9fe404568bb82bdc7e2..ed6e6cf6fbedffc1ab96657aadda320828ca0a2a 100644
--- a/process.h
+++ b/process.h
@@ -4,7 +4,8 @@
 #define USE_UNORDERED_MAP 1
 //#undef USE_UNORDERED_MAP
 
-#include <vector>
+#include <algorithm>
+#include <tr1/functional>
 #ifdef USE_UNORDERED_MAP
 #  include <tr1/unordered_map>
 #  define MAP_TEMPLATE std::tr1::unordered_map
@@ -12,10 +13,15 @@
 #  include <map>
 #  define MAP_TEMPLATE std::map
 #endif
+#include <vector>
 #include <msg/msg.h>
 #include <xbt/log.h>
 #include "communicator.h"
+#include "misc.h"
+#include "msg_thread.h"
 #include "neighbor.h"
 #include "neighbor.h"
+#include "options.h"
+#include "synchro.h"
 
 class process {
 public:
@@ -26,6 +32,17 @@ public:
     process(int argc, char* argv[]);
     virtual ~process();
 
+    double get_real_load() const           { return real_load;            }
+    double get_comp_amount() const         { return acc.comp_amount;      }
+    double get_data_send_amount() const    { return acc.data_send.amount; }
+    double get_data_recv_amount() const    { return acc.data_recv.amount; }
+    unsigned get_data_send_count() const   { return acc.data_send.count;  }
+    unsigned get_data_recv_count() const   { return acc.data_recv.count;  }
+    double get_ctrl_send_amount() const    { return acc.ctrl_send.amount; }
+    double get_ctrl_recv_amount() const    { return acc.ctrl_recv.amount; }
+    unsigned get_ctrl_send_count() const   { return acc.ctrl_send.count;  }
+    unsigned get_ctrl_recv_count() const   { return acc.ctrl_recv.count;  }
+
     int run();
 
 protected:
@@ -35,8 +52,20 @@ protected:
     pneigh_type pneigh;         // list of pointers to neighbors that
                                 // we are free to reorder
 
-    // Returns the sum of "to_send" for all neighbors.
-    double sum_of_to_send() const;
+    // Get and set current load, which may be real load, or expected
+    // load if opt::bookkeeping is true.
+    double get_load() const                { return expected_load; }
+
+    // The load balancing algorithm comes here...
+    virtual void load_balance();
+
+    // Register some amount of load to send to given neighbor.
+    void send(neighbor& nb, double amount);
+    void send(neighbor* nb, double amount) { send(*nb, amount); }
+
+    // Sort pneigh by applying comp to their loads
+    template <typename Compare>
+    void pneigh_sort_by_load(const Compare& comp);
 
     // Calls neighbor::print(verbose, logp, cat) for each member of neigh.
     void print_loads(bool verbose = false,
@@ -50,7 +79,7 @@ protected:
 
 private:
     static double total_load_init; // sum of process loads at init
-    static double total_load_running; // summ of loads while running
+    static double total_load_running; // sum of loads while running
     static double total_load_exit; // sum of process loads at exit
 
     typedef MAP_TEMPLATE<m_host_t, neighbor*> rev_neigh_type;
@@ -64,40 +93,91 @@ private:
     int data_close_pending;     // number of "close" messages to wait
                                 // on data channel
     bool close_received;        // true if we received a "close" message
-    bool may_receive;           // true if there remains neighbors to listen for
     bool finalizing;            // true when finalize() is running
 
-    unsigned iter;              // counter of iterations
+    unsigned lb_iter;           // counter of load-balancing iterations
+    unsigned comp_iter;         // counter of computation iterations
 
     double prev_load_broadcast; // used to ensure that we do not send
                                 // a same information messages
-    double load;                // current load
+    double real_load;           // current load
     double expected_load;       // expected load in bookkeeping mode
 
-    // The load balancing algorithm comes here...
-    // Parameter "my_load" is the load to take into account for myself
-    // (may be load or expected load).
-    // Returns the total load sent to neighbors.
-    virtual double load_balance(double my_load);
-
-    // Virtually do some computation
-    void compute();
-
-    // Send procedures, with helpers for bookkeeping mode or not
-    void send1_no_bookkeeping(neighbor& nb);
-    void send1_bookkeeping(neighbor& nb);
-    void send();
-
-    // Receive procedure: wait (or not) for a message to come.
-    enum recv_wait_mode { NO_WAIT = 0, WAIT, WAIT_FOR_CLOSE };
-    void receive(recv_wait_mode wait);
-
-    // Finalize sends a "close" message to each neighbor and wait for
-    // all of them to answer.
-    void finalize1(neighbor& nb);
-    void finalize();
+    mutex_t mutex;              // synchronization between threads
+    condition_t cond;
+
+    struct mesg_accounting {
+        double amount;          // sum of message size
+        unsigned count;         // number of messages
+        mesg_accounting(): amount(0.0), count(0) { }
+    };
+    struct accounting {
+        double comp_amount;        // total computing done so far (flops)
+        mesg_accounting data_send; // data messages sent
+        mesg_accounting data_recv; // data messages received
+        mesg_accounting ctrl_send; // ctrl message sent
+        mesg_accounting ctrl_recv; // ctrl message received
+        accounting(): comp_amount(0.0) { }
+    };
+    accounting acc;
+
+    void add_comp_amount(double amount) { acc.comp_amount += amount; }
+    void add_data_send_mesg(double amount) {
+        ++acc.data_send.count;
+        acc.data_send.amount += amount;
+    }
+    void add_data_recv_mesg(double amount) {
+        ++acc.data_recv.count;
+        acc.data_recv.amount += amount;
+    }
+    void add_ctrl_send_mesg(double amount) {
+        ++acc.ctrl_send.count;
+        acc.ctrl_send.amount += amount;
+    }
+    void add_ctrl_recv_mesg(double amount) {
+        ++acc.ctrl_recv.count;
+        acc.ctrl_recv.amount += amount;
+    }
+
+    // Load-balancing loop
+    msg_thread* lb_thread;
+    void load_balance_loop();
+
+    // Simulate computation loop
+    void compute_loop();
+
+    // Check if we need to stop
+    bool still_running();
+
+    // Returns the sum of "to_send" for all neighbors.
+    double get_sum_of_to_send() const;
+
+    // Send procedures
+    void ctrl_send(neighbor& nb);
+    void data_send(neighbor& nb);
+    void ctrl_close(neighbor& nb);
+    void data_close(neighbor& nb);
+
+    // Receive procedure
+    // Parameter "timeout" may be 0 for non-blocking operation, -1 for
+    // infinite waiting, or any positive timeout.
+    void ctrl_receive(double timeout);
+    void data_receive(double timeout);
+    void handle_message(message* msg, m_host_t from);
 };
 
+template <typename Compare>
+void process::pneigh_sort_by_load(const Compare& comp)
+{
+    using std::tr1::bind;
+    using std::tr1::placeholders::_1;
+    using std::tr1::placeholders::_2;
+    std::sort(pneigh.begin(), pneigh.end(),
+              bind(comp,
+                   bind(&neighbor::get_load, _1),
+                   bind(&neighbor::get_load, _2)));
+}
+
 #endif // !PROCESS_H
 
 // Local variables:
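
As a reading aid for the new accounting code introduced by this patch: the sketch below copies the mesg_accounting and accounting structures from the diff and drives them the same way process::add_data_send_mesg() does. The main() driver and the sample message sizes are purely illustrative and not part of the repository.

#include <iostream>

struct mesg_accounting {
    double amount;          // sum of message size
    unsigned count;         // number of messages
    mesg_accounting(): amount(0.0), count(0) { }
};

struct accounting {
    double comp_amount;        // total computing done so far (flops)
    mesg_accounting data_send; // data messages sent
    mesg_accounting data_recv; // data messages received
    mesg_accounting ctrl_send; // ctrl message sent
    mesg_accounting ctrl_recv; // ctrl message received
    accounting(): comp_amount(0.0) { }
};

int main()
{
    accounting acc;

    // Same bookkeeping as process::add_data_send_mesg(amount):
    // bump the counter and accumulate the message size.
    double sizes[] = { 100.0, 250.0, 50.0 };
    for (int i = 0; i < 3; ++i) {
        ++acc.data_send.count;
        acc.data_send.amount += sizes[i];
    }

    // This is what get_data_send_amount() / get_data_send_count() expose.
    std::cout << acc.data_send.amount << " units in "
              << acc.data_send.count << " messages\n";   // prints: 400 units in 3 messages
    return 0;
}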
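
The pneigh_sort_by_load() member template at the end of the new header builds its sort predicate with std::tr1::bind. The following self-contained sketch shows the same idiom outside the project; demo_neighbor and the free function sort_by_load are hypothetical stand-ins for the neighbor class and the member template, not code from the repository.

#include <algorithm>
#include <functional>        // std::less, std::greater
#include <iostream>
#include <tr1/functional>    // std::tr1::bind and its placeholders
#include <vector>

struct demo_neighbor {
    double load;
    explicit demo_neighbor(double l): load(l) { }
    double get_load() const { return load; }
};

template <typename Compare>
void sort_by_load(std::vector<demo_neighbor*>& pneigh, const Compare& comp)
{
    using std::tr1::bind;
    using std::tr1::placeholders::_1;
    using std::tr1::placeholders::_2;
    // The bind expression is equivalent to a hand-written predicate
    //     bool pred(demo_neighbor* a, demo_neighbor* b)
    //     { return comp(a->get_load(), b->get_load()); }
    // i.e. the comparator "comp" is applied to the two neighbors' loads.
    std::sort(pneigh.begin(), pneigh.end(),
              bind(comp,
                   bind(&demo_neighbor::get_load, _1),
                   bind(&demo_neighbor::get_load, _2)));
}

int main()
{
    demo_neighbor a(3.0), b(1.0), c(2.0);
    std::vector<demo_neighbor*> pneigh;
    pneigh.push_back(&a);
    pneigh.push_back(&b);
    pneigh.push_back(&c);

    sort_by_load(pneigh, std::less<double>());      // ascending load: 1 2 3
    for (size_t i = 0; i < pneigh.size(); ++i)
        std::cout << pneigh[i]->get_load() << ' ';
    std::cout << '\n';

    sort_by_load(pneigh, std::greater<double>());   // descending load: 3 2 1
    for (size_t i = 0; i < pneigh.size(); ++i)
        std::cout << pneigh[i]->get_load() << ' ';
    std::cout << '\n';
    return 0;
}

Passing std::less<double>() thus orders pneigh by increasing neighbor load and std::greater<double>() by decreasing load, which presumably lets each derived load-balancing algorithm pick its preferred ordering.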