- std::vector<neighbor> neigh;
- MAP_TEMPLATE<m_host_t, neighbor*> rev_neigh;
- std::vector<neighbor*> pneigh;
-
- communicator comm;
- int ctrl_close_pending;
- int data_close_pending;
-
- unsigned iter;
-
- double load;
- double expected_load;
-
- void compute();
- virtual double load_balance(double my_load);
- void send1_no_bookkeeping(neighbor& nb);
- void send1_bookkeeping(neighbor& nb);
- void send();
- bool receive(bool wait_for_close);
- void finalize1(neighbor& nb);
- void finalize();
- void print_loads(e_xbt_log_priority_t logp = xbt_log_priority_info);
-
- void insert_neighbor_in_map(neighbor& nb);
+ static double total_load_init; // sum of process loads at init
+ static double total_load_running; // sum of loads while running
+ static double total_load_exit; // sum of process loads at exit
+
+ typedef MAP_TEMPLATE<m_host_t, neighbor*> rev_neigh_type;
+ neigh_type neigh; // list of neighbors (do not alter
+ // after construction!)
+ rev_neigh_type rev_neigh; // map m_host_t -> neighbor
+
+ communicator comm; // communicator for this process
+ int ctrl_close_pending; // number of "close" messages to wait
+ // on ctrl channel
+ int data_close_pending; // number of "close" messages to wait
+ // on data channel
+ bool close_received; // true if we received a "close" message
+ bool finalizing; // true when finalize() is running
+
+ unsigned lb_iter; // counter of load-balancing iterations
+ unsigned comp_iter; // counter of computation iterations
+
+ double prev_load_broadcast; // last load value sent to neighbors;
+ // used to avoid re-sending the same
+ // information twice in a row
+ double real_load; // current load
+ double expected_load; // expected load in bookkeeping mode
+ double received_load; // load received from neighbors
+
+ mutex_t mutex; // synchronization between threads
+ condition_t cond; // condition paired with "mutex"
+ // (presumably signaled on message
+ // reception -- verify against users)
+ // Per-channel message accounting: total size and message count.
+ struct mesg_accounting {
+ double amount; // sum of message size
+ unsigned count; // number of messages
+ mesg_accounting(): amount(0.0), count(0) { }
+ };
+ // Aggregated statistics for this process; the mesg_accounting
+ // members are zeroed by their own default constructor, so the
+ // ctor only needs to initialize comp_amount.
+ struct accounting {
+ double comp_amount; // total computing done so far (flops)
+ mesg_accounting data_send; // data messages sent
+ mesg_accounting data_recv; // data messages received
+ mesg_accounting ctrl_send; // ctrl messages sent
+ mesg_accounting ctrl_recv; // ctrl messages received
+ accounting(): comp_amount(0.0) { }
+ };
+ accounting acc; // accounting data for this process
+
+ // Accounting helpers: record computation amount and per-channel
+ // message traffic (size + count) into "acc".
+ void add_comp_amount(double amount) { acc.comp_amount += amount; }
+ void add_data_send_mesg(double amount) {
+ ++acc.data_send.count;
+ acc.data_send.amount += amount;
+ }
+ void add_data_recv_mesg(double amount) {
+ ++acc.data_recv.count;
+ acc.data_recv.amount += amount;
+ }
+ void add_ctrl_send_mesg(double amount) {
+ ++acc.ctrl_send.count;
+ acc.ctrl_send.amount += amount;
+ }
+ void add_ctrl_recv_mesg(double amount) {
+ ++acc.ctrl_recv.count;
+ acc.ctrl_recv.amount += amount;
+ }
+
+ // Load-balancing loop
+ msg_thread* lb_thread;
+ void load_balance_loop();
+
+ // Simulate computation loop
+ void compute_loop();
+
+ // Check if we need to stop
+ bool still_running();
+
+ // Returns the sum of "to_send" for all neighbors.
+ double get_sum_of_to_send() const;
+
+ // Send procedures, one neighbor each
+ void ctrl_send(neighbor& nb);
+ void data_send(neighbor& nb);
+ void ctrl_close(neighbor& nb);
+ void data_close(neighbor& nb);
+
+ // Receive procedures
+ // Parameter "timeout" may be 0 for non-blocking operation, -1 for
+ // infinite waiting, or any positive timeout.
+ void ctrl_receive(double timeout);
+ void data_receive(double timeout);
+ // Process one received message coming from host "from"
+ // (presumably called by the receive procedures -- verify;
+ // ownership of "msg" after the call is not visible here)
+ void handle_message(message* msg, m_host_t from);