+ static double total_load_init; // sum of process loads at init
+ static double total_load_running; // sum of loads while running
+ static double total_load_exit; // sum of process loads at exit
+
+ static int process_counter;
+ static double total_load_average;
+
+ typedef MAP_TEMPLATE<m_host_t, neighbor*> rev_neigh_type;
+ neigh_type neigh; // list of neighbors (do not alter
+ // after construction!)
+ rev_neigh_type rev_neigh; // map m_host_t -> neighbor
+
+ communicator comm; // communicator for this process
+ int ctrl_close_pending; // number of "close" messages to wait
+ // for on the ctrl channel
+ int data_close_pending; // number of "close" messages to wait
+ // for on the data channel
+ bool close_received; // true if we received a "close" message
+ bool finalizing; // true when finalize() is running
+
+ unsigned lb_iter; // counter of load-balancing iterations
+ unsigned comp_iter; // counter of computation iterations
+ unsigned all_comp_iter; // counter of computation iterations
+ // (counting empty iterations too)
+
+ double prev_load_broadcast; // used to avoid sending the same
+ // load information twice in a row
+ double real_load; // current load
+ double expected_load; // expected load in bookkeeping mode
+ double received_load; // load received from neighbors
+
+ double convergence; // date when convergence was achieved, or -1.0
+
+ mutex_t mutex; // synchronization between threads
+ condition_t cond; // condition variable used with the mutex
+
+ struct mesg_accounting {
+ double amount; // sum of message size
+ unsigned count; // number of messages
+ mesg_accounting(): amount(0.0), count(0) { }
+ };
+ struct accounting {
+ double comp_amount; // total computing done so far (flops)
+ mesg_accounting data_send; // data messages sent
+ mesg_accounting data_recv; // data messages received
+ mesg_accounting ctrl_send; // ctrl messages sent
+ mesg_accounting ctrl_recv; // ctrl messages received
+ accounting(): comp_amount(0.0) { }
+ };
+ accounting acc; // use a structure so that it is
+ // automatically initialized at
+ // construction
+
+ void add_comp_amount(double amount) { acc.comp_amount += amount; }
+ void add_data_send_mesg(double amount) {
+ ++acc.data_send.count;
+ acc.data_send.amount += amount;
+ }
+ void add_data_recv_mesg(double amount) {
+ ++acc.data_recv.count;
+ acc.data_recv.amount += amount;
+ }
+ void add_ctrl_send_mesg(double amount) {
+ ++acc.ctrl_send.count;
+ acc.ctrl_send.amount += amount;
+ }
+ void add_ctrl_recv_mesg(double amount) {
+ ++acc.ctrl_recv.count;
+ acc.ctrl_recv.amount += amount;
+ }
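+
+ // Illustration only (not part of the declared interface): the send and
+ // receive procedures below are expected to call these helpers, e.g.
+ // data_send() adding the size of each message it pushes on the data
+ // channel via add_data_send_mesg(); the actual call sites are in the .cpp.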
+
+ // Load-balancing loop
+ msg_thread* lb_thread;
+ void load_balance_loop();
+
+ // Simulate computation loop
+ void compute_loop();
+
+ // Check if we need to stop
+ bool still_running();
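+
+ // Rough sketch of how these pieces are expected to fit together (for
+ // illustration only; the real definitions live in the .cpp file):
+ //   compute_loop() waits on "cond" until some real_load is available,
+ //   simulates that much computation and bumps comp_iter/all_comp_iter,
+ //   while load_balance_loop() runs in lb_thread, exchanges ctrl/data
+ //   messages with the neighbors and updates expected_load; both loops
+ //   stop once still_running() returns false.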
+
+ // Returns the sum of "to_send" for all neighbors.
+ double get_sum_of_to_send() const;
+
+ // Compute the load to actually send (used by data_send), subject to
+ // the execution parameters
+ static double compute_load_to_send(double desired);
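+ // For illustration, a plausible shape (the parameter names below are
+ // assumptions, not the actual execution parameters):
+ //   double compute_load_to_send(double desired) {
+ //       double amount = desired;
+ //       if (amount < min_transfer_amount)      // too small: send nothing
+ //           amount = 0.0;
+ //       if (max_transfer_amount > 0.0 && amount > max_transfer_amount)
+ //           amount = max_transfer_amount;      // cap the transfer
+ //       return amount;
+ //   }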
+
+ // Send procedures
+ void ctrl_send(neighbor& nb);
+ void data_send(neighbor& nb);
+ void ctrl_close(neighbor& nb);
+ void data_close(neighbor& nb);
+
+ // Receive procedures
+ // Parameter "timeout" may be 0 for non-blocking operation, -1 for
+ // infinite waiting, or any positive timeout.
+ void ctrl_receive(double timeout);
+ void data_receive(double timeout);
+ void handle_message(message* msg, m_host_t from);
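+
+ // For example (illustrative): data_receive(0.0) merely polls for already
+ // arrived messages, data_receive(-1.0) blocks until one arrives, and each
+ // message received on either channel is then passed to handle_message()
+ // together with the sending host.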