examples/msg/mc/bugged3
examples/msg/mc/random_test
examples/msg/chord/chord
-examples/msg/chord/chord*.xml
examples/msg/token_ring/token_ring
examples/simdag/ex_sd_seq_access
examples/simdag/sd_seq_access
The "Simplicity does not precede complexity, but follows it" release.
Models:
+ * Change the correction factors used in LMM model, according to
+ the latest experiments described in INRIA RR-7821.
+ Accuracy should be improved this way.
* Use the partial invalidation optimization by default for the
network too. Should produce the exact same results, only faster.
+ * Major cleanup in surf to merge models and split some optimization
+ mechanisms from the core of the models. As a result you can now
+ specify which model to use (e.g., --cfg=network/model:LV08
+ --cfg=cpu/model:Cas01) and which optimization mode to use
+ (e.g., --cfg=network/optim:lazy --cfg=cpu/optim:TI).
+ Incompatible combinations should err at initialization. See
+ --help-models for the list of all models and optimization modes.
+ * The CLM03 workstation model was dropped for simplicity because it
+ used the deprecated CM02 network model. Use default instead.
+ * Rename the TCP_gamma configuration option to network/TCP_gamma
+ * Rename the coordinates configuration option to
+ network/coordinates, and document it
+ * Now use the crosstraffic keyword instead of the terribly misleading
+ fullduplex keyword. It is activated by default now in the current
+ default model, use --cfg=network/crosstraffic:0 to turn it off.
+
+ Simix:
+ * Stabilize the parallel execution mode of user contexts
+ * Introduce configuration variables to control parallel execution:
+ - contexts/synchro: Synchronization mode to use when running
+ contexts in parallel (either futex, posix or busy_wait)
+ - contexts/parallel_threshold: Minimal number of user contexts
+ to be run in parallel (raw contexts only)
SimDag:
* Performance boost by using a swag internally to compute the set of
ADD_TEST(msg-sendrecv-Vegas-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/sendrecv/sendrecv_Vegas.tesh)
ADD_TEST(msg-sendrecv-Reno-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/sendrecv/sendrecv_Reno.tesh)
ADD_TEST(msg-suspend-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/suspend/suspend.tesh)
-ADD_TEST(msg-masterslave-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave.tesh)
-ADD_TEST(msg-masterslave-forwarder-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_forwarder.tesh)
-ADD_TEST(msg-masterslave-failure-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_failure.tesh)
ADD_TEST(msg-masterslave-bypass-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_bypass.tesh)
-ADD_TEST(msg-masterslave-mailbox-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_mailbox.tesh)
ADD_TEST(msg-masterslave-kill-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_kill.tesh)
+ADD_TEST(msg-masterslave-multicore-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_multicore.tesh)
+
+ADD_TEST(msg-masterslave-no-crosstraffic-mailbox-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_mailbox.tesh)
+ADD_TEST(msg-masterslave-no-crosstraffic-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave.tesh)
+ADD_TEST(msg-masterslave-no-crosstraffic-forwarder-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_forwarder.tesh)
+ADD_TEST(msg-masterslave-no-crosstraffic-failure-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_failure.tesh)
+
+ADD_TEST(msg-masterslave-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_crosstraffic.tesh)
+ADD_TEST(msg-masterslave-forwarder-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_forwarder_crosstraffic.tesh)
+ADD_TEST(msg-masterslave-failure-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_failure_crosstraffic.tesh)
+ADD_TEST(msg-masterslave-mailbox-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_mailbox_crosstraffic.tesh)
+ADD_TEST(msg-masterslave-cpu-ti-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/masterslave --cd ${CMAKE_HOME_DIRECTORY}/examples/msg masterslave/masterslave_cpu_ti_crosstraffic.tesh)
# Same MSG test set as the thread-factory group above, but run with
# ucontext-based user contexts (--cfg contexts/factory:ucontext);
# only registered when ucontext support was detected at configure time.
if(HAVE_UCONTEXT_H)
 ADD_TEST(msg-sendrecv-CLM03-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/sendrecv/sendrecv_CLM03.tesh)
 ADD_TEST(msg-sendrecv-Vegas-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/sendrecv/sendrecv_Vegas.tesh)
 ADD_TEST(msg-sendrecv-Reno-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/sendrecv/sendrecv_Reno.tesh)
 ADD_TEST(msg-suspend-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/suspend/suspend.tesh)
- ADD_TEST(msg-masterslave-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave.tesh)
- ADD_TEST(msg-masterslave-forwarder-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_forwarder.tesh)
- ADD_TEST(msg-masterslave-failure-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_failure.tesh)
 ADD_TEST(msg-masterslave-bypass-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_bypass.tesh)
- ADD_TEST(msg-masterslave-mailbox-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_mailbox.tesh)
 ADD_TEST(msg-masterslave-kill-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_kill.tesh)
+ ADD_TEST(msg-masterslave-multicore-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_multicore.tesh)
+ ADD_TEST(msg-masterslave-no-crosstraffic-mailbox-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_mailbox.tesh)
+ ADD_TEST(msg-masterslave-no-crosstraffic-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave.tesh)
+ ADD_TEST(msg-masterslave-no-crosstraffic-forwarder-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_forwarder.tesh)
+ ADD_TEST(msg-masterslave-no-crosstraffic-failure-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_failure.tesh)
+
+ ADD_TEST(msg-masterslave-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_crosstraffic.tesh)
+ ADD_TEST(msg-masterslave-forwarder-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_forwarder_crosstraffic.tesh)
+ ADD_TEST(msg-masterslave-failure-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_failure_crosstraffic.tesh)
+ ADD_TEST(msg-masterslave-mailbox-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_mailbox_crosstraffic.tesh)
+ ADD_TEST(msg-masterslave-cpu-ti-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/masterslave --cd ${CMAKE_HOME_DIRECTORY}/examples/msg masterslave/masterslave_cpu_ti_crosstraffic.tesh)
endif(HAVE_UCONTEXT_H)
# Same MSG test set again, run with SimGrid's raw contexts
# (--cfg contexts/factory:raw); guarded by the HAVE_RAWCTX configure check.
# The diff re-registers the whole group, renaming the old masterslave tests
# to *-no-crosstraffic-* and pointing the plain names at *_crosstraffic.tesh.
if(HAVE_RAWCTX)
- ADD_TEST(msg-sendrecv-CLM03-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/sendrecv/sendrecv_CLM03.tesh)
- ADD_TEST(msg-sendrecv-Vegas-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/sendrecv/sendrecv_Vegas.tesh)
- ADD_TEST(msg-sendrecv-Reno-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/sendrecv/sendrecv_Reno.tesh)
- ADD_TEST(msg-suspend-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/suspend/suspend.tesh)
- ADD_TEST(msg-masterslave-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave.tesh)
- ADD_TEST(msg-masterslave-forwarder-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_forwarder.tesh)
- ADD_TEST(msg-masterslave-failure-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_failure.tesh)
- ADD_TEST(msg-masterslave-bypass-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_bypass.tesh)
- ADD_TEST(msg-masterslave-mailbox-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_mailbox.tesh)
- ADD_TEST(msg-masterslave-kill-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_kill.tesh)
+ ADD_TEST(msg-sendrecv-CLM03-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/sendrecv/sendrecv_CLM03.tesh)
+ ADD_TEST(msg-sendrecv-Vegas-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/sendrecv/sendrecv_Vegas.tesh)
+ ADD_TEST(msg-sendrecv-Reno-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/sendrecv/sendrecv_Reno.tesh)
+ ADD_TEST(msg-suspend-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/suspend/suspend.tesh)
+ ADD_TEST(msg-masterslave-bypass-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_bypass.tesh)
+ ADD_TEST(msg-masterslave-kill-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_kill.tesh)
+ ADD_TEST(msg-masterslave-multicore-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_multicore.tesh)
+ ADD_TEST(msg-masterslave-no-crosstraffic-mailbox-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_mailbox.tesh)
+ ADD_TEST(msg-masterslave-no-crosstraffic-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave.tesh)
+ ADD_TEST(msg-masterslave-no-crosstraffic-forwarder-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_forwarder.tesh)
+ ADD_TEST(msg-masterslave-no-crosstraffic-failure-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_failure.tesh)
+
+ ADD_TEST(msg-masterslave-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_crosstraffic.tesh)
+ ADD_TEST(msg-masterslave-forwarder-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_forwarder_crosstraffic.tesh)
+ ADD_TEST(msg-masterslave-failure-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_failure_crosstraffic.tesh)
+ ADD_TEST(msg-masterslave-mailbox-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_mailbox_crosstraffic.tesh)
+ ADD_TEST(msg-masterslave-cpu-ti-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/masterslave --cd ${CMAKE_HOME_DIRECTORY}/examples/msg masterslave/masterslave_cpu_ti_crosstraffic.tesh)
endif(HAVE_RAWCTX)
ADD_TEST(msg-masterslave-vivaldi-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_vivaldi.tesh)
ADD_TEST(msg-token-ring-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg/token_ring --cd ${CMAKE_BINARY_DIR}/examples/msg/token_ring ${CMAKE_HOME_DIRECTORY}/examples/msg/token_ring/token_ring.tesh)
endif(HAVE_UCONTEXT_H)
# Remaining thread-factory tests (migration, ptask, priority, properties,
# icomms, actions, trace, chord). The chord tests are split into
# *-no-crosstraffic-* (old chord.tesh) and plain names (chord_crosstraffic.tesh);
# the parallel variants add --cfg contexts/nthreads:4.
-ADD_TEST(msg-masterslave-multicore-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_multicore.tesh)
ADD_TEST(msg-migration-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/migration/migration.tesh)
ADD_TEST(msg-ptask-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/parallel_task/parallel_task.tesh)
ADD_TEST(msg-priority-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/priority/priority.tesh)
ADD_TEST(msg-properties-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/properties/msg_prop.tesh)
ADD_TEST(msg-icomms-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg/icomms --cd ${CMAKE_BINARY_DIR}/examples/msg/icomms ${CMAKE_HOME_DIRECTORY}/examples/msg/icomms/peer.tesh)
ADD_TEST(msg-actions-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/actions --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/actions actions.tesh)
-ADD_TEST(msg-masterslave-cpu-ti-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/masterslave --cd ${CMAKE_HOME_DIRECTORY}/examples/msg masterslave/masterslave_cpu_ti.tesh)
ADD_TEST(msg-trace-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg --cd ${CMAKE_HOME_DIRECTORY}/examples/msg trace/trace.tesh)
-ADD_TEST(msg-chord-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
-ADD_TEST(msg-chord-thread-parallel ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --cfg contexts/nthreads:4 --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
+ADD_TEST(msg-chord-no-crosstraffic-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
+ADD_TEST(msg-chord-no-crosstraffic-thread-parallel ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --cfg contexts/nthreads:4 --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
+ADD_TEST(msg-chord-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord_crosstraffic.tesh)
+ADD_TEST(msg-chord-thread-parallel ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --cfg contexts/nthreads:4 --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord_crosstraffic.tesh)
# ucontext-factory counterparts of the migration/ptask/priority/properties/
# icomms/actions/trace/chord tests above; chord is split into
# no-crosstraffic (chord.tesh) and crosstraffic (chord_crosstraffic.tesh) runs.
if(HAVE_UCONTEXT_H)
- ADD_TEST(msg-masterslave-multicore-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_multicore.tesh)
 ADD_TEST(msg-migration-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/migration/migration.tesh)
 ADD_TEST(msg-ptask-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/parallel_task/parallel_task.tesh)
 ADD_TEST(msg-priority-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/priority/priority.tesh)
 ADD_TEST(msg-properties-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/properties/msg_prop.tesh)
 ADD_TEST(msg-icomms-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg/icomms --cd ${CMAKE_BINARY_DIR}/examples/msg/icomms ${CMAKE_HOME_DIRECTORY}/examples/msg/icomms/peer.tesh)
 ADD_TEST(msg-actions-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/actions --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/actions actions.tesh)
- ADD_TEST(msg-masterslave-cpu-ti-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/masterslave --cd ${CMAKE_HOME_DIRECTORY}/examples/msg masterslave/masterslave_cpu_ti.tesh)
 ADD_TEST(msg-trace-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg --cd ${CMAKE_HOME_DIRECTORY}/examples/msg trace/trace.tesh)
- ADD_TEST(msg-chord-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
- ADD_TEST(msg-chord-ucontext-parallel ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --cfg contexts/nthreads:4 --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
+ ADD_TEST(msg-chord-no-crosstraffic-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
+ ADD_TEST(msg-chord-no-crosstraffic-ucontext-parallel ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --cfg contexts/nthreads:4 --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
+ ADD_TEST(msg-chord-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord_crosstraffic.tesh)
+ ADD_TEST(msg-chord-ucontext-parallel ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --cfg contexts/nthreads:4 --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord_crosstraffic.tesh)
endif(HAVE_UCONTEXT_H)
# raw-context-factory counterparts of the same misc tests; the whole group
# is re-registered by the diff, dropping masterslave-multicore/cpu-ti here
# and splitting chord into no-crosstraffic and crosstraffic variants.
if(HAVE_RAWCTX)
- ADD_TEST(msg-masterslave-multicore-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_multicore.tesh)
- ADD_TEST(msg-migration-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/migration/migration.tesh)
- ADD_TEST(msg-ptask-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/parallel_task/parallel_task.tesh)
- ADD_TEST(msg-priority-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/priority/priority.tesh)
- ADD_TEST(msg-actions-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/actions --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/actions actions.tesh)
- ADD_TEST(msg-icomms-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg/icomms --cd ${CMAKE_BINARY_DIR}/examples/msg/icomms ${CMAKE_HOME_DIRECTORY}/examples/msg/icomms/peer.tesh)
- ADD_TEST(msg-properties-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/properties/msg_prop.tesh)
- ADD_TEST(msg-masterslave-cpu-ti-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/masterslave --cd ${CMAKE_HOME_DIRECTORY}/examples/msg masterslave/masterslave_cpu_ti.tesh)
- ADD_TEST(msg-trace-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg --cd ${CMAKE_HOME_DIRECTORY}/examples/msg trace/trace.tesh)
- ADD_TEST(msg-chord-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
- ADD_TEST(msg-chord-raw-parallel ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --cfg contexts/nthreads:4 --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
+ ADD_TEST(msg-migration-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/migration/migration.tesh)
+ ADD_TEST(msg-ptask-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/parallel_task/parallel_task.tesh)
+ ADD_TEST(msg-priority-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/priority/priority.tesh)
+ ADD_TEST(msg-actions-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/actions --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/actions actions.tesh)
+ ADD_TEST(msg-icomms-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg/icomms --cd ${CMAKE_BINARY_DIR}/examples/msg/icomms ${CMAKE_HOME_DIRECTORY}/examples/msg/icomms/peer.tesh)
+ ADD_TEST(msg-properties-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/properties/msg_prop.tesh)
+ ADD_TEST(msg-trace-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg --cd ${CMAKE_HOME_DIRECTORY}/examples/msg trace/trace.tesh)
+ ADD_TEST(msg-chord-no-crosstraffic-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
+ ADD_TEST(msg-chord-no-crosstraffic-raw-parallel ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --cfg contexts/nthreads:4 --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord.tesh)
+ ADD_TEST(msg-chord-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord_crosstraffic.tesh)
+ ADD_TEST(msg-chord-raw-parallel ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --cfg contexts/nthreads:4 --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/chord --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/chord chord_crosstraffic.tesh)
endif(HAVE_RAWCTX)
IF(${ARCH_32_BITS})
ADD_TEST(simdag-test-prop ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/simdag --cd ${CMAKE_BINARY_DIR}/examples/simdag ${CMAKE_HOME_DIRECTORY}/examples/simdag/properties/test_prop.tesh)
ADD_TEST(simdag-minmin-test ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --setenv bindir=${CMAKE_BINARY_DIR}/examples/simdag/scheduling --cd ${CMAKE_HOME_DIRECTORY}/examples/simdag/scheduling test_minmin.tesh)
+ADD_TEST(msg-gtnets-crosstraffic-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-crosstraffic.tesh)
+ADD_TEST(msg-gtnets-crosstraffic-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-crosstraffic.tesh)
+ADD_TEST(msg-gtnets-crosstraffic-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-crosstraffic.tesh)
+
if(HAVE_GTNETS)
ADD_TEST(msg-gtnets-waxman-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-waxman.tesh)
ADD_TEST(msg-gtnets-dogbone-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-dogbone-gtnets.tesh)
ADD_TEST(msg-gtnets-onelink-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-onelink-gtnets.tesh)
ADD_TEST(msg-gtnets-dogbone-lv08-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-dogbone-lv08.tesh)
ADD_TEST(msg-gtnets-onelink-lv08-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-onelink-lv08.tesh)
-ADD_TEST(msg-gtnets-fullduplex-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-fullduplex.tesh)
if(HAVE_UCONTEXT_H)
ADD_TEST(msg-gtnets-waxman-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-waxman.tesh)
ADD_TEST(msg-gtnets-dogbone-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-dogbone-gtnets.tesh)
ADD_TEST(msg-gtnets-onelink-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-onelink-gtnets.tesh)
ADD_TEST(msg-gtnets-dogbone-lv08-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-dogbone-lv08.tesh)
ADD_TEST(msg-gtnets-onelink-lv08-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-onelink-lv08.tesh)
- ADD_TEST(msg-gtnets-fullduplex-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-fullduplex.tesh)
endif(HAVE_UCONTEXT_H)
if(HAVE_RAWCTX)
ADD_TEST(msg-gtnets-waxman-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-waxman.tesh)
ADD_TEST(msg-gtnets-onelink-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-onelink-gtnets.tesh)
ADD_TEST(msg-gtnets-dogbone-lv08-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-dogbone-lv08.tesh)
ADD_TEST(msg-gtnets-onelink-lv08-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-onelink-lv08.tesh)
- ADD_TEST(msg-gtnets-fullduplex-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets/gtnets-fullduplex.tesh)
endif(HAVE_RAWCTX)
if(HAVE_TRACING)
set_tests_properties(mc-centralized-raw PROPERTIES WILL_FAIL true)
endif(HAVE_RAWCTX)
endif(HAVE_MC)
+
+# Those tests are broken due to wrong sorting in tesh... TODO: sort with more characters
+set_tests_properties(msg-chord-thread msg-chord-thread-parallel PROPERTIES WILL_FAIL true)
+if(HAVE_UCONTEXT_H)
+ set_tests_properties(msg-chord-ucontext msg-chord-ucontext-parallel PROPERTIES WILL_FAIL true)
+endif(HAVE_UCONTEXT_H)
+if(HAVE_RAWCTX)
+ set_tests_properties(msg-chord-raw msg-chord-raw-parallel PROPERTIES WILL_FAIL true)
+endif(HAVE_RAWCTX)
+
endif(release)
endif(NOT enable_memcheck)
endif(enable_gtnets)
if(enable_smpi)
include(FindF2c)
+ if(HAVE_F2C_H)
+ SET(HAVE_SMPI 1)
+ endif(HAVE_F2C_H)
endif(enable_smpi)
if(enable_lua)
include(FindLua51Simgrid)
src/surf/surf.c
src/surf/surfxml_parse.c
src/surf/surfxml_parseplatf.c
- src/surf/cpu.c
src/surf/network.c
- src/surf/network_im.c
src/surf/network_constant.c
src/surf/workstation.c
src/surf/workstation_ptask_L07.c
src/surf/cpu_ti.c
- src/surf/cpu_im.c
+ src/surf/cpu_cas01.c
src/surf/sg_platf.c
src/xbt/xbt_sg_stubs.c
)
COMMAND ${CMAKE_COMMAND} -E echo "XX First Doxygen pass"
COMMAND ${DOXYGEN_PATH}/doxygen Doxyfile
COMMAND ${CMAKE_HOME_DIRECTORY}/tools/doxygen/index_create.pl simgrid.tag index-API.doc
- COMMAND ${CMAKE_HOME_DIRECTORY}/tools/doxygen/toc_create.pl pls.doc index.doc gtut-introduction.doc installSimgrid.doc bindings.doc options.doc tracing.doc
+ COMMAND ${CMAKE_HOME_DIRECTORY}/tools/doxygen/toc_create.pl pls.doc index.doc FAQ.doc gtut-introduction.doc installSimgrid.doc bindings.doc options.doc tracing.doc
COMMAND ${CMAKE_COMMAND} -E echo "XX Second Doxygen pass"
COMMAND ${DOXYGEN_PATH}/doxygen Doxyfile
add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/msg/mc)\r
endif(HAVE_MC)\r
\r
-if(HAVE_GTNETS)\r
- add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets)\r
-endif(HAVE_GTNETS)\r
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/msg/gtnets)\r
\r
if(HAVE_NS3)\r
add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/msg/ns3)\r
/* Indicates that we have GTNETS support */
#cmakedefine HAVE_GTNETS @HAVE_GTNETS@
+/* Indicates that we have SMPI support */
+#cmakedefine HAVE_SMPI @HAVE_SMPI@
+
/* Indicates that we have NS3 support */
#cmakedefine HAVE_NS3 @HAVE_NS3@
+++ /dev/null
-<?xml version='1.0'?>
-<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid.dtd">
-<platform version="3">
- <AS id="AS_grid5000" routing="Floyd" >
- <AS id="AS_interne" routing="Floyd">
- <router id="lille"/>
- <router id="paris"/>
- <router id="nancy"/>
- <router id="rennes"/>
- <router id="lyon"/>
- <router id="bordeaux"/>
- <router id="grenoble"/>
- <router id="marseille"/>
- <router id="toulouse"/>
- <router id="sophia"/>
-
- <link id="Lille_Paris" bandwidth="1.25E9" latency="1.0E-4"/>
- <link id="Paris_Nancy" bandwidth="1.25E9" latency="1.0E-4"/>
- <link id="Paris_Rennes" bandwidth="1.25E9" latency="1.0E-4"/>
- <link id="Paris_Lyon" bandwidth="1.25E9" latency="1.0E-4"/>
- <link id="Bordeaux_Lyon" bandwidth="1.25E9" latency="1.0E-4"/>
- <link id="Lyon_Grenoble" bandwidth="1.25E9" latency="1.0E-4"/>
- <link id="Lyon_Marseille" bandwidth="1.25E9" latency="1.0E-4"/>
- <link id="Marseille_Sophia" bandwidth="1.25E9" latency="1.0E-4"/>
- <link id="Marseille_Toulouse" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <route src="lille" dst="paris" ><link_ctn id="Lille_Paris"/></route>
- <route src="paris" dst="nancy" ><link_ctn id="Paris_Nancy"/></route>
- <route src="paris" dst="rennes" ><link_ctn id="Paris_Rennes"/></route>
- <route src="paris" dst="lyon" ><link_ctn id="Paris_Lyon"/></route>
- <route src="bordeaux" dst="lyon" ><link_ctn id="Bordeaux_Lyon"/></route>
- <route src="lyon" dst="grenoble" ><link_ctn id="Lyon_Grenoble"/></route>
- <route src="lyon" dst="marseille" ><link_ctn id="Lyon_Marseille"/></route>
- <route src="marseille" dst="sophia" ><link_ctn id="Marseille_Sophia"/></route>
- <route src="marseille" dst="toulouse" ><link_ctn id="Marseille_Toulouse"/></route>
- </AS>
- <AS id="AS_bordeaux" routing="RuleBased" >
- <cluster id="AS_bordeplage" prefix="bordeplage-" suffix=".bordeplage.grid5000.fr"
- radical="1-51" power="5.2297E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_bordeplage" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_bordereau" prefix="bordereau-" suffix=".bordereau.grid5000.fr"
- radical="1-93" power="8.8925E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_bordereau" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_borderline" prefix="borderline-" suffix=".borderline.grid5000.fr"
- radical="1-10" power="13.357E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_borderline" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <AS id="gw_AS_bordeaux" routing="Full">
- <router id="gw_bordeaux"/>
- </AS>
- <link id="link_gw_bordeaux" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <ASroute src="^AS_(.*)$" dst="^AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="YES">
- <link_ctn id="link_$1src"/>
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- <ASroute src="^AS_(.*)$" dst="^gw_AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="gw_$1dst"
- symmetrical="NO">
- <link_ctn id="link_$1src"/>
- </ASroute>
-
- <ASroute src="^gw_AS_(.*)$" dst="^AS_(.*)$"
- gw_src="gw_$1src"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="NO">
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- </AS>
- <AS id="AS_grenoble" routing="RuleBased" >
- <cluster id="AS_adonis" prefix="adonis-" suffix=".adonis.grid5000.fr"
- radical="1-12" power="23.681E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_adonis" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_edel" prefix="edel-" suffix=".edel.grid5000.fr"
- radical="1-72" power="23.492E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_edel" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_genepi" prefix="genepi-" suffix=".genepi.grid5000.fr"
- radical="1-34" power="21.175E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_genepi" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <AS id="gw_AS_grenoble" routing="Full">
- <router id="gw_grenoble"/>
- </AS>
- <link id="link_gw_grenoble" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <ASroute src="^AS_(.*)$" dst="^AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="YES">
- <link_ctn id="link_$1src"/>
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- <ASroute src="^AS_(.*)$" dst="^gw_AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="gw_$1dst"
- symmetrical="NO">
- <link_ctn id="link_$1src"/>
- </ASroute>
-
- <ASroute src="^gw_AS_(.*)$" dst="^AS_(.*)$"
- gw_src="gw_$1src"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="NO">
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- </AS>
- <AS id="AS_lille" routing="RuleBased" >
- <cluster id="AS_chicon" prefix="chicon-" suffix=".chicon.grid5000.fr"
- radical="1-26" power="8.9618E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_chicon" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_chinqchint" prefix="chinqchint-" suffix=".chinqchint.grid5000.fr"
- radical="1-46" power="22.270E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_chinqchint" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <AS id="gw_AS_lille" routing="Full">
- <router id="gw_lille"/>
- </AS>
- <link id="link_gw_lille" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <ASroute src="^AS_(.*)$" dst="^AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="YES">
- <link_ctn id="link_$1src"/>
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- <ASroute src="^AS_(.*)$" dst="^gw_AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="gw_$1dst"
- symmetrical="NO">
- <link_ctn id="link_$1src"/>
- </ASroute>
-
- <ASroute src="^gw_AS_(.*)$" dst="^AS_(.*)$"
- gw_src="gw_$1src"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="NO">
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- </AS>
- <AS id="AS_lyon" routing="RuleBased" >
- <cluster id="AS_capricorne" prefix="capricorne-" suffix=".capricorne.grid5000.fr"
- radical="1-56" power="4.7233E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_capricorne" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_sagittaire" prefix="sagittaire-" suffix=".sagittaire.grid5000.fr"
- radical="1-79" power="5.6693E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_sagittaire" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <AS id="gw_AS_lyon" routing="Full">
- <router id="gw_lyon"/>
- </AS>
- <link id="link_gw_lyon" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <ASroute src="^AS_(.*)$" dst="^AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="YES">
- <link_ctn id="link_$1src"/>
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- <ASroute src="^AS_(.*)$" dst="^gw_AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="gw_$1dst"
- symmetrical="NO">
- <link_ctn id="link_$1src"/>
- </ASroute>
-
- <ASroute src="^gw_AS_(.*)$" dst="^AS_(.*)$"
- gw_src="gw_$1src"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="NO">
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- </AS>
- <AS id="AS_nancy" routing="RuleBased" >
- <cluster id="AS_graphene" prefix="graphene-" suffix=".graphene.grid5000.fr"
- radical="1-144" power="16.673E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_graphene" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_griffon" prefix="griffon-" suffix=".griffon.grid5000.fr"
- radical="1-92" power="20.678E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_griffon" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <AS id="gw_AS_nancy" routing="Full">
- <router id="gw_nancy"/>
- </AS>
- <link id="link_gw_nancy" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <ASroute src="^AS_(.*)$" dst="^AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="YES">
- <link_ctn id="link_$1src"/>
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- <ASroute src="^AS_(.*)$" dst="^gw_AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="gw_$1dst"
- symmetrical="NO">
- <link_ctn id="link_$1src"/>
- </ASroute>
-
- <ASroute src="^gw_AS_(.*)$" dst="^AS_(.*)$"
- gw_src="gw_$1src"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="NO">
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- </AS>
- <AS id="AS_orsay" routing="RuleBased" >
- <cluster id="AS_gdx" prefix="gdx-" suffix=".gdx.grid5000.fr"
- radical="1-310" power="4.7153E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_gdx" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_netgdx" prefix="netgdx-" suffix=".netgdx.grid5000.fr"
- radical="1-30" power="4.7144E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_netgdx" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <AS id="gw_AS_orsay" routing="Full">
- <router id="gw_orsay"/>
- </AS>
- <link id="link_gw_orsay" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <ASroute src="^AS_(.*)$" dst="^AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="YES">
- <link_ctn id="link_$1src"/>
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- <ASroute src="^AS_(.*)$" dst="^gw_AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="gw_$1dst"
- symmetrical="NO">
- <link_ctn id="link_$1src"/>
- </ASroute>
-
- <ASroute src="^gw_AS_(.*)$" dst="^AS_(.*)$"
- gw_src="gw_$1src"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="NO">
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- </AS>
- <AS id="AS_rennes" routing="RuleBased" >
- <cluster id="AS_paradent" prefix="paradent-" suffix=".paradent.grid5000.fr"
- radical="1-64" power="21.496E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_paradent" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_paramount" prefix="paramount-" suffix=".paramount.grid5000.fr"
- radical="1-33" power="12.910E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_paramount" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_parapide" prefix="parapide-" suffix=".parapide.grid5000.fr"
- radical="1-25" power="30.130E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_parapide" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_parapluie" prefix="parapluie-" suffix=".parapluie.grid5000.fr"
- radical="1-40" power="27.391E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_parapluie" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <AS id="gw_AS_rennes" routing="Full">
- <router id="gw_rennes"/>
- </AS>
- <link id="link_gw_rennes" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <ASroute src="^AS_(.*)$" dst="^AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="YES">
- <link_ctn id="link_$1src"/>
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- <ASroute src="^AS_(.*)$" dst="^gw_AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="gw_$1dst"
- symmetrical="NO">
- <link_ctn id="link_$1src"/>
- </ASroute>
-
- <ASroute src="^gw_AS_(.*)$" dst="^AS_(.*)$"
- gw_src="gw_$1src"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="NO">
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- </AS>
- <AS id="AS_sophia" routing="RuleBased" >
- <cluster id="AS_helios" prefix="helios-" suffix=".helios.grid5000.fr"
- radical="1-56" power="7.7318E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_helios" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_sol" prefix="sol-" suffix=".sol.grid5000.fr"
- radical="1-50" power="8.9388E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_sol" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_suno" prefix="suno-" suffix=".suno.grid5000.fr"
- radical="1-45" power="23.530E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_suno" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <AS id="gw_AS_sophia" routing="Full">
- <router id="gw_sophia"/>
- </AS>
- <link id="link_gw_sophia" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <ASroute src="^AS_(.*)$" dst="^AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="YES">
- <link_ctn id="link_$1src"/>
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- <ASroute src="^AS_(.*)$" dst="^gw_AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="gw_$1dst"
- symmetrical="NO">
- <link_ctn id="link_$1src"/>
- </ASroute>
-
- <ASroute src="^gw_AS_(.*)$" dst="^AS_(.*)$"
- gw_src="gw_$1src"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="NO">
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- </AS>
- <AS id="AS_toulouse" routing="RuleBased" >
- <cluster id="AS_pastel" prefix="pastel-" suffix=".pastel.grid5000.fr"
- radical="1-80" power="9.5674E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_pastel" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <cluster id="AS_violette" prefix="violette-" suffix=".violette.grid5000.fr"
- radical="1-52" power="5.1143E9" bw="1.25E8" lat="1.0E-4"
- bb_bw="1.25E9" bb_lat="1.0E-4"></cluster>
- <link id="link_violette" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <AS id="gw_AS_toulouse" routing="Full">
- <router id="gw_toulouse"/>
- </AS>
- <link id="link_gw_toulouse" bandwidth="1.25E9" latency="1.0E-4"/>
-
- <ASroute src="^AS_(.*)$" dst="^AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="YES">
- <link_ctn id="link_$1src"/>
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- <ASroute src="^AS_(.*)$" dst="^gw_AS_(.*)$"
- gw_src="$1src-AS_$1src_router.$1src.grid5000.fr"
- gw_dst="gw_$1dst"
- symmetrical="NO">
- <link_ctn id="link_$1src"/>
- </ASroute>
-
- <ASroute src="^gw_AS_(.*)$" dst="^AS_(.*)$"
- gw_src="gw_$1src"
- gw_dst="$1dst-AS_$1dst_router.$1dst.grid5000.fr"
- symmetrical="NO">
- <link_ctn id="link_$1dst"/>
- </ASroute>
-
- </AS>
- <ASroute src="AS_toulouse" dst="AS_interne" gw_src="gw_toulouse" gw_dst="toulouse" symmetrical="YES">
- <link_ctn id="link_gw_toulouse"/>
- </ASroute>
- <ASroute src="AS_sophia" dst="AS_interne" gw_src="gw_sophia" gw_dst="sophia" symmetrical="YES">
- <link_ctn id="link_gw_sophia"/>
- </ASroute>
- <ASroute src="AS_rennes" dst="AS_interne" gw_src="gw_rennes" gw_dst="rennes" symmetrical="YES">
- <link_ctn id="link_gw_rennes"/>
- </ASroute>
- <ASroute src="AS_orsay" dst="AS_interne" gw_src="gw_orsay" gw_dst="paris" symmetrical="YES">
- <link_ctn id="link_gw_orsay"/>
- </ASroute>
- <ASroute src="AS_nancy" dst="AS_interne" gw_src="gw_nancy" gw_dst="nancy" symmetrical="YES">
- <link_ctn id="link_gw_nancy"/>
- </ASroute>
- <ASroute src="AS_lyon" dst="AS_interne" gw_src="gw_lyon" gw_dst="lyon" symmetrical="YES">
- <link_ctn id="link_gw_lyon"/>
- </ASroute>
- <ASroute src="AS_lille" dst="AS_interne" gw_src="gw_lille" gw_dst="lille" symmetrical="YES">
- <link_ctn id="link_gw_lille"/>
- </ASroute>
- <ASroute src="AS_grenoble" dst="AS_interne" gw_src="gw_grenoble" gw_dst="grenoble" symmetrical="YES">
- <link_ctn id="link_gw_grenoble"/>
- </ASroute>
- <ASroute src="AS_bordeaux" dst="AS_interne" gw_src="gw_bordeaux" gw_dst="bordeaux" symmetrical="YES">
- <link_ctn id="link_gw_bordeaux"/>
- </ASroute>
- </AS>
-</platform>
my($l);
my($tmp);
-print "#! ./tesh\n";
-
while(defined($line=<SH_LIGNE>))
{
- if($line =~ /^p(.*)$/)
+ if($line =~ /^\$(.*)$/)
{
- print "$line\n";
- }
+ $line_exec = $line;
+ $line =~ s/\$\{srcdir\:\=\.\}/./g;
+ $line =~ s/\(/\\(/g;
+ $line =~ s/\)/\\)/g;
+ $line =~ s/\$SG_TEST_EXENV//g;
+ $line =~ s/\$EXEEXT//g;
+ $line =~ s/^\$\ */.\//g;
+ $line =~ s/^.\/lua/lua/g;
+ $line =~ s/^.\/ruby/ruby/g;
+ $line =~ s/--log=([^ ]*)/--log="$1"/g;
+ print "$line_exec";
+ chomp $line;
+ open (FILE, "$line 2>&1|");
+ while(defined($l=<FILE>))
+ {
+ chomp $l;
+ print "\> $l\n";
+ }
+ close(FILE);
+ }
else
{
- if($line =~ /^\$(.*)$/)
- {
- $line_exec = $line;
- $line =~ s/\$\{srcdir\:\=\.\}/./g;
- $line =~ s/\$SG_TEST_EXENV//g;
- $line =~ s/\$EXEEXT//g;
- $line =~ s/^\$\ */.\//g;
- $line =~ s/^.\/lua/lua/g;
- $line =~ s/^.\/ruby/ruby/g;
- $line =~ s/--log=([^ ]*)/--log="$1"/g;
- print "\n$line_exec";
- chomp $line;
- open (FILE, "$line 2>&1|");
- while(defined($l=<FILE>))
- {
- chomp $l;
- print "\> $l\n";
- }
-
+ if($line =~ /^\>(.*)$/)
+ {
}
- close(FILE);
- }
+ else
+ {
+ print "$line";
+ }
+ }
}
close(SH_LIGNE);
S_XY += (X[i] - avg_X) * (Y[i] - avg_Y)
return (S_XY / n)
-##----------------------------------
+##---------------------------------------------------------------------
## variance : variance
## param X data vector ( ..x_i.. )
## (S_X)^2 = (Sum ( x_i - avg(x) )^2 ) / n
-##----------------------------------
+##---------------------------------------------------------------------
def variance (X):
n = len(X)
avg_X = avg (X)
S_X2 += (X[i] - avg_X) ** 2
return (S_X2 / n)
+##---------------------------------------------------------------------
+## calibrate : output correction factors, c_lat on latency, c_bw on bw
+## such that bandwidth * c_bw = bw_regr, latency * c_lat = lat_regr
+## where bw_regr and lat_regr are the values approximating experimental
+## observations.
+##
+## param links number of links traversed during ping-pong
+## param latency as specified on command line, in s
+## param bandwidth as specified on command line, in Byte/s
+## param sizes vector of data sizes, in Bytes
+## param timings vector of time taken: timings[i] for sizes[i], in us
+##---------------------------------------------------------------------
def calibrate (links, latency, bandwidth, sizes, timings):
assert len(sizes) == len(timings)
if len(sizes) < 2:
return None
+ # compute linear regression : find an affine form time = a*size+b
S_XY = cov(sizes, timings)
S_X2 = variance(sizes)
a = S_XY / S_X2
b = avg(timings) - a * avg(sizes)
- return (b * 1e-6) / (latency * links), 1e6 / (a * bandwidth)
+ # corresponding bandwidth, in byte/s (was in byte/us in skampi dat)
+ bw_regr = 1e6 / a
+ # corresponding latency, in s (was in us in skampi dat)
+ lat_regr = b*1e-6
+ print("\nregression: {0} * x + {1}".format(a,b))
+ print("corr_bw = bw_regr/bandwidth= {0}/{1}={2} lat_regr/(lat_xml*links)={3}/({4}*{5}))".format(bw_regr,bandwidth,bw_regr/bandwidth,lat_regr,latency,links))
+ # return linear regression result and corresponding correction factors c_bw,c_lat
+ return a,b, bw_regr/bandwidth, lat_regr/(latency*links)
+
+
+##---------------------------------------------------------------------
+## outputs a C formatted conditional return value for factor
+##
+## param lb lower bound
+## param ub upper bound
+## param lb_included boolean to tell if the lower bound is included (<=) or excluded (<)
+## param ub_included boolean to tell if the upper bound is included (<=) or excluded (<)
+##---------------------------------------------------------------------
+def c_code_print (lb,ub, retval, lb_included, ub_included):
+ lb_cmp = ub_cmp = "<"
+ if lb_included:
+ lb_cmp ="<="
+ if ub_included:
+ ub_cmp ="<="
+
+ ub_kib=ub/1024.
+ lb_kib=lb/1024.
+ print("\t /* case {0:.1f} KiB {1} size {2} {3:.1f} KiB */".format(lb_kib,lb_cmp,ub_cmp,ub_kib))
+ print("\t if ({0:d} {1} size && size {2} {3:d}) ".format(lb,lb_cmp,ub_cmp,ub))
+ print("\t return({0});" . format(retval))
+
##-----------------------------------------------------------------------------------------------
## main
## ---------------
#count= 8388608 8388608 144916.1 7.6 32 144916.1 143262.0
#("%s %d %d %f %f %d %f %f\n" % (countlbl, count, countn, time, stddev, iter, mini, maxi)
- readdata.append( (int(l[1]),float(l[3]) / 2 ) ); # divide by 2 because of ping-pong measured
+ readdata.append( (int(l[1]),float(l[3]) / 2) ); # divide by 2 because of ping-pong measured
## These may not be sorted so sort it by message size before processing.
sorteddata = sorted( readdata, key=lambda pair: pair[0])
-sizes,timings = zip(*sorteddata);
+sizes,timings= zip(*sorteddata)
## adds message sizes of interest: if values are specified starting from the 6th command line arg
limits += [idx for idx in range(len(sizes)) if sizes[idx] == int(sys.argv[i])]
limits.append(len(sizes) - 1)
+factors = []
low = 0
for lim in limits:
correc = calibrate(links, latency, bandwidth, sizes[low:lim + 1], timings[low:lim + 1])
if correc:
- print("Segment [%d:%d] -- Latency factor=%g -- Bandwidth factor=%g" % (sizes[low], sizes[lim], correc[0], correc[1]))
+ # save interval [lb,ub] correction, regression line direction and origin
+ # and corresponding correction factors for bw and lat resp.
+ (dircoef,origin,factor_bw,factor_lat) = correc
+ factors.append( (sizes[low],sizes[lim], dircoef, origin, factor_bw,factor_lat) )
+ print("Segment [%d:%d] --Bandwidth factor=%g --Latency factor=%g " % (sizes[low], sizes[lim], factor_bw,factor_lat))
low = lim + 1
+
+# now computes joining lines between segments
+joinseg=[]
+
+print("\n/**\n *------------------ <copy/paste C code snippet in surf/network.c> ----------------------")
+print(" *\n * produced by: {0}\n *".format(' '.join(sys.argv)))
+print(" *---------------------------------------------------------------------------------------\n **/")
+
+# print correction factor for bandwidth for each segment
+print("static double smpi_bandwidth_factor(double size)\n{")
+for (lb,ub,a,b,factor_bw,factor_lat) in factors:
+ c_code_print(lb,ub,factor_bw,True,True)
+
+ # save ends and starts of segments
+ if lb != sizes[0]:
+ joinseg.append( (lb,timings[sizes.index(lb)]) )
+ if ub != sizes[-1]:
+ joinseg.append( (ub,timings[sizes.index(ub)]) )
+
+# print correction factor for bandwidth between segments
+joinseg.reverse()
+print("\n\t /* ..:: inter-segment corrections ::.. */");
+inx=len(joinseg)-1
+while inx>=1:
+ (x0,y0) = joinseg[inx]
+ inx = inx -1
+ (x1,y1) = joinseg[inx]
+ inx = inx -1
+ # line eq. is y = (y1-y0)/(x1-x0) * x + (y0*x1 - y1*x0)/(x1-x0)
+ a = (y1-y0) / (x1-x0)
+ bw_join = 1e6 / a
+ factor_join_bw = bw_join / bandwidth
+ #print("Joining points (%f,%f) -> (%f,%f) : line dir : a=%g\n" % (x0,y0,x1,y1,a))
+ c_code_print(x0,x1,factor_join_bw,False,False)
+
+print("}\n")
+
+# print correction factor for latency for each segment
+print("static double smpi_latency_factor(double size)\n{")
+for (lb,ub,a,b,factor_bw,factor_lat) in factors:
+ c_code_print(lb,ub,factor_lat,True,True)
+
+print("\n\t /* ..:: inter-segment corrections ::.. */");
+while joinseg:
+ (x0,y0) = joinseg.pop()
+ (x1,y1) = joinseg.pop()
+ # line eq. is y = (y0-y1)/(x0-x1) * x + (y0 x1 - y1 x0)/(x1-x0)
+ #print("(%f,%f) -> (%f,%f)\n" % (x0,y0,x1,y1))
+ b = 1e-6 * (y0*x1-y1*x0) / (x1-x0)
+ factor_join_lat = b / (latency*links)
+ c_code_print(x0,x1,factor_join_lat,False,False)
+
+print("}\n")
+
+print("\n/**\n *------------------ <copy/paste C code snippet in surf/network.c> ----------------------\n **/")
--- /dev/null
+# SkaMPI pt2pt test
+# on cluster : griffon (nancy) Grid5000. See Grid5000 website for technical data.
+
+#vendor_id : GenuineIntel
+#cpu family : 6
+#model : 23
+#model name : Intel(R) Xeon(R) CPU L5420 @ 2.50GHz
+#stepping : 10
+#cpu MHz : 2493.757
+#cache size : 6144 KB
+
+
+# /home/sgenaud/openmpi.install/bin/mpiexec --mca btl_tcp_if_include eth0 --mca btl_tcp_if_exclude ib0 --mca btl tcp,self -mca orte_base_help_aggregate 0 -mca plm_rsh_agent oarsh -machinefile machinefile -n 2 skampi -i ski_smpi/skampi_pt2pt.ski
+
+# Finished at Fri Jul 9 16:19:24 2010
+# SKaMPI Version 5.0.4 rev. 355
+# Started at Fri Jul 9 16:19:02 2010
+# Total runtime 22 seconds
+# begin result "Pingpong_Send_Recv"
+# duration = 22.49 sec
+# end result "Pingpong_Send_Recv"
+
+count= 1 1 109.2 0.2 32 106.7 107.7
+count= 2 2 109.3 0.3 32 107.6 107.0
+count= 4 4 111.7 0.7 32 107.8 109.7
+count= 8 8 109.0 0.3 32 106.8 107.0
+count= 16 16 109.1 0.4 32 108.0 106.7
+count= 32 32 111.1 0.4 32 110.5 107.4
+count= 64 64 110.2 0.2 32 107.8 108.6
+count= 128 128 118.9 1.2 32 109.9 116.2
+count= 256 256 145.8 0.4 32 138.4 144.9
+count= 512 512 161.7 0.3 32 154.7 161.6
+count= 1024 1024 190.4 0.2 32 182.7 190.4
+count= 1024 1024 193.4 0.3 32 185.9 193.0
+count= 1536 1536 237.3 0.5 32 226.9 237.3
+count= 2048 2048 248.0 0.8 32 238.4 247.9
+count= 2048 2048 249.2 1.0 32 240.1 247.7
+count= 2560 2560 262.9 0.6 32 251.7 262.9
+count= 3072 3072 277.9 0.4 32 272.7 275.0
+count= 3584 3584 285.0 0.5 32 275.0 285.0
+count= 4096 4096 303.0 0.9 32 293.5 298.9
+count= 4096 4096 303.4 0.8 32 297.2 301.6
+count= 4608 4608 310.0 0.8 32 310.0 291.7
+count= 5120 5120 321.4 0.7 32 321.4 291.5
+count= 5632 5632 343.7 0.7 32 343.7 312.0
+count= 6144 6144 320.7 0.5 32 320.7 282.6
+count= 6656 6656 339.4 0.4 32 339.4 296.6
+count= 7168 7168 353.3 0.7 32 353.3 308.5
+count= 7680 7680 386.7 0.8 32 385.3 378.8
+count= 8192 8192 389.1 0.6 32 389.1 373.8
+count= 8192 8192 393.7 0.8 32 391.1 380.4
+count= 16384 16384 627.7 0.7 32 627.7 533.9
+count= 32768 32768 1060.2 0.9 32 1060.2 828.9
+count= 32768 32768 1062.0 1.0 32 1062.0 836.4
+count= 33792 33792 1096.1 0.8 32 1096.1 838.2
+count= 34816 34816 1109.6 0.7 32 1109.6 835.3
+count= 35840 35840 1125.7 1.7 32 1125.7 856.3
+count= 36864 36864 1168.1 1.4 32 1168.1 930.2
+count= 37888 37888 1208.8 0.6 32 1208.8 937.2
+count= 38912 38912 1229.7 1.0 32 1229.7 952.2
+count= 39936 39936 1255.4 1.2 32 1255.4 969.3
+count= 40960 40960 1292.8 0.9 32 1292.8 978.8
+count= 41984 41984 1300.8 1.6 32 1300.8 976.3
+count= 43008 43008 1322.0 1.2 32 1322.0 988.2
+count= 44032 44032 1365.8 1.0 32 1365.8 1064.5
+count= 45056 45056 1400.6 0.8 32 1400.6 1072.9
+count= 46080 46080 1426.9 0.9 32 1426.9 1096.7
+count= 47104 47104 1445.7 0.9 32 1445.7 1101.4
+count= 48128 48128 1489.7 0.9 32 1489.7 1119.3
+count= 49152 49152 1516.2 0.6 32 1516.2 1137.3
+count= 50176 50176 1515.0 0.9 32 1515.0 1124.5
+count= 51200 51200 1558.4 1.4 32 1558.4 1202.6
+count= 52224 52224 1595.4 0.8 32 1595.4 1209.2
+count= 53248 53248 1614.3 1.0 32 1614.3 1219.8
+count= 54272 54272 1647.7 0.8 32 1647.7 1244.3
+count= 55296 55296 1684.2 0.9 32 1684.2 1258.9
+count= 56320 56320 1714.6 0.8 32 1714.6 1281.8
+count= 57344 57344 1710.3 1.4 32 1710.3 1262.5
+count= 58368 58368 1757.1 1.1 32 1757.1 1344.3
+count= 59392 59392 1793.4 1.3 32 1793.4 1350.9
+count= 60416 60416 1817.8 0.7 32 1817.8 1366.0
+count= 61440 61440 1850.5 1.5 32 1850.5 1392.2
+count= 62464 62464 1884.3 1.3 32 1884.3 1398.8
+count= 63488 63488 1896.7 1.2 32 1896.7 1403.7
+count= 64512 64512 1907.1 1.0 32 1907.1 1405.3
+count= 65536 65536 2250.0 1.2 32 2250.0 1648.1
+count= 65536 65536 2253.6 1.8 32 2253.6 1653.3
+count= 66560 66560 2263.2 0.8 32 2263.2 1654.2
+count= 67584 67584 2284.4 1.4 32 2284.4 1655.7
+count= 68608 68608 2308.5 1.7 32 2308.5 1685.1
+count= 69632 69632 2299.3 2.7 32 2299.3 1672.3
+count= 70656 70656 2342.2 2.4 32 2342.2 1708.0
+count= 71680 71680 2385.5 2.4 32 2385.5 1733.6
+count= 72704 72704 2422.9 2.5 32 2422.9 1765.7
+count= 73728 73728 2441.9 1.7 32 2441.9 1783.1
+count= 74752 74752 2465.0 1.9 32 2465.0 1781.5
+count= 75776 75776 2482.6 2.5 32 2482.6 1806.1
+count= 76800 76800 2490.0 2.1 32 2490.0 1808.3
+count= 77824 77824 2503.4 2.8 32 2503.4 1815.1
+count= 78848 78848 2568.6 2.6 32 2568.6 1864.5
+count= 79872 79872 2618.4 2.2 32 2618.4 1897.0
+count= 80896 80896 2615.7 2.6 32 2615.7 1891.0
+count= 81920 81920 2641.5 2.1 32 2641.5 1919.9
+count= 82944 82944 2647.5 2.4 32 2647.5 1919.4
+count= 83968 83968 2662.6 1.8 32 2662.6 1929.4
+count= 84992 84992 2693.4 1.7 32 2693.4 1954.6
+count= 86016 86016 2733.1 2.3 32 2733.1 1974.2
+count= 87040 87040 2784.9 2.0 32 2784.9 2013.4
+count= 88064 88064 2792.3 1.9 32 2792.3 2014.1
+count= 89088 89088 2820.0 1.8 32 2820.0 2032.9
+count= 90112 90112 2833.6 1.7 32 2833.6 2045.1
+count= 91136 91136 2861.4 1.1 32 2861.4 2056.6
+count= 92160 92160 2873.4 1.5 32 2873.4 2071.6
+count= 93184 93184 2926.1 0.8 32 2926.1 2103.4
+count= 94208 94208 2960.6 1.8 32 2960.6 2128.3
+count= 95232 95232 2970.0 1.6 32 2970.0 2145.8
+count= 96256 96256 2993.8 1.7 32 2993.8 2147.2
+count= 97280 97280 3010.7 2.4 32 3010.7 2169.1
+count= 98304 98304 3039.6 1.7 32 3039.6 2180.1
+count= 99328 99328 3048.7 2.0 32 3048.7 2186.0
+count= 100352 100352 3107.2 1.4 32 3107.2 2227.0
+count= 101376 101376 3147.7 1.4 32 3147.7 2258.1
+count= 102400 102400 3145.7 1.8 32 3145.7 2256.1
+count= 103424 103424 3171.8 1.3 32 3171.8 2266.4
+count= 104448 104448 3200.2 2.4 32 3200.2 2300.5
+count= 105472 105472 3219.3 2.7 32 3219.3 2301.5
+count= 106496 106496 3229.6 2.1 32 3229.6 2308.6
+count= 107520 107520 3284.7 2.3 32 3284.7 2344.4
+count= 108544 108544 3311.8 2.2 32 3311.8 2364.9
+count= 109568 109568 3323.4 1.7 32 3323.4 2376.3
+count= 110592 110592 3354.0 2.0 32 3354.0 2399.0
+count= 111616 111616 3376.2 1.7 32 3376.2 2419.3
+count= 112640 112640 3397.4 2.4 32 3397.4 2429.9
+count= 113664 113664 3401.1 1.4 32 3401.1 2430.6
+count= 114688 114688 3458.1 2.7 32 3458.1 2470.8
+count= 115712 115712 3481.2 3.4 32 3481.2 2484.3
+count= 116736 116736 3512.1 2.2 32 3512.1 2502.9
+count= 117760 117760 3532.9 2.1 32 3532.9 2524.1
+count= 118784 118784 3544.9 1.9 32 3544.9 2527.8
+count= 119808 119808 3575.5 1.4 32 3575.5 2544.6
+count= 120832 120832 3584.4 2.2 32 3584.4 2554.0
+count= 121856 121856 3641.5 2.3 32 3641.5 2593.2
+count= 122880 122880 3662.0 1.7 32 3662.0 2608.6
+count= 123904 123904 3695.2 1.7 32 3695.2 2638.5
+count= 124928 124928 3711.6 1.6 32 3711.6 2643.5
+count= 125952 125952 3737.9 1.3 32 3737.9 2659.9
+count= 126976 126976 3750.0 1.8 32 3750.0 2673.3
+count= 128000 128000 3761.6 2.2 32 3761.6 2678.8
+count= 129024 129024 3822.9 1.8 32 3822.9 2725.9
+count= 130048 130048 3838.5 2.3 32 3838.5 2727.5
+count= 131072 131072 3870.2 1.8 32 3870.2 2750.0
+count= 131072 131072 3873.2 1.8 32 3873.2 2757.4
+count= 262144 262144 7121.5 1.0 32 7121.5 4980.9
+count= 524288 524288 10740.7 4.1 32 10740.7 9458.5
+count= 1048576 1048576 19872.8 4.9 32 19872.8 18373.7
+count= 2097152 2097152 37947.8 63.3 32 37947.8 36227.3
+count= 4194304 4194304 73299.4 7.8 32 73299.4 71898.5
+count= 8388608 8388608 144965.0 15.2 32 144965.0 143205.7
--- /dev/null
+0 98
+1 100
+2 100
+3 100
+4 100
+5 100
+6 100
+7 100
+8 100
+9 100
+10 100
+11 99
+12 97
+13 95
+14 90
+15 83
+16 82
+17 82
+18 82
+19 92
+20 87
+21 82
+22 76
+23 76
+24 89
+25 94
+26 97
+27 100
+28 100
+29 100
+30 100
+31 100
+32 100
+33 100
+34 100
+35 100
+36 100
+37 100
+38 100
+39 99
+40 96
+41 94
+42 89
+43 83
+44 82
+45 86
+46 94
+47 79
+48 76
+49 82
+50 84
+51 92
+52 97
+53 98
+54 100
+55 100
+56 100
+57 100
+58 100
+59 100
+60 100
+61 100
+62 100
+63 100
+64 100
+65 100
+66 99
+67 97
+68 94
+69 86
+70 82
+71 82
+72 91
+73 94
+74 90
+75 88
+76 89
+77 93
+78 97
+79 100
+80 100
+81 100
+82 100
+83 100
+84 100
+85 100
+86 100
+87 100
+88 100
+89 100
+90 100
+91 100
+92 99
+93 97
+94 94
+95 90
+96 82
+97 82
+98 87
+99 94
+100 93
+101 88
+102 90
+103 92
+104 98
+105 99
+106 100
+107 100
+108 100
+109 100
+110 100
+111 100
+112 100
+113 100
+114 100
+115 100
+116 99
+117 100
+118 100
+119 100
+120 99
+121 97
+122 95
+123 92
+124 90
+125 82
+126 82
+127 94
+128 86
+129 85
+130 95
+131 97
+132 99
+133 100
+134 100
+135 100
+136 100
+137 100
+138 100
+139 100
+140 100
+141 100
+142 100
+143 100
+144 100
+145 100
+146 100
+147 100
+148 98
+149 96
+150 94
+151 94
+152 87
+153 88
+154 94
+155 94
+156 92
+157 97
+158 98
+159 100
+160 100
+161 100
+162 100
+163 100
+164 100
+165 100
+166 100
+167 100
+168 100
+169 100
+170 100
+171 100
+172 100
+173 100
+174 100
+175 100
+176 98
+177 103
+178 84
+179 94
+180 87
+181 85
+182 97
+183 97
+184 99
+185 100
+186 100
+187 100
+188 100
+189 100
+190 100
+191 100
+192 100
+193 100
+194 100
+195 100
+196 100
+197 100
+198 100
+199 100
+200 100
+201 100
+202 99
+203 103
+204 93
+205 86
+206 95
+207 82
+208 96
+209 100
+210 98
+211 100
+212 100
+213 100
+214 100
+215 100
+216 100
+217 100
+218 100
+219 100
+220 100
+221 100
+222 100
+223 100
+224 100
+225 100
+226 100
+227 100
+228 100
+229 100
+230 102
+231 107
+232 94
+233 82
+234 94
+235 95
+236 101
+237 102
+238 100
+239 100
+240 100
+241 100
+242 100
+243 100
+244 100
+245 100
+246 100
+247 100
+248 100
+249 100
+250 100
+251 100
+252 100
+253 100
+254 100
+255 91
+256 84
+257 98
+258 103
+259 103
+260 100
+261 101
+262 100
+263 100
+264 100
+265 100
+266 100
+267 100
+268 100
+269 100
+270 100
+271 100
+272 100
+273 100
+274 100
+275 100
+276 100
+277 100
+278 100
+279 100
+280 109
+281 96
+282 99
+283 97
+284 92
+285 101
+286 103
+287 104
+288 102
+289 100
+290 100
+291 100
+292 100
+293 100
+294 100
+295 100
+296 100
+297 100
+298 100
+299 100
+300 100
+301 100
+302 100
+303 100
+304 100
+305 101
+306 103
+307 112
+308 98
+309 101
+310 101
+311 101
+312 104
+313 105
+314 107
+315 104
+316 100
+317 100
+318 100
+319 100
+320 100
+321 100
+322 100
+323 100
+324 100
+325 100
+326 100
+327 100
+328 100
+329 100
+330 100
+331 100
+332 100
+333 112
+334 112
+335 98
+336 108
+337 103
+338 99
+339 104
+340 106
+341 107
+342 106
+343 102
+344 100
+345 100
+346 100
+347 100
+348 100
+349 100
+350 100
+351 100
+352 100
+353 100
+354 100
+355 100
+356 100
+357 100
+358 100
+359 104
+360 111
+361 107
+362 111
+363 104
+364 107
+365 107
+366 104
+367 106
+368 106
+369 106
+370 101
+371 100
+372 100
+373 100
+374 100
+375 100
+376 100
+377 100
+378 100
+379 100
+380 100
+381 100
+382 100
+383 100
+384 100
+385 100
+386 106
+387 100
+388 100
+389 100
+390 110
+391 103
+392 109
+393 106
+394 105
+395 106
+396 107
+397 105
+398 101
+399 100
+400 100
+401 100
+402 100
+403 100
+404 100
+405 100
+406 100
+407 100
+408 100
+409 100
+410 100
+411 100
+412 100
+413 105
+414 112
+415 100
+416 100
+417 100
+418 107
+419 117
+420 114
+421 108
+422 106
+423 106
+424 107
+425 100
+426 100
+427 100
+428 101
+429 100
+430 100
+431 100
+432 100
+433 101
+434 106
+435 112
+436 113
+437 112
+438 113
+439 114
+440 117
+441 116
+442 107
+443 106
+444 107
+445 106
+446 102
+447 100
+448 100
+449 100
+450 100
+451 100
+452 100
+453 100
+454 100
+455 100
+456 100
+457 100
+458 100
+459 101
+460 100
+461 106
+462 113
+463 112
+464 101
+465 113
+466 118
+467 119
+468 116
+469 108
+470 107
+471 107
+472 107
+473 105
+474 100
+475 100
+476 100
+477 100
+478 100
+479 100
+480 100
+481 100
+482 100
+483 100
+484 100
+485 100
+486 100
+487 105
+488 112
+489 100
+490 113
+491 103
+492 119
+493 119
+494 119
+495 114
+496 107
+497 106
+498 106
+499 106
+500 104
+501 100
+502 100
+503 100
+504 100
+505 100
+506 100
+507 100
+508 100
+509 100
+510 100
+511 100
+512 100
+513 102
+514 104
+515 112
+516 112
+517 107
+518 113
+519 117
+520 119
+521 120
+522 120
+523 116
+524 109
+525 107
+526 106
+527 107
+528 104
+529 101
+530 100
+531 100
+532 100
+533 100
+534 100
+535 100
+536 100
+537 100
+538 100
+539 100
+540 100
+541 105
+542 100
+543 113
+544 112
+545 101
+546 115
+547 121
+548 121
+549 123
+550 121
+551 118
+552 112
+553 107
+554 106
+555 107
+556 106
+557 104
+558 100
+559 100
+560 100
+561 100
+562 100
+563 100
+564 100
+565 100
+566 100
+567 102
+568 110
+569 100
+570 112
+571 100
+572 112
+573 118
+574 120
+575 122
+576 123
+577 122
+578 119
+579 112
+580 107
+581 107
+582 107
+583 106
+584 102
+585 101
+586 100
+587 100
+588 100
+589 100
+590 100
+591 100
+592 100
+593 101
+594 101
+595 105
+596 100
+597 116
+598 118
+599 120
+600 121
+601 124
+602 106
+603 107
+604 104
+605 101
+606 100
+607 100
+608 100
+609 100
+610 100
+611 100
+612 101
+613 100
+614 101
+615 110
+616 100
+617 112
+618 112
+619 112
+620 119
+621 122
+622 124
+623 124
+624 122
+625 121
+626 114
+627 108
+628 107
+629 106
+630 106
+631 104
+632 100
+633 100
+634 100
+635 100
+636 100
+637 100
+638 100
+639 100
+640 100
+641 105
+642 111
+643 107
+644 100
+645 112
+646 113
+647 116
+648 121
+649 123
+650 125
+651 124
+652 122
+653 120
+654 114
+655 107
+656 107
+657 108
+658 106
+659 105
+660 102
+661 100
+662 100
+663 100
+664 100
+665 100
+666 100
+667 100
+668 100
+669 103
+670 106
+671 107
+672 106
+673 102
+674 117
+675 121
+676 123
+677 125
+678 125
+679 125
+680 122
+681 119
+682 113
+683 107
+684 106
+685 107
+686 107
+687 104
+688 100
+689 100
+690 101
+691 100
+692 100
+693 100
+694 102
+695 107
+696 111
+697 113
+698 111
+699 111
+700 116
+701 121
+702 122
+703 125
+704 125
+705 125
+706 123
+707 122
+708 115
+709 108
+710 107
+711 107
+712 106
+713 103
+714 100
+715 100
+716 101
+717 100
+718 100
+719 100
+720 102
+721 110
+722 106
+723 112
+724 113
+725 115
+726 120
+727 122
+728 123
+729 124
+730 125
+731 125
+732 123
+733 122
+734 119
+735 109
+736 108
+737 107
+738 106
+739 105
+740 101
+741 100
+742 100
+743 100
+744 100
+745 100
+746 100
+747 108
+748 115
+749 106
+750 110
+751 116
+752 107
+753 120
+754 123
+755 124
+756 125
+757 125
+758 125
+759 125
+760 124
+761 120
+762 121
+763 112
+764 107
+765 110
+766 106
+767 108
+768 100
+769 101
+770 101
+771 113
+772 108
+773 108
+774 106
+775 122
+776 125
+777 125
+778 125
+779 124
+780 124
+781 123
+782 121
+783 118
+784 112
+785 110
+786 107
+787 110
+788 102
+789 102
+790 100
+791 100
+792 100
+793 101
+794 103
+795 100
+796 100
+797 100
+798 113
+799 106
+800 113
+801 120
+802 124
+803 124
+804 125
+805 125
+806 125
+807 125
+808 124
+809 122
+810 123
+811 117
+812 110
+813 108
+814 111
+815 106
+816 106
+817 101
+818 100
+819 100
+820 100
+821 104
+822 100
+823 100
+824 117
+825 101
+826 108
+827 119
+828 120
+829 124
+830 125
+831 125
+832 125
+833 125
+834 125
+835 124
+836 121
+837 121
+838 116
+839 111
+840 104
+841 108
+842 107
+843 106
+844 103
+845 100
+846 101
+847 100
+848 101
+849 106
+850 112
+851 106
+852 106
+853 113
+854 116
+855 121
+856 124
+857 125
+858 125
+859 125
+860 124
+861 125
+862 125
+863 124
+864 123
+865 121
+866 114
+867 108
+868 107
+869 106
+870 107
+871 106
+872 101
+873 101
+874 100
+875 101
+876 108
+877 100
+878 112
+879 100
+880 113
+881 109
+882 121
+883 122
+884 124
+885 124
+886 125
+887 125
+888 124
+889 125
+890 125
+891 124
+892 122
+893 119
+894 114
+895 108
+896 106
+897 107
+898 107
+899 104
+900 100
+901 101
+902 103
+903 100
+904 112
+905 101
+906 106
+907 114
+908 117
+909 119
+910 123
+911 123
+912 125
+913 125
+914 125
+915 125
+916 124
+917 124
+918 125
+919 124
+920 122
+921 122
+922 115
+923 109
+924 107
+925 107
+926 107
+927 105
+928 103
+929 101
+930 101
+931 111
+932 107
+933 113
+934 101
+935 115
+936 118
+937 121
+938 122
+939 125
+940 125
+941 125
+942 125
+943 125
+944 124
+945 122
+946 120
+947 115
+948 110
+949 108
+950 108
+951 107
+952 100
+953 100
+954 112
+955 101
+956 113
+957 120
+958 122
+959 123
+960 125
+961 125
+962 125
+963 125
+964 125
+965 125
+966 125
+967 125
+968 124
+969 124
+970 122
+971 120
+972 114
+973 110
+974 107
+975 106
+976 106
+977 105
+978 106
+979 113
+980 113
+981 100
+982 108
+983 107
+984 119
+985 121
+986 124
+987 125
+988 125
+989 125
+990 124
+991 125
+992 125
+993 125
+994 125
+995 125
+996 124
+997 123
+998 122
+999 120
+1000 112
+1001 108
+1002 107
+1003 107
+1004 106
+1005 109
+1006 101
+1007 113
+1008 108
+1009 114
+1010 102
+1011 117
+1012 121
+1013 123
+1014 125
+1015 125
+1016 125
+1017 125
+1018 125
+1019 125
+1020 125
+1021 125
+1022 125
+1023 124
+1024 125
+1026 123
+1028 122
+1030 118
+1032 109
+1034 107
+1036 107
+1038 107
+1040 108
+1042 109
+1044 101
+1046 113
+1048 107
+1050 102
+1052 111
+1054 115
+1056 120
+1058 124
+1060 125
+1062 125
+1064 125
+1066 125
+1068 125
+1070 125
+1072 125
+1074 125
+1076 125
+1078 125
+1080 125
+1082 125
+1084 125
+1086 125
+1088 123
+1090 121
+1092 116
+1094 107
+1096 108
+1098 106
+1100 112
+1102 118
+1104 100
+1106 107
+1108 114
+1110 119
+1112 122
+1114 123
+1116 125
+1118 125
+1120 125
+1122 124
+1124 125
+1126 125
+1128 125
+1130 125
+1132 125
+1134 125
+1136 125
+1138 125
+1140 125
+1142 125
+1144 123
+1146 122
+1148 118
+1150 112
+1152 109
+1154 116
+1156 113
+1158 119
+1160 114
+1162 114
+1164 112
+1166 119
+1168 122
+1170 124
+1172 125
+1174 125
+1176 125
+1178 125
+1180 125
+1182 125
+1184 125
+1186 125
+1188 125
+1190 125
+1192 124
+1194 125
+1196 125
+1198 125
+1200 124
+1202 124
+1204 121
+1206 120
+1208 117
+1210 119
+1212 119
+1214 108
+1216 120
+1218 119
+1220 120
+1222 125
+1224 125
+1226 125
+1228 125
+1230 125
+1232 125
+1234 125
+1236 125
+1238 125
+1240 125
+1242 124
+1244 123
+1246 122
+1248 119
+1250 120
+1252 108
+1254 115
+1256 113
+1258 117
+1260 121
+1262 123
+1264 124
+1266 125
+1268 124
+1270 125
+1272 125
+1274 125
+1276 125
+1278 125
+1280 125
+1282 125
+1284 125
+1286 125
+1288 125
+1290 125
+1292 125
+1294 125
+1296 125
+1298 124
+1300 125
+1302 131
+1304 116
+1306 123
+1308 120
+1310 115
+1312 120
+1314 126
+1316 126
+1318 126
+1320 125
+1322 125
+1324 125
+1326 125
+1328 125
+1330 124
+1332 125
+1334 124
+1336 125
+1338 125
+1340 125
+1342 125
+1344 125
+1346 125
+1348 125
+1350 125
+1352 125
+1354 134
+1356 123
+1358 121
+1360 128
+1362 123
+1364 121
+1366 128
+1368 129
+1370 128
+1372 126
+1374 125
+1376 125
+1378 125
+1380 125
+1382 125
+1384 125
+1386 125
+1388 125
+1390 125
+1392 125
+1394 125
+1396 125
+1398 125
+1400 125
+1402 125
+1404 125
+1406 125
+1408 128
+1410 125
+1412 136
+1414 135
+1416 119
+1418 127
+1420 127
+1422 129
+1424 130
+1426 100
+1428 100
+1430 101
+1432 96
+1434 100
+1436 101
+1438 100
+1440 101
+1442 99
+1444 101
+1446 101
+1448 97
+1450 99
+1452 98
+1454 100
+1456 99
+1458 99
+1460 98
+1462 100
+1464 100
+1466 100
+1468 101
+1470 100
+1472 100
+1474 100
+1476 100
+1478 100
+1480 100
+1482 100
+1484 100
+1486 100
+1488 101
+1490 100
+1492 104
+1494 102
+1496 99
+1498 98
+1500 94
+1502 100
+1504 101
+1506 101
+1508 100
+1510 100
+1512 99
+1514 101
+1516 100
+1518 100
+1520 97
+1522 101
+1524 100
+1526 100
+1528 100
+1530 100
+1532 100
+1534 100
+1536 101
+1538 100
+1540 100
+1542 100
+1544 100
+1546 100
+1548 101
+1550 101
+1552 101
+1554 101
+1556 99
+1558 99
+1560 100
+1562 97
+1564 103
+1566 99
+1568 100
+1570 100
+1572 99
+1574 102
+1576 99
+1578 99
+1580 100
+1582 100
+1584 100
+1586 100
+1588 100
+1590 100
+1592 101
+1594 99
+1596 96
+1598 99
+1600 95
+1602 100
+1604 96
+1606 102
+1608 102
+1610 97
+1612 96
+1614 98
+1616 99
+1618 101
+1620 101
+1622 100
+1624 100
+1626 100
+1628 100
+1630 100
+1632 100
+1634 100
+1636 100
+1638 100
+1640 100
+1642 100
+1644 100
+1646 100
+1648 100
+1650 100
+1652 98
+1654 98
+1656 101
+1658 101
+1660 100
+1662 100
+1664 101
+1666 97
+1668 101
+1670 99
+1672 95
+1674 98
+1676 98
+1678 101
+1680 99
+1682 98
+1684 101
+1686 100
+1688 100
+1690 100
+1692 100
+1694 100
+1696 100
+1698 100
+1700 100
+1702 100
+1704 100
+1706 100
+1708 101
+1710 99
+1712 98
+1714 100
+1716 100
+1718 103
+1720 100
+1722 103
+1724 100
+1726 102
+1728 97
+1730 100
+1732 99
+1734 101
+1736 103
+1738 101
+1740 101
+1742 101
+1744 100
+1746 100
+1748 100
+1750 100
+1752 100
+1754 101
+1756 100
+1758 100
+1760 100
+1762 100
+1764 100
+1766 102
+1768 103
+1770 98
+1772 101
+1774 103
+1776 100
+1778 104
+1780 96
+1782 101
+1784 99
+1786 103
+1788 102
+1790 102
+1792 98
+1794 102
+1796 102
+1798 100
+1800 100
+1802 100
+1804 100
+1806 100
+1808 100
+1810 100
+1812 100
+1814 100
+1816 100
+1818 101
+1820 100
+1822 102
+1824 100
+1826 99
+1828 99
+1830 104
+1832 103
+1834 104
+1836 104
+1838 104
+1840 100
+1842 105
+1844 101
+1846 103
+1848 103
+1850 102
+1852 102
+1854 102
+1856 101
+1858 100
+1860 100
+1862 100
+1864 101
+1866 100
+1868 101
+1870 100
+1872 100
+1874 100
+1876 100
+1878 100
+1880 100
+1882 101
+1884 104
+1886 106
+1888 102
+1890 101
+1892 106
+1894 105
+1896 103
+1898 106
+1900 106
+1902 104
+1904 104
+1906 105
+1908 104
+1910 102
+1912 103
+1914 101
+1916 100
+1918 101
+1920 100
+1922 100
+1924 100
+1926 100
+1928 100
+1930 100
+1932 100
+1934 100
+1936 100
+1938 101
+1940 100
+1942 101
+1944 100
+1946 105
+1948 103
+1950 106
+1952 107
+1954 106
+1956 104
+1958 105
+1960 105
+1962 103
+1964 106
+1966 105
+1968 102
+1970 101
+1972 101
+1974 100
+1976 100
+1978 100
+1980 100
+1982 100
+1984 101
+1986 101
+1988 101
+1990 101
+1992 101
+1994 101
+1996 102
+1998 100
+2000 105
+2002 109
+2004 105
+2006 104
+2008 105
+2010 105
+2012 106
+2014 106
+2016 108
+2018 106
+2020 104
+2022 102
+2024 103
+2026 103
+2028 102
+2030 100
+2032 101
+2034 101
+2036 101
+2038 100
+2040 101
+2042 100
+2044 101
+2046 100
+2048 100
+2052 101
+2056 101
+2060 105
+2064 106
+2068 102
+2072 109
+2076 107
+2080 108
+2084 107
+2088 107
+2092 108
+2096 104
+2100 108
+2104 107
+2108 109
+2112 105
+2116 105
+2120 101
+2124 103
+2128 101
+2132 102
+2136 100
+2140 100
+2144 100
+2148 100
+2152 100
+2156 101
+2160 100
+2164 100
+2168 106
+2172 108
+2176 107
+2180 108
+2184 109
+2188 111
+2192 108
+2196 107
+2200 107
+2204 112
+2208 108
+2212 106
+2216 104
+2220 102
+2224 101
+2228 100
+2232 100
+2236 101
+2240 101
+2244 101
+2248 100
+2252 101
+2256 101
+2260 102
+2264 103
+2268 101
+2272 101
+2276 107
+2280 103
+2284 109
+2288 110
+2292 112
+2296 108
+2300 111
+2304 112
+2308 111
+2312 109
+2316 112
+2320 110
+2324 111
+2328 107
+2332 105
+2336 104
+2340 101
+2344 101
+2348 102
+2352 102
+2356 101
+2360 101
+2364 101
+2368 101
+2372 100
+2376 102
+2380 101
+2384 101
+2388 101
+2392 103
+2396 107
+2400 109
+2404 111
+2408 107
+2412 112
+2416 108
+2420 111
+2424 113
+2428 113
+2432 112
+2436 114
+2440 107
+2444 112
+2448 113
+2452 109
+2456 103
+2460 101
+2464 100
+2468 103
+2472 101
+2476 101
+2480 101
+2484 101
+2488 100
+2492 101
+2496 105
+2500 101
+2504 107
+2508 103
+2512 111
+2516 112
+2520 111
+2524 108
+2528 113
+2532 112
+2536 116
+2540 116
+2544 114
+2548 115
+2552 112
+2556 113
+2560 113
+2564 111
+2568 109
+2572 109
+2576 102
+2580 102
+2584 101
+2588 104
+2592 101
+2596 100
+2600 101
+2604 104
+2608 101
+2612 101
+2616 106
+2620 108
+2624 112
+2628 112
+2632 116
+2636 115
+2640 115
+2644 117
+2648 113
+2652 114
+2656 112
+2660 114
+2664 111
+2668 106
+2672 102
+2676 102
+2680 102
+2684 103
+2688 102
+2692 102
+2696 101
+2700 101
+2704 101
+2708 101
+2712 101
+2716 103
+2720 109
+2724 112
+2728 112
+2732 113
+2736 117
+2740 118
+2744 113
+2748 118
+2752 115
+2756 114
+2760 117
+2764 114
+2768 115
+2772 113
+2776 113
+2780 111
+2784 108
+2788 103
+2792 101
+2796 104
+2800 102
+2804 105
+2808 101
+2812 107
+2816 106
+2820 106
+2824 102
+2828 104
+2832 108
+2836 112
+2840 111
+2844 116
+2848 114
+2852 118
+2856 118
+2860 119
+2864 118
+2868 117
+2872 118
+2876 124
+2880 126
+2884 119
+2888 124
+2892 118
+2896 121
+2900 114
+2904 118
+2908 123
+2912 116
+2916 116
+2920 123
+2924 121
+2928 120
+2932 121
+2936 125
+2940 105
+2944 126
+2948 111
+2952 129
+2956 116
+2960 111
+2964 116
+2968 129
+2972 128
+2976 119
+2980 128
+2984 123
+2988 127
+2992 130
+2996 129
+3000 118
+3004 128
+3008 122
+3012 115
+3016 127
+3020 114
+3024 119
+3028 118
+3032 119
+3036 126
+3040 102
+3044 111
+3048 103
+3052 111
+3056 104
+3060 109
+3064 104
+3068 109
+3072 131
+3076 131
+3080 114
+3084 117
+3088 129
+3092 127
+3096 127
+3100 128
+3104 125
+3108 127
+3112 124
+3116 127
+3120 121
+3124 128
+3128 117
+3132 122
+3136 113
+3140 121
+3144 112
+3148 126
+3152 104
+3156 105
+3160 114
+3164 102
+3168 114
+3172 124
+3176 127
+3180 109
+3184 131
+3188 117
+3192 115
+3196 129
+3200 129
+3204 122
+3208 121
+3212 122
+3216 129
+3220 129
+3224 126
+3228 127
+3232 127
+3236 126
+3240 124
+3244 120
+3248 119
+3252 125
+3256 112
+3260 121
+3264 106
+3268 125
+3272 126
+3276 121
+3280 117
+3284 126
+3288 126
+3292 131
+3296 111
+3300 105
+3304 117
+3308 116
+3312 116
+3316 120
+3320 130
+3324 129
+3328 126
+3332 128
+3336 129
+3340 127
+3344 121
+3348 125
+3352 115
+3356 127
+3360 126
+3364 127
+3368 111
+3372 121
+3376 131
+3380 118
+3384 111
+3388 131
+3392 132
+3396 132
+3400 130
+3404 128
+3408 127
+3412 129
+3416 128
+3420 125
+3424 127
+3428 129
+3432 126
+3436 120
+3440 125
+3444 126
+3448 116
+3452 131
+3456 124
+3460 129
+3464 117
+3468 130
+3472 130
+3476 133
+3480 102
+3484 133
+3488 135
+3492 115
+3496 131
+3500 120
+3504 120
+3508 123
+3512 124
+3516 126
+3520 129
+3524 132
+3528 129
+3532 129
+3536 127
+3540 130
+3544 123
+3548 129
+3552 132
+3556 127
+3560 129
+3564 129
+3568 121
+3572 131
+3576 117
+3580 135
+3584 121
+3588 131
+3592 112
+3596 121
+3600 136
+3604 119
+3608 123
+3612 119
+3616 121
+3620 132
+3624 131
+3628 129
+3632 127
+3636 132
+3640 128
+3644 131
+3648 127
+3652 129
+3656 130
+3660 128
+3664 133
+3668 124
+3672 125
+3676 122
+3680 128
+3684 120
+3688 116
+3692 114
+3696 127
+3700 112
+3704 125
+3708 127
+3712 114
+3716 136
+3720 136
+3724 135
+3728 133
+3732 133
+3736 134
+3740 133
+3744 128
+3748 129
+3752 127
+3756 129
+3760 131
+3764 132
+3768 129
+3772 129
+3776 131
+3780 134
+3784 128
+3788 123
+3792 126
+3796 126
+3800 115
+3804 128
+3808 127
+3812 133
+3816 136
+3820 114
+3824 138
+3828 122
+3832 128
+3836 140
+3840 116
+3844 127
+3848 134
+3852 136
+3856 131
+3860 129
+3864 128
+3868 129
+3872 132
+3876 131
+3880 128
+3884 130
+3888 131
+3892 132
+3896 131
+3900 127
+3904 130
+3908 127
+3912 134
+3916 123
+3920 129
+3924 135
+3928 128
+3932 118
+3936 117
+3940 142
+3944 132
+3948 124
+3952 138
+3956 139
+3960 136
+3964 131
+3968 132
+3972 132
+3976 127
+3980 130
+3984 130
+3988 129
+3992 129
+3996 132
+4000 132
+4004 132
+4008 130
+4012 133
+4016 132
+4020 136
+4024 128
+4028 131
+4032 121
+4036 130
+4040 138
+4044 142
+4048 121
+4052 132
+4056 123
+4060 130
+4064 123
+4068 134
+4072 135
+4076 135
+4080 134
+4084 132
+4088 131
+4092 131
+4096 131
+4104 136
+4112 131
+4120 137
+4128 135
+4136 128
+4144 128
+4152 131
+4160 120
+4168 143
+4176 144
+4184 143
+4192 143
+4200 126
+4208 139
+4216 138
+4224 136
+4232 133
+4240 133
+4248 137
+4256 132
+4264 135
+4272 133
+4280 135
+4288 132
+4296 134
+4304 131
+4312 131
+4320 131
+4328 143
+4336 148
+4344 141
+4352 140
+4360 136
+4368 137
+4376 138
+4384 136
+4392 142
+4400 139
+4408 140
+4416 136
+4424 140
+4432 141
+4440 133
+4448 140
+4456 138
+4464 146
+4472 139
+4480 139
+4488 141
+4496 138
+4504 139
+4512 138
+4520 136
+4528 140
+4536 146
+4544 146
+4552 145
+4560 145
+4568 143
+4576 139
+4584 140
+4592 141
+4600 141
+4608 146
+4616 146
+4624 140
+4632 137
+4640 144
+4648 138
+4656 140
+4664 139
+4672 140
+4680 143
+4688 144
+4696 142
+4704 142
+4712 140
+4720 142
+4728 141
+4736 137
+4744 139
+4752 140
+4760 141
+4768 141
+4776 142
+4784 145
+4792 146
+4800 140
+4808 146
+4816 138
+4824 141
+4832 144
+4840 136
+4848 142
+4856 146
+4864 143
+4872 147
+4880 140
+4888 143
+4896 146
+4904 146
+4912 144
+4920 145
+4928 142
+4936 142
+4944 139
+4952 143
+4960 139
+4968 140
+4976 142
+4984 144
+4992 145
+5000 139
+5008 145
+5016 147
+5024 140
+5032 146
+5040 148
+5048 140
+5056 140
+5064 144
+5072 142
+5080 143
+5088 143
+5096 145
+5104 146
+5112 144
+5120 147
+5128 142
+5136 146
+5144 147
+5152 143
+5160 141
+5168 142
+5176 140
+5184 137
+5192 140
+5200 144
+5208 144
+5216 143
+5224 143
+5232 144
+5240 147
+5248 143
+5256 146
+5264 140
+5272 146
+5280 146
+5288 142
+5296 2984
+5304 1277
+5312 142
+5320 286
+5328 239
+5336 267
+5344 211
+5352 140
+5360 141
+5368 144
+5376 145
+5384 145
+5392 148
+5400 144
+5408 149
+5416 146
+5424 150
+5432 143
+5440 144
+5448 149
+5456 148
+5464 145
+5472 146
+5480 146
+5488 147
+5496 146
+5504 143
+5512 141
+5520 145
+5528 137
+5536 145
+5544 143
+5552 142
+5560 143
+5568 146
+5576 145
+5584 148
+5592 145
+5600 145
+5608 145
+5616 149
+5624 143
+5632 150
+5640 150
+5648 148
+5656 145
+5664 147
+5672 146
+5680 148
+5688 149
+5696 148
+5704 150
+5712 147
+5720 149
+5728 144
+5736 146
+5744 147
+5752 140
+5760 148
+5768 211
+5776 147
+5784 151
+5792 150
+5800 151
+5808 149
+5816 146
+5824 153
+5832 159
+5840 146
+5848 154
+5856 151
+5864 149
+5872 152
+5880 147
+5888 150
+5896 149
+5904 151
+5912 149
+5920 148
+5928 151
+5936 150
+5944 150
+5952 150
+5960 150
+5968 148
+5976 149
+5984 148
+5992 147
+6000 148
+6008 150
+6016 150
+6024 150
+6032 149
+6040 146
+6048 158
+6056 144
+6064 149
+6072 147
+6080 152
+6088 153
+6096 149
+6104 151
+6112 150
+6120 150
+6128 149
+6136 149
+6144 150
+6152 151
+6160 151
+6168 151
+6176 150
+6184 150
+6192 151
+6200 150
+6208 149
+6216 149
+6224 151
+6232 151
+6240 153
+6248 153
+6256 152
+6264 148
+6272 150
+6280 150
+6288 153
+6296 151
+6304 152
+6312 149
+6320 152
+6328 148
+6336 150
+6344 151
+6352 152
+6360 151
+6368 150
+6376 153
+6384 153
+6392 151
+6400 151
+6408 151
+6416 150
+6424 151
+6432 151
+6440 152
+6448 151
+6456 153
+6464 152
+6472 151
+6480 151
+6488 155
+6496 155
+6504 155
+6512 158
+6520 155
+6528 154
+6536 152
+6544 154
+6552 152
+6560 152
+6568 151
+6576 153
+6584 152
+6592 152
+6600 152
+6608 153
+6616 151
+6624 151
+6632 153
+6640 151
+6648 154
+6656 152
+6664 153
+6672 151
+6680 151
+6688 160
+6696 160
+6704 151
+6712 152
+6720 155
+6728 158
+6736 153
+6744 158
+6752 878
+6760 157
+6768 780
+6776 152
+6784 175
+6792 154
+6800 153
+6808 154
+6816 156
+6824 152
+6832 157
+6840 155
+6848 158
+6856 159
+6864 158
+6872 155
+6880 154
+6888 150
+6896 154
+6904 155
+6912 154
+6920 154
+6928 151
+6936 153
+6944 152
+6952 152
+6960 152
+6968 154
+6976 154
+6984 151
+6992 153
+7000 154
+7008 157
+7016 157
+7024 156
+7032 156
+7040 153
+7048 160
+7056 158
+7064 156
+7072 156
+7080 157
+7088 161
+7096 162
+7104 156
+7112 156
+7120 158
+7128 157
+7136 155
+7144 155
+7152 157
+7160 155
+7168 152
+7176 153
+7184 152
+7192 155
+7200 152
+7208 154
+7216 177
+7224 157
+7232 158
+7240 156
+7248 160
+7256 155
+7264 158
+7272 155
+7280 155
+7288 156
+7296 157
+7304 154
+7312 162
+7320 160
+7328 158
+7336 161
+7344 162
+7352 163
+7360 161
+7368 162
+7376 164
+7384 161
+7392 162
+7400 160
+7408 162
+7416 162
+7424 161
+7432 161
+7440 161
+7448 157
+7456 156
+7464 157
+7472 154
+7480 160
+7488 156
+7496 156
+7504 162
+7512 157
+7520 156
+7528 161
+7536 160
+7544 164
+7552 162
+7560 163
+7568 163
+7576 164
+7584 164
+7592 163
+7600 162
+7608 164
+7616 162
+7624 161
+7632 162
+7640 163
+7648 163
+7656 160
+7664 161
+7672 162
+7680 161
+7688 157
+7696 160
+7704 161
+7712 156
+7720 157
+7728 155
+7736 162
+7744 156
+7752 161
+7760 159
+7768 162
+7776 164
+7784 163
+7792 165
+7800 167
+7808 166
+7816 165
+7824 165
+7832 165
+7840 164
+7848 164
+7856 166
+7864 166
+7872 163
+7880 164
+7888 163
+7896 162
+7904 165
+7912 157
+7920 155
+7928 163
+7936 160
+7944 163
+7952 162
+7960 163
+7968 162
+7976 160
+7984 162
+7992 164
+8000 165
+8008 166
+8016 166
+8024 164
+8032 168
+8040 168
+8048 167
+8056 166
+8064 167
+8072 167
+8080 167
+8088 166
+8096 164
+8104 165
+8112 166
+8120 167
+8128 168
+8136 164
+8144 161
+8152 162
+8160 167
+8168 167
+8176 165
+8184 165
+8192 164
+8208 165
+8224 168
+8240 167
+8256 168
+8272 169
+8288 167
+8304 167
+8320 168
+8336 167
+8352 166
+8368 167
+8384 166
+8400 169
+8416 166
+8432 170
+8448 170
+8464 167
+8480 167
+8496 167
+8512 168
+8528 159
+8544 168
+8560 168
+8576 168
+8592 169
+8608 169
+8624 169
+8640 171
+8656 170
+8672 172
+8688 172
+8704 173
+8720 171
+8736 171
+8752 170
+8768 173
+8784 169
+8800 171
+8816 168
+8832 170
+8848 170
+8864 171
+8880 171
+8896 169
+8912 171
+8928 166
+8944 170
+8960 166
+8976 169
+8992 172
+9008 173
+9024 173
+9040 172
+9056 172
+9072 173
+9088 172
+9104 172
+9120 173
+9136 172
+9152 172
+9168 172
+9184 173
+9200 173
+9216 172
+9232 172
+9248 170
+9264 173
+9280 174
+9296 175
+9312 174
+9328 173
+9344 173
+9360 173
+9376 240
+9392 244
+9408 244
+9424 243
+9440 249
+9456 250
+9472 247
+9488 244
+9504 251
+9520 250
+9536 250
+9552 259
+9568 252
+9584 250
+9600 251
+9616 250
+9632 251
+9648 250
+9664 251
+9680 250
+9696 250
+9712 251
+9728 250
+9744 251
+9760 251
+9776 251
+9792 251
+9808 250
+9824 250
+9840 252
+9856 251
+9872 251
+9888 251
+9904 251
+9920 251
+9936 252
+9952 251
+9968 251
+9984 251
+10000 251
+10016 251
+10032 251
+10048 251
+10064 251
+10080 251
+10096 250
+10112 251
+10128 277
+10144 274
+10160 276
+10176 275
+10192 275
+10208 275
+10224 278
+10240 276
+10256 274
+10272 277
+10288 279
+10304 277
+10320 275
+10336 277
+10352 280
+10368 277
+10384 272
+10400 275
+10416 281
+10432 279
+10448 278
+10464 279
+10480 279
+10496 275
+10512 277
+10528 278
+10544 279
+10560 280
+10576 282
+10592 281
+10608 277
+10624 278
+10640 281
+10656 279
+10672 280
+10688 277
+10704 277
+10720 279
+10736 280
+10752 277
+10768 279
+10784 274
+10800 278
+10816 282
+10832 277
+10848 276
+10864 283
+10880 283
+10896 282
+10912 280
+10928 281
+10944 283
+10960 281
+10976 283
+10992 282
+11008 280
+11024 282
+11040 280
+11056 285
+11072 283
+11088 369
+11104 279
+11120 282
+11136 280
+11152 279
+11168 281
+11184 285
+11200 283
+11216 277
+11232 283
+11248 284
+11264 282
+11280 280
+11296 280
+11312 284
+11328 279
+11344 284
+11360 281
+11376 282
+11392 281
+11408 280
+11424 278
+11440 282
+11456 283
+11472 282
+11488 280
+11504 281
+11520 284
+11536 282
+11552 293
+11568 283
+11584 296
+11600 279
+11616 277
+11632 282
+11648 302
+11664 293
+11680 285
+11696 279
+11712 285
+11728 280
+11744 279
+11760 282
+11776 278
+11792 282
+11808 285
+11824 281
+11840 281
+11856 280
+11872 281
+11888 281
+11904 307
+11920 281
+11936 283
+11952 286
+11968 281
+11984 283
+12000 296
+12016 279
+12032 285
+12048 286
+12064 294
+12080 297
+12096 307
+12112 277
+12128 277
+12144 281
+12160 283
+12176 308
+12192 286
+12208 281
+12224 291
+12240 286
+12256 279
+12272 304
+12288 290
+12304 298
+12320 282
+12336 278
+12352 290
+12368 278
+12384 281
+12400 281
+12416 288
+12432 280
+12448 298
+12464 302
+12480 310
+12496 280
+12512 282
+12528 302
+12544 304
+12560 306
+12576 278
+12592 279
+12608 301
+12624 286
+12640 283
+12656 280
+12672 286
+12688 280
+12704 286
+12720 313
+12736 279
+12752 281
+12768 307
+12784 294
+12800 304
+12816 285
+12832 280
+12848 316
+12864 302
+12880 289
+12896 286
+12912 328
+12928 294
+12944 281
+12960 312
+12976 294
+12992 335
+13008 298
+13024 320
+13040 292
+13056 319
+13072 282
+13088 307
+13104 285
+13120 283
+13136 290
+13152 293
+13168 325
+13184 281
+13200 310
+13216 284
+13232 286
+13248 284
+13264 301
+13280 309
+13296 286
+13312 285
+13328 316
+13344 309
+13360 287
+13376 298
+13392 292
+13408 319
+13424 300
+13440 284
+13456 320
+13472 304
+13488 332
+13504 312
+13520 303
+13536 293
+13552 321
+13568 288
+13584 296
+13600 286
+13616 311
+13632 322
+13648 287
+13664 324
+13680 313
+13696 334
+13712 319
+13728 288
+13744 287
+13760 340
+13776 297
+13792 284
+13808 322
+13824 339
+13840 328
+13856 288
+13872 335
+13888 297
+13904 286
+13920 294
+13936 285
+13952 290
+13968 332
+13984 286
+14000 338
+14016 290
+14032 297
+14048 348
+14064 290
+14080 288
+14096 340
+14112 332
+14128 291
+14144 330
+14160 327
+14176 307
+14192 311
+14208 294
+14224 338
+14240 342
+14256 356
+14272 321
+14288 330
+14304 321
+14320 289
+14336 330
+14352 287
+14368 288
+14384 288
+14400 330
+14416 347
+14432 288
+14448 316
+14464 299
+14480 311
+14496 298
+14512 295
+14528 328
+14544 319
+14560 338
+14576 341
+14592 326
+14608 313
+14624 334
+14640 320
+14656 315
+14672 318
+14688 314
+14704 361
+14720 352
+14736 296
+14752 307
+14768 325
+14784 363
+14800 340
+14816 363
+14832 301
+14848 326
+14864 366
+14880 323
+14896 341
+14912 359
+14928 356
+14944 334
+14960 337
+14976 371
+14992 319
+15008 287
+15024 344
+15040 322
+15056 366
+15072 346
+15088 317
+15104 301
+15120 330
+15136 345
+15152 324
+15168 346
+15184 352
+15200 327
+15216 334
+15232 344
+15248 334
+15264 367
+15280 338
+15296 338
+15312 364
+15328 368
+15344 315
+15360 357
+15376 348
+15392 325
+15408 334
+15424 374
+15440 358
+15456 360
+15472 351
+15488 356
+15504 372
+15520 346
+15536 363
+15552 334
+15568 352
+15584 369
+15600 359
+15616 326
+15632 341
+15648 372
+15664 370
+15680 374
+15696 366
+15712 368
+15728 369
+15744 349
+15760 355
+15776 368
+15792 354
+15808 366
+15824 346
+15840 342
+15856 363
+15872 371
+15888 347
+15904 324
+15920 368
+15936 367
+15952 373
+15968 367
+15984 372
+16000 364
+16016 372
+16032 356
+16048 372
+16064 371
+16080 372
+16096 366
+16112 370
+16128 363
+16144 371
+16160 374
+16176 352
+16192 359
+16208 364
+16224 359
+16240 362
+16256 330
+16272 369
+16288 322
+16304 362
+16320 381
+16336 370
+16352 375
+16368 374
+16384 372
+16416 378
+16448 373
+16480 373
+16512 372
+16544 380
+16576 377
+16608 372
+16640 371
+16672 376
+16704 374
+16736 363
+16768 372
+16800 373
+16832 375
+16864 379
+16896 383
+16928 381
+16960 376
+16992 374
+17024 373
+17056 381
+17088 373
+17120 385
+17152 373
+17184 383
+17216 378
+17248 376
+17280 376
+17312 385
+17344 384
+17376 383
+17408 383
+17440 387
+17472 379
+17504 393
+17536 382
+17568 379
+17600 391
+17632 390
+17664 383
+17696 384
+17728 388
+17760 388
+17792 382
+17824 388
+17856 394
+17888 388
+17920 388
+17952 383
+17984 385
+18016 379
+18048 390
+18080 394
+18112 387
+18144 392
+18176 385
+18208 396
+18240 388
+18272 393
+18304 386
+18336 480
+18368 402
+18400 398
+18432 395
+18464 399
+18496 390
+18528 392
+18560 388
+18592 392
+18624 391
+18656 389
+18688 393
+18720 385
+18752 395
+18784 390
+18816 393
+18848 393
+18880 389
+18912 392
+18944 391
+18976 390
+19008 392
+19040 396
+19072 389
+19104 389
+19136 393
+19168 394
+19200 390
+19232 390
+19264 399
+19296 392
+19328 394
+19360 397
+19392 396
+19424 389
+19456 392
+19488 395
+19520 397
+19552 399
+19584 395
+19616 403
+19648 397
+19680 396
+19712 396
+19744 397
+19776 398
+19808 389
+19840 396
+19872 401
+19904 401
+19936 399
+19968 398
+20000 401
+20032 395
+20064 399
+20096 395
+20128 401
+20160 394
+20192 396
+20224 397
+20256 394
+20288 396
+20320 396
+20352 392
+20384 402
+20416 409
+20448 392
+20480 396
+20512 390
+20544 392
+20576 394
+20608 397
+20640 393
+20672 399
+20704 393
+20736 411
+20768 396
+20800 400
+20832 394
+20864 399
+20896 398
+20928 398
+20960 400
+20992 413
+21024 398
+21056 396
+21088 416
+21120 397
+21152 395
+21184 411
+21216 400
+21248 396
+21280 397
+21312 398
+21344 393
+21376 400
+21408 418
+21440 401
+21472 396
+21504 399
+21536 398
+21568 397
+21600 399
+21632 396
+21664 399
+21696 399
+21728 410
+21760 412
+21792 415
+21824 410
+21856 405
+21888 409
+21920 411
+21952 408
+21984 415
+22016 396
+22048 409
+22080 412
+22112 411
+22144 415
+22176 412
+22208 410
+22240 412
+22272 414
+22304 415
+22336 412
+22368 417
+22400 411
+22432 413
+22464 419
+22496 413
+22528 415
+22560 409
+22592 414
+22624 413
+22656 421
+22688 411
+22720 408
+22752 414
+22784 414
+22816 400
+22848 395
+22880 413
+22912 421
+22944 416
+22976 416
+23008 417
+23040 416
+23072 395
+23104 415
+23136 417
+23168 421
+23200 416
+23232 410
+23264 411
+23296 412
+23328 410
+23360 398
+23392 412
+23424 411
+23456 412
+23488 412
+23520 407
+23552 412
+23584 415
+23616 411
+23648 411
+23680 413
+23712 409
+23744 411
+23776 418
+23808 414
+23840 410
+23872 410
+23904 417
+23936 414
+23968 414
+24000 413
+24032 412
+24064 419
+24096 411
+24128 421
+24160 416
+24192 417
+24224 414
+24256 413
+24288 414
+24320 418
+24352 416
+24384 409
+24416 418
+24448 412
+24480 421
+24512 417
+24544 421
+24576 418
+24608 434
+24640 428
+24672 430
+24704 430
+24736 434
+24768 438
+24800 432
+24832 434
+24864 429
+24896 432
+24928 430
+24960 434
+24992 428
+25024 432
+25056 435
+25088 434
+25120 424
+25152 429
+25184 433
+25216 433
+25248 426
+25280 430
+25312 434
+25344 429
+25376 439
+25408 432
+25440 440
+25472 434
+25504 431
+25536 436
+25568 441
+25600 436
+25632 449
+25664 449
+25696 443
+25728 437
+25760 439
+25792 441
+25824 441
+25856 442
+25888 451
+25920 448
+25952 450
+25984 443
+26016 434
+26048 457
+26080 461
+26112 456
+26144 457
+26176 464
+26208 450
+26240 465
+26272 451
+26304 455
+26336 467
+26368 463
+26400 453
+26432 456
+26464 456
+26496 455
+26528 463
+26560 447
+26592 455
+26624 455
+26656 462
+26688 457
+26720 466
+26752 465
+26784 460
+26816 458
+26848 454
+26880 458
+26912 463
+26944 453
+26976 477
+27008 455
+27040 460
+27072 471
+27104 475
+27136 459
+27168 488
+27200 464
+27232 459
+27264 472
+27296 469
+27328 461
+27360 476
+27392 465
+27424 473
+27456 472
+27488 484
+27520 482
+27552 491
+27584 482
+27616 471
+27648 480
+27680 481
+27712 482
+27744 484
+27776 476
+27808 486
+27840 484
+27872 485
+27904 494
+27936 478
+27968 478
+28000 490
+28032 478
+28064 481
+28096 480
+28128 478
+28160 487
+28192 487
+28224 481
+28256 491
+28288 479
+28320 495
+28352 486
+28384 480
+28416 480
+28448 501
+28480 478
+28512 491
+28544 489
+28576 472
+28608 497
+28640 483
+28672 494
+28704 474
+28736 491
+28768 494
+28800 503
+28832 490
+28864 503
+28896 500
+28928 493
+28960 495
+28992 492
+29024 496
+29056 487
+29088 498
+29120 491
+29152 491
+29184 502
+29216 489
+29248 489
+29280 488
+29312 497
+29344 506
+29376 495
+29408 494
+29440 491
+29472 504
+29504 498
+29536 501
+29568 512
+29600 496
+29632 488
+29664 489
+29696 503
+29728 502
+29760 490
+29792 502
+29824 494
+29856 510
+29888 501
+29920 1673
+29952 506
+29984 490
+30016 504
+30048 497
+30080 491
+30112 509
+30144 489
+30176 495
+30208 500
+30240 502
+30272 508
+30304 498
+30336 527
+30368 517
+30400 514
+30432 509
+30464 524
+30496 520
+30528 527
+30560 520
+30592 510
+30624 525
+30656 524
+30688 509
+30720 518
+30752 533
+30784 525
+30816 515
+30848 520
+30880 525
+30912 519
+30944 502
+30976 516
+31008 510
+31040 518
+31072 526
+31104 533
+31136 513
+31168 519
+31200 513
+31232 519
+31264 518
+31296 535
+31328 519
+31360 549
+31392 527
+31424 527
+31456 518
+31488 544
+31520 527
+31552 518
+31584 515
+31616 527
+31648 510
+31680 525
+31712 539
+31744 519
+31776 532
+31808 516
+31840 560
+31872 534
+31904 575
+31936 555
+31968 547
+32000 526
+32032 515
+32064 545
+32096 551
+32128 543
+32160 532
+32192 522
+32224 524
+32256 565
+32288 520
+32320 527
+32352 525
+32384 548
+32416 537
+32448 514
+32480 545
+32512 536
+32544 527
+32576 551
+32608 549
+32640 573
+32672 525
+32704 537
+32736 545
+32768 524
+32832 538
+32896 538
+32960 534
+33024 563
+33088 528
+33152 567
+33216 528
+33280 566
+33344 574
+33408 569
+33472 549
+33536 572
+33600 571
+33664 520
+33728 585
+33792 570
+33856 582
+33920 544
+33984 581
+34048 551
+34112 572
+34176 578
+34240 571
+34304 590
+34368 581
+34432 585
+34496 582
+34560 568
+34624 584
+34688 587
+34752 591
+34816 591
+34880 584
+34944 581
+35008 586
+35072 562
+35136 573
+35200 587
+35264 593
+35328 594
+35392 600
+35456 568
+35520 589
+35584 586
+35648 591
+35712 591
+35776 595
+35840 589
+35904 600
+35968 608
+36032 600
+36096 612
+36160 599
+36224 612
+36288 602
+36352 611
+36416 607
+36480 602
+36544 606
+36608 605
+36672 603
+36736 591
+36800 608
+36864 612
+36928 610
+36992 604
+37056 615
+37120 613
+37184 619
+37248 613
+37312 607
+37376 611
+37440 615
+37504 611
+37568 626
+37632 618
+37696 619
+37760 623
+37824 618
+37888 630
+37952 615
+38016 622
+38080 624
+38144 622
+38208 616
+38272 629
+38336 623
+38400 616
+38464 625
+38528 614
+38592 626
+38656 635
+38720 624
+38784 624
+38848 623
+38912 615
+38976 622
+39040 629
+39104 635
+39168 623
+39232 639
+39296 634
+39360 629
+39424 642
+39488 637
+39552 637
+39616 637
+39680 648
+39744 640
+39808 647
+39872 636
+39936 632
+40000 645
+40064 638
+40128 645
+40192 640
+40256 644
+40320 635
+40384 629
+40448 641
+40512 647
+40576 648
+40640 646
+40704 640
+40768 643
+40832 657
+40896 638
+40960 647
+41024 648
+41088 649
+41152 644
+41216 642
+41280 644
+41344 648
+41408 648
+41472 663
+41536 646
+41600 666
+41664 651
+41728 656
+41792 650
+41856 647
+41920 653
+41984 667
+42048 664
+42112 672
+42176 670
+42240 660
+42304 667
+42368 673
+42432 659
+42496 674
+42560 675
+42624 670
+42688 673
+42752 662
+42816 666
+42880 670
+42944 664
+43008 673
+43072 675
+43136 680
+43200 680
+43264 672
+43328 671
+43392 678
+43456 680
+43520 689
+43584 683
+43648 697
+43712 699
+43776 685
+43840 675
+43904 680
+43968 685
+44032 681
+44096 683
+44160 688
+44224 681
+44288 702
+44352 705
+44416 685
+44480 689
+44544 694
+44608 692
+44672 688
+44736 688
+44800 694
+44864 708
+44928 702
+44992 700
+45056 707
+45120 704
+45184 707
+45248 707
+45312 702
+45376 707
+45440 709
+45504 710
+45568 710
+45632 710
+45696 714
+45760 710
+45824 701
+45888 709
+45952 712
+46016 706
+46080 714
+46144 712
+46208 711
+46272 716
+46336 715
+46400 724
+46464 726
+46528 720
+46592 713
+46656 713
+46720 710
+46784 719
+46848 716
+46912 715
+46976 725
+47040 725
+47104 715
+47168 725
+47232 718
+47296 719
+47360 725
+47424 728
+47488 715
+47552 721
+47616 718
+47680 712
+47744 735
+47808 727
+47872 737
+47936 742
+48000 736
+48064 738
+48128 740
+48192 728
+48256 732
+48320 729
+48384 743
+48448 736
+48512 733
+48576 739
+48640 741
+48704 742
+48768 741
+48832 742
+48896 735
+48960 735
+49024 740
+49088 738
+49152 737
+49216 747
+49280 739
+49344 740
+49408 744
+49472 749
+49536 756
+49600 744
+49664 733
+49728 748
+49792 744
+49856 743
+49920 752
+49984 748
+50048 750
+50112 741
+50176 752
+50240 745
+50304 751
+50368 741
+50432 748
+50496 742
+50560 751
+50624 751
+50688 747
+50752 764
+50816 749
+50880 759
+50944 752
+51008 757
+51072 760
+51136 759
+51200 755
+51264 759
+51328 748
+51392 758
+51456 760
+51520 747
+51584 755
+51648 761
+51712 764
+51776 763
+51840 761
+51904 782
+51968 766
+52032 764
+52096 761
+52160 759
+52224 772
+52288 773
+52352 764
+52416 768
+52480 779
+52544 770
+52608 757
+52672 779
+52736 773
+52800 769
+52864 778
+52928 773
+52992 786
+53056 778
+53120 772
+53184 771
+53248 771
+53312 771
+53376 772
+53440 776
+53504 800
+53568 792
+53632 791
+53696 783
+53760 801
+53824 798
+53888 795
+53952 786
+54016 796
+54080 783
+54144 792
+54208 788
+54272 802
+54336 787
+54400 782
+54464 779
+54528 794
+54592 794
+54656 785
+54720 789
+54784 791
+54848 808
+54912 798
+54976 799
+55040 805
+55104 791
+55168 803
+55232 812
+55296 805
+55360 812
+55424 814
+55488 802
+55552 810
+55616 798
+55680 806
+55744 807
+55808 803
+55872 811
+55936 814
+56000 807
+56064 799
+56128 804
+56192 816
+56256 805
+56320 805
+56384 820
+56448 821
+56512 823
+56576 808
+56640 810
+56704 809
+56768 826
+56832 831
+56896 812
+56960 823
+57024 825
+57088 820
+57152 812
+57216 822
+57280 809
+57344 832
+57408 811
+57472 824
+57536 810
+57600 821
+57664 821
+57728 817
+57792 819
+57856 835
+57920 833
+57984 838
+58048 823
+58112 826
+58176 830
+58240 844
+58304 828
+58368 828
+58432 821
+58496 825
+58560 839
+58624 850
+58688 829
+58752 838
+58816 826
+58880 838
+58944 835
+59008 828
+59072 830
+59136 838
+59200 847
+59264 829
+59328 851
+59392 847
+59456 837
+59520 838
+59584 835
+59648 842
+59712 846
+59776 849
+59840 854
+59904 848
+59968 856
+60032 859
+60096 838
+60160 848
+60224 849
+60288 858
+60352 846
+60416 849
+60480 848
+60544 855
+60608 847
+60672 870
+60736 857
+60800 858
+60864 882
+60928 862
+60992 853
+61056 857
+61120 851
+61184 866
+61248 848
+61312 872
+61376 873
+61440 881
+61504 864
+61568 878
+61632 880
+61696 871
+61760 877
+61824 863
+61888 879
+61952 861
+62016 867
+62080 877
+62144 876
+62208 884
+62272 866
+62336 860
+62400 882
+62464 874
+62528 865
+62592 867
+62656 886
+62720 875
+62784 877
+62848 872
+62912 861
+62976 899
+63040 870
+63104 885
+63168 889
+63232 863
+63296 876
+63360 868
+63424 866
+63488 879
+63552 885
+63616 869
+63680 878
+63744 885
+63808 884
+63872 878
+63936 882
+64000 889
+64064 878
+64128 895
+64192 875
+64256 889
+64320 882
+64384 880
+64448 889
+64512 883
+64576 886
+64640 879
+64704 892
+64768 880
+64832 890
+64896 908
+64960 919
+65024 882
+65088 896
+65152 895
+65216 894
+65280 891
+65344 889
+65408 891
+65472 1164
+65536 1148
+65664 1172
+65792 1171
+65920 1164
+66048 1162
+66176 1176
+66304 1169
+66432 1184
+66560 1171
+66688 1198
+66816 1176
+66944 1169
+67072 1185
+67200 1180
+67328 1186
+67456 1170
+67584 1191
+67712 1171
+67840 1170
+67968 1194
+68096 1186
+68224 1175
+68352 1188
+68480 1190
+68608 1181
+68736 1195
+68864 1192
+68992 1198
+69120 1195
+69248 1198
+69376 1189
+69504 1196
+69632 1199
+69760 1201
+69888 1209
+70016 1208
+70144 1217
+70272 1213
+70400 1197
+70528 1209
+70656 1215
+70784 1221
+70912 1215
+71040 1218
+71168 1223
+71296 1213
+71424 1257
+71552 1225
+71680 1228
+71808 1207
+71936 1225
+72064 1239
+72192 1213
+72320 1230
+72448 1229
+72576 1229
+72704 1232
+72832 1260
+72960 1218
+73088 1225
+73216 1227
+73344 1231
+73472 1253
+73600 1238
+73728 1242
+73856 1240
+73984 1236
+74112 1245
+74240 1249
+74368 1242
+74496 1226
+74624 1264
+74752 1237
+74880 1257
+75008 1253
+75136 1257
+75264 1258
+75392 1260
+75520 1272
+75648 1263
+75776 1267
+75904 1258
+76032 1275
+76160 1269
+76288 1281
+76416 1271
+76544 1267
+76672 1277
+76800 1264
+76928 1275
+77056 1267
+77184 1272
+77312 1284
+77440 1275
+77568 1305
+77696 1308
+77824 1303
+77952 1284
+78080 1284
+78208 1280
+78336 1314
+78464 1286
+78592 1291
+78720 1278
+78848 1300
+78976 1280
+79104 1272
+79232 1270
+79360 1300
+79488 1323
+79616 1289
+79744 1310
+79872 1298
+80000 1345
+80128 1333
+80256 1302
+80384 1306
+80512 1334
+80640 1318
+80768 1304
+80896 1303
+81024 1347
+81152 1326
+81280 1303
+81408 1323
+81536 1316
+81664 1315
+81792 1335
+81920 1347
+82048 1316
+82176 1328
+82304 1315
+82432 1342
+82560 1338
+82688 1322
+82816 1320
+82944 1329
+83072 1366
+83200 1323
+83328 1356
+83456 1367
+83584 1338
+83712 1369
+83840 1369
+83968 1341
+84096 1343
+84224 1364
+84352 1345
+84480 1349
+84608 1348
+84736 1335
+84864 1337
+84992 1365
+85120 1355
+85248 1361
+85376 1348
+85504 1345
+85632 1329
+85760 1343
+85888 1365
+86016 1367
+86144 1333
+86272 1357
+86400 1349
+86528 1360
+86656 1393
+86784 1364
+86912 1385
+87040 1401
+87168 1368
+87296 1364
+87424 1378
+87552 1354
+87680 1400
+87808 1368
+87936 1372
+88064 1383
+88192 1399
+88320 1376
+88448 1418
+88576 1388
+88704 1382
+88832 1386
+88960 1408
+89088 1377
+89216 1383
+89344 1376
+89472 1389
+89600 1376
+89728 1417
+89856 1380
+89984 1402
+90112 1394
+90240 1389
+90368 1419
+90496 1393
+90624 1386
+90752 1388
+90880 1402
+91008 1382
+91136 1403
+91264 1379
+91392 1431
+91520 1432
+91648 1435
+91776 1404
+91904 1402
+92032 1434
+92160 1431
+92288 1402
+92416 1401
+92544 1412
+92672 1418
+92800 1423
+92928 1389
+93056 1442
+93184 1425
+93312 1428
+93440 1403
+93568 1430
+93696 1452
+93824 1401
+93952 1414
+94080 1415
+94208 1414
+94336 1421
+94464 1444
+94592 1421
+94720 1462
+94848 1457
+94976 1422
+95104 1423
+95232 1432
+95360 1416
+95488 1447
+95616 1442
+95744 1446
+95872 1444
+96000 1443
+96128 1426
+96256 1437
+96384 1442
+96512 1423
+96640 1457
+96768 1464
+96896 1454
+97024 1439
+97152 1464
+97280 1447
+97408 1437
+97536 1417
+97664 1430
+97792 1475
+97920 1446
+98048 1450
+98176 1445
+98304 1421
+98432 1445
+98560 1443
+98688 1441
+98816 1482
+98944 1447
+99072 1466
+99200 1449
+99328 1455
+99456 1467
+99584 1489
+99712 1461
+99840 1452
+99968 1466
+100096 1465
+100224 1458
+100352 1451
+100480 1510
+100608 1448
+100736 1468
+100864 1489
+100992 1480
+101120 1445
+101248 1487
+101376 1476
+101504 1475
+101632 1466
+101760 1455
+101888 1505
+102016 1465
+102144 1462
+102272 1488
+102400 1483
+102528 1514
+102656 1489
+102784 1491
+102912 1478
+103040 1474
+103168 1498
+103296 1497
+103424 1526
+103552 1521
+103680 1461
+103808 1532
+103936 1493
+104064 1469
+104192 1481
+104320 1482
+104448 1522
+104576 1515
+104704 1537
+104832 1504
+104960 1537
+105088 1500
+105216 1478
+105344 1514
+105472 1472
+105600 1483
+105728 1555
+105856 1481
+105984 1504
+106112 1537
+106240 1502
+106368 1530
+106496 1518
+106624 1506
+106752 1517
+106880 1543
+107008 1550
+107136 1506
+107264 1525
+107392 1548
+107520 1564
+107648 1549
+107776 1514
+107904 1526
+108032 1522
+108160 1557
+108288 1532
+108416 1531
+108544 1538
+108672 1544
+108800 1568
+108928 1550
+109056 1540
+109184 1527
+109312 1541
+109440 1519
+109568 1579
+109696 1532
+109824 1598
+109952 1547
+110080 1573
+110208 1545
+110336 1566
+110464 1553
+110592 1547
+110720 1566
+110848 1582
+110976 1568
+111104 1557
+111232 1559
+111360 1590
+111488 1561
+111616 1547
+111744 1574
+111872 1572
+112000 1542
+112128 1611
+112256 1554
+112384 1579
+112512 1581
+112640 1563
+112768 1592
+112896 1593
+113024 1553
+113152 1592
+113280 1590
+113408 1569
+113536 1596
+113664 1606
+113792 1594
+113920 1642
+114048 1570
+114176 1642
+114304 1648
+114432 1570
+114560 1630
+114688 1649
+114816 1581
+114944 1628
+115072 1573
+115200 1628
+115328 1631
+115456 1576
+115584 1635
+115712 1624
+115840 1592
+115968 1634
+116096 1614
+116224 1661
+116352 1609
+116480 1662
+116608 1577
+116736 1662
+116864 1647
+116992 1617
+117120 1627
+117248 1657
+117376 1631
+117504 1598
+117632 1681
+117760 1643
+117888 1634
+118016 1618
+118144 1639
+118272 1651
+118400 1605
+118528 1591
+118656 1655
+118784 1631
+118912 1622
+119040 1624
+119168 1629
+119296 1598
+119424 1665
+119552 1676
+119680 1620
+119808 1616
+119936 1624
+120064 1669
+120192 1673
+120320 1638
+120448 1627
+120576 1611
+120704 1700
+120832 1653
+120960 1715
+121088 1661
+121216 1659
+121344 1640
+121472 1666
+121600 1662
+121728 1680
+121856 1679
+121984 1707
+122112 1640
+122240 1626
+122368 1684
+122496 1666
+122624 1652
+122752 1633
+122880 1674
+123008 1676
+123136 1665
+123264 1687
+123392 1664
+123520 1659
+123648 1643
+123776 1672
+123904 1694
+124032 1641
+124160 1752
+124288 1648
+124416 1658
+124544 1720
+124672 1711
+124800 1723
+124928 1662
+125056 1647
+125184 1772
+125312 1671
+125440 1647
+125568 1719
+125696 1636
+125824 1694
+125952 1648
+126080 1750
+126208 1713
+126336 1771
+126464 1748
+126592 1655
+126720 1720
+126848 1720
+126976 1675
+127104 1662
+127232 1722
+127360 1675
+127488 1667
+127616 1671
+127744 1668
+127872 1659
+128000 1701
+128128 1667
+128256 1731
+128384 1671
+128512 1670
+128640 1675
+128768 1688
+128896 1690
+129024 1714
+129152 1761
+129280 1761
+129408 1684
+129536 1685
+129664 1774
+129792 1751
+129920 1679
+130048 1678
+130176 1712
+130304 1688
+130432 1810
+130560 1756
+130688 1684
+130816 1686
+130944 1688
+131072 1680
+131328 1750
+131584 1683
+131840 1697
+132096 1687
+132352 1694
+132608 1683
+132864 1683
+133120 1760
+133376 1718
+133632 1687
+133888 1707
+134144 1754
+134400 1761
+134656 1705
+134912 1695
+135168 1758
+135424 1697
+135680 1761
+135936 1754
+136192 1730
+136448 1725
+136704 1838
+136960 1743
+137216 1741
+137472 1739
+137728 1798
+137984 1737
+138240 1846
+138496 1744
+138752 1761
+139008 1755
+139264 1837
+139520 1776
+139776 1749
+140032 1832
+140288 1777
+140544 1792
+140800 1839
+141056 1777
+141312 1830
+141568 1787
+141824 1793
+142080 1798
+142336 1779
+142592 1806
+142848 1856
+143104 1804
+143360 1793
+143616 1798
+143872 1810
+144128 1805
+144384 1820
+144640 1808
+144896 1823
+145152 1812
+145408 1810
+145664 1840
+145920 1829
+146176 1826
+146432 1931
+146688 1889
+146944 1892
+147200 1816
+147456 1832
+147712 1873
+147968 1831
+148224 1836
+148480 1856
+148736 1846
+148992 1851
+149248 1902
+149504 1842
+149760 1846
+150016 1849
+150272 1867
+150528 1909
+150784 1846
+151040 1852
+151296 1859
+151552 1891
+151808 1845
+152064 1895
+152320 1857
+152576 1846
+152832 1929
+153088 1850
+153344 1859
+153600 1923
+153856 1868
+154112 1930
+154368 1867
+154624 1873
+154880 1921
+155136 1881
+155392 1879
+155648 1932
+155904 1874
+156160 1867
+156416 1984
+156672 1882
+156928 1872
+157184 1876
+157440 1948
+157696 1893
+157952 1886
+158208 1892
+158464 1890
+158720 1955
+158976 1893
+159232 1901
+159488 1881
+159744 1971
+160000 1891
+160256 1972
+160512 1904
+160768 1982
+161024 1934
+161280 1924
+161536 1925
+161792 1978
+162048 1915
+162304 1944
+162560 1937
+162816 1933
+163072 1929
+163328 2001
+163584 1934
+163840 1947
+164096 1949
+164352 1947
+164608 2008
+164864 1944
+165120 1964
+165376 1960
+165632 1947
+165888 1953
+166144 1971
+166400 2006
+166656 1964
+166912 1970
+167168 1968
+167424 1963
+167680 2001
+167936 1972
+168192 1989
+168448 1980
+168704 1980
+168960 2002
+169216 2061
+169472 1998
+169728 1991
+169984 2017
+170240 2036
+170496 2083
+170752 2016
+171008 2007
+171264 2009
+171520 2023
+171776 2027
+172032 2024
+172288 2022
+172544 2052
+172800 2031
+173056 2039
+173312 2042
+173568 2101
+173824 2042
+174080 2039
+174336 2045
+174592 2048
+174848 2049
+175104 2065
+175360 2056
+175616 2068
+175872 2073
+176128 2062
+176384 2073
+176640 2072
+176896 2061
+177152 2059
+177408 2093
+177664 2085
+177920 2085
+178176 2149
+178432 2084
+178688 2068
+178944 2124
+179200 2074
+179456 2088
+179712 2140
+179968 2088
+180224 2138
+180480 2088
+180736 2094
+180992 2069
+181248 2065
+181504 2084
+181760 2080
+182016 2058
+182272 2078
+182528 2069
+182784 2067
+183040 2084
+183296 2082
+183552 2079
+183808 2075
+184064 2081
+184320 2076
+184576 2092
+184832 2085
+185088 2163
+185344 2083
+185600 2092
+185856 2094
+186112 2091
+186368 2105
+186624 2080
+186880 2115
+187136 2093
+187392 2084
+187648 2170
+187904 2106
+188160 2096
+188416 2116
+188672 2117
+188928 2105
+189184 2109
+189440 2122
+189696 2121
+189952 2103
+190208 2107
+190464 2123
+190720 2123
+190976 2115
+191232 2137
+191488 2129
+191744 2136
+192000 2125
+192256 2135
+192512 2130
+192768 2134
+193024 2141
+193280 2135
+193536 2156
+193792 2142
+194048 2148
+194304 2163
+194560 2154
+194816 2156
+195072 2171
+195328 2233
+195584 2186
+195840 2179
+196096 2171
+196352 2190
+196608 2201
+196864 2216
+197120 2219
+197376 2203
+197632 2211
+197888 2198
+198144 2219
+198400 2222
+198656 2239
+198912 2235
+199168 2224
+199424 2234
+199680 2229
+199936 2218
+200192 2236
+200448 2268
+200704 2284
+200960 2240
+201216 2282
+201472 2252
+201728 2269
+201984 2277
+202240 2264
+202496 2277
+202752 2278
+203008 2279
+203264 2262
+203520 2286
+203776 2299
+204032 2283
+204288 2294
+204544 2283
+204800 2278
+205056 2299
+205312 2298
+205568 2317
+205824 2298
+206080 2311
+206336 2286
+206592 2300
+206848 2306
+207104 2301
+207360 2367
+207616 2308
+207872 2312
+208128 2305
+208384 2327
+208640 2307
+208896 2324
+209152 2312
+209408 2315
+209664 2331
+209920 2317
+210176 2409
+210432 2321
+210688 2322
+210944 2319
+211200 2322
+211456 2319
+211712 2336
+211968 2331
+212224 2323
+212480 2322
+212736 2345
+212992 2334
+213248 2335
+213504 2339
+213760 2344
+214016 2324
+214272 2328
+214528 2343
+214784 2326
+215040 2343
+215296 2345
+215552 2333
+215808 2341
+216064 2353
+216320 2341
+216576 2340
+216832 2346
+217088 2354
+217344 2344
+217600 2351
+217856 2344
+218112 2353
+218368 2350
+218624 2347
+218880 2359
+219136 2360
+219392 2361
+219648 2365
+219904 2359
+220160 2366
+220416 2372
+220672 2369
+220928 2359
+221184 2386
+221440 2389
+221696 2379
+221952 2384
+222208 2382
+222464 2396
+222720 2394
+222976 2402
+223232 2403
+223488 2407
+223744 2400
+224000 2409
+224256 2421
+224512 2427
+224768 2430
+225024 2405
+225280 2420
+225536 2435
+225792 2487
+226048 2449
+226304 2457
+226560 2544
+226816 2462
+227072 2440
+227328 2469
+227584 2456
+227840 2456
+228096 2472
+228352 2483
+228608 2478
+228864 2476
+229120 2473
+229376 2484
+229632 2483
+229888 2495
+230144 2496
+230400 2499
+230656 2505
+230912 2513
+231168 2505
+231424 2510
+231680 2516
+231936 2521
+232192 2509
+232448 2522
+232704 2531
+232960 2509
+233216 2525
+233472 2524
+233728 2525
+233984 2528
+234240 2520
+234496 2547
+234752 2534
+235008 2544
+235264 2532
+235520 2545
+235776 2549
+236032 2537
+236288 2554
+236544 2537
+236800 2539
+237056 2546
+237312 2550
+237568 2559
+237824 2560
+238080 2553
+238336 2549
+238592 2557
+238848 2557
+239104 2556
+239360 2569
+239616 2563
+239872 2557
+240128 2565
+240384 2567
+240640 2557
+240896 2562
+241152 2563
+241408 2584
+241664 2563
+241920 2582
+242176 2568
+242432 2554
+242688 2571
+242944 2586
+243200 2553
+243456 2574
+243712 2589
+243968 2581
+244224 2586
+244480 2581
+244736 2585
+244992 2583
+245248 2595
+245504 2575
+245760 2601
+246016 2606
+246272 2569
+246528 2564
+246784 2565
+247040 2567
+247296 2577
+247552 2584
+247808 2585
+248064 2649
+248320 2590
+248576 2574
+248832 2578
+249088 2592
+249344 2600
+249600 2598
+249856 2612
+250112 2599
+250368 2603
+250624 2602
+250880 2601
+251136 2614
+251392 2620
+251648 2619
+251904 2618
+252160 2636
+252416 2624
+252672 2633
+252928 2619
+253184 2631
+253440 2633
+253696 2647
+253952 2662
+254208 2662
+254464 2668
+254720 2668
+254976 2659
+255232 2681
+255488 2678
+255744 2684
+256000 2691
+256256 2712
+256512 2702
+256768 2693
+257024 2693
+257280 2712
+257536 2709
+257792 2712
+258048 2709
+258304 2723
+258560 2720
+258816 2717
+259072 2734
+259328 2728
+259584 2728
+259840 2739
+260096 2742
+260352 2739
+260608 2747
+260864 2751
+261120 2746
+261376 2742
+261632 2740
+261888 2757
+262144 2754
+262656 2768
+263168 2764
+263680 2773
+264192 2765
+264704 2773
+265216 2779
+265728 2788
+266240 2778
+266752 2788
+267264 2796
+267776 2791
+268288 2800
+268800 2814
+269312 2805
+269824 2808
+270336 2821
+270848 2805
+271360 2813
+271872 2812
+272384 2808
+272896 2816
+273408 2819
+273920 2832
+274432 2842
+274944 2829
+275456 2844
+275968 2850
+276480 2858
+276992 2857
+277504 2862
+278016 2871
+278528 2873
+279040 2883
+279552 2892
+280064 2891
+280576 2890
+281088 2914
+281600 2965
+282112 2929
+282624 2910
+283136 2912
+283648 2924
+284160 2927
+284672 2935
+285184 2945
+285696 2957
+286208 2965
+286720 2966
+287232 2983
+287744 2989
+288256 2979
+288768 2982
+289280 2976
+289792 2991
+290304 3011
+290816 3017
+291328 3018
+291840 3029
+292352 3027
+292864 3014
+293376 3018
+293888 3033
+294400 3025
+294912 3022
+295424 3028
+295936 3064
+296448 3033
+296960 3035
+297472 3037
+297984 3039
+298496 3058
+299008 3037
+299520 3064
+300032 3094
+300544 3065
+301056 3070
+301568 3076
+302080 3060
+302592 3076
+303104 3077
+303616 3076
+304128 3091
+304640 3085
+305152 3092
+305664 3100
+306176 3090
+306688 3113
+307200 3119
+307712 3110
+308224 3133
+308736 3118
+309248 3121
+309760 3152
+310272 3140
+310784 3135
+311296 3140
+311808 3143
+312320 3150
+312832 3149
+313344 3136
+313856 3154
+314368 3166
+314880 3177
+315392 3180
+315904 3192
+316416 3190
+316928 3205
+317440 3212
+317952 3219
+318464 3221
+318976 3217
+319488 3229
+320000 3235
+320512 3239
+321024 3251
+321536 3236
+322048 3239
+322560 3360
+323072 3269
+323584 3252
+324096 3257
+324608 3337
+325120 3280
+325632 3277
+326144 3274
+326656 3310
+327168 3289
+327680 3294
+328192 3299
+328704 3293
+329216 3396
+329728 3299
+330240 3317
+330752 3360
+331264 3320
+331776 3333
+332288 3335
+332800 3327
+333312 3328
+333824 3352
+334336 3353
+334848 3388
+335360 3361
+335872 3354
+336384 3357
+336896 3373
+337408 3423
+337920 3391
+338432 3407
+338944 3409
+339456 3413
+339968 3407
+340480 3402
+340992 3515
+341504 3404
+342016 3425
+342528 3444
+343040 3437
+343552 3435
+344064 3463
+344576 3446
+345088 3481
+345600 3560
+346112 3488
+346624 3484
+347136 3468
+347648 3502
+348160 3485
+348672 3475
+349184 3503
+349696 3498
+350208 3557
+350720 3493
+351232 3511
+351744 3549
+352256 3519
+352768 3499
+353280 3527
+353792 3523
+354304 3568
+354816 3526
+355328 3574
+355840 3648
+356352 3556
+356864 3571
+357376 3614
+357888 3582
+358400 3574
+358912 3543
+359424 3562
+359936 3603
+360448 3595
+360960 3577
+361472 3565
+361984 3600
+362496 3570
+363008 3601
+363520 3590
+364032 3589
+364544 3613
+365056 3668
+365568 3611
+366080 3619
+366592 3626
+367104 3641
+367616 3620
+368128 3654
+368640 3672
+369152 3652
+369664 3675
+370176 3691
+370688 3719
+371200 3671
+371712 3747
+372224 3697
+372736 3679
+373248 3692
+373760 3735
+374272 3750
+374784 3714
+375296 3787
+375808 3711
+376320 3714
+376832 3735
+377344 3746
+377856 3739
+378368 3719
+378880 3741
+379392 3779
+379904 3809
+380416 3768
+380928 3811
+381440 3777
+381952 3763
+382464 3835
+382976 3780
+383488 3780
+384000 3873
+384512 3785
+385024 3785
+385536 3823
+386048 3784
+386560 3796
+387072 3823
+387584 3827
+388096 3824
+388608 3900
+389120 3822
+389632 3886
+390144 3880
+390656 3830
+391168 3928
+391680 3857
+392192 3854
+392704 3943
+393216 3897
+393728 3867
+394240 3850
+394752 3912
+395264 3969
+395776 3880
+396288 3877
+396800 3866
+397312 3889
+397824 3903
+398336 3922
+398848 3935
+399360 3917
+399872 3919
+400384 3941
+400896 3938
+401408 3935
+401920 3936
+402432 3991
+402944 3929
+403456 4009
+403968 4012
+404480 3963
+404992 4051
+405504 3991
+406016 4029
+406528 3987
+407040 4075
+407552 4063
+408064 4041
+408576 4026
+409088 4079
+409600 4005
+410112 4002
+410624 4025
+411136 4006
+411648 4062
+412160 4019
+412672 4037
+413184 4008
+413696 4042
+414208 4047
+414720 4051
+415232 4110
+415744 4088
+416256 4094
+416768 4112
+417280 4155
+417792 4156
+418304 4077
+418816 4140
+419328 4063
+419840 4093
+420352 4114
+420864 4140
+421376 4148
+421888 4116
+422400 4094
+422912 4117
+423424 4087
+423936 4115
+424448 4141
+424960 4122
+425472 4143
+425984 4161
+426496 4141
+427008 4250
+427520 4203
+428032 4191
+428544 4153
+429056 4179
+429568 4264
+430080 4168
+430592 4189
+431104 4217
+431616 4217
+432128 4258
+432640 4193
+433152 4228
+433664 4224
+434176 4210
+434688 4296
+435200 4226
+435712 4293
+436224 4235
+436736 4390
+437248 4369
+437760 4275
+438272 4279
+438784 4281
+439296 4308
+439808 4285
+440320 4272
+440832 4353
+441344 4329
+441856 4307
+442368 4276
+442880 4308
+443392 4289
+443904 4293
+444416 4303
+444928 4300
+445440 4366
+445952 4312
+446464 4399
+446976 4304
+447488 4405
+448000 4355
+448512 4330
+449024 4332
+449536 4408
+450048 4334
+450560 4415
+451072 4360
+451584 4363
+452096 4401
+452608 4342
+453120 4359
+453632 4506
+454144 4385
+454656 4446
+455168 4374
+455680 4451
+456192 4468
+456704 4416
+457216 4400
+457728 4403
+458240 4403
+458752 4402
+459264 4411
+459776 4409
+460288 4428
+460800 4416
+461312 4492
+461824 4432
+462336 4461
+462848 4459
+463360 4451
+463872 4480
+464384 4481
+464896 4559
+465408 4569
+465920 4469
+466432 4498
+466944 4485
+467456 4502
+467968 4481
+468480 4564
+468992 4498
+469504 4530
+470016 4531
+470528 4511
+471040 4582
+471552 4508
+472064 4563
+472576 4537
+473088 4533
+473600 4611
+474112 4530
+474624 4578
+475136 4581
+475648 4554
+476160 4548
+476672 4547
+477184 4643
+477696 4598
+478208 4571
+478720 4626
+479232 4668
+479744 4572
+480256 4610
+480768 4598
+481280 4612
+481792 4676
+482304 4614
+482816 4693
+483328 4697
+483840 4626
+484352 4620
+484864 4645
+485376 4699
+485888 4663
+486400 4653
+486912 4640
+487424 4795
+487936 4687
+488448 4703
+488960 4710
+489472 4698
+489984 4790
+490496 4708
+491008 4748
+491520 4683
+492032 4771
+492544 4782
+493056 4799
+493568 4753
+494080 4741
+494592 4709
+495104 4726
+495616 4713
+496128 4735
+496640 4781
+497152 4788
+497664 4770
+498176 4763
+498688 4799
+499200 4764
+499712 4814
+500224 4824
+500736 4765
+501248 4835
+501760 4787
+502272 4862
+502784 4894
+503296 4855
+503808 4813
+504320 4826
+504832 4817
+505344 4810
+505856 4800
+506368 4888
+506880 4806
+507392 4814
+507904 4875
+508416 4842
+508928 4895
+509440 4876
+509952 4828
+510464 4911
+510976 4908
+511488 4908
+512000 4888
+512512 4881
+513024 4862
+513536 4889
+514048 4925
+514560 4880
+515072 4927
+515584 4874
+516096 4891
+516608 4950
+517120 4975
+517632 4906
+518144 4968
+518656 4944
+519168 4946
+519680 4925
+520192 4917
+520704 4964
+521216 5071
+521728 5075
+522240 5015
+522752 5026
+523264 5012
+523776 4997
+524288 4981
+525312 5057
+526336 4977
+527360 4990
+528384 5117
+529408 5119
+530432 5079
+531456 5086
+532480 5128
+533504 5029
+534528 5157
+535552 5060
+536576 5054
+537600 5049
+538624 5079
+539648 5093
+540672 5132
+541696 5146
+542720 5117
+543744 5128
+544768 5169
+545792 5181
+546816 5171
+547840 5296
+548864 5163
+549888 5211
+550912 5240
+551936 5208
+552960 5407
+553984 5274
+555008 5300
+556032 5382
+557056 5325
+558080 5242
+559104 5310
+560128 5295
+561152 5369
+562176 5294
+563200 5313
+564224 5315
+565248 5310
+566272 5326
+567296 5377
+568320 5376
+569344 5452
+570368 5340
+571392 5413
+572416 5478
+573440 5441
+574464 5477
+575488 5435
+576512 5469
+577536 5476
+578560 5496
+579584 5498
+580608 5531
+581632 5451
+582656 5485
+583680 5559
+584704 5487
+585728 5536
+586752 5572
+587776 5557
+588800 5512
+589824 5559
+590848 5581
+591872 5534
+592896 5714
+593920 5768
+594944 6079
+595968 5834
+596992 5649
+598016 5585
+599040 5638
+600064 5668
+601088 5621
+602112 5645
+603136 5698
+604160 5646
+605184 5676
+606208 5697
+607232 5683
+608256 5774
+609280 5773
+610304 5828
+611328 5792
+612352 5868
+613376 5794
+614400 5781
+615424 5808
+616448 5811
+617472 5814
+618496 5799
+619520 5818
+620544 5876
+621568 5862
+622592 5788
+623616 5935
+624640 5899
+625664 5921
+626688 5895
+627712 5841
+628736 5864
+629760 6007
+630784 5937
+631808 5892
+632832 5983
+633856 5906
+634880 6000
+635904 6005
+636928 5995
+637952 6018
+638976 6042
+640000 5948
+641024 6030
+642048 6051
+643072 6053
+644096 5970
+645120 5991
+646144 6129
+647168 6057
+648192 6173
+649216 6088
+650240 6092
+651264 6037
+652288 6026
+653312 6208
+654336 6090
+655360 6174
+656384 6140
+657408 6124
+658432 6106
+659456 6114
+660480 6114
+661504 6183
+662528 6204
+663552 6257
+664576 6276
+665600 6139
+666624 6243
+667648 6217
+668672 6315
+669696 6280
+670720 6201
+671744 6294
+672768 6294
+673792 6382
+674816 6267
+675840 6303
+676864 6245
+677888 6285
+678912 6279
+679936 6318
+680960 6370
+681984 6371
+683008 6305
+684032 6325
+685056 6300
+686080 6379
+687104 6402
+688128 6456
+689152 6421
+690176 6408
+691200 6367
+692224 6454
+693248 6386
+694272 6385
+695296 6512
+696320 6477
+697344 6444
+698368 6524
+699392 6545
+700416 6665
+701440 6561
+702464 6567
+703488 6595
+704512 6569
+705536 6568
+706560 6595
+707584 6516
+708608 6614
+709632 6599
+710656 6627
+711680 6660
+712704 6642
+713728 6581
+714752 6688
+715776 6675
+716800 6677
+717824 6733
+718848 6614
+719872 6796
+720896 6697
+721920 6747
+722944 6775
+723968 6838
+724992 6770
+726016 6775
+727040 6720
+728064 6789
+729088 6869
+730112 6782
+731136 6785
+732160 6826
+733184 6775
+734208 6846
+735232 6829
+736256 6852
+737280 6949
+738304 6820
+739328 6786
+740352 6881
+741376 6906
+742400 6802
+743424 6864
+744448 6816
+745472 6859
+746496 6902
+747520 6839
+748544 6924
+749568 6969
+750592 6960
+751616 6970
+752640 6953
+753664 6898
+754688 6902
+755712 6930
+756736 6926
+757760 6981
+758784 7012
+759808 7089
+760832 7108
+761856 7017
+762880 7071
+763904 7065
+764928 7063
+765952 7143
+766976 7113
+768000 7131
+769024 7114
+770048 7053
+771072 7097
+772096 7143
+773120 7092
+774144 7088
+775168 7117
+776192 7140
+777216 7117
+778240 7222
+779264 7209
+780288 7221
+781312 7172
+782336 7336
+783360 7239
+784384 7265
+785408 7261
+786432 7332
+787456 7277
+788480 7211
+789504 7350
+790528 7319
+791552 7237
+792576 7289
+793600 7266
+794624 7342
+795648 7337
+796672 7348
+797696 7447
+798720 7367
+799744 7319
+800768 7321
+801792 7342
+802816 7339
+803840 7417
+804864 7346
+805888 7434
+806912 7385
+807936 7500
+808960 7389
+809984 7477
+811008 7484
+812032 7503
+813056 7499
+814080 7433
+815104 7577
+816128 7538
+817152 7549
+818176 7579
+819200 7535
+820224 7580
+821248 7500
+822272 7557
+823296 7610
+824320 7569
+825344 7538
+826368 7549
+827392 7558
+828416 7546
+829440 7674
+830464 7653
+831488 7633
+832512 7665
+833536 7651
+834560 7641
+835584 7695
+836608 7675
+837632 7622
+838656 7692
+839680 7721
+840704 7734
+841728 7702
+842752 7650
+843776 7683
+844800 7724
+845824 7853
+846848 7689
+847872 7718
+848896 7773
+849920 7781
+850944 7791
+851968 7822
+852992 7868
+854016 7833
+855040 7753
+856064 7791
+857088 7875
+858112 7811
+859136 7776
+860160 7855
+861184 7798
+862208 7918
+863232 7846
+864256 7928
+865280 8029
+866304 7870
+867328 7996
+868352 7878
+869376 7874
+870400 7981
+871424 7975
+872448 7918
+873472 7997
+874496 7942
+875520 7953
+876544 8077
+877568 7981
+878592 8030
+879616 8048
+880640 8078
+881664 8105
+882688 8133
+883712 8025
+884736 8176
+885760 8036
+886784 8111
+887808 8136
+888832 8104
+889856 8210
+890880 8178
+891904 8107
+892928 8249
+893952 8192
+894976 8209
+896000 8115
+897024 8127
+898048 8143
+899072 8229
+900096 8259
+901120 8246
+902144 8222
+903168 8329
+904192 8276
+905216 8183
+906240 8307
+907264 8208
+908288 8279
+909312 8220
+910336 8335
+911360 8281
+912384 8250
+913408 8250
+914432 8275
+915456 8267
+916480 8423
+917504 8285
+918528 8282
+919552 8325
+920576 8340
+921600 8411
+922624 8467
+923648 8423
+924672 8379
+925696 8403
+926720 8454
+927744 8369
+928768 8379
+929792 8543
+930816 8396
+931840 8455
+932864 8409
+933888 8503
+934912 8435
+935936 8543
+936960 8455
+937984 8480
+939008 8469
+940032 8594
+941056 8493
+942080 8689
+943104 8512
+944128 8509
+945152 8524
+946176 8553
+947200 8541
+948224 8542
+949248 8554
+950272 8561
+951296 8584
+952320 8583
+953344 8593
+954368 8600
+955392 8622
+956416 8616
+957440 8721
+958464 8649
+959488 8637
+960512 8654
+961536 8669
+962560 8688
+963584 8690
+964608 8715
+965632 8813
+966656 8763
+967680 8829
+968704 8801
+969728 8754
+970752 8778
+971776 8802
+972800 8769
+973824 8792
+974848 8802
+975872 8808
+976896 8909
+977920 8848
+978944 8841
+979968 8953
+980992 8902
+982016 8865
+983040 8878
+984064 8885
+985088 8888
+986112 8897
+987136 8914
+988160 8960
+989184 8922
+990208 8937
+991232 8943
+992256 8986
+993280 8982
+994304 8984
+995328 8976
+996352 8976
+997376 8995
+998400 9005
+999424 9006
+1000448 9021
+1001472 9019
+1002496 9055
+1003520 9059
+1004544 9062
+1005568 9069
+1006592 9127
+1007616 9073
+1008640 9146
+1009664 9084
+1010688 9097
+1011712 9098
+1012736 9101
+1013760 9124
+1014784 9123
+1015808 9129
+1016832 9135
+1017856 9238
+1018880 9204
+1019904 9173
+1020928 9219
+1021952 9192
+1022976 9204
+1024000 9262
+1025024 9220
+1026048 9257
+1027072 9271
+1028096 9332
+1029120 9270
+1030144 9269
+1031168 9380
+1032192 9341
+1033216 9294
+1034240 9334
+1035264 9385
+1036288 9343
+1037312 9363
+1038336 9411
+1039360 9336
+1040384 9363
+1041408 9413
+1042432 9377
+1043456 9356
+1044480 9417
+1045504 9414
+1046528 9392
+1047552 9444
+1048576 9497
+1050624 9501
+1052672 9456
+1054720 9487
+1056768 9553
+1058816 9524
+1060864 9609
+1062912 9643
+1064960 9658
+1067008 9673
+1069056 9597
+1071104 9651
+1073152 9649
+1075200 9663
+1077248 9750
+1079296 9728
+1081344 9819
+1083392 9813
+1085440 9830
+1087488 9823
+1089536 9792
+1091584 9881
+1093632 9793
+1095680 9875
+1097728 9830
+1099776 9927
+1101824 9920
+1103872 9975
+1105920 9888
+1107968 10016
+1110016 9945
+1112064 10008
+1114112 10061
+1116160 9981
+1118208 9999
+1120256 10028
+1122304 10055
+1124352 10140
+1126400 10070
+1128448 10092
+1130496 10116
+1132544 10129
+1134592 10139
+1136640 10168
+1138688 10275
+1140736 10201
+1142784 10311
+1144832 10249
+1146880 10268
+1148928 10295
+1150976 10381
+1153024 10326
+1155072 10332
+1157120 10360
+1159168 10447
+1161216 10375
+1163264 10403
+1165312 10424
+1167360 10434
+1169408 10450
+1171456 10460
+1173504 10574
+1175552 10490
+1177600 10500
+1179648 10529
+1181696 10547
+1183744 10553
+1185792 10580
+1187840 10586
+1189888 10597
+1191936 10625
+1193984 10632
+1196032 10649
+1198080 10679
+1200128 10697
+1202176 10715
+1204224 10735
+1206272 10754
+1208320 10758
+1210368 10769
+1212416 10803
+1214464 10819
+1216512 10828
+1218560 10836
+1220608 10878
+1222656 10879
+1224704 10907
+1226752 10934
+1228800 10964
+1230848 10980
+1232896 10996
+1234944 11025
+1236992 11036
+1239040 11055
+1241088 11072
+1243136 11081
+1245184 11101
+1247232 11131
+1249280 11143
+1251328 11172
+1253376 11183
+1255424 11189
+1257472 11210
+1259520 11217
+1261568 11233
+1263616 11241
+1265664 11292
+1267712 11280
+1269760 11301
+1271808 11311
+1273856 11322
+1275904 11338
+1277952 11385
+1280000 11387
+1282048 11402
+1284096 11419
+1286144 11418
+1288192 11462
+1290240 11477
+1292288 11509
+1294336 11528
+1296384 11550
+1298432 11669
+1300480 11565
+1302528 11574
+1304576 11586
+1306624 11613
+1308672 11646
+1310720 11652
+1312768 11676
+1314816 11700
+1316864 11786
+1318912 11739
+1320960 11742
+1323008 11771
+1325056 11782
+1327104 11805
+1329152 11825
+1331200 11829
+1333248 11876
+1335296 11876
+1337344 11884
+1339392 11938
+1341440 11934
+1343488 11938
+1345536 11950
+1347584 11981
+1349632 12002
+1351680 12012
+1353728 12026
+1355776 12047
+1357824 12068
+1359872 12069
+1361920 12118
+1363968 12116
+1366016 12161
+1368064 12142
+1370112 12159
+1372160 12174
+1374208 12316
+1376256 12230
+1378304 12312
+1380352 12329
+1382400 12262
+1384448 12307
+1386496 12339
+1388544 12315
+1390592 12373
+1392640 12442
+1394688 12346
+1396736 12440
+1398784 12459
+1400832 12451
+1402880 12531
+1404928 12461
+1406976 12465
+1409024 12560
+1411072 12530
+1413120 12542
+1415168 12550
+1417216 12672
+1419264 12634
+1421312 12655
+1423360 12715
+1425408 12736
+1427456 12758
+1429504 12673
+1431552 12773
+1433600 12760
+1435648 12732
+1437696 12825
+1439744 12785
+1441792 12770
+1443840 12791
+1445888 12843
+1447936 12823
+1449984 12892
+1452032 12889
+1454080 12953
+1456128 12936
+1458176 13531
+1460224 12912
+1462272 13011
+1464320 13012
+1466368 13054
+1468416 12992
+1470464 13056
+1472512 13066
+1474560 13048
+1476608 13143
+1478656 13058
+1480704 13082
+1482752 13101
+1484800 13159
+1486848 13138
+1488896 13221
+1490944 13176
+1492992 13208
+1495040 13225
+1497088 13252
+1499136 13360
+1501184 13284
+1503232 13307
+1505280 13385
+1507328 13451
+1509376 13352
+1511424 13388
+1513472 13385
+1515520 13415
+1517568 13540
+1519616 13448
+1521664 13447
+1523712 13468
+1525760 13490
+1527808 13606
+1529856 13556
+1531904 13535
+1533952 13558
+1536000 13558
+1538048 13571
+1540096 13613
+1542144 13605
+1544192 13637
+1546240 13646
+1548288 13654
+1550336 13684
+1552384 13701
+1554432 13724
+1556480 13745
+1558528 13760
+1560576 13773
+1562624 13801
+1564672 13810
+1566720 13815
+1568768 13842
+1570816 13885
+1572864 13901
+1574912 13895
+1576960 13993
+1579008 13931
+1581056 13970
+1583104 13990
+1585152 14002
+1587200 14023
+1589248 14035
+1591296 14150
+1593344 14110
+1595392 14132
+1597440 14193
+1599488 14220
+1601536 14155
+1603584 14199
+1605632 14175
+1607680 14229
+1609728 14211
+1611776 14222
+1613824 14240
+1615872 14263
+1617920 14284
+1619968 14294
+1622016 14293
+1624064 14306
+1626112 14352
+1628160 14337
+1630208 14362
+1632256 14382
+1634304 14384
+1636352 14425
+1638400 14420
+1640448 14460
+1642496 14470
+1644544 14552
+1646592 14498
+1648640 14522
+1650688 14532
+1652736 14572
+1654784 14577
+1656832 14589
+1658880 14649
+1660928 14632
+1662976 14757
+1665024 14763
+1667072 14737
+1669120 14715
+1671168 14849
+1673216 14759
+1675264 14791
+1677312 14812
+1679360 14803
+1681408 14885
+1683456 14837
+1685504 14980
+1687552 14958
+1689600 14905
+1691648 14958
+1693696 14962
+1695744 14953
+1697792 14961
+1699840 14968
+1701888 14990
+1703936 15125
+1705984 15062
+1708032 15060
+1710080 15039
+1712128 15120
+1714176 15193
+1716224 15234
+1718272 15166
+1720320 15179
+1722368 15274
+1724416 15216
+1726464 15241
+1728512 15303
+1730560 15287
+1732608 15372
+1734656 15310
+1736704 15284
+1738752 15364
+1740800 15357
+1742848 15337
+1744896 15420
+1746944 15460
+1748992 15521
+1751040 15518
+1753088 15560
+1755136 15452
+1757184 15456
+1759232 15567
+1761280 15497
+1763328 15607
+1765376 15619
+1767424 15680
+1769472 15642
+1771520 15590
+1773568 15675
+1775616 15716
+1777664 15761
+1779712 15777
+1781760 15795
+1783808 15825
+1785856 15819
+1787904 15798
+1789952 15855
+1792000 15803
+1794048 15862
+1796096 15776
+1798144 15909
+1800192 15871
+1802240 15857
+1804288 15840
+1806336 15848
+1808384 15951
+1810432 15904
+1812480 15920
+1814528 16019
+1816576 15958
+1818624 16075
+1820672 15989
+1822720 16010
+1824768 16032
+1826816 16031
+1828864 16062
+1830912 16063
+1832960 16242
+1835008 16175
+1837056 16134
+1839104 16313
+1841152 16150
+1843200 16191
+1845248 16368
+1847296 16221
+1849344 16359
+1851392 16273
+1853440 16285
+1855488 16299
+1857536 16322
+1859584 16334
+1861632 16360
+1863680 16376
+1865728 16391
+1867776 16398
+1869824 16433
+1871872 16458
+1873920 16466
+1875968 16482
+1878016 16498
+1880064 16498
+1882112 16517
+1884160 16536
+1886208 16535
+1888256 16575
+1890304 16594
+1892352 16592
+1894400 16618
+1896448 16633
+1898496 16662
+1900544 16674
+1902592 16676
+1904640 16703
+1906688 16708
+1908736 16738
+1910784 16750
+1912832 16768
+1914880 16787
+1916928 16813
+1918976 16805
+1921024 16843
+1923072 16855
+1925120 16876
+1927168 16890
+1929216 16903
+1931264 16932
+1933312 16949
+1935360 16960
+1937408 16996
+1939456 17016
+1941504 17042
+1943552 17061
+1945600 17085
+1947648 17102
+1949696 17112
+1951744 17134
+1953792 17138
+1955840 17155
+1957888 17174
+1959936 17192
+1961984 17209
+1964032 17220
+1966080 17245
+1968128 17266
+1970176 17292
+1972224 17297
+1974272 17310
+1976320 17315
+1978368 17323
+1980416 17347
+1982464 17362
+1984512 17400
+1986560 17412
+1988608 17427
+1990656 17423
+1992704 17451
+1994752 17476
+1996800 17527
+1998848 17549
+2000896 17522
+2002944 17537
+2004992 17571
+2007040 17590
+2009088 17596
+2011136 17685
+2013184 17652
+2015232 17652
+2017280 17660
+2019328 17709
+2021376 17750
+2023424 17726
+2025472 17758
+2027520 17792
+2029568 17792
+2031616 17862
+2033664 17881
+2035712 17843
+2037760 17863
+2039808 17909
+2041856 17922
+2043904 17963
+2045952 17982
+2048000 17964
+2050048 17954
+2052096 17982
+2054144 17996
+2056192 18006
+2058240 18085
+2060288 18087
+2062336 18141
+2064384 18061
+2066432 18081
+2068480 18079
+2070528 18220
+2072576 18193
+2074624 18205
+2076672 18184
+2078720 18283
+2080768 18288
+2082816 18224
+2084864 18316
+2086912 18319
+2088960 18280
+2091008 18310
+2093056 18307
+2095104 18429
+2097152 18422
+2101248 18392
+2105344 18478
+2109440 18458
+2113536 18517
+2117632 18554
+2121728 18587
+2125824 18691
+2129920 18648
+2134016 18840
+2138112 18724
+2142208 18819
+2146304 18804
+2150400 18896
+2154496 18904
+2158592 18879
+2162688 18922
+2166784 18939
+2170880 18979
+2174976 19076
+2179072 19136
+2183168 19066
+2187264 19153
+2191360 19264
+2195456 19223
+2199552 19247
+2203648 19274
+2207744 19314
+2211840 19344
+2215936 19378
+2220032 19425
+2224128 19491
+2228224 19469
+2232320 19507
+2236416 19551
+2240512 19577
+2244608 19633
+2248704 19634
+2252800 19685
+2256896 19723
+2260992 19759
+2265088 19774
+2269184 19847
+2273280 19861
+2277376 19957
+2281472 19930
+2285568 20058
+2289664 20033
+2293760 20039
+2297856 20148
+2301952 20202
+2306048 20179
+2310144 20190
+2314240 20211
+2318336 20245
+2322432 20273
+2326528 20376
+2330624 20433
+2334720 20451
+2338816 20401
+2342912 20474
+2347008 20475
+2351104 20582
+2355200 20537
+2359296 20587
+2363392 20661
+2367488 20730
+2371584 20765
+2375680 20863
+2379776 20782
+2383872 20882
+2387968 20994
+2392064 20901
+2396160 20945
+2400256 20947
+2404352 21040
+2408448 21035
+2412544 21101
+2416640 21159
+2420736 21201
+2424832 21165
+2428928 21219
+2433024 21231
+2437120 21346
+2441216 21331
+2445312 21327
+2449408 21368
+2453504 21583
+2457600 21468
+2461696 21622
+2465792 21549
+2469888 21604
+2473984 21664
+2478080 21648
+2482176 21640
+2486272 21707
+2490368 21722
+2494464 21878
+2498560 21841
+2502656 21820
+2506752 21871
+2510848 21908
+2514944 21966
+2519040 21952
+2523136 22027
+2527232 22014
+2531328 22132
+2535424 22083
+2539520 22126
+2543616 22172
+2547712 22224
+2551808 22259
+2555904 22339
+2560000 22404
+2564096 22338
+2568192 22379
+2572288 22466
+2576384 22437
+2580480 22495
+2584576 22515
+2588672 22551
+2592768 22579
+2596864 22599
+2600960 22672
+2605056 22683
+2609152 22728
+2613248 22812
+2617344 22772
+2621440 22804
+2625536 22873
+2629632 22876
+2633728 22931
+2637824 22966
+2641920 22992
+2646016 23031
+2650112 23143
+2654208 23112
+2658304 23156
+2662400 23181
+2666496 23216
+2670592 23310
+2674688 23399
+2678784 23419
+2682880 23432
+2686976 23466
+2691072 23389
+2695168 23469
+2699264 23486
+2703360 23528
+2707456 23561
+2711552 23590
+2715648 23613
+2719744 23650
+2723840 23724
+2727936 23818
+2732032 23785
+2736128 23805
+2740224 23972
+2744320 23883
+2748416 23951
+2752512 23987
+2756608 24052
+2760704 24046
+2764800 24047
+2768896 24153
+2772992 24137
+2777088 24151
+2781184 24275
+2785280 24208
+2789376 24352
+2793472 24297
+2797568 24389
+2801664 24358
+2805760 24408
+2809856 24556
+2813952 24535
+2818048 24554
+2822144 24611
+2826240 24580
+2830336 24644
+2834432 24676
+2838528 24769
+2842624 24787
+2846720 24805
+2850816 24778
+2854912 24832
+2859008 24929
+2863104 24928
+2867200 25037
+2871296 24939
+2875392 25035
+2879488 25115
+2883584 25048
+2887680 25115
+2891776 25177
+2895872 25156
+2899968 25209
+2904064 25303
+2908160 25335
+2912256 25330
+2916352 26120
+2920448 25459
+2924544 25501
+2928640 25514
+2932736 25508
+2936832 25549
+2940928 25557
+2945024 25649
+2949120 25629
+2953216 25699
+2957312 25788
+2961408 25751
+2965504 25778
+2969600 25866
+2973696 25964
+2977792 25853
+2981888 25972
+2985984 25972
+2990080 25980
+2994176 26013
+2998272 26099
+3002368 26099
+3006464 26186
+3010560 26221
+3014656 26279
+3018752 26300
+3022848 26393
+3026944 26365
+3031040 26451
+3035136 26419
+3039232 26484
+3043328 26384
+3047424 26490
+3051520 26576
+3055616 26600
+3059712 26669
+3063808 26633
+3067904 26710
+3072000 26765
+3076096 26766
+3080192 26820
+3084288 26807
+3088384 26846
+3092480 26976
+3096576 26999
+3100672 27065
+3104768 26957
+3108864 27018
+3112960 27099
+3117056 27131
+3121152 27169
+3125248 27101
+3129344 27227
+3133440 27165
+3137536 27273
+3141632 27329
+3145728 27462
+3149824 27395
+3153920 27337
+3158016 27482
+3162112 27401
+3166208 27571
+3170304 27483
+3174400 27526
+3178496 27567
+3182592 27596
+3186688 27635
+3190784 27657
+3194880 27714
+3198976 27744
+3203072 27770
+3207168 27916
+3211264 27832
+3215360 27969
+3219456 27906
+3223552 27921
+3227648 27972
+3231744 28150
+3235840 28032
+3239936 28208
+3244032 28098
+3248128 28147
+3252224 28185
+3256320 28316
+3260416 28263
+3264512 28357
+3268608 28449
+3272704 28361
+3276800 28413
+3280896 28491
+3284992 28494
+3289088 28508
+3293184 28542
+3297280 28579
+3301376 28682
+3305472 28689
+3309568 28765
+3313664 28697
+3317760 28734
+3321856 28764
+3325952 28910
+3330048 28843
+3334144 28895
+3338240 28970
+3342336 28952
+3346432 29095
+3350528 29037
+3354624 29198
+3358720 29146
+3362816 29262
+3366912 29229
+3371008 29210
+3375104 29242
+3379200 29359
+3383296 29338
+3387392 29422
+3391488 29375
+3395584 29482
+3399680 29576
+3403776 29506
+3407872 29556
+3411968 29626
+3416064 29678
+3420160 29837
+3424256 29745
+3428352 29721
+3432448 29813
+3436544 29847
+3440640 29835
+3444736 29929
+3448832 29955
+3452928 29892
+3457024 30063
+3461120 30043
+3465216 30116
+3469312 30032
+3473408 30062
+3477504 30102
+3481600 30326
+3485696 30148
+3489792 30304
+3493888 30230
+3497984 30408
+3502080 30383
+3506176 30338
+3510272 30496
+3514368 30413
+3518464 30453
+3522560 30486
+3526656 30516
+3530752 30564
+3534848 30642
+3538944 30774
+3543040 30658
+3547136 30696
+3551232 30751
+3555328 30835
+3559424 30786
+3563520 30826
+3567616 30851
+3571712 30897
+3575808 30932
+3579904 30957
+3584000 31000
+3588096 31038
+3592192 31062
+3596288 31100
+3600384 31138
+3604480 31168
+3608576 31206
+3612672 31251
+3616768 31287
+3620864 31332
+3624960 31354
+3629056 31400
+3633152 31613
+3637248 31463
+3641344 31511
+3645440 31530
+3649536 31585
+3653632 31754
+3657728 31627
+3661824 31693
+3665920 31767
+3670016 31727
+3674112 31775
+3678208 31807
+3682304 31836
+3686400 31877
+3690496 31906
+3694592 31983
+3698688 32019
+3702784 32051
+3706880 32118
+3710976 32099
+3715072 32150
+3719168 32153
+3723264 32246
+3727360 32242
+3731456 32272
+3735552 32336
+3739648 32340
+3743744 32378
+3747840 32441
+3751936 32473
+3756032 32476
+3760128 32576
+3764224 32524
+3768320 32566
+3772416 32706
+3776512 32695
+3780608 32672
+3784704 32772
+3788800 32752
+3792896 32786
+3796992 32919
+3801088 32863
+3805184 32961
+3809280 33032
+3813376 32962
+3817472 33026
+3821568 33123
+3825664 33074
+3829760 33216
+3833856 33127
+3837952 33151
+3842048 33208
+3846144 33322
+3850240 33269
+3854336 33301
+3858432 33333
+3862528 33366
+3866624 33401
+3870720 33436
+3874816 33471
+3878912 33515
+3883008 33544
+3887104 33581
+3891200 33619
+3895296 33668
+3899392 33695
+3903488 33733
+3907584 33759
+3911680 33790
+3915776 33855
+3919872 33868
+3923968 33884
+3928064 33920
+3932160 33971
+3936256 33998
+3940352 34038
+3944448 34055
+3948544 34106
+3952640 34145
+3956736 34163
+3960832 34236
+3964928 34269
+3969024 34283
+3973120 34321
+3977216 34350
+3981312 34392
+3985408 34483
+3989504 34469
+3993600 34494
+3997696 34558
+4001792 34564
+4005888 34611
+4009984 34645
+4014080 34681
+4018176 34690
+4022272 34749
+4026368 34759
+4030464 34787
+4034560 34869
+4038656 34913
+4042752 34891
+4046848 34962
+4050944 34996
+4055040 35014
+4059136 35069
+4063232 35179
+4067328 35116
+4071424 35231
+4075520 35264
+4079616 35297
+4083712 35258
+4087808 35371
+4091904 35326
+4096000 35422
+4100096 35463
+4104192 35421
+4108288 35520
+4112384 35550
+4116480 35526
+4120576 35632
+4124672 35634
+4128768 35731
+4132864 35766
+4136960 35717
+4141056 35741
+4145152 35780
+4149248 35834
+4153344 35868
+4157440 35893
+4161536 35932
+4165632 35956
+4169728 36094
+4173824 36030
+4177920 36175
+4182016 36082
+4186112 36226
+4190208 36138
+4194304 36181
+4202496 36269
+4210688 36332
+4218880 36487
+4227072 36555
+4235264 36544
+4243456 36627
+4251648 36708
+4259840 36762
+4268032 36823
+4276224 36908
+4284416 37036
+4292608 37039
+4300800 37088
+4308992 37166
+4317184 37336
+4325376 37330
+4333568 37385
+4341760 37469
+4349952 37537
+4358144 37602
+4366336 37667
+4374528 37763
+4382720 37834
+4390912 37944
+4399104 38028
+4407296 38040
+4415488 38112
+4423680 38167
+4431872 38302
+4440064 38298
+4448256 38376
+4456448 38441
+4464640 38598
+4472832 38576
+4481024 38667
+4489216 38757
+4497408 38786
+4505600 38941
+4513792 39008
+4521984 39005
+4530176 39088
+4538368 39186
+4546560 39317
+4554752 39254
+4562944 39343
+4571136 39397
+4579328 39489
+4587520 39603
+4595712 39647
+4603904 39685
+4612096 39778
+4620288 39966
+4628480 39917
+4636672 40109
+4644864 40114
+4653056 40090
+4661248 40341
+4669440 40316
+4677632 40319
+4685824 40420
+4694016 40456
+4702208 40539
+4710400 40625
+4718592 40646
+4726784 40828
+4734976 40786
+4743168 40940
+4751360 41034
+4759552 41072
+4767744 41126
+4775936 41193
+4784128 41333
+4792320 41324
+4800512 41346
+4808704 41487
+4816896 41525
+4825088 41600
+4833280 41830
+4841472 41748
+4849664 41856
+4857856 41909
+4866048 42064
+4874240 42031
+4882432 42148
+4890624 42171
+4898816 42181
+4907008 42330
+4915200 42434
+4923392 42500
+4931584 42475
+4939776 42627
+4947968 42755
+4956160 42677
+4964352 42826
+4972544 42943
+4980736 42974
+4988928 43017
+4997120 43038
+5005312 43194
+5013504 43282
+5021696 43227
+5029888 43320
+5038080 43376
+5046272 43443
+5054464 43537
+5062656 43606
+5070848 43744
+5079040 43803
+5087232 43855
+5095424 43931
+5103616 44021
+5111808 44198
+5120000 44157
+5128192 44207
+5136384 44307
+5144576 44372
+5152768 44557
+5160960 44414
+5169152 44479
+5177344 44551
+5185536 44790
+5193728 44673
+5201920 44763
+5210112 44844
+5218304 44914
+5226496 44968
+5234688 45244
+5242880 45222
+5251072 45205
+5259264 45299
+5267456 45311
+5275648 45389
+5283840 45463
+5292032 45532
+5300224 45605
+5308416 45751
+5316608 45736
+5324800 45848
+5332992 45878
+5341184 46057
+5349376 46007
+5357568 46092
+5365760 46157
+5373952 46228
+5382144 46309
+5390336 46373
+5398528 46451
+5406720 46507
+5414912 46586
+5423104 46662
+5431296 46781
+5439488 46809
+5447680 46975
+5455872 47039
+5464064 46988
+5472256 47098
+5480448 47214
+5488640 47193
+5496832 47272
+5505024 47447
+5513216 47495
+5521408 47534
+5529600 47550
+5537792 47719
+5545984 47685
+5554176 47764
+5562368 47824
+5570560 47899
+5578752 47958
+5586944 48036
+5595136 48115
+5603328 48171
+5611520 48323
+5619712 48306
+5627904 48381
+5636096 48512
+5644288 48534
+5652480 48588
+5660672 48681
+5668864 48740
+5677056 48800
+5685248 48893
+5693440 48971
+5701632 49021
+5709824 49080
+5718016 49226
+5726208 49220
+5734400 49298
+5742592 49370
+5750784 49435
+5758976 49503
+5767168 49619
+5775360 49646
+5783552 49836
+5791744 49871
+5799936 49869
+5808128 49915
+5816320 50040
+5824512 50099
+5832704 50154
+5840896 50229
+5849088 50275
+5857280 50374
+5865472 50494
+5873664 50581
+5881856 50609
+5890048 50671
+5898240 50747
+5906432 50956
+5914624 50933
+5922816 50957
+5931008 51067
+5939200 51193
+5947392 51219
+5955584 51261
+5963776 51268
+5971968 51311
+5980160 51587
+5988352 51564
+5996544 51673
+6004736 51599
+6012928 51769
+6021120 51733
+6029312 51803
+6037504 51888
+6045696 51937
+6053888 52008
+6062080 52093
+6070272 52143
+6078464 52238
+6086656 52306
+6094848 52355
+6103040 52446
+6111232 52529
+6119424 52558
+6127616 52665
+6135808 52698
+6144000 52791
+6152192 52901
+6160384 53014
+6168576 53078
+6176768 53043
+6184960 53127
+6193152 53192
+6201344 53271
+6209536 53388
+6217728 53402
+6225920 53464
+6234112 53649
+6242304 53607
+6250496 53671
+6258688 53738
+6266880 53821
+6275072 54005
+6283264 53971
+6291456 54020
+6299648 54232
+6307840 54161
+6316032 54252
+6324224 54299
+6332416 54372
+6340608 54441
+6348800 54509
+6356992 54595
+6365184 54652
+6373376 54731
+6381568 54795
+6389760 54857
+6397952 54960
+6406144 55015
+6414336 55067
+6422528 55170
+6430720 55203
+6438912 55270
+6447104 55427
+6455296 55439
+6463488 55573
+6471680 55578
+6479872 55636
+6488064 55692
+6496256 55757
+6504448 55877
+6512640 55900
+6520832 56069
+6529024 56044
+6537216 56112
+6545408 56177
+6553600 56265
+6561792 56319
+6569984 56390
+6578176 56455
+6586368 56533
+6594560 56596
+6602752 56668
+6610944 56736
+6619136 56806
+6627328 56885
+6635520 56948
+6643712 57027
+6651904 57092
+6660096 57175
+6668288 57237
+6676480 57315
+6684672 57374
+6692864 57429
+6701056 57596
+6709248 57589
+6717440 57687
+6725632 57720
+6733824 57835
+6742016 58421
+6750208 57959
+6758400 58019
+6766592 58061
+6774784 58167
+6782976 58200
+6791168 58279
+6799360 58451
+6807552 58539
+6815744 58489
+6823936 58548
+6832128 58673
+6840320 58737
+6848512 58788
+6856704 58829
+6864896 58894
+6873088 58989
+6881280 59034
+6889472 59179
+6897664 59197
+6905856 59278
+6914048 59382
+6922240 59427
+6930432 59459
+6938624 59528
+6946816 59577
+6955008 59659
+6963200 59872
+6971392 59846
+6979584 59880
+6987776 60074
+6995968 60140
+7004160 60144
+7012352 60197
+7020544 60250
+7028736 60291
+7036928 60361
+7045120 60466
+7053312 60580
+7061504 60585
+7069696 60671
+7077888 60756
+7086080 60820
+7094272 60847
+7102464 61022
+7110656 61083
+7118848 61077
+7127040 61168
+7135232 61199
+7143424 61286
+7151616 61511
+7159808 61394
+7168000 61571
+7176192 61543
+7184384 61625
+7192576 61701
+7200768 61750
+7208960 61823
+7217152 61928
+7225344 61967
+7233536 62064
+7241728 62128
+7249920 62169
+7258112 62245
+7266304 62309
+7274496 62430
+7282688 62476
+7290880 62516
+7299072 62597
+7307264 62684
+7315456 62763
+7323648 62836
+7331840 62876
+7340032 62967
+7348224 63038
+7356416 63084
+7364608 63181
+7372800 63334
+7380992 63344
+7389184 63410
+7397376 63483
+7405568 63611
+7413760 63586
+7421952 63683
+7430144 63767
+7438336 63904
+7446528 63912
+7454720 64098
+7462912 64003
+7471104 64179
+7479296 64316
+7487488 64268
+7495680 64271
+7503872 64369
+7512064 64491
+7520256 64510
+7528448 64535
+7536640 64610
+7544832 64776
+7553024 64832
+7561216 64854
+7569408 64921
+7577600 64949
+7585792 65096
+7593984 65189
+7602176 65220
+7610368 65257
+7618560 65348
+7626752 65439
+7634944 65547
+7643136 65533
+7651328 65663
+7659520 65758
+7667712 65902
+7675904 66435
+7684096 65998
+7692288 65999
+7700480 66194
+7708672 66211
+7716864 66230
+7725056 66221
+7733248 66303
+7741440 66387
+7749632 66529
+7757824 66514
+7766016 66621
+7774208 66643
+7782400 66687
+7790592 66861
+7798784 66862
+7806976 67003
+7815168 67037
+7823360 67020
+7831552 67118
+7839744 67195
+7847936 67350
+7856128 67324
+7864320 67390
+7872512 67560
+7880704 67523
+7888896 67586
+7897088 67654
+7905280 67749
+7913472 67822
+7921664 67880
+7929856 67938
+7938048 68013
+7946240 68085
+7954432 68153
+7962624 68225
+7970816 68304
+7979008 68361
+7987200 68429
+7995392 68512
+8003584 68591
+8011776 68641
+8019968 68722
+8028160 68788
+8036352 68887
+8044544 69110
+8052736 69085
+8060928 69196
+8069120 69131
+8077312 69237
+8085504 69357
+8093696 69368
+8101888 69643
+8110080 69597
+8118272 69609
+8126464 69648
+8134656 69748
+8142848 69919
+8151040 69898
+8159232 69981
+8167424 70067
+8175616 70028
+8183808 70218
+8192000 70272
+8200192 70357
+8208384 70410
+8216576 70514
+8224768 70503
+8232960 70623
+8241152 70720
+8249344 70659
+8257536 70779
+8265728 70792
+8273920 70978
+8282112 71039
+8290304 71013
+8298496 71213
+8306688 71203
+8314880 71231
+8323072 71298
+8331264 71347
+8339456 71517
+8347648 71609
+8355840 71575
+8364032 71642
+8372224 71709
+8380416 71824
+8388608 71855
#!/usr/bin/env python
#---------------------------------------------------------------------------------------------------
+# Example invocation:
+# % ./regress.py griffon_skampi_pt2pt.ski.dat 65536 120832
+#
+#
# Given two vectors of same length n: message size S(.. s_i ..), and communication time T( .. t_i .. )
# where t_i is the time associated to a message size s_i, computes the segmentation of the vectors
# in 3 segments such that linear regressions on the 3 segments maximize correlation.
if len(sys.argv) != 2 and len(sys.argv) != 4:
print("Usage : {} datafile".format(sys.argv[0]))
- print("or : {} datafile p1 p2".format(sys.argv[0]))
+ print("or : {0} datafile p1 p2".format(sys.argv[0]))
print("where : p1 < p2 belongs to sizes in datafiles")
sys.exit(-1)
Z.append( a * X[i] + b )
# compare real values and computed values
e = mean_logerr( Y[start:stop+1] , Z )
- #print(" range [%d,%d] err=%f‰ weight=%f" % (X[start],X[stop],e,(stop-start+1)/len(X)))
correl.append( (e, stop-start+1) ); # store correl. coef + number of values (segment length)
interv.append( (a,b, X[start],X[stop],e) );
for k in range(top_n_sol):
(err,interval) = result[k]
- print("\n RANK {}\n-------".format(k))
+ print(k)
+ print("\n RANK {0}\n-------".format(k))
print("** overall metric = {0}".format(err))
for (a,b,i,j,e) in interval:
print("** OPT: [{0} .. {1}] segment_metric={2} slope: {3} x + {4}".format(i,j,e,a,b))
--- /dev/null
+#!/usr/bin/python
+# This script takes the following command line parameters
+# 1) an input file containing 2 columns: message size and 1-way trip time
+# 2) the maximum relative error for a line segment
+# 3) the minimum number of points needed to justify adding a line segment
+# 4) the number of links
+# 5) the latency
+# 6) the bandwidth
+
+import sys
+
+def compute_regression(points):
+ N = len(points)
+
+ if N < 1:
+ return None
+
+ if N < 2:
+ return (0, points[0][1])
+
+ Sx = Sy = Sxx = Syy = Sxy = 0.0
+
+ for x, y in points:
+ Sx += x
+ Sy += y
+ Sxx += x*x
+ Syy += y*y
+ Sxy += x*y
+ denom = Sxx * N - Sx * Sx
+ # don't return 0 or negative values as a matter of principle...
+ m = max(sys.float_info.min, (Sxy * N - Sy * Sx) / denom)
+ b = max(sys.float_info.min, (Sxx * Sy - Sx * Sxy) / denom)
+ return (m, b)
+
+def compute_error(m, b, x, y):
+ yp = m*x+b
+ return abs(yp - y) / max(min(yp, y), sys.float_info.min)
+
+def compute_max_error(m, b, points):
+ max_error = 0.0
+ for x, y in points:
+ max_error = max(max_error, compute_error(m, b, x, y))
+ return max_error
+
+def get_max_error_point(m, b, points):
+ max_error_index = -1
+ max_error = 0.0
+
+ i = 0
+ while i < len(points):
+ x, y = points[i]
+ error = compute_error(m, b, x, y)
+ if error > max_error:
+ max_error_index = i
+ max_error = error
+ i += 1
+
+ return (max_error_index, max_error)
+
+infile_name = sys.argv[1]
+error_bound = float(sys.argv[2])
+min_seg_points = int(sys.argv[3])
+links = int(sys.argv[4])
+latency = float(sys.argv[5])
+bandwidth = float(sys.argv[6])
+
+infile = open(infile_name, 'r')
+
+# read datafile
+points = []
+for line in infile:
+ fields = line.split()
+ points.append((int(fields[0]), int(fields[1])))
+infile.close()
+
+# should sort points by x values
+points.sort()
+
+# break points up into segments
+pointsets = []
+lbi = 0
+while lbi < len(points):
+ min_ubi = lbi
+ max_ubi = len(points) - 1
+ while max_ubi - min_ubi > 1:
+ ubi = (min_ubi + max_ubi) / 2
+ m, b = compute_regression(points[lbi:ubi+1])
+ max_error = compute_max_error(m, b, points[lbi:ubi+1])
+ if max_error > error_bound:
+ max_ubi = ubi - 1
+ else:
+ min_ubi = ubi
+ ubi = max_ubi
+ if min_ubi < max_ubi:
+ m, b = compute_regression(points[lbi:max_ubi+1])
+ max_error = compute_max_error(m, b, points[lbi:max_ubi+1])
+ if max_error > error_bound:
+ ubi = min_ubi
+ pointsets.append(points[lbi:ubi+1])
+ lbi = ubi+1
+
+# try to merge larger segments if possible and compute piecewise regression
+i = 0
+segments = []
+notoutliers = 0
+while i < len(pointsets):
+ currpointset = []
+ j = i
+ while j < len(pointsets):
+ newpointset = currpointset + pointsets[j]
+ # if joining a small segment, we can delete bad points
+ if len(pointsets[j]) < min_seg_points:
+ k = 0
+ while k < len(pointsets[j]):
+ m, b = compute_regression(newpointset)
+ max_error_index, max_error = get_max_error_point(m, b, newpointset)
+ if max_error <= error_bound:
+ break
+ del newpointset[max_error_index]
+ k += 1
+ # only add new pointset if we had to delete fewer than its length
+ # points
+ if k < len(pointsets[j]):
+ i = j
+ currpointset = newpointset
+ # otherwise, we just see if it works...
+ else:
+ m, b = compute_regression(newpointset)
+ max_error = compute_max_error(m, b, newpointset)
+ if max_error > error_bound:
+ break
+ i = j
+ currpointset = newpointset
+ j += 1
+ i += 1
+    # outliers are ignored when constructing the piecewise function
+ if len(currpointset) < min_seg_points:
+ continue
+ notoutliers += len(currpointset)
+ m, b = compute_regression(currpointset)
+ lb = min(x for x, y in currpointset)
+ lat_factor = b / (1.0e6 * links * latency)
+ bw_factor = 1.0e6 / (m * bandwidth)
+ segments.append((lb, m, b, lat_factor, bw_factor))
+
+outliers = len(points) - notoutliers
+segments.sort()
+segments.reverse()
+
+print "/**--------- <copy/paste C code snippet in surf/network.c> -------------"
+print " * produced by:"
+print " *", " ".join(sys.argv)
+print " * outliers:", outliers
+print " * gnuplot: "
+print " plot \"%s\" using 1:2 with lines title \"data\", \\" % (infile_name)
+for lb, m, b, lat_factor, bw_factor in segments:
+ print " (x >= %d) ? %g*x+%g : \\" % (lb, m, b)
+print " 1.0 with lines title \"piecewise function\""
+print " *-------------------------------------------------------------------*/"
+print
+print "static double smpi_bandwidth_factor(double size)\n{\n"
+for lb, m, b, lat_factor, bw_factor in segments:
+ print " if (size >= %d) return %g;" % (lb, bw_factor)
+print " return 1.0;\n}\n"
+print "static double smpi_latency_factor(double size)\n{\n"
+for lb, m, b, lat_factor, bw_factor in segments:
+ print " if (size >= %d) return %g;" % (lb, lat_factor)
+print " return 1.0;\n}\n"
+print "/**--------- <copy/paste C code snippet in surf/network.c> -----------*/"
--- /dev/null
+/**--------- <copy/paste C code snippet in surf/network.c> -------------
+ * produced by:
+ * ./regression2.py ./pingpong-in.dat 0.15 30 2 2.4e-5 1.25e8
+ * outliers: 66
+ * gnuplot:
+ plot "./pingpong-in.dat" using 1:2 with lines title "data", \
+ (x >= 65472) ? 0.00850436*x+558.894 : \
+ (x >= 15424) ? 0.0114635*x+167.446 : \
+ (x >= 9376) ? 0.0136219*x+124.464 : \
+ (x >= 5776) ? 0.00735707*x+105.022 : \
+ (x >= 3484) ? 0.0103235*x+90.2886 : \
+ (x >= 1426) ? 0.0131384*x+77.3159 : \
+ (x >= 732) ? 0.0233927*x+93.6146 : \
+ (x >= 257) ? 0.0236608*x+93.7637 : \
+ (x >= 127) ? 0.0246645*x+94.0447 : \
+ (x >= 64) ? 0.037963*x+93.0877 : \
+ (x >= 0) ? 2.22507e-308*x+98.0633 : \
+ 1.0 with lines title "piecewise function"
+ *-------------------------------------------------------------------*/
+
+static double smpi_bandwidth_factor(double size)
+{
+
+ if (size >= 65472) return 0.940694;
+ if (size >= 15424) return 0.697866;
+ if (size >= 9376) return 0.58729;
+ if (size >= 5776) return 1.08739;
+ if (size >= 3484) return 0.77493;
+ if (size >= 1426) return 0.608902;
+ if (size >= 732) return 0.341987;
+ if (size >= 257) return 0.338112;
+ if (size >= 127) return 0.324353;
+ if (size >= 64) return 0.210731;
+ if (size >= 0) return 3.59539e+305;
+ return 1.0;
+}
+
+static double smpi_latency_factor(double size)
+{
+
+ if (size >= 65472) return 11.6436;
+ if (size >= 15424) return 3.48845;
+ if (size >= 9376) return 2.59299;
+ if (size >= 5776) return 2.18796;
+ if (size >= 3484) return 1.88101;
+ if (size >= 1426) return 1.61075;
+ if (size >= 732) return 1.9503;
+ if (size >= 257) return 1.95341;
+ if (size >= 127) return 1.95926;
+ if (size >= 64) return 1.93933;
+ if (size >= 0) return 2.04299;
+ return 1.0;
+}
+
+/**--------- <copy/paste C code snippet in surf/network.c> -----------*/
# with spaces.
INPUT = index.doc \
+ FAQ.doc \
installSimgrid.doc \
bindings.doc \
options.doc \
--- /dev/null
+/*! \page FAQ Frequently Asked Questions
+
+\htmlinclude .FAQ.doc.toc
+
+\section faq_simgrid I'm new to SimGrid. I have some questions. Where should I start?
+
+You are at the right place... Having a look at these
+<a href="http://www.loria.fr/~quinson/blog/2010/06/28/Tutorial_at_HPCS/">the slides of the HPCS'10 tutorial</a>
+(or to these <a href="http://graal.ens-lyon.fr/~alegrand/articles/slides_g5k_simul.pdf">ancient
+slides</a>, or to these
+<a href="http://graal.ens-lyon.fr/~alegrand/articles/Simgrid-Introduction.pdf">"obsolete" slides</a>)
+may give you some insights on what SimGrid can help you to do and what
+are its limitations. Then you definitely should read the \ref
+MSG_examples. The \ref GRAS_tut can also help you.
+
+If you are stuck at any point and if this FAQ cannot help you, please drop us a
+mail to the user mailing list: <simgrid-user@lists.gforge.inria.fr>.
+
+\subsection faq_interfaces What is the difference between MSG, SimDag, and GRAS? Do they serve the same purpose?
+
+It depends on how you define "purpose", I guess ;)
+
+They all allow you to build a prototype of application which you can run
+within the simulator afterward. They all share the same simulation kernel,
+which is the core of the SimGrid project. They differ by the way you express
+your application.
+
+With SimDag, you express your code as a collection of interdependent
+parallel tasks. So, in this model, applications can be seen as a DAG of
+tasks. This is the interface of choice for people wanting to port old
+code designed for SimGrid v1 or v2 to the framework's current version.
+
+With both GRAS and MSG, your application is seen as a set of communicating
+processes, exchanging data by the way of messages and performing computation
+on their own.
+
+The difference between both is that MSG is somehow easier to use, but GRAS
+is not limited to the simulator. Once you're done writing your GRAS code,
+you can run your code both in the simulator and on a real platform. For this,
+there are two implementations of the GRAS interface, one for simulation, and one
+for real execution. So, you just have to relink your code to choose one of
+both worlds.
+
+\subsection faq_visualization Visualizing and analyzing the results
+
+It is sometime convenient to "see" how the agents are behaving. If you
+like colors, you can use <tt>tools/MSG_visualization/colorize.pl </tt>
+as a filter to your MSG outputs. It works directly with INFO. Beware,
+INFO() prints on stderr. Do not forget to redirect if you want to
+filter (e.g. with bash):
+\verbatim
+./msg_test small_platform.xml small_deployment.xml 2>&1 | ../../tools/MSG_visualization/colorize.pl
+\endverbatim
+
+We also have a more graphical output. Have a look at section \ref options_tracing.
+
+\subsection faq_C Argh! Do I really have to code in C?
+
+Currently bindings on top of MSG are supported for Java, Ruby and Lua. You can find a few
+documentation about them on the doc page. Note that bindings are released separately from the main dist
+and so have their own version numbers.
+
+Moreover If you use C++,
+you should be able to use the SimGrid library as a standard C library
+and everything should work fine (simply <i>link</i> against this
+library; recompiling SimGrid with a C++ compiler won't work and it
+wouldn't help if you could).
+
+For now,
+we do not feel a real demand for any other language. But if you think there is one,
+ please speak up!
+
+\section faq_howto Feature related questions
+
+\subsection faq_MIA "Could you please add (your favorite feature here) to SimGrid?"
+
+Here is the deal. The whole SimGrid project (MSG, SURF, GRAS, ...) is
+meant to be kept as simple and generic as possible. We cannot add
+functions for everybody's needs when these functions can easily be
+built from the ones already in the API. Most of the time, it is
+possible and when it was not possible we always have upgraded the API
+accordingly. When somebody asks us a question like "How to do that?
+Is there a function in the API to simply do this?", we're always glad
+to answer and help. However if we don't need this code for our own
+need, there is no chance we're going to write it... it's your job! :)
+The counterpart to our answers is that once you come up with a neat
+implementation of this feature (task duplication, RPC, thread
+synchronization, ...), you should send it to us and we will be glad to
+add it to the distribution. Thus, other people will take advantage of
+it (and we don't have to answer this question again and again ;).
+
+You'll find in this section a few "Missing In Action" features. Many
+people have asked about it and we have given hints on how to simply do
+it with MSG. Feel free to contribute...
+
+\subsection faq_MIA_MSG MSG features
+
+\subsubsection faq_MIA_examples I want some more complex MSG examples!
+
+Many people have come to ask me a more complex example and each time,
+they have realized afterward that the basics were in the previous three
+examples.
+
+Of course they have often been needing more complex functions like
+MSG_process_suspend(), MSG_process_resume() and
+MSG_process_isSuspended() (to perform synchronization), or
+MSG_task_Iprobe() and MSG_process_sleep() (to avoid blocking
+receptions), or even MSG_process_create() (to design asynchronous
+communications or computations). But the examples are sufficient to
+start.
+
+We know. We should add some more examples, but not really some more
+complex ones... We should add some examples that illustrate some other
+functionalities (like how to simply encode asynchronous
+communications, RPC, process migrations, thread synchronization, ...)
+and we will do it when we will have a little bit more time. We have
+tried to document the examples so that they are understandable. Tell
+us if something is not clear and once again feel free to participate!
+:)
+
+\subsubsection faq_MIA_taskdup Missing in action: MSG Task duplication/replication
+
+There is no task duplication in MSG. When you create a task, you can
+process it or send it somewhere else. As soon as a process has sent
+this task, it doesn't have this task anymore. It's gone. The receiver
+process has got the task. However, you could decide upon receiving to
+create a "copy" of a task but you have to handle by yourself the
+semantic associated to this "duplication".
+
+As we already told, we prefer keeping the API as simple as
+possible. This kind of feature is rather easy to implement by users
+and the semantic you associate really depends on people. Having a
+*generic* task duplication mechanism is not that trivial (in
+particular because of the data field). That is why I would recommend
+that you write it by yourself even if I can give you advice on how to
+do it.
+
+You have the following functions to get information about a task:
+MSG_task_get_name(), MSG_task_get_compute_duration(),
+MSG_task_get_remaining_computation(), MSG_task_get_data_size(),
+and MSG_task_get_data().
+
+You could use a dictionary (#xbt_dict_t) of dynars (#xbt_dynar_t). If
+you still don't see how to do it, please come back to us...
+
+\subsubsection faq_MIA_asynchronous I want to do asynchronous communications in MSG
+
+In the past (version <= 3.4), there was no function to perform asynchronous communications.
+It could easily be implemented by creating new process when needed though. Since version 3.5,
+we have introduced the following functions:
+ - MSG_task_isend()
+ - MSG_task_irecv()
+ - MSG_comm_test()
+ - MSG_comm_wait()
+ - MSG_comm_waitall()
+ - MSG_comm_waitany()
+ - MSG_comm_destroy()
+
+We refer you to the description of these functions for more details on their usage as well
+as to the example section on \ref MSG_ex_asynchronous_communications.
+
+\subsubsection faq_MIA_thread_synchronization I need to synchronize my MSG processes
+
+You obviously cannot use pthread_mutexes of pthread_conds since we handle every
+scheduling related decision within SimGrid.
+
+In the past (version <=3.3.4) you could do it by playing with
+MSG_process_suspend() and MSG_process_resume() or with fake communications (using MSG_task_get(),
+MSG_task_put() and MSG_task_Iprobe()).
+
+Since version 3.4, you can use classical synchronization structures. See page \ref XBT_synchro or simply check in
+include/xbt/synchro_core.h.
+
+\subsubsection faq_MIA_host_load Where is the get_host_load function hidden in MSG?
+
+There is no such thing because its semantic wouldn't be really
+clear. Of course, it is something about the amount of host throughput,
+but there is as many definition of "host load" as people asking for
+this function. First, you have to remember that resource availability
+may vary over time, which makes any notion of load harder to define.
+
+It may be an instantaneous value or an average one. Moreover it may be only the
+power of the computer, or may take the background load into account, or may
+even take the currently running tasks into account. In some SURF models,
+communications have an influence on computational power. Should it be taken
+into account too?
+
+First of all, it's nearly impossible to predict the load beforehand in the
+simulator since it depends on too much parameters (background load
+variation, bandwidth sharing algorithmic complexity) some of them even being
+not known beforehand (other task starting at the same time). So, getting
+this information is really hard (just like in real life). It's not just that
+we want MSG to be as painful as real life. But as it is in some way
+realistic, we face some of the same problems as we would face in real life.
+
+How would you do it for real? The most common option is to use something
+like NWS that performs active probes. The best solution is probably to do
+the same within MSG, as in next code snippet. It is very close from what you
+would have to do out of the simulator, and thus gives you information that
+you could also get in real settings to not hinder the realism of your
+simulation.
+
+\verbatim
+double get_host_load() {
+ m_task_t task = MSG_task_create("test", 0.001, 0, NULL);
+ double date = MSG_get_clock();
+
+ MSG_task_execute(task);
+ date = MSG_get_clock() - date;
+ MSG_task_destroy(task);
+ return (0.001/date);
+}
+\endverbatim
+
+Of course, it may not match your personal definition of "host load". In this
+case, please detail what you mean on the mailing list, and we will extend
+this FAQ section to fit your taste if possible.
+
+\subsubsection faq_MIA_communication_time How can I get the *real* communication time?
+
+Communications are synchronous and thus if you simply get the time
+before and after a communication, you'll only get the transmission
+time and the time spent to really communicate (it will also take into
+account the time spent waiting for the other party to be
+ready). However, getting the *real* communication time is not really
+hard either. The following solution is a good starting point.
+
+\verbatim
+int sender()
+{
+ m_task_t task = MSG_task_create("Task", task_comp_size, task_comm_size,
+ calloc(1,sizeof(double)));
+ *((double*) task->data) = MSG_get_clock();
+ MSG_task_put(task, slaves[i % slaves_count], PORT_22);
+ XBT_INFO("Send completed");
+ return 0;
+}
+int receiver()
+{
+ m_task_t task = NULL;
+ double time1,time2;
+
+ time1 = MSG_get_clock();
+ a = MSG_task_get(&(task), PORT_22);
+ time2 = MSG_get_clock();
+ if(time1<*((double *)task->data))
+ time1 = *((double *) task->data);
+ XBT_INFO("Communication time : \"%f\" ", time2-time1);
+ free(task->data);
+ MSG_task_destroy(task);
+ return 0;
+}
+\endverbatim
+
+\subsection faq_MIA_SimDag SimDag related questions
+
+\subsubsection faq_SG_comm Implementing communication delays between tasks.
+
+A classic question of SimDag newcomers is about how to express a
+communication delay between tasks. The thing is that in SimDag, both
+computation and communication are seen as tasks. So, if you want to
+model a data dependency between two DAG tasks t1 and t2, you have to
+create 3 SD_tasks: t1, t2 and c and add dependencies in the following
+way:
+
+\verbatim
+SD_task_dependency_add(NULL, NULL, t1, c);
+SD_task_dependency_add(NULL, NULL, c, t2);
+\endverbatim
+
+This way task t2 cannot start before the termination of communication c
+which in turn cannot start before t1 ends.
+
+When creating task c, you have to associate an amount of data (in bytes)
+corresponding to what has to be sent by t1 to t2.
+
+Finally to schedule the communication task c, you have to build a list
+comprising the workstations on which t1 and t2 are scheduled (w1 and w2
+for example) and build a communication matrix that should look like
+[0;amount ; 0; 0].
+
+\subsubsection faq_SG_DAG How to implement a distributed dynamic scheduler of DAGs.
+
+Distributed is somehow "contagious". If you start making distributed
+decisions, there is no way to handle DAGs directly anymore (unless I
+am missing something). You have to encode your DAGs in term of
+communicating process to make the whole scheduling process
+distributed. Here is an example of how you could do that. Assume T1
+has to be done before T2.
+
+\verbatim
+ int your_agent(int argc, char *argv[] {
+ ...
+ T1 = MSG_task_create(...);
+ T2 = MSG_task_create(...);
+ ...
+ while(1) {
+ ...
+ if(cond) MSG_task_execute(T1);
+ ...
+ if((MSG_task_get_remaining_computation(T1)=0.0) && (you_re_in_a_good_mood))
+ MSG_task_execute(T2)
+ else {
+ /* do something else */
+ }
+ }
+ }
+\endverbatim
+
+If you decide that the distributed part is not that much important and that
+DAG is really the level of abstraction you want to work with, then you should
+give a try to \ref SD_API.
+
+\subsection faq_MIA_generic Generic features
+
+\subsubsection faq_more_processes Increasing the amount of simulated processes
+
+Here are a few tricks you can apply if you want to increase the amount
+of processes in your simulations.
+
+ - <b>A few thousands of simulated processes</b> (soft tricks)\n
+ SimGrid can use either pthreads library or the UNIX98 contexts. On
+ most systems, the number of pthreads is limited and then your
+ simulation may be limited for a stupid reason. This is especially
+ true with the current linux pthreads, and I cannot get more than
+ 2000 simulated processes with pthreads on my box. The UNIX98
+ contexts allow me to raise the limit to 25,000 simulated processes
+ on my laptop.\n\n
+ The <tt>--with-context</tt> option of the <tt>./configure</tt>
+ script allows you to choose between UNIX98 contexts
+ (<tt>--with-context=ucontext</tt>) and the pthread version
+ (<tt>--with-context=pthread</tt>). The default value is ucontext
+ when the script detect a working UNIX98 context implementation. On
+ Windows boxes, the provided value is discarded and an adapted
+ version is picked up.\n\n
+ We experienced some issues with contexts on some rare systems
+ (solaris 8 and lower or old alpha linuxes comes to mind). The main
+ problem is that the configure script detect the contexts as being
+ functional when it's not true. If you happen to use such a system,
+ switch manually to the pthread version, and provide us with a good
+ patch for the configure script so that it is done automatically ;)
+
+ - <b>Hundred thousands of simulated processes</b> (hard-core tricks)\n
+ As explained above, SimGrid can use UNIX98 contexts to represent
+ and handle the simulated processes. Thanks to this, the main
+ limitation to the number of simulated processes becomes the
+ available memory.\n\n
+ Here are some tricks I had to use in order to run a token ring
+ between 25,000 processes on my laptop (1Gb memory, 1.5Gb swap).\n
+ - First of all, make sure your code runs for a few hundreds
+ processes before trying to push the limit. Make sure it's
+ valgrind-clean, i.e. that valgrind does not report neither memory
+ error nor memory leaks. Indeed, numerous simulated processes
+ result in *fat* simulation hindering debugging.
+ - It was really boring to write 25,000 entries in the deployment
+ file, so I wrote a little script
+ <tt>examples/gras/mutual_exclusion/simple_token/make_deployment.pl</tt>, which you may
+ want to adapt to your case. You could also think about hijacking
+ the SURFXML parser (have look at \ref faq_flexml_bypassing).
+ - The deployment file became quite big, so I had to do what is in
+ the FAQ entry \ref faq_flexml_limit
+ - Each UNIX98 context has its own stack entry. As debugging this is
+ quite hairy, the default value is a bit overestimated so that
+ user doesn't get into trouble about this. You want to tune this
+ size to increase the number of processes. This is the
+ <tt>STACK_SIZE</tt> define in
+ <tt>src/xbt/xbt_context_sysv.c</tt>, which is 128kb by default.
+ Reduce this as much as you can, but be warned that if this value
+ is too low, you'll get a segfault. The token ring example, which
+ is quite simple, runs with 40kb stacks.
+ - You may tweak the logs to reduce the stack size further. When
+ logging something, we try to build the string to display in a
+ char array on the stack. The size of this array is constant (and
+ equal to XBT_LOG_BUFF_SIZE, defined in include/xbt/log/h). If the
+ string is too large to fit this buffer, we move to a dynamically
+ sized buffer. In which case, we have to traverse one time the log
+ event arguments to compute the size we need for the buffer,
+ malloc it, and traverse the argument list again to do the actual
+ job.\n
+ The idea here is to move XBT_LOG_BUFF_SIZE to 1, forcing the logs
+ to use a dynamic array each time. This allows us to lower further
+ the stack size at the price of some performance loss...\n
+    This allowed me to reduce the stack size to ... 4k. I.e.,
+ on my 1Gb laptop, I can run more than 250,000 processes!
+
+\subsubsection faq_MIA_batch_scheduler Is there a native support for batch schedulers in SimGrid?
+
+No, there is no native support for batch schedulers and none is
+planned because this is a very specific need (and doing it in a
+generic way is thus very hard). However some people have implemented
+their own batch schedulers. Vincent Garonne wrote one during his PhD
+and put his code in the contrib directory of our SVN so that other can
+keep working on it. You may find inspiring ideas in it.
+
+\subsubsection faq_MIA_checkpointing I need a checkpointing thing
+
+Actually, it depends on whether you want to checkpoint the simulation, or to
+simulate checkpoints.
+
+The first one could help if your simulation is a long standing process you
+want to keep running even on hardware issues. It could also help to
+<i>rewind</i> the simulation by jumping sometimes on an old checkpoint to
+cancel recent calculations.\n
+Unfortunately, such thing will probably never exist in SG. One would have to
+duplicate all data structures because doing a rewind at the simulator level
+is very very hard (not talking about the malloc free operations that might
+have been done in between). Instead, you may be interested in the Libckpt
+library (http://www.cs.utk.edu/~plank/plank/www/libckpt.html). This is the
+checkpointing solution used in the condor project, for example. It makes it
+easy to create checkpoints (at the OS level, creating something like core
+files), and rerunning them on need.
+
+If you want to simulate checkpoints instead, it means that you want the
+state of an executing task (in particular, the progress made towards
+completion) to be saved somewhere. So if a host (and the task executing on
+it) fails (cf. #MSG_HOST_FAILURE), then the task can be restarted
+from the last checkpoint.\n
+
+Actually, such a thing does not exist in SimGrid either, but it's just
+because we don't think it is fundamental and it may be done in the user code
+at relatively low cost. You could for example use a watcher that
+periodically get the remaining amount of things to do (using
+MSG_task_get_remaining_computation()), or fragment the task in smaller
+subtasks.
+
+\subsection faq_platform Platform building and Dynamic resources
+
+\subsubsection faq_platform_example Where can I find SimGrid platform files?
+
+There are several little examples in the archive, in the examples/msg
+directory. From time to time, we are asked for other files, but we
+don't have much at hand right now.
+
+You should refer to the Platform Description Archive
+(http://pda.gforge.inria.fr) project to see the other platform files we
+have available, as well as the Simulacrum simulator, meant to generate
+SimGrid platforms using all classical generation algorithms.
+
+\subsubsection faq_platform_alnem How can I automatically map an existing platform?
+
+We are working on a project called ALNeM (Application-Level Network
+Mapper) which goal is to automatically discover the topology of an
+existing network. Its output will be a platform description file
+following the SimGrid syntax, so everybody will get the ability to map
+their own lab network (and contribute them to the catalog project).
+This tool is not ready yet, but it is moving forward quite fast. Just stay
+tuned.
+
+\subsubsection faq_platform_synthetic Generating synthetic but realistic platforms
+
+The third possibility to get a platform file (after manual or
+automatic mapping of real platforms) is to generate synthetic
+platforms. Getting a realistic result is not a trivial task, and
+moreover, nobody is really able to define what "realistic" means when
+speaking of topology files. You can find some more thoughts on this
+topic in these
+<a href="http://graal.ens-lyon.fr/~alegrand/articles/Simgrid-Introduction.pdf">slides</a>.
+
+If you are looking for an actual tool, there we have a little tool to
+annotate Tiers-generated topologies. This perl-script is in
+<tt>tools/platform_generation/</tt> directory of the SVN. Dinda et al.
+released a very comparable tool, and called it GridG.
+
+\subsubsection faq_SURF_multicore Modeling multi-core resources
+
+Since version 3.6 of simgrid we can specify the core number of a resource.
+To use this feature use tag 'host' with 'core' attribute.
+\verbatim
+<?xml version='1.0'?>
+<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid.dtd">
+<platform version="3">
+ <AS id="AS0" routing="Full">
+ <host id="Tremblay" power="98095000" core="6"/>
+ </AS>
+</platform>
+\endverbatim
+
+The specified computing power will be available to up to 6 sequential
+tasks without sharing. If more tasks are placed on this host, the
+resource will be shared accordingly. For example, if you schedule 12
+tasks on the host, each will get half of the computing power. Please
+note that although sound, this model was never scientifically
+assessed. Please keep this fact in mind when using it.
+
+\subsubsection faq_SURF_dynamic Modeling dynamic resource availability
+
+A nice feature of SimGrid is that it enables you to seamlessly have
+resources whose availability change over time. When you build a
+platform, you generally declare hosts like that:
+
+\verbatim
+ <host id="host A" power="100.00"/>
+\endverbatim
+
+If you want the availability of "host A" to change over time, the only
+thing you have to do is change this definition like that:
+
+\verbatim
+ <host id="host A" power="100.00" availability_file="trace_A.txt" state_file="trace_A_failure.txt"/>
+\endverbatim
+
+For hosts, availability files are expressed in fraction of available
+power. Let's have a look at what "trace_A.txt" may look like:
+
+\verbatim
+PERIODICITY 1.0
+0.0 1.0
+11.0 0.5
+20.0 0.9
+\endverbatim
+
+At time 0, our host will deliver 100 flop/s. At time 11.0, it will
+deliver only 50 flop/s until time 20.0 where it will start
+delivering 90 flop/s. Last at time 21.0 (20.0 plus the periodicity
+1.0), we'll be back to the beginning and it will deliver 100 flop/s.
+
+Now let's look at the state file:
+\verbatim
+PERIODICITY 10.0
+1.0 -1.0
+2.0 1.0
+\endverbatim
+
+A negative value means "off" while a positive one means "on". At time
+0.0, the host is on. At time 1.0, it is turned off and at time 2.0, it
+is turned on again until time 11 (1.0 plus the periodicity 10.0). It
+will be turned on again at time 12.0 until time 21.0, and so on.
+
+Now, let's look how the same kind of thing can be done for network
+links. A usual declaration looks like:
+
+\verbatim
+ <link id="LinkA" bandwidth="10.0" latency="0.2"/>
+\endverbatim
+
+You have at your disposal the following options: bandwidth_file,
+latency_file and state_file. The only difference with hosts is that
+bandwidth_file and latency_file do not express fraction of available
+power but are expressed directly in bytes per seconds and seconds.
+
+\subsubsection faq_platform_multipath How to express multipath routing in platform files?
+
+It is unfortunately impossible to express the fact that there is more
+than one routing path between two given hosts. Let's consider the
+following platform file:
+
+\verbatim
+<route src="A" dst="B">
+ <link_ctn id="1"/>
+</route>
+<route src="B" dst="C">
+ <link_ctn id="2"/>
+</route>
+<route src="A" dst="C">
+ <link_ctn id="3"/>
+</route>
+\endverbatim
+
+Although it is perfectly valid, it does not mean that data traveling
+from A to C can either go directly (using link 3) or through B (using
+links 1 and 2). It simply means that the routing on the graph is not
+trivial, and that data do not follow the shortest path in number of
+hops on this graph. Another way to say it is that there is no implicit
+routing in these descriptions. The system will only use the routes you
+declare (such as <route src="A" dst="C"><link_ctn
+id="3"/></route>), without trying to build new routes by aggregating
+the provided ones.
+
+You are also free to declare platform where the routing is not
+symmetric. For example, add the following to the previous file:
+
+\verbatim
+<route src="C" dst="A">
+ <link_ctn id="2"/>
+ <link_ctn id="1"/>
+</route>
+\endverbatim
+
+This makes sure that data from C to A go through B where data from A
+to C go directly. Don't worry about realism of such settings since
+we've seen way weirder situations in real settings (in fact, that's
+the realism of very regular platforms which is questionable, but
+that's another story).
+
+\subsubsection faq_flexml_bypassing Bypassing the XML parser with your own C functions
+
+So you want to bypass the XML files parser, uh? Maybe doing some parameter
+sweep experiments on your simulations or so? This is possible, and
+it's not even really difficult (well. Such a brutal idea could be
+harder to implement). Here is how it goes.
+
+For this, you have to first remember that the XML parsing in SimGrid is done
+using a tool called FleXML. Given a DTD, this gives a flex-based parser. If
+you want to bypass the parser, you need to provide some code mimicking what
+it does and replacing it in its interactions with the SURF code. So, let's
+have a look at these interactions.
+
+FleXML parsers are close to classical SAX parsers. It means that a
+well-formed SimGrid platform XML file might result in the following
+"events":
+
+ - start "platform_description" with attribute version="2"
+ - start "host" with attributes id="host1" power="1.0"
+ - end "host"
+ - start "host" with attributes id="host2" power="2.0"
+ - end "host"
+ - start "link" with ...
+ - end "link"
+ - start "route" with ...
+ - start "link_ctn" with ...
+ - end "link_ctn"
+ - end "route"
+ - end "platform_description"
+
+The communication from the parser to the SURF code uses two means:
+Attributes get copied into some global variables, and a surf-provided
+function gets called by the parser for each event. For example, the event
+ - start "host" with attributes id="host1" power="1.0"
+
+let the parser do something roughly equivalent to:
+\verbatim
+ strcpy(A_host_id,"host1");
+ A_host_power = 1.0;
+ STag_host();
+\endverbatim
+
+In SURF, we attach callbacks to the different events by initializing the
+function pointers to the right surf functions. Since there can be
+more than one callback attached to the same event (if more than one
+model is in use, for example), they are stored in a dynar. Example in
+workstation_ptask_L07.c:
+\verbatim
+ /* Adding callback functions */
+ surf_parse_reset_parser();
+ surfxml_add_callback(STag_surfxml_host_cb_list, &parse_cpu_init);
+ surfxml_add_callback(STag_surfxml_prop_cb_list, &parse_properties);
+ surfxml_add_callback(STag_surfxml_link_cb_list, &parse_link_init);
+ surfxml_add_callback(STag_surfxml_route_cb_list, &parse_route_set_endpoints);
+ surfxml_add_callback(ETag_surfxml_link_c_ctn_cb_list, &parse_route_elem);
+ surfxml_add_callback(ETag_surfxml_route_cb_list, &parse_route_set_route);
+
+ /* Parse the file */
+ surf_parse_open(file);
+ xbt_assert(!surf_parse(), "Parse error in %s", file);
+ surf_parse_close();
+\endverbatim
+
+So, to bypass the FleXML parser, you need to write your own version of the
+surf_parse function, which should do the following:
+ - Fill the A_<tag>_<attribute> variables with the wanted values
+ - Call the corresponding STag_<tag>_fun function to simulate tag start
+ - Call the corresponding ETag_<tag>_fun function to simulate tag end
+ - (do the same for the next set of values, and loop)
+
+Then, tell SimGrid that you want to use your own "parser" instead of the stock one:
+\verbatim
+ surf_parse = surf_parse_bypass_environment;
+ MSG_create_environment(NULL);
+ surf_parse = surf_parse_bypass_application;
+ MSG_launch_application(NULL);
+\endverbatim
+
+A set of macros are provided at the end of
+include/surf/surfxml_parse.h to ease the writing of the bypass
+functions. An example of this trick is distributed in the file
+examples/msg/masterslave/masterslave_bypass.c
+
+\section faq_troubleshooting Troubleshooting
+
+\subsection faq_trouble_lib_compil SimGrid compilation and installation problems
+
+\subsubsection faq_trouble_lib_config cmake fails!
+
+We know only one reason for the configure to fail:
+
+ - <b>You are using a broken build environment</b>\n
+ If the symptom is that the configury magic complains about gcc not being able to build
+ executables, you are probably missing the libc6-dev package. Damn Ubuntu.
+
+If you experience other kind of issue, please get in touch with us. We are
+always interested in improving our portability to new systems.
+
+\subsubsection faq_trouble_distcheck Dude! "ctest" fails on my machine!
+
+Don't assume we never run this target, because we do. Check
+http://cdash.inria.fr/CDash/index.php?project=Simgrid (click on
+previous if there is no result for today: results are produced only by
+11am, French time) and
+https://buildd.debian.org/status/logs.php?pkg=simgrid if you don't believe us.
+
+If it's failing on your machine in a way not experienced by the
+autobuilders above, please drop us a mail on the mailing list so that
+we can check it out. Make sure to read \ref faq_bugrepport before you
+do so.
+
+\subsection faq_trouble_compil User code compilation problems
+
+\subsubsection faq_trouble_err_logcat "gcc: _simgrid_this_log_category_does_not_exist__??? undeclared (first use in this function)"
+
+This is because you are using the log mechanism, but you didn't create
+any default category in this file. You should refer to \ref XBT_log
+for all the details, but you simply forgot to call one of
+XBT_LOG_NEW_DEFAULT_CATEGORY() or XBT_LOG_NEW_DEFAULT_SUBCATEGORY().
+
+\subsubsection faq_trouble_pthreadstatic "gcc: undefined reference to pthread_key_create"
+
+This indicates that one of the library SimGrid depends on (libpthread
+here) was missing on the linking command line. Dependencies of
+libsimgrid are expressed directly in the dynamic library, so it's
+quite impossible that you see this message when doing dynamic linking.
+
+If you compile your code statically (and if you use a pthread version
+of SimGrid -- see \ref faq_more_processes), you must absolutely
+specify <tt>-lpthread</tt> on the linker command line. As usual, this should
+come after <tt>-lsimgrid</tt> on this command line.
+
+\subsection faq_trouble_errors Runtime error messages
+
+\subsubsection faq_flexml_limit "surf_parse_lex: Assertion `next limit' failed."
+
+This is because your platform file is too big for the parser.
+
+Actually, the message comes directly from FleXML, the technology on top of
+which the parser is built. FleXML has the bad idea of fetching the whole
+document in memory before parsing it. And moreover, the memory buffer size
+must be determined at compilation time.
+
+We use a value which seems big enough for our need without bloating the
+simulators' footprints. But of course your mileage may vary. In this case,
+just edit src/surf/surfxml.l and modify the definition of
+FLEXML_BUFFERSTACKSIZE. E.g.
+
+\verbatim
+#define FLEXML_BUFFERSTACKSIZE 1000000000
+\endverbatim
+
+Then recompile and everything should be fine, provided that your version of
+Flex is recent enough (>= 2.5.31). If not the compilation process should
+warn you.
+
+A while ago, we worked on FleXML to reduce a bit its memory consumption, but
+these issues remain. There are two things we should do:
+
+ - use a dynamic buffer instead of a static one so that the only limit
+ becomes your memory, not a stupid constant fixed at compilation time
+ (maybe not so difficult).
+ - change the parser so that it does not need to get the whole file in
+ memory before parsing
+ (seems quite difficult, but I'm a complete newbie wrt flex stuff).
+
+These are changes to FleXML itself, not SimGrid. But since we kinda hijacked
+the development of FleXML, I can grant you that any patches would be really
+welcome and quickly integrated.
+
+<b>Update:</b> A new version of FleXML (1.7) was released. Most of the work
+was done by William Dowling, who uses it in his own work. The good point is
+that it now uses a dynamic buffer, and that the memory usage was greatly
+improved. The downside is that William also changed some things internally,
+and it breaks the hack we devised to bypass the parser, as explained in
+\ref faq_flexml_bypassing. Indeed, this is not a classical usage of the
+parser, and Will didn't imagine that we may have used (and even documented)
+such a crude usage of FleXML. So, we now have to repair the bypassing
+functionality to use the latest FleXML version and fix the memory usage in
+SimGrid.
+
+\subsubsection faq_trouble_gras_transport GRAS spits networking error messages
+
+Gras, on real platforms, naturally uses regular sockets to communicate. They
+are deeply hidden in the gras abstraction, but when things go wrong, you may
+get some weird error messages. Here are some examples, with the probable
+reason:
+
+ - <b>Transport endpoint is not connected</b>: several processes try to open
+ a server socket on the same port number of the same machine. This is
+ naturally bad and each process should pick its own port number for this.\n
+ Maybe, you just have some processes remaining from a previous experiment
+ on your machine.\n
+ Killing them may help, but again if you kill -KILL them, you'll have to
+ wait for a while: they didn't close their sockets properly and the system
+ needs a while to notice that this port is free again.
+
+ - <b>Socket closed by remote side</b>: if the remote process is not
+ supposed to close the socket at this point, it may be dead.
+
+ - <b>Connection reset by peer</b>: I found this on Internet about this
+ error. I think it's what's happening here, too:\n
+ <i>This basically means that a network error occurred while the client was
+ receiving data from the server. But what is really happening is that the
+ server actually accepts the connection, processes the request, and sends
+ a reply to the client. However, when the server closes the socket, the
+ client believes that the connection has been terminated abnormally
+ because the socket implementation sends a TCP reset segment telling the
+ client to throw away the data and report an error.\n
+ Sometimes, this problem is caused by not properly closing the
+ input/output streams and the socket connection. Make sure you close the
+ input/output streams and socket connection properly. If everything is
+ closed properly, however, and the problem persists, you can work around
+ it by adding a one-second sleep before closing the streams and the
+ socket. This technique, however, is not reliable and may not work on all
+ systems.</i>\n
+ Since GRAS sockets are closed properly (repeat after me: there is no bug
+ in GRAS), it is either that you are closing your sockets on server side
+ before the client get a chance to read them (use gras_os_sleep() to delay
+ the server), or the server died awfully before the client got the data.
+
+\subsubsection faq_trouble_errors_big_fat_warning I'm told that my XML files are too old.
+
+The format of the XML platform description files is sometimes
+improved. For example, we decided to change the units used in SimGrid
+from MBytes, MFlops and seconds to Bytes, Flops and seconds to ease
+people exchanging small messages. We also reworked the route
+descriptions to allow more compact descriptions.
+
+That is why the XML files are versioned using the 'version' attribute
+of the root tag. Currently, it should read:
+\verbatim
+ <platform version="2">
+\endverbatim
+
+If your files are too old, you can use the simgrid_update_xml.pl
+script which can be found in the tools directory of the archive.
+
+\subsection faq_trouble_valgrind Valgrind-related and other debugger issues
+
+If you don't, you really should use valgrind to debug your code, it's
+almost magic.
+
+\subsubsection faq_trouble_vg_longjmp longjmp madness in valgrind
+
+This is when valgrind starts complaining about longjmp things, just like:
+
+\verbatim ==21434== Conditional jump or move depends on uninitialised value(s)
+==21434== at 0x420DBE5: longjmp (longjmp.c:33)
+==21434==
+==21434== Use of uninitialised value of size 4
+==21434== at 0x420DC3A: __longjmp (__longjmp.S:48)
+\endverbatim
+
+This is a sign that you didn't use the exception mechanism well. Most
+probably, you have a <tt>return;</tt> somewhere within a <tt>TRY{}</tt>
+block. This is <b>evil</b>, and you must not do this. Did you read the section
+about \ref XBT_ex??
+
+\subsubsection faq_trouble_vg_libc Valgrind spits tons of errors about backtraces!
+
+It may happen that valgrind, the memory debugger beloved by any decent C
+programmer, spits tons of warnings like the following :
+\verbatim ==8414== Conditional jump or move depends on uninitialised value(s)
+==8414== at 0x400882D: (within /lib/ld-2.3.6.so)
+==8414== by 0x414EDE9: (within /lib/tls/i686/cmov/libc-2.3.6.so)
+==8414== by 0x400B105: (within /lib/ld-2.3.6.so)
+==8414== by 0x414F937: _dl_open (in /lib/tls/i686/cmov/libc-2.3.6.so)
+==8414== by 0x4150F4C: (within /lib/tls/i686/cmov/libc-2.3.6.so)
+==8414== by 0x400B105: (within /lib/ld-2.3.6.so)
+==8414== by 0x415102D: __libc_dlopen_mode (in /lib/tls/i686/cmov/libc-2.3.6.so)
+==8414== by 0x412D6B9: backtrace (in /lib/tls/i686/cmov/libc-2.3.6.so)
+==8414== by 0x8076446: xbt_dictelm_get_ext (dict_elm.c:714)
+==8414== by 0x80764C1: xbt_dictelm_get (dict_elm.c:732)
+==8414== by 0x8079010: xbt_cfg_register (config.c:208)
+==8414== by 0x806821B: MSG_config (msg_config.c:42)
+\endverbatim
+
+This problem is somewhere in the libc when using the backtraces and there is
+very few things we can do ourselves to fix it. Instead, here is how to tell
+valgrind to ignore the error. Add the following to your ~/.valgrind.supp (or
+create this file on need). Make sure to change the obj line according to
+your personal mileage (change 2.3.6 to the actual version you are using,
+which you can retrieve with a simple "ls /lib/ld*.so").
+
+\verbatim {
+ name: Backtrace madness
+ Memcheck:Cond
+ obj:/lib/ld-2.3.6.so
+ fun:dl_open_worker
+ fun:_dl_open
+ fun:do_dlopen
+ fun:dlerror_run
+ fun:__libc_dlopen_mode
+}\endverbatim
+
+Then, you have to specify valgrind to use this suppression file by passing
+the <tt>--suppressions=$HOME/.valgrind.supp</tt> option on the command line.
+You can also add the following to your ~/.bashrc so that it gets passed
+automatically. Actually, it passes a bit more options to valgrind, and this
+happens to be my personal settings. Check the valgrind documentation for
+more information.
+
+\verbatim export VALGRIND_OPTS="--leak-check=yes --leak-resolution=high --num-callers=40 --tool=memcheck --suppressions=$HOME/.valgrind.supp" \endverbatim
+
+\subsubsection faq_trouble_backtraces Truncated backtraces
+
+When debugging SimGrid, it's easier to pass the
+--disable-compiler-optimization flag to the configure if valgrind or
+gdb get fooled by the optimization done by the compiler. But you
+should remove these flag when everything works before going in
+production (before launching your 1252135 experiments), or everything
+will run only one half of the true SimGrid potential.
+
+\subsection faq_deadlock There is a deadlock in my code!!!
+
+Unfortunately, we cannot debug every program written with SimGrid. We
+furthermore believe that the framework provides enough
+information for you to debug such situations yourself. If the textual output
+is not enough, make sure to check the \ref faq_visualization FAQ entry to see
+how to get a graphical one.
+
+Now, if you come up with a really simple example that deadlocks and
+you're absolutely convinced that it should not, you can ask on the
+list. Just be aware that you'll be severely punished if the mistake is
+on your side... We have plenty of FAQ entries to write and new
+features to implement for the impenitents! ;)
+
+\subsection faq_surf_network_latency I get weird timings when I play with the latencies.
+
+OK, first of all, remember that units should be Bytes, Flops and
+Seconds. If you don't use such units, some SimGrid constants (e.g. the
+SG_TCP_CTE_GAMMA constant used in most network models) won't have the
+right unit and you'll end up with weird results.
+
+Here is what happens with a single transfer of size L on a link
+(bw,lat) when nothing else happens.
+
+\verbatim
+0-----lat--------------------------------------------------t
+|-----|**** real_bw =min(bw,SG_TCP_CTE_GAMMA/(2*lat)) *****|
+\endverbatim
+
+In more complex situations, this min is the solution of a complex
+max-min linear system. Have a look
+<a href="http://lists.gforge.inria.fr/pipermail/simgrid-devel/2006-April/thread.html">here</a>
+and read the two threads "Bug in SURF?" and "Surf bug not
+fixed?". You'll have a few other examples of such computations. You
+can also read "A Network Model for Simulation of Grid Application" by
+Henri Casanova and Loris Marchal to have all the details. The fact
+that the real_bw is smaller than bw is easy to understand. The fact
+that real_bw is smaller than SG_TCP_CTE_GAMMA/(2*lat) is due to the
+window-based congestion mechanism of TCP. With TCP, you can't exploit
+your huge network capacity if you don't have a good round-trip-time
+because of the acks...
+
+Anyway, what you get is t=lat + L/min(bw,SG_TCP_CTE_GAMMA/(2*lat)).
+
+ * if you set (bw,lat)=(100 000 000, 0.00001), you get t = 1.00001 (you fully
+use your link)
+ * if you set (bw,lat)=(100 000 000, 0.0001), you get t = 1.0001 (you're on the
+limit)
+ * if you set (bw,lat)=(100 000 000, 0.001), you get t = 10.001 (ouch!)
+
+This bound on the effective bandwidth of a flow is not the only thing
+that may make your result be unexpected. For example, two flows
+competing on a saturated link receive an amount of bandwidth inversely
+proportional to their round trip time.
+
+\subsection faq_bugrepport So I've found a bug in SimGrid. How to report it?
+
+We do our best to make sure to hammer away any bugs of SimGrid, but this is
+still an academic project so please be patient if/when you find bugs in it.
+If you do, the best solution is to drop an email either on the simgrid-user
+or the simgrid-devel mailing list and explain us about the issue. You can
+also decide to open a formal bug report using the
+<a href="https://gforge.inria.fr/tracker/?atid=165&group_id=12&func=browse">relevant
+interface</a>. You need to login on the server to get the ability to submit
+bugs.
+
+We will do our best to solve any problem reported, but you need to help us
+find the issue. Just telling "it segfaults" isn't enough. Telling "It
+segfaults when running the attached simulator" doesn't really help either.
+You may find the following article interesting to see how to write
+informative bug reports:
+http://www.chiark.greenend.org.uk/~sgtatham/bugs.html (it is not SimGrid
+specific at all, but it's full of good advice).
+
+\author Arnaud Legrand (arnaud.legrand::imag.fr)
+\author Martin Quinson (martin.quinson::loria.fr)
+
+
+*/
+
+******************************************************************
+* OLD CRUFT NOT USED ANYMORE *
+******************************************************************
+
+
+subsection faq_crosscompile Cross-compiling a Windows DLL of SimGrid from linux
+
+At the moment, we do not distribute Windows pre-compiled version of SimGrid
+because the support for this platform is still experimental. We know that
+some parts of the GRAS environment do not work, and we think that the others
+environments (MSG and SD) have good chances to work, but we didn't test
+ourselves. This section explains how we generate the SimGrid DLL so that you
+can build it for yourself. First of all, you need to have a version more
+recent than 3.1 (i.e., an SVN version at the time of writing).
+
+In order to cross-compile the package to windows from linux, you need to
+install mingw32 (minimalist gnu win32). On Debian, you can do so by
+installing the packages mingw32 (compiler), mingw32-binutils (linker and
+so), mingw32-runtime.
+
+You can use the VPATH support of configure to compile at the same time for
+linux and windows without duplicating the source nor cleaning the tree
+between each. Just run bootstrap (if you use the SVN) to run the autotools.
+Then, create a linux and a win directories. Then, type:
+\verbatim cd linux; ../configure --srcdir=.. <usual configure flags>; make; cd ..
+cd win; ../configure --srcdir=.. --host=i586-mingw32msvc <flags>; make; cd ..
+\endverbatim
+The trick to VPATH builds is to call configure from another directory,
+passing it an extra --srcdir argument to tell it where all the sources are.
+It will understand you want to use VPATH. Then, the trick to cross-compile
+is simply to add a --host argument specifying the target you want to build
+for. The i586-mingw32msvc string is what you have to pass to use the mingw32
+environment as distributed in Debian.
+
+After that, you can run all make targets from both directories, and test
+easily that what you change for one arch does not break the other one.
+
+It is possible that this VPATH build thing breaks from time to time in the
+SVN since it's quite fragile, but it's granted to work in any released
+version. If you experience problems, drop us a mail.
+
+Another possible source of issues is that at the moment, building the
+examples requires the use of the gras_stub_generator tool, which is a compiled
+program, not a script. In cross-compilation, you need to cross-execute with
+wine for example, which is not really pleasant. We are working on this, but
+in the meanwhile, simply don't build the examples in cross-compilation
+(<tt>cd src</tt> before running make).
+
+Programs (cross-)compiled with mingw32 require an extra DLL at run-time to be
+usable. For example, if you want to test your build with wine, you should do
+the following to put this library where wine looks for DLLs.
+\verbatim
+cp /usr/share/doc/mingw32-runtime/mingwm10.dll.gz ~/.wine/c/windows/system/
+gunzip ~/.wine/c/windows/system/mingwm10.dll.gz
+\endverbatim
+
+The DLL is built in src/.libs, and installed in the <i>prefix</i>/bin directory
+when you run make install.
+
+If you want to use it in a native project on windows, you need to use
+simgrid.dll and mingwm10.dll. For each DLL, you need to build .def file
+under linux (listing the defined symbols), and convert it into a .lib file
+under windows (specifying this in a way that windows compilers like). To
+generate the def files, run (under linux):
+\verbatim echo "LIBRARY libsimgrid-0.dll" > simgrid.def
+echo EXPORTS >> simgrid.def
+nm libsimgrid-0.dll | grep ' T _' | sed 's/.* T _//' >> simgrid.def
+nm libsimgrid-0.dll | grep ' D _' | sed 's/.* D _//' | sed 's/$/ DATA/' >> simgrid.def
+
+echo "LIBRARY mingwm10.dll" > mingwm10.def
+echo EXPORTS >> mingwm10.def
+nm mingwm10.dll | grep ' T _' | sed 's/.* T _//' >> mingwm10.def
+nm mingwm10.dll | grep ' D _' | sed 's/.* D _//' | sed 's/$/ DATA/' >> mingwm10.def
+\endverbatim
+
+To create the import .lib files, use the <tt>lib</tt> windows tool (from
+MSVC) the following way to produce simgrid.lib and mingwm10.lib
+\verbatim lib /def:simgrid.def
+lib /def:mingwm10.def
+\endverbatim
+
+If you happen to use Borland C Builder, the right command line is the
+following (note that you don't need any file.def to get this working).
+\verbatim implib simgrid.lib libsimgrid-0.dll
+implib mingwm10.lib mingwm10.dll
+\endverbatim
+
+Then, set the following parameters in Visual C++ 2005:
+Linker -> Input -> Additional dependencies = simgrid.lib mingwm10.lib
+
+Just in case you wonder how to generate a DLL from libtool in another
+project, we added -no-undefined to any lib*_la_LDFLAGS variables so that
+libtool accepts to generate a dynamic library under windows. Then, to make
+it true, we pass any dependencies (such as -lws2 under windows or -lpthread
+on need) on the linking line. Passing such deps is a good idea anyway so
+that they get noted in the library itself, avoiding the users to know about
+our dependencies and put them manually on their compilation line. Then we
+added the AC_LIBTOOL_WIN32_DLL macro just before AC_PROG_LIBTOOL in the
+configure.ac. It means that we exported any symbols which need to be.
+Nowadays, functions get automatically exported, so we don't need to load our
+header files with tons of __declspec(dllexport) cruft. We only need to do so
+for data, but there is no public data in SimGrid so we are good.
You should use MSG if you want to study some heuristics for a
given problem you don't really want to implement. If you want to
use the C programming language, your are in the right
- section. To use the Java programming interface, please refer to
- \ref MSG_JAVA.
+ section. To use the Java or Ruby programming interfaces, please refer to
+ the documentation provided in the relevant packages.
*/
/** @addtogroup MSG_LUA
\section MSG_Lua_funct Lua offered functionnalities in MSG
- - \ref host_management
- - \ref tasks_management
- - \ref environment_management
+ - \ref lua_host_management
+ - \ref lua_tasks_management
+ - \ref lua_environment_management
\section Lua_examples Examples of lua MSG
- \ref MSG_ex_master_slave_lua
\htmlinclude .options.doc.toc
-\section options_simgrid_configuration Changing SimGrid's behavior
-
A number of options can be given at runtime to change the default
-SimGrid behavior. In particular, you can change the default cpu and
-network models...
+SimGrid behavior. For a complete list of all configuration options
+accepted by the SimGrid version used in your simulator, simply pass
+the --help configuration flag to your program. If some of the options
+are not documented on this page, this is a bug that you should please
+report so that we can fix it.
-\subsection options_simgrid_configuration_fullduplex Using Fullduplex
+\section options_using Passing configuration options to the simulators
-Experimental fullduplex support is now available on the svn branch. In order to fullduple to work your platform must have two links for each pair
-of interconnected hosts, see an example here:
-\verbatim
- simgrid_svn_sources/exemples/msg/gtnets/fullduplex-p.xml
+There are several ways to pass configuration options to the simulators.
+The most common way is to use the \c --cfg command line argument. For
+example, to set the item \c Item to the value \c Value, simply
+type the following: \verbatim
+my_simulator --cfg=Item:Value (other arguments)
\endverbatim
-Using fullduplex support ongoing and incoming communication flows are
-treated independently for most models. The exception is the LV08 model which
-adds 0.05 of usage on the opposite direction for each new created flow. This
-can be useful to simulate some important TCP phenomena such as ack compression.
+Several \c --cfg command line arguments can naturally be used. If you
+need to include spaces in the argument, don't forget to quote the
+argument. You can even escape the included quotes (write \' for ' if
+you have your argument between ').
-Running a fullduplex example:
-\verbatim
- cd simgrid_svn_sources/exemples/msg/gtnets
- ./gtnets fullduplex-p.xml fullduplex-d.xml --cfg=fullduplex:1
+Another solution is to use the \c \<config\> tag in the platform file. The
+only restriction is that this tag must occur before the first
+platform element (be it \c \<AS\>, \c \<cluster\>, \c \<peer\> or whatever).
+The \c \<config\> tag takes an \c id attribute, but it is currently
+ignored so you don't really need to pass it. The important part is that
+within that tag, you can pass one or several \c \<prop\> tags to specify
+the configuration to use. For example, setting \c Item to \c Value
+can be done by adding the following to the beginning of your platform
+file: \verbatim
+<config>
+ <prop id="Item" value="Value"/>
+</config>
\endverbatim
-\subsection options_simgrid_configuration_alternate_network Using alternative flow models
+A last solution is to pass your configuration directly using the C
+interface. Unfortunately, this path is not really easy to use right
+now, and you mess directly with surf internal variables as follows. Check the
+\ref XBT_config "relevant page" for details on all the functions you
+can use in this context, \c _surf_cfg_set being the only configuration set
+currently used in SimGrid. \code
+#include <xbt/config.h>
-The default simgrid network model uses a max-min based approach as
-explained in the research report
-<a href="ftp://ftp.ens-lyon.fr/pub/LIP/Rapports/RR/RR2002/RR2002-40.ps.gz">A Network Model for Simulation of Grid Application</a>.
-Other models have been proposed and implemented since then (see for example
-<a href="http://mescal.imag.fr/membres/arnaud.legrand/articles/simutools09.pdf">Accuracy Study and Improvement of Network Simulation in the SimGrid Framework</a>)
-and can be activated at runtime. For example:
-\verbatim
-./mycode platform.xml deployment.xml --cfg=workstation/model:compound --cfg=network/model:LV08 -cfg=cpu/model:Cas01
-\endverbatim
+extern xbt_cfg_t _surf_cfg_set;
-Possible models for the network are currently "Constant", "CM02",
-"LegrandVelho", "GTNets", Reno", "Reno2", "Vegas". Others will
-probably be added in the future and many of the previous ones are
-experimental and are likely to disappear without notice... To know the
-list of the currently implemented models, you should use the
---help-models command line option.
+int main(int argc, char *argv[]) {
+ MSG_global_init(&argc, argv);
+
+ xbt_cfg_set_parse(_surf_cfg_set,"Item:Value");
+
+ // Rest of your code
+}
+\endcode
-\verbatim
-./masterslave_forwarder ../small_platform.xml deployment_masterslave.xml --help-models
-Long description of the workstation models accepted by this simulator:
- CLM03: Default workstation model, using LV08 and CM02 as network and CPU
- compound: Workstation model allowing you to use other network and CPU models
- ptask_L07: Workstation model with better parallel task modeling
-Long description of the CPU models accepted by this simulator:
- Cas01_fullupdate: CPU classical model time=size/power
- Cas01: Variation of Cas01_fullupdate with partial invalidation optimization of lmm system. Should produce the same values, only faster
- CpuTI: Variation of Cas01 with also trace integration. Should produce the same values, only faster if you use availability traces
-Long description of the network models accepted by this simulator:
- Constant: Simplistic network model where all communication take a constant time (one second)
- CM02: Realistic network model with lmm_solve and no correction factors
- LV08: Realistic network model with lmm_solve and these correction factors: latency*=10.4, bandwidth*=.92, S=8775
- Reno: Model using lagrange_solve instead of lmm_solve (experts only)
- Reno2: Model using lagrange_solve instead of lmm_solve (experts only)
- Vegas: Model using lagrange_solve instead of lmm_solve (experts only)
+\section options_model Configuring the platform models
+
+\subsection options_model_select Selecting the platform models
+
+SimGrid comes with several network and CPU models built in, and you
+can change the used model at runtime by changing the passed
+configuration. The three main configuration items are given below.
+For each of these items, passing the special \c help value gives
+you a short description of all possible values. Also, \c --help-models
+should provide information about all models for all existing resources.
+ - \b network/model: specify the used network model
+ - \b cpu/model: specify the used CPU model
+ - \b workstation/model: specify the used workstation model
+
+As of writing, the accepted network models are the following. Over
+the time new models can be added, and some experimental models can be
+removed; check the values on your simulators for up-to-date
+information. Note that the CM02 model is described in the research report
+<a href="ftp://ftp.ens-lyon.fr/pub/LIP/Rapports/RR/RR2002/RR2002-40.ps.gz">A
+Network Model for Simulation of Grid Application</a> while LV08 is
+described in
+<a href="http://mescal.imag.fr/membres/arnaud.legrand/articles/simutools09.pdf">Accuracy Study and Improvement of Network Simulation in the SimGrid Framework</a>.
+
+ - \b LV08 (default one): Realistic network analytic model
+ (slow-start modeled by multiplying latency by 10.4, bandwidth by
+ .92; bottleneck sharing uses a payload of S=8775 for evaluating RTT)
+ - \b Constant: Simplistic network model where all communication
+ take a constant time (one second). This model provides the lowest
+ realism, but is (marginally) faster.
+ - \b SMPI: Realistic network model specifically tailored for HPC
+ settings (accurate modeling of slow start with correction factors on
+ three intervals: < 1KiB, < 64 KiB, >= 64 KiB). See also \ref
+ options_model_network_coefs "this section" for more info.
+ - \b CM02: Legacy network analytic model (Very similar to LV08, but
+ without corrective factors. The timings of small messages are thus
+ poorly modeled)
+ - \b Reno: Model from Steven H. Low using lagrange_solve instead of
+ lmm_solve (experts only; check the code for more info).
+ - \b Reno2: Model from Steven H. Low using lagrange_solve instead of
+ lmm_solve (experts only; check the code for more info).
+ - \b Vegas: Model from Steven H. Low using lagrange_solve instead of
+ lmm_solve (experts only; check the code for more info).
+
+If you compiled SimGrid accordingly, you can use packet-level network
+simulators as network models (see \ref pls). In that case, you have
+two extra models, described below, and some \ref options_pls "specific
+additional configuration flags".
+ - \b GTNets: Network pseudo-model using the GTNets simulator instead
+ of an analytic model
+ - \b NS3: Network pseudo-model using the NS3 tcp model instead of an
+ analytic model
+
+Concerning the CPU, we have only one model for now:
+ - \b Cas01: Simplistic CPU model (time=size/power)
+
+The workstation concept is the aggregation of a CPU with a network
+card. Three models exist, but actually, only 2 of them are
+interesting. The "compound" one is simply due to the way our internal
+code is organized, and can easily be ignored. So at the end, you have
+two workstation models: The default one allows to aggregate an
+existing CPU model with an existing network model, but does not allow
+parallel tasks because these beasts need some collaboration between
+the network and CPU model. That is why ptask_L07 is used by default
+when using SimDag.
+ - \b default: Default workstation model. Currently, CPU:Cas01 and
+ network:LV08 (with cross traffic enabled)
+ - \b compound: Workstation model that is automatically chosen if
+ you change the network and CPU models
+ - \b ptask_L07: Workstation model somehow similar to Cas01+CM02 but
+ allowing parallel tasks
+
+\subsection options_model_optim Optimization level of the platform models
+
+The network and CPU models that are based on lmm_solve (that
+is, all our analytical models) accept specific optimization
+configurations.
+ - items \b network/optim and \b CPU/optim (both default to 'Lazy'):
+ - \b Lazy: Lazy action management (partial invalidation in lmm +
+ heap in action remaining).
+ - \b TI: Trace integration. Highly optimized mode when using
+ availability traces (only available for the Cas01 CPU model for
+ now).
+ - \b Full: Full update of remaining and variables. Slow but may be
+ useful when debugging.
+ - items \b network/maxmin_selective_update and
+   \b cpu/maxmin_selective_update: configure whether the underlying
+   lmm system should be lazily updated or not. It should have no impact on
+   the computed timings, but should speed up the computation.
+
+It is still possible to disable the \c maxmin_selective_update feature
+because it can prove counter-productive in very specific scenarios
+where the interaction level is high. In particular, if all your
+communications share a given backbone link, you should disable it:
+without \c maxmin_selective_update, all communications are updated
+at each step through a simple loop over them. With that feature
+enabled, all communications will still get updated in this case
+(because of the dependency induced by the backbone), but through a
+complicated pattern aiming at following the actual dependencies.
+
+\subsection options_model_precision Numerical precision of the platform models
+
+The analytical models handle a lot of floating point values. It is
+possible to change the epsilon used to update and compare them through
+the \b maxmin/precision item (default value: 1e-9). Changing it
+may speedup the simulation by discarding very small actions, at the
+price of a reduced numerical precision.
+
+\subsection options_model_network Configuring the Network model
+
+\subsubsection options_model_network_gamma Maximal TCP window size
+
+The analytical models need to know the maximal TCP window size to take
+the TCP congestion mechanism into account. This is set to 20000 by
+default, but can be changed using the \b network/TCP_gamma item.
+
+On linux, this value can be retrieved using the following
+commands. Both give a set of values, and you should use the last one,
+which is the maximal size.\verbatim
+cat /proc/sys/net/ipv4/tcp_rmem # gives the receiver window
+cat /proc/sys/net/ipv4/tcp_wmem # gives the sender window
\endverbatim
-\section options_modelchecking Model-Checking
-\subsection options_modelchecking_howto How to use it
+\subsubsection options_model_network_coefs Corrective simulation factors
+
+These factors make it possible to take the slow start into account
+more accurately. The corresponding values were computed through data
+fitting on the timings of packet-level simulators. You should not
+change these values unless you are really certain of what you are doing. See
+<a href="http://mescal.imag.fr/membres/arnaud.legrand/articles/simutools09.pdf">Accuracy Study and Improvement of Network Simulation in the SimGrid Framework</a>
+for more information about these coefficients.
+
+If you are using the SMPI model, these correction coefficients are
+themselves corrected by constant values depending on the size of the
+exchange. Again, only hardcore experts should bother about this fact.
+
+\subsubsection options_model_network_crosstraffic Simulating cross-traffic
+
+As of SimGrid v3.7, cross-traffic effects can be taken into account in
+analytical simulations. It means that outgoing and incoming
+communication flows are treated independently. In addition, the LV08
+model adds 0.05 of usage on the opposite direction for each new
+created flow. This can be useful to simulate some important TCP
+phenomena such as ack compression.
+
+For that to work, your platform must have two links for each
+pair of interconnected hosts. An example of usable platform is
+available in <tt>examples/msg/gtnets/crosstraffic-p.xml</tt>.
+
+This is activated through the \b network/crosstraffic item, that
+can be set to 0 (disable this feature) or 1 (enable it).
+
+Note that with the default workstation model this option is activated by default.
+
+\subsubsection options_model_network_coord Coordinated-based network models
+
+When you want to use network coordinates, as it happens when you use
+an \<AS\> in your platform file with \c Vivaldi as a routing, you must
+set the \b network/coordinates to \c yes so that all mandatory
+initialization are done in the simulator.
+
+\subsubsection options_model_network_sendergap Simulating sender gap
+
+(this configuration item is experimental and may change or disappear)
+
+It is possible to specify a timing gap between consecutive emission on
+the same network card through the \b network/sender_gap item. This
+is still under investigation as of writing, and the default value is
+to wait 0 seconds between emissions (no gap applied).
+
+\subsubsection options_pls Configuring packet-level pseudo-models
+
+When using the packet-level pseudo-models, several specific
+configuration flags are provided to configure the associated tools.
+There is by far not enough such SimGrid flags to cover every aspects
+of the associated tools, since we only added the items that we
+needed ourselves. Feel free to request more items (or even better:
+provide patches adding more items).
+
+When using NS3, the only existing item is \b ns3/TcpModel,
+corresponding to the ns3::TcpL4Protocol::SocketType configuration item
+in NS3. The only valid values (enforced on the SimGrid side) are
+'NewReno' or 'Reno' or 'Tahoe'.
+
+When using GTNeTS, two items exist:
+ - \b gtnets/jitter, that is a double value to oscillate
+ the link latency, uniformly in random interval
+ [-latency*gtnets_jitter,latency*gtnets_jitter). It defaults to 0.
+ - \b gtnets/jitter_seed, the positive seed used to reproduce jittered
+ results. Its value must be in [1,1e8] and defaults to 10.
+
+\section options_modelchecking Configuring the Model-Checking
+
To enable the experimental SimGrid model-checking support the program should
be executed with the command line argument
\verbatim
void MC_assert(int prop);
\endverbatim
+\section options_virt Configuring the User Process Virtualization
+
+\subsection options_virt_factory Selecting the virtualization factory
+
+In SimGrid, the user code is virtualized in a specific mechanism
+allowing the simulation kernel to control its execution: when a user
+process requires a blocking action (such as sending a message), it is
+interrupted, and only gets released when the simulated clock reaches
+the point where the blocking operation is done.
+
+In SimGrid, the containers in which user processes are virtualized are
+called contexts. Several context factory are provided, and you can
+select the one you want to use with the \b contexts/factory
+configuration item. Some of the following may not exist on your
+machine because of portability issues. In any case, the default one
+should be the most efficient one (please report bugs if the
+auto-detection fails for you). They are sorted here from the slowest
+to the most efficient:
+ - \b thread: very slow factory using full featured threads (either
+   pthreads or windows native threads)
+ - \b ucontext: fast factory using System V contexts (or a portability
+ layer of our own on top of Windows fibers)
+ - \b raw: amazingly fast factory using a context switching mechanism
+ of our own, directly implemented in assembly (only available for x86
+ and amd64 platforms for now)
+
+The only reason to change this setting is when the debugging tools get
+fooled by the optimized context factories. Threads are the most
+debugging-friendly contexts.
+
+\subsection options_virt_stacksize Adapting the used stack size
+
+(this only works if you use ucontexts or raw context factories)
+
+Each virtualized user process is executed using a specific system
+stack. The size of this stack has a huge impact on the simulation
+scalability, but its default value is rather large. This is because
+the error messages that you get when the stack size is too small are
+rather disturbing: this leads to stack overflow (overwriting other
+stacks), leading to segfaults with corrupted stack traces.
+
+If you want to push the scalability limits of your code, you really
+want to reduce the \b contexts/stack_size item. Its default value
+is 128 (in KiB), while our Chord simulation works with stacks as small
+as 16 KiB, for example.
+
+\subsection options_virt_parallel Running user code in parallel
+
+Parallel execution of the user code is only considered stable in
+SimGrid v3.7 and higher. It is described in
+<a href="http://hal.inria.fr/inria-00602216/">INRIA RR-7653</a>.
+
+If you are using the \c ucontext or \c raw context factories, you can
+request to execute the user code in parallel. Several threads are
+launched, each of them handling as many user contexts at each run. To
+activate this, set the \b contexts/nthreads item to the number of
+cores that you have in your computer.
+
+Even if you asked several worker threads using the previous option,
+you can request to start the parallel execution (and pay the
+associated synchronization costs) only if the potential parallelism is
+large enough. For that, set the \b contexts/parallel_threshold
+item to the minimal amount of user contexts needed to start the
+parallel execution. In any given simulation round, if that amount is
+not reached, the contexts will be run sequentially directly by the
+main thread (thus saving the synchronization costs). Note that this
+option is mainly useful when the grain of the user code is very fine,
+because our synchronization is now very efficient.
+
+When parallel execution is activated, you can choose the
+synchronization schema used with the \b contexts/synchro item,
+which value is either:
+ - \b futex: ultra optimized synchronisation schema, based on futexes
+ (fast user-mode mutexes), and thus only available on Linux systems.
+ This is the default mode when available.
+ - \b posix: slow but portable synchronisation using only POSIX
+ primitives.
+ - \b busy_wait: not really a synchronisation: the worker threads
+ constantly request new contexts to execute. It should be the most
+ efficient synchronisation schema, but it loads all the cores of your
+ machine for no good reason. You probably prefer the other less
+ eager schemas.
+
+\section options_tracing Configuring the tracing subsystem
+
+The \ref tracing "tracing subsystem" can be configured in several
+different ways depending on the nature of the simulator (MSG, SimDag,
+SMPI) and the kind of traces that need to be obtained. See the \ref
+tracing_tracing_options "Tracing Configuration Options subsection" to
+get a detailed description of each configuration option.
+
+We detail here a simple way to get the traces working for you, even if
+you never used the tracing API.
+
+
+- Any SimGrid-based simulator (MSG, SimDag, SMPI, ...) and raw traces:
+\verbatim
+--cfg=tracing:1 --cfg=tracing/uncategorized:1 --cfg=triva/uncategorized:uncat.plist
+\endverbatim
+ The first parameter activates the tracing subsystem, the second
+ tells it to trace host and link utilization (without any
+ categorization) and the third creates a graph configuration file
+ to configure Triva when analysing the resulting trace file.
+
+- MSG or SimDag-based simulator and categorized traces (you need to declare categories and classify your tasks according to them)
+\verbatim
+--cfg=tracing:1 --cfg=tracing/categorized:1 --cfg=triva/categorized:cat.plist
+\endverbatim
+ The first parameter activates the tracing subsystem, the second
+ tells it to trace host and link categorized utilization and the
+ third creates a graph configuration file to configure Triva when
+ analysing the resulting trace file.
+
+- SMPI simulator and traces for a space/time view:
+\verbatim
+smpirun -trace ...
+\endverbatim
+ The <i>-trace</i> parameter for the smpirun script runs the
+simulation with --cfg=tracing:1 and --cfg=tracing/smpi:1. Check the
+smpirun's <i>-help</i> parameter for additional tracing options.
+
+\section options_smpi Configuring SMPI
+
+The SMPI interface provides several specific configuration items.
+These are not easy to spot since the code is usually launched through the
+\c smpirun script directly.
+
+\subsection options_smpi_bench Automatic benchmarking of SMPI code
+
+In SMPI, the sequential code is automatically benchmarked, and these
+computations are automatically reported to the simulator. That is to
+say that if you have a large computation between a \c MPI_Recv() and a
+\c MPI_Send(), SMPI will automatically benchmark the duration of this
+code, and create an execution task within the simulator to take this
+into account. For that, the actual duration is measured on the host
+machine and then scaled to the power of the corresponding simulated
+machine. The variable \b smpi/running_power allows to specify the
+computational power of the host machine (in flop/s) to use when
+scaling the execution times. It defaults to 20000, but you really want
+to update it to get accurate simulation results.
+
+When the code is constituted of numerous consecutive MPI calls, the
+previous mechanism feeds the simulation kernel with numerous tiny
+computations. The \b smpi/cpu_threshold item becomes handy when this
+impacts badly the simulation performance. It specifies a threshold (in
+seconds) under which the execution chunks are not reported to the
+simulation kernel (default value: 1e-6). Please note that in some
+circumstances, this optimization can hinder the simulation accuracy.
+
+\subsection options_smpi_timing Reporting simulation time
+
+Most of the time, you run MPI code through SMPI to compute the time it
+would take to run it on a platform that you don't have. But since the
+code is run through the \c smpirun script, you don't have any control
+on the launcher code, making it difficult to report the simulated time
+when the simulation ends. If you set the \b smpi/display_timing item
+to 1, \c smpirun will display this information when the simulation ends. \verbatim
+Simulation time: 1e3 seconds.
+\endverbatim
+
+\section options_generic Configuring other aspects of SimGrid
+
+\subsection options_generic_path XML file inclusion path
+
+It is possible to specify a list of directories to search into for the
+\<include\> tag in XML files by using the \b path configuration
+item. To add several directories to the path, set the configuration
+item several times, as in \verbatim
+--cfg=path:toto --cfg=path:tutu
+\endverbatim
+
+\subsection options_generic_exit Behavior on Ctrl-C
+
+By default, when Ctrl-C is pressed, the status of all existing
+simulated processes is displayed. This is very useful to debug your
+code, but it can prove troublesome in some cases (such as when the
+number of processes becomes really big). This behavior is disabled
+when \b verbose-exit is set to 0 (it is set to 1 by default).
+
+\section options_index Index of all existing configuration items
+
+- \c contexts/factory: \ref options_virt_factory
+- \c contexts/nthreads: \ref options_virt_parallel
+- \c contexts/parallel_threshold: \ref options_virt_parallel
+- \c contexts/stack_size: \ref options_virt_stacksize
+- \c contexts/synchro: \ref options_virt_parallel
+
+- \c cpu/maxmin_selective_update: \ref options_model_optim
+- \c cpu/model: \ref options_model_select
+- \c cpu/optim: \ref options_model_optim
+
+- \c gtnets/jitter: \ref options_pls
+- \c gtnets/jitter_seed: \ref options_pls
+
+- \c maxmin/precision: \ref options_model_precision
+
+- \c network/bandwidth_factor: \ref options_model_network_coefs
+- \c network/coordinates: \ref options_model_network_coord
+- \c network/crosstraffic: \ref options_model_network_crosstraffic
+- \c network/latency_factor: \ref options_model_network_coefs
+- \c network/maxmin_selective_update: \ref options_model_optim
+- \c network/model: \ref options_model_select
+- \c network/optim: \ref options_model_optim
+- \c network/sender_gap: \ref options_model_network_sendergap
+- \c network/TCP_gamma: \ref options_model_network_gamma
+- \c network/weight_S: \ref options_model_network_coefs
+
+- \c ns3/TcpModel: \ref options_pls
+
+- \c smpi/running_power: \ref options_smpi_bench
+- \c smpi/display_timing: \ref options_smpi_timing
+- \c smpi/cpu_threshold: \ref options_smpi_bench
+
+- \c path: \ref options_generic_path
+- \c verbose-exit: \ref options_generic_exit
+
+- \c workstation/model: \ref options_model_select
+
*/
\ No newline at end of file
\c category must contain a category that was previously defined by the function
\c TRACE_category.
+\li <b>\c TRACE_declare_mark(const char *mark_type)</b>: This function
+declares a new Paje event type in the trace file that can be used by
+simulators to declare application-level marks. This function is
+independent of which API is used in SimGrid.
+
+\li <b>\c TRACE_mark(const char *mark_type, const char *mark_value)</b>:
+This function creates a mark in the trace file. The first parameter
+had to be previously declared using \c TRACE_declare_mark, the second
+is the identifier for this mark instance. We recommend that the \c
+mark_value (the second parameter) is a unique value for the whole
+trace file (the whole simulation). Nevertheless, this is not a strong
+requirement: the trace will be valid if there are multiple mark
+identifiers for the same trace.
+
\li <b>\c TRACE_[host|link]_variable_declare (const char *variable)</b>:
Declare a user variable that will be associated to host/link. A variable can
be used to trace user variables such as the number of tasks in a server,
the number of clients in an application (for hosts), and so on.
+\li <b>\c TRACE_[host|link]_variable_declare_with_color (const char
+*var, const char *color)</b>: Same as \c
+TRACE_[host|link]_variable_declare, but user decides which color will
+be assigned to the variable. The color needs to be a string with three
+numbers separated by spaces in the range [0,1]. A light-gray color can
+be specified using "0.7 0.7 0.7" as color.
+
\li <b>\c TRACE_[host|link]_variable_[set|add|sub] (const char *[host|link], const char *variable, double value)</b>:
Set the value of a given user variable for a given host/link. The value
of this variable is always associated to the host/link. The host/link
--cfg=tracing/msg/process:1
\endverbatim
+\li <b>\c
+tracing/buffer
+</b>:
+ This option put some events in a time-ordered buffer using the
+ insertion sort algorithm. The process of acquiring and releasing
+ locks to access this buffer and the cost of the sorting algorithm
+ make this process slow. The simulator performance can be severely
+ impacted if this option is activated, but you are sure to get a trace
+ file with events sorted.
+\verbatim
+--cfg=tracing/buffer:1
+\endverbatim
+
+\li <b>\c
+tracing/onelink_only
+</b>:
+This option changes the way SimGrid register its platform on the trace
+file. Normally, the tracing considers all routes (no matter their
+size) on the platform file to re-create the resource topology. If this
+option is activated, only the routes with one link are used to
+register the topology within an AS. Routes among AS continue to be
+traced as usual.
+\verbatim
+--cfg=tracing/onelink_only:1
+\endverbatim
+
+\li <b>\c
+tracing/disable_destroy
+</b>:
+Disable the destruction of containers at the end of simulation. This
+can be used with simulators that have a different notion of time
+(different from the simulated time).
+\verbatim
+--cfg=tracing/disable_destroy:1
+\endverbatim
+
\li <b>\c
triva/categorized
</b>:
This option generates a graph configuration file for Triva considering
uncategorized resource utilization.
\verbatim
---cfg=triva/categorized:graph_uncategorized.plist
+--cfg=triva/uncategorized:graph_uncategorized.plist
\endverbatim
\subsection tracing_tracing_example_parameters Case studies
special configurations tunned to SimGrid needs. This part of the documentation
explains how to configure and use Triva to analyse a SimGrid trace file.
-- <b>Installing Triva</b>: the tool is available in the INRIAGforge,
+- <b>Installing Triva</b>: the tool is available in the Inria's Forge,
at <a href="http://triva.gforge.inria.fr">http://triva.gforge.inria.fr</a>.
Use the following command to get the sources, and then check the file
<i>INSTALL</i>. This file contains instructions to install
the tool's dependencies in a Ubuntu/Debian Linux. The tool can also
-be compiled in MacOSes natively, check <i>INSTALL.mac</i> file.
+be compiled in MacOSX natively, check <i>INSTALL.mac</i> file.
\verbatim
-$ svn checkout svn://scm.gforge.inria.fr/svn/triva
+$ git clone git://scm.gforge.inria.fr/triva/triva.git
$ cd triva
$ cat INSTALL
\endverbatim
when the checkbox <i>Update Drawings on Sliders
Change</i> is selected will not be followed.
-- <b>Understanding Triva - graph</b>: this part of the documention explains how
- to analyze the traces using the graph view of Triva, when the user executes
-the tool passing <em>--graph</em> as parameter. Triva opens three windows when
-this parameter is used: the <i>Time Interval</i> window (previously described),
-the <i>Graph Representation</i> window, and the <em>Graph Configuration</em>
-window. The Graph Representation is the window where drawings take place.
-Initially, it is completely white waiting for a proper graph configuration input
-by the user. We start the description of this type of analysis by describing the
-<i>Graph Configuration</i> window (depicted below). By using a particular
-configuration, triva
-can be used to customize the graph drawing according to
-the SimGrid trace that was created with user-specific categories. Before delving
-into the details of this customization, let us first explain the major parts of
-the graph configuration window. The buttons located in the top-right corner can
-be used to delete, copy and create a new configuration. The checkbox in the
-top-middle part of the window indicates if the configuration typed in the
-textfield is syntactically correct (we are using the non-XML
-<a href="http://en.wikipedia.org/wiki/Property_list">Property List Format</a> to
-describe the configuration). The pop-up button located on the top-left corner
-indicates the selected configuration (the user can have multiple graph
-configurations). The bottom-left text field contains the name of the current
-configuration (updates on this field must be followed by typing enter on the
-keyboard to take into account the name change). The bottom-right <em>Apply</em>
-button activates the current configuration, resulting on an update on the graph
-drawings.
-<center>
-\htmlonly
-<a href="triva-graph_configuration.png" border=0><img src="triva-graph_configuration.png" width="50%" border=0></a>
-\endhtmlonly
-</center>
-<b>Basic SimGrid Configuration</b>: The figure shows in the big textfield the
-basic configuration that should be used during the analysis of a SimGrid trace
-file. The basic logic of the configuration is as follows:
+- <b>Understanding Triva - graph</b>: one possibility for analyzing
+  SimGrid traces is to use Triva's graph view, using the
+ <em>--graph</em> parameter to activate this view, and
+ <em>--gc_conf</em> with a graph configuration to customize the graph
+ according to the traces. A valid graph configuration (we are using
+ the non-XML <a
+ href="http://en.wikipedia.org/wiki/Property_list">Property List
+ Format</a> to describe the configuration) can be created for any
+ SimGrid-based simulator using the
+ <em>--cfg=triva/uncategorized:graph_uncategorized.plist</em> or
+ <em>--cfg=triva/categorized:graph_categorized.plist</em> (if the
+ simulator defines resource utilization categories) when executing
+ the simulation.
+
+<b>Basic SimGrid Configuration</b>: The basic description of the configuration
+is as follows:
\verbatim
{
- node = (HOST);
- edge = (LINK);
-\endverbatim
-The nodes of the graph will be created based on the <i>node</i> parameter, which
-in this case is the different <em>"HOST"</em>s of the platform
-used to simulate. The <i>edge</i> parameter indicates that the edges of the
-graph will be created based on the <em>"LINK"</em>s of the platform. After the
-definition of these two parameters, the configuration must detail how
-<em>HOST</em>s and <em>LINK</em>s should be drawn. For that, the configuration
-must have an entry for each of the types used. For <em>HOST</em>, as basic
-configuration, we have:
-\verbatim
- HOST = {
- size = power;
- scale = global;
- };
-\endverbatim
-The parameter <em>size</em> indicates which variable from the trace file will be
-used to define the size of the node HOST in the visualization. If the simulation
-was executed with availability traces, the size of the nodes will be changed
-according to these traces. The parameter <em>scale</em> indicates if the value
-of the variable is <em>global</em> or <em>local</em>. If it is global, the value
-will be relative to the power of all other hosts, if it is local, the value will
-be relative locally.
-For <em>LINK</em> we have:
-\verbatim
- LINK = {
- src = source;
- dst = destination;
-
- size = bandwidth;
- scale = global;
- };
+ node = (LINK, HOST, );
+ edge = (HOST-LINK, LINK-HOST, LINK-LINK, );
\endverbatim
-For the types specified in the <em>edge</em> parameter (such as <em>LINK</em>),
-the configuration must contain two additional parameters: <em>src</em> and
-<em>dst</em> that are used to properly identify which nodes this edge is
-connecting. The values <em>source</em> and <em>destination</em> are always present
-in the SimGrid trace file and should not be changed in the configuration. The
-parameter <em>size</em> for the LINK, in this case, is configured as the
-variable <em>bandwidth</em>, with a <em>global</em> scale. The scale meaning
-here is exactly the same used for nodes. The last parameter is the GraphViz
-algorithm used to calculate the position of the nodes in the graph
-representation.
-\verbatim
- graphviz-algorithm = neato;
-}
-\endverbatim
-<b>Customizing the Graph Representation</b>: triva is capable to handle
-a customized graph representation based on the variables present in the trace
-file. In the case of SimGrid, every time a category is created for tasks, two
-variables in the trace file are defined: one to indicate node utilization (how
-much power was used by that task category), and another to indicate link
-utilization (how much bandwidth was used by that category). For instance, if the
-user declares a category named <i>request</i>, there will be variables named
-<b>p</b><i>request</i> and a <b>b</b><i>request</i> (<b>p</b> for power and
-<b>b</b> for bandwidth). It is important to notice that the variable
-<i>prequest</i> in this case is only available for HOST, and
-<i>brequest</i> is only available for LINK. <b>Example</b>: suppose there are
-two categories for tasks: request and compute. To create a customized graph
-representation with a proportional separation of host and link utilization, use
-as configuration for HOST and LINK this:
-\verbatim
- HOST = {
- size = power;
- scale = global;
-
- sep_host = {
- type = separation;
- size = power;
- values = (prequest, pcomputation);
- };
- };
- LINK = {
- src = source;
- dst = destination;
- size = bandwidth;
- scale = global;
+The nodes of the graph will be created based on the <i>node</i>
+parameter, which in this case is the different <em>"HOST"</em>s and
+<em>"LINK"</em>s of the platform used to simulate. The <i>edge</i>
+parameter indicates that the edges of the graph will be created based
+on the <em>"HOST-LINK"</em>s, <em>"LINK-HOST"</em>s, and
+<em>"LINK-LINK"</em>s of the platform. After the definition of these
+two parameters, the configuration must detail how the nodes
+(<em>HOST</em>s and <em>LINK</em>s) should be drawn.
- sep_link = {
- type = separation;
- size = bandwidth;
- values = (brequest, bcomputation);
- };
- };
-\endverbatim
-Where <i>sep_host</i> contains a composition of type <i>separation</i> where
-its max size is the <i>power</i> of the host and the variables <i>prequest</i>
-and <i>pcomputation</i> are drawn proportionally to the size of the HOST. And
-<i>sep_link</i> is also a separation where max is defined as the
-<i>bandwidth</i> of the link, and the variables <i>brequest</i> and
-<i>bcomputation</i> are drawn proportionally within a LINK.
-<i>This configuration enables the analysis of resource utilization by MSG tasks,
-and the identification of load-balancing issues, network bottlenecks, for
-instance.</i> \n
-<b>Other compositions</b>: besides <i>separation</i>, it is possible to use
-other types of compositions, such as gradients, and colors, like this:
-\verbatim
- gra_host = {
- type = gradient;
- scale = global;
- values = (numberOfTasks);
- };
- color_host = {
- type = color;
- values = (is_server);
- };
-\endverbatim
-Where <i>gra_host</i> creates a gradient within a node of the graph, using a
-global scale and using as value a variable called <i>numberOfTasks</i>, that
-could be declared by the user using the optional tracing functions of SimGrid.
-If scale is global, the max and min value for the gradient will be equal to the
-max and min numberOfTasks among all hosts, and if scale is local, the max and
-min value based on the value of numberOfTasks locally in each host.
-And <i>color_host</i> composition draws a square based on a positive value of
-the variable <i>is_server</i>, that could also be defined by the user using the
-SimGrid tracing functions. \n
-<b>The Graph Visualization</b>: The next figure shows a graph visualization of a
-given time-slice of the masterslave_forwarder example (present in the SimGrid
-sources). The red color indicates tasks from the <i>compute</i> category. This
-visualization was generated with the following configuration:
-\verbatim
-{
- node = (HOST);
- edge = (LINK);
+For that, the configuration must have an entry for each of
+the types used. For <em>HOST</em>, as basic configuration, we have:
+\verbatim
HOST = {
+ type = square;
size = power;
- scale = global;
-
- sep_host = {
- type = separation;
- size = power;
- values = (pcompute, pfinalize);
- };
+ values = (power_used);
};
- LINK = {
- src = source;
- dst = destination;
- size = bandwidth;\section tracing_tracing Tracing Simulations for Visualization
-
-The trace visualization is widely used to observe and understand the behavior
-of parallel applications and distributed algorithms. Usually, this is done in a
-two-step fashion: the user instruments the application and the traces are
-analyzed after the end of the execution. The visualization itself can highlights
-unexpected behaviors, bottlenecks and sometimes can be used to correct
-distributed algorithms. The SimGrid team has instrumented the library
-in order to let users trace their simulations and analyze them. This part of the
-user manual explains how the tracing-related features can be enabled and used
-during the development of simulators using the SimGrid library.
-
-\subsection tracing_tracing_howitworks How it works
-
-For now, the SimGrid library is instrumented so users can trace the <b>platform
-utilization</b> using the MSG, SimDAG and SMPI interface. This means that the tracing will
-register how much power is used for each host and how much bandwidth is used for
-each link of the platform. The idea with this type of tracing is to observe the
-overall view of resources utilization in the first place, especially the
-identification of bottlenecks, load-balancing among hosts, and so on.
-
-The idea of the tracing facilities is to give SimGrid users to possibility to
-classify MSG and SimDAG tasks by category, tracing the platform utilization
-(hosts and links) for each of the categories. For that,
-the tracing interface enables the declaration of categories and a function to
-mark a task with a previously declared category. <em>The tasks that are not
-classified according to a category are not traced</em>. Even if the user
-does not specify any category, the simulations can still be traced in terms
-of resource utilization by using a special parameter that is detailed below.
-
-\subsection tracing_tracing_enabling Enabling using CMake
-
-With the sources of SimGrid, it is possible to enable the tracing
-using the parameter <b>-Denable_tracing=ON</b> when the cmake is executed.
-The section \ref tracing_tracing_functions describes all the functions available
-when this Cmake options is activated. These functions will have no effect
-if SimGrid is configured without this option (they are wiped-out by the
-C-preprocessor).
-
-\verbatim
-$ cmake -Denable_tracing=ON .
-$ make
-\endverbatim
-
-\subsection tracing_tracing_functions Tracing Functions
-
-\li <b>\c TRACE_category (const char *category)</b>: This function should be used
-to define a user category. The category can be used to differentiate the tasks
-that are created during the simulation (for example, tasks from server1,
-server2, or request tasks, computation tasks, communication tasks).
-All resource utilization (host power and link bandwidth) will be
-classified according to the task category. Tasks that do not belong to a
-category are not traced. The color for the category that is being declared
-is random (use next function to specify a color).
-
-\li <b>\c TRACE_category_with_color (const char *category, const char *color)</b>: Same
-as TRACE_category, but let user specify a color encoded as a RGB-like string with
-three floats from 0 to 1. So, to specify a red color, the user can pass "1 0 0" as
-color parameter. A light-gray color can be specified using "0.7 0.7 0.7" as color.
-
-\li <b>\c TRACE_msg_set_task_category (m_task_t task, const char *category)</b>:
-This function should be called after the creation of a MSG task, to define the
-category of that task. The first parameter \c task must contain a task that was
-created with the function \c MSG_task_create. The second parameter
-\c category must contain a category that was previously defined by the function
-\c TRACE_category.
-
-\li <b>\c TRACE_sd_set_task_category (SD_task_t task, const char *category)</b>:
-This function should be called after the creation of a SimDAG task, to define the
-category of that task. The first parameter \c task must contain a task that was
-created with the function \c MSG_task_create. The second parameter
-\c category must contain a category that was previously defined by the function
-\c TRACE_category.
-
-\li <b>\c TRACE_[host|link]_variable_declare (const char *variable)</b>:
-Declare a user variable that will be associated to host/link. A variable can
-be used to trace user variables such as the number of tasks in a server,
-the number of clients in an application (for hosts), and so on.
-
-\li <b>\c TRACE_[host|link]_variable_[set|add|sub] (const char *[host|link], const char *variable, double value)</b>:
-Set the value of a given user variable for a given host/link. The value
-of this variable is always associated to the host/link. The host/link
-parameters should be its name as the one listed in the platform file.
-
-\li <b>\c TRACE_[host|link]_variable_[set|add|sub]_with_time (double time, const char *[host|link], const char *variable, double value)</b>:
-Same as TRACE_[host|link]_variable_[set|add|sub], but let user specify
-the time used to trace it. Users can specify a time that is not the
-simulated clock time as defined by the core simulator. This allows
-a fine-grain control of time definition, but should be used with
-caution since the trace can be inconsistent if resource utilization
-traces are also traced.
-
-\li <b>\c TRACE_link_srcdst_variable_[set|add|sub] (const char *src, const char *dst, const char *variable, double value)</b>:
-Same as TRACE_link_variable_[set|add|sub], but now users specify a source and
-destination hosts (as the names from the platform file). The tracing library
-will get the corresponding route that connects those two hosts (src and dst) and
-[set|add|sub] the value's variable for all the links of the route.
-
-\li <b>\c TRACE_link_srcdst_variable_[set|add|sub]_with_time (double time, const char *src, const char *dst, const char *variable, double value)</b>:
-Same as TRACE_link_srcdst_variable_[set|add|sub], but user specify a time different from the simulated time.
-
-\subsection tracing_tracing_options Tracing configuration Options
-
-These are the options accepted by the tracing system of SimGrid:
-
-\li <b>\c
-tracing
-</b>:
- Safe switch. It activates (or deactivates) the tracing system.
- No other tracing options take effect if this one is not activated.
-
-\li <b>\c
-tracing/platform
-</b>:
- Register the simulation platform in the trace file.
-
-\li <b>\c
-tracing/onelink_only
-</b>:
- By default, the tracing system uses all routes in the platform file
- to re-create a "graph" of the platform and register it in the trace file.
- This option let the user tell the tracing system to use only the routes
- that are composed with just one link.
-
-\li <b>\c
-tracing/categorized
-</b>:
- It activates the categorized resource utilization tracing. It should
- be enabled if tracing categories are used by this simulator.
-
-\li <b>\c
-tracing/uncategorized
-</b>:
- It activates the uncategorized resource utilization tracing. Use it if
- this simulator do not use tracing categories and resource use have to be
- traced.
-
-\li <b>\c
-tracing/filename
-</b>:
- A file with this name will be created to register the simulation. The file
- is in the Paje format and can be analyzed using Triva or Paje visualization
- tools. More information can be found in these webpages:
- <a href="http://triva.gforge.inria.fr/">http://triva.gforge.inria.fr/</a>
- <a href="http://paje.sourceforge.net/">http://paje.sourceforge.net/</a>
-
-\li <b>\c
-tracing/smpi
-</b>:
- This option only has effect if this simulator is SMPI-based. Traces the MPI
- interface and generates a trace that can be analyzed using Gantt-like
- visualizations. Every MPI function (implemented by SMPI) is transformed in a
- state, and point-to-point communications can be analyzed with arrows.
-
-\li <b>\c
-tracing/smpi/group
-</b>:
- This option only has effect if this simulator is SMPI-based. The processes
- are grouped by the hosts where they were executed.
-
-\li <b>\c
-tracing/msg/task
-</b>:
- This option only has effect if this simulator is MSG-based. It traces the
- behavior of all categorized MSG tasks, grouping them by hosts.
-
-\li <b>\c
-tracing/msg/process
-</b>:
- This option only has effect if this simulator is MSG-based. It traces the
- behavior of all categorized MSG processes, grouping them by hosts. This option
- can be used to track process location if this simulator has process migration.
-
-
-\li <b>\c
-triva/categorized:graph_categorized.plist
-</b>:
- This option generates a graph configuration file for Triva considering
- categorized resource utilization.
-
-\li <b>\c
-triva/uncategorized:graph_uncategorized.plist
-</b>:
- This option generates a graph configuration file for Triva considering
- uncategorized resource utilization.
-
-\subsection tracing_tracing_example Example of Instrumentation
-
-A simplified example using the tracing mandatory functions.
-
-\verbatim
-int main (int argc, char **argv)
-{
- MSG_global_init (&argc, &argv);
-
- //(... after deployment ...)
-
- //note that category declaration must be called after MSG_create_environment
- TRACE_category_with_color ("request", "1 0 0");
- TRACE_category_with_color ("computation", "0.3 1 0.4");
- TRACE_category ("finalize");
-
- m_task_t req1 = MSG_task_create("1st_request_task", 10, 10, NULL);
- m_task_t req2 = MSG_task_create("2nd_request_task", 10, 10, NULL);
- m_task_t req3 = MSG_task_create("3rd_request_task", 10, 10, NULL);
- m_task_t req4 = MSG_task_create("4th_request_task", 10, 10, NULL);
- TRACE_msg_set_task_category (req1, "request");
- TRACE_msg_set_task_category (req2, "request");
- TRACE_msg_set_task_category (req3, "request");
- TRACE_msg_set_task_category (req4, "request");
-
- m_task_t comp = MSG_task_create ("comp_task", 100, 100, NULL);
- TRACE_msg_set_task_category (comp, "computation");
-
- m_task_t finalize = MSG_task_create ("finalize", 0, 0, NULL);
- TRACE_msg_set_task_category (finalize, "finalize");
-
- //(...)
-
- MSG_clean();
- return 0;
-}
-\endverbatim
-
-\subsection tracing_tracing_analyzing Analyzing the SimGrid Traces
-
-The SimGrid library, during an instrumented simulation, creates a trace file in
-the Paje file format that contains the platform utilization for the simulation
-that was executed. The visualization analysis of this file is performed with the
-visualization tool <a href="http://triva.gforge.inria.fr">Triva</a>, with
-special configurations tunned to SimGrid needs. This part of the documentation
-explains how to configure and use Triva to analyse a SimGrid trace file.
-
-- <b>Installing Triva</b>: the tool is available in the INRIAGforge,
-at <a href="http://triva.gforge.inria.fr">http://triva.gforge.inria.fr</a>.
-Use the following command to get the sources, and then check the file
-<i>INSTALL</i>. This file contains instructions to install
-the tool's dependencies in a Ubuntu/Debian Linux. The tool can also
-be compiled in MacOSes natively, check <i>INSTALL.mac</i> file.
-\verbatim
-$ svn checkout svn://scm.gforge.inria.fr/svn/triva
-$ cd triva
-$ cat INSTALL
\endverbatim
-- <b>Executing Triva</b>: a binary called <i>Triva</i> is available after the
- installation (you can execute it passing <em>--help</em> to check its
-options). If the triva binary is not available after following the
-installation instructions, you may want to execute the following command to
-initialize the GNUstep environment variables. We strongly recommend that you
-use the latest GNUstep packages, and not the packages available through apt-get
-in Ubuntu/Debian packaging systems. If you install GNUstep using the latest
-available packages, you can execute this command:
-\verbatim
-$ source /usr/GNUstep/System/Library/Makefiles/GNUstep.sh
-\endverbatim
-You should be able to see this output after the installation of triva:
-\verbatim
-$ ./Triva.app/Triva --help
-Usage: Triva [OPTIONS...] TRACE0 [TRACE1]
-Trace Analysis through Visualization
+The parameter <em>size</em> indicates which variable from the trace
+file will be used to define the size of the node HOST in the
+visualization. If the simulation was executed with availability
+traces, the size of the nodes will be changed according to these
+traces. The parameter <em>type</em> indicates which geometrical shape
+will be used to represent HOST, and the <em>values</em> parameter
+indicates which values from the trace will be used to fill the shape.
-TimeInterval
- --ti_frequency {double} Animation: frequency of updates
- --ti_hide Hide the TimeInterval window
- --ti_forward {double} Animation: value to move time-slice
- --ti_apply Apply the configuration
- --ti_update Update on slider change
- --ti_animate Start animation
- --ti_start {double} Start of time slice
- --ti_size {double} Size of time slice
-Triva
- --comparison Compare Trace Files (Experimental)
- --graph Configurable Graph
- --list Print Trace Type Hierarchy
- --hierarchy Export Trace Type Hierarchy (dot)
- --stat Trace Statistics and Memory Utilization
- --instances List All Trace Entities
- --linkview Link View (Experimental)
- --treemap Squarified Treemap
- --merge Merge Trace Files (Experimental)
- --check Check Trace File Integrity
-GraphConfiguration
- --gc_conf {file} Graph Configuration in Property List Format
- --gc_apply Apply the configuration
- --gc_hide Hide the GraphConfiguration window
-\endverbatim
-Triva expects that the user choose one of the available options
-(currently <em>--graph</em> or <em>--treemap</em> for a visualization analysis)
-and the trace file from the simulation.
-
-- <b>Understanding Triva - time-slice</b>: the analysis of a trace file using
- the tool always takes into account the concept of the <em>time-slice</em>.
-This concept means that what is being visualized in the screen is always
-calculated considering a specific time frame, with its beggining and end
-timestamp. The time-slice is configured by the user and can be changed
-dynamically through the window called <em>Time Interval</em> that is opened
-whenever a trace file is being analyzed. The next figure depicts the time-slice
-configuration window.
-In the top of the window, in the space named <i>Trace Time</i>,
-the two fields show the beggining of the trace (which usually starts in 0) and
-the end (that depends on the time simulated by SimGrid). The middle of the
-window, in the square named <i>Time Slice Configuration</i>, contains the
-aspects related to the time-slice, including its <i>start</i> and its
-<i>size</i>. The gray rectangle in the bottom of this part indicates the
-<i>current time-slice</i> that is considered for the drawings. If the checkbox
-<i>Update Drawings on Sliders Change</i> is not selected, the button
-<i>Apply</i> must be clicked in order to inform triva that the
-new time-slice must be considered. The bottom part of the window, in the space
-indicated by the square <i>Time Slice Animation</i> can be used to advance
-the time-frame automatically. The user configures the amount of time that the
-time-frame will forward and how frequent this update will happen. Once this is
-configured, the user clicks the <i>Play</i> button in order to see the dynamic
-changes on the drawings.
-<center>
-\htmlonly
-<a href="triva-time_interval.png" border=0><img src="triva-time_interval.png" width="50%" border=0></a>
-\endhtmlonly
-</center>
-<b>Remarks:</b> when the trace has too many hosts or links, the computation to
-take into account a new time-slice can be expensive. When this happens, the
-<i>Frequency</i> parameter, but also updates caused by change on configurations
-when the checkbox <i>Update Drawings on Sliders
-Change</i> is selected will not be followed.
-
-- <b>Understanding Triva - graph</b>: this part of the documention explains how
- to analyze the traces using the graph view of Triva, when the user executes
-the tool passing <em>--graph</em> as parameter. Triva opens three windows when
-this parameter is used: the <i>Time Interval</i> window (previously described),
-the <i>Graph Representation</i> window, and the <em>Graph Configuration</em>
-window. The Graph Representation is the window where drawings take place.
-Initially, it is completely white waiting for a proper graph configuration input
-by the user. We start the description of this type of analysis by describing the
-<i>Graph Configuration</i> window (depicted below). By using a particular
-configuration, triva
-can be used to customize the graph drawing according to
-the SimGrid trace that was created with user-specific categories. Before delving
-into the details of this customization, let us first explain the major parts of
-the graph configuration window. The buttons located in the top-right corner can
-be used to delete, copy and create a new configuration. The checkbox in the
-top-middle part of the window indicates if the configuration typed in the
-textfield is syntactically correct (we are using the non-XML
-<a href="http://en.wikipedia.org/wiki/Property_list">Property List Format</a> to
-describe the configuration). The pop-up button located on the top-left corner
-indicates the selected configuration (the user can have multiple graph
-configurations). The bottom-left text field contains the name of the current
-configuration (updates on this field must be followed by typing enter on the
-keyboard to take into account the name change). The bottom-right <em>Apply</em>
-button activates the current configuration, resulting on an update on the graph
-drawings.
-<center>
-\htmlonly
-<a href="triva-graph_configuration.png" border=0><img src="triva-graph_configuration.png" width="50%" border=0></a>
-\endhtmlonly
-</center>
-<b>Basic SimGrid Configuration</b>: The figure shows in the big textfield the
-basic configuration that should be used during the analysis of a SimGrid trace
-file. The basic logic of the configuration is as follows:
-\verbatim
-{
- node = (HOST);
- edge = (LINK);
-\endverbatim
-The nodes of the graph will be created based on the <i>node</i> parameter, which
-in this case is the different <em>"HOST"</em>s of the platform
-used to simulate. The <i>edge</i> parameter indicates that the edges of the
-graph will be created based on the <em>"LINK"</em>s of the platform. After the
-definition of these two parameters, the configuration must detail how
-<em>HOST</em>s and <em>LINK</em>s should be drawn. For that, the configuration
-must have an entry for each of the types used. For <em>HOST</em>, as basic
-configuration, we have:
-\verbatim
- HOST = {
- size = power;
- scale = global;
- };
-\endverbatim
-The parameter <em>size</em> indicates which variable from the trace file will be
-used to define the size of the node HOST in the visualization. If the simulation
-was executed with availability traces, the size of the nodes will be changed
-according to these traces. The parameter <em>scale</em> indicates if the value
-of the variable is <em>global</em> or <em>local</em>. If it is global, the value
-will be relative to the power of all other hosts, if it is local, the value will
-be relative locally.
For <em>LINK</em> we have:
-\verbatim
+
+\verbatim
LINK = {
- src = source;
- dst = destination;
-
+ type = rhombus;
size = bandwidth;
- scale = global;
+ values = (bandwidth_used);
};
-\endverbatim
-For the types specified in the <em>edge</em> parameter (such as <em>LINK</em>),
-the configuration must contain two additional parameters: <em>src</em> and
-<em>dst</em> that are used to properly identify which nodes this edge is
-connecting. The values <em>source</em> and <em>destination</em> are always present
-in the SimGrid trace file and should not be changed in the configuration. The
-parameter <em>size</em> for the LINK, in this case, is configured as the
-variable <em>bandwidth</em>, with a <em>global</em> scale. The scale meaning
-here is exactly the same used for nodes. The last parameter is the GraphViz
-algorithm used to calculate the position of the nodes in the graph
-representation.
-\verbatim
- graphviz-algorithm = neato;
}
\endverbatim
+
+The same configuration parameters are used here: <em>type</em> (with a
+rhombus), the <em>size</em> (whose value comes from the trace's bandwidth
+variable) and the <em>values</em>.
+
<b>Customizing the Graph Representation</b>: Triva is capable of handling
a customized graph representation based on the variables present in the trace
file. In the case of SimGrid, every time a category is created for tasks, two
two categories for tasks: request and compute. To create a customized graph
representation with a proportional separation of host and link utilization, use
as configuration for HOST and LINK this:
+
\verbatim
HOST = {
+ type = square;
size = power;
- scale = global;
-
- sep_host = {
- type = separation;
- size = power;
- values = (prequest, pcomputation);
- };
+ values = (prequest, pcomputation);
};
-
LINK = {
- src = source;
- dst = destination;
+ type = rhombus;
size = bandwidth;
- scale = global;
-
- sep_link = {
- type = separation;
- size = bandwidth;
- values = (brequest, bcomputation);
- };
+ values = (brequest, bcomputation);
};
\endverbatim
-Where <i>sep_host</i> contains a composition of type <i>separation</i> where
-its max size is the <i>power</i> of the host and the variables <i>prequest</i>
-and <i>pcomputation</i> are drawn proportionally to the size of the HOST. And
-<i>sep_link</i> is also a separation where max is defined as the
-<i>bandwidth</i> of the link, and the variables <i>brequest</i> and
-<i>bcomputation</i> are drawn proportionally within a LINK.
-<i>This configuration enables the analysis of resource utilization by MSG tasks,
+
+This configuration enables the analysis of resource utilization by MSG tasks,
and the identification of load-balancing issues, network bottlenecks, for
-instance.</i> \n
-<b>Other compositions</b>: besides <i>separation</i>, it is possible to use
-other types of compositions, such as gradients, and colors, like this:
-\verbatim
- gra_host = {
- type = gradient;
- scale = global;
- values = (numberOfTasks);
- };
- color_host = {
- type = color;
- values = (is_server);
- };
-\endverbatim
-Where <i>gra_host</i> creates a gradient within a node of the graph, using a
-global scale and using as value a variable called <i>numberOfTasks</i>, that
-could be declared by the user using the optional tracing functions of SimGrid.
-If scale is global, the max and min value for the gradient will be equal to the
-max and min numberOfTasks among all hosts, and if scale is local, the max and
-min value based on the value of numberOfTasks locally in each host.
-And <i>color_host</i> composition draws a square based on a positive value of
-the variable <i>is_server</i>, that could also be defined by the user using the
-SimGrid tracing functions. \n
+instance. \n
+
<b>The Graph Visualization</b>: The next figure shows a graph visualization of a
given time-slice of the masterslave_forwarder example (present in the SimGrid
sources). The red color indicates tasks from the <i>compute</i> category. This
visualization was generated with the following configuration:
+
\verbatim
{
- node = (HOST);
- edge = (LINK);
+ node = (LINK, HOST, );
+ edge = (HOST-LINK, LINK-HOST, LINK-LINK, );
- HOST = {
+ host = {
+ type = square;
size = power;
- scale = global;
-
- sep_host = {
- type = separation;
- size = power;
- values = (pcompute, pfinalize);
- };
+ values = (pcompute, pfinalize);
};
- LINK = {
- src = source;
- dst = destination;
+ link = {
+ type = rhombus;
size = bandwidth;
- scale = global;
-
- sep_link = {
- type = separation;
- size = bandwidth;
- values = (bcompute, bfinalize);
- };
+ values = (bcompute, bfinalize);
};
- graphviz-algorithm = neato;
}
\endverbatim
-<center>
-\htmlonly
-<a href="triva-graph_visualization.png" border=0><img src="triva-graph_visualization.png" width="50%" border=0></a>
-\endhtmlonly
-</center>
-
-- <b>Understading Triva - colors</b>: An important issue when using Triva is how
- to define colors. To do that, we have to know which variables are defined in
-the trace file generated by the SimGrid library. The parameter <em>--list</em>
-lists the variables for a given trace file:
-\verbatim
-$ Triva -l masterslave_forwarder.trace
-iFile
-c platform
-c HOST
-v power
-v is_slave
-v is_master
-v task_creation
-v task_computation
-v pcompute
-v pfinalize
-c LINK
-v bandwidth
-v latency
-v bcompute
-v bfinalize
-c user_type
-\endverbatim
-We can see that HOST has seven variables (from power to pfinalize) and LINK has
-four (from bandwidth to bfinalize). To define a red color for the
-<i>pcompute</i> and <i>bcompute</i> (which are defined based on user category
-<i>compute</i>), execute:
-\verbatim
-$ defaults write Triva 'pcompute Color' '1 0 0'
-$ defaults write Triva 'bcompute Color' '1 0 0'
-\endverbatim
-Where the three numbers in each line are the RGB color with values from 0 to 1.
-\verbatim
- scale = global;
- sep_link = {
- type = separation;
- size = bandwidth;
- values = (bcompute, bfinalize);
- };
- };
- graphviz-algorithm = neato;
-}
-\endverbatim
<center>
\htmlonly
<a href="triva-graph_visualization.png" border=0><img src="triva-graph_visualization.png" width="50%" border=0></a>
\endhtmlonly
</center>
-- <b>Understading Triva - colors</b>: An important issue when using Triva is how
- to define colors. To do that, we have to know which variables are defined in
-the trace file generated by the SimGrid library. The parameter <em>--list</em>
-lists the variables for a given trace file:
-\verbatim
-$ Triva -l masterslave_forwarder.trace
-iFile
-c platform
-c HOST
-v power
-v is_slave
-v is_master
-v task_creation
-v task_computation
-v pcompute
-v pfinalize
-c LINK
-v bandwidth
-v latency
-v bcompute
-v bfinalize
-c user_type
-\endverbatim
-We can see that HOST has seven variables (from power to pfinalize) and LINK has
-four (from bandwidth to bfinalize). To define a red color for the
-<i>pcompute</i> and <i>bcompute</i> (which are defined based on user category
-<i>compute</i>), execute:
-\verbatim
-$ defaults write Triva 'pcompute Color' '1 0 0'
-$ defaults write Triva 'bcompute Color' '1 0 0'
-\endverbatim
-Where the three numbers in each line are the RGB color with values from 0 to 1.
+- <b>Understanding Triva - colors</b>: Colors are now registered in
+trace files. See the tracing API to learn how to define them for your
+simulator.
*/
\ No newline at end of file
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="645"
- height="559"
+ width="1062"
+ height="705"
id="svg2"
version="1.1"
- inkscape:version="0.47pre4 r22446"
+ inkscape:version="0.48.2 r9819"
sodipodi:docname="New document 1">
<defs
id="defs4">
- <inkscape:perspective
- sodipodi:type="inkscape:persp3d"
- inkscape:vp_x="0 : 526.18109 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_z="744.09448 : 526.18109 : 1"
- inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
- id="perspective10" />
- <inkscape:perspective
- id="perspective2824"
- inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
- inkscape:vp_z="1 : 0.5 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_x="0 : 0.5 : 1"
- sodipodi:type="inkscape:persp3d" />
- <inkscape:perspective
- id="perspective2838"
- inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
- inkscape:vp_z="1 : 0.5 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_x="0 : 0.5 : 1"
- sodipodi:type="inkscape:persp3d" />
- <inkscape:perspective
- id="perspective3062"
- inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
- inkscape:vp_z="1 : 0.5 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_x="0 : 0.5 : 1"
- sodipodi:type="inkscape:persp3d" />
- <inkscape:perspective
- id="perspective3087"
- inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
- inkscape:vp_z="1 : 0.5 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_x="0 : 0.5 : 1"
- sodipodi:type="inkscape:persp3d" />
<marker
inkscape:stockid="Arrow2Lend"
orient="auto"
id="Arrow2Lend"
style="overflow:visible">
<path
+ inkscape:connector-curvature="0"
id="path3782"
style="font-size:12px;fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
</marker>
- <inkscape:perspective
- id="perspective3115"
- inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
- inkscape:vp_z="1 : 0.5 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_x="0 : 0.5 : 1"
- sodipodi:type="inkscape:persp3d" />
<marker
inkscape:stockid="Arrow2Lend"
orient="auto"
refY="0"
refX="0"
- id="Arrow2Lend-1"
+ id="Arrow2Lend-9"
style="overflow:visible">
<path
- id="path3782-0"
+ inkscape:connector-curvature="0"
+ id="path3782-9"
style="font-size:12px;fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
</marker>
- <inkscape:perspective
- id="perspective3143"
- inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
- inkscape:vp_z="1 : 0.5 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_x="0 : 0.5 : 1"
- sodipodi:type="inkscape:persp3d" />
- <inkscape:perspective
- id="perspective3168"
- inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
- inkscape:vp_z="1 : 0.5 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_x="0 : 0.5 : 1"
- sodipodi:type="inkscape:persp3d" />
<marker
inkscape:stockid="Arrow2Lend"
orient="auto"
refY="0"
refX="0"
- id="Arrow2Lend-8"
+ id="Arrow2Lend-5"
style="overflow:visible">
<path
- id="path3782-3"
+ inkscape:connector-curvature="0"
+ id="path3782-2"
style="font-size:12px;fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
</marker>
- <inkscape:perspective
- id="perspective3196"
- inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
- inkscape:vp_z="1 : 0.5 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_x="0 : 0.5 : 1"
- sodipodi:type="inkscape:persp3d" />
<marker
inkscape:stockid="Arrow2Lend"
orient="auto"
refY="0"
refX="0"
- id="Arrow2Lend-2"
+ id="marker3237"
style="overflow:visible">
<path
- id="path3782-5"
+ inkscape:connector-curvature="0"
+ id="path3239"
style="font-size:12px;fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
</marker>
- <inkscape:perspective
- id="perspective3224"
- inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
- inkscape:vp_z="1 : 0.5 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_x="0 : 0.5 : 1"
- sodipodi:type="inkscape:persp3d" />
- <inkscape:perspective
- id="perspective3253"
- inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
- inkscape:vp_z="1 : 0.5 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_x="0 : 0.5 : 1"
- sodipodi:type="inkscape:persp3d" />
- <marker
- inkscape:stockid="Arrow2Lend"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Lend-81"
- style="overflow:visible">
- <path
- id="path3782-33"
- style="font-size:12px;fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
- </marker>
- <inkscape:perspective
- id="perspective3281"
- inkscape:persp3d-origin="0.5 : 0.33333333 : 1"
- inkscape:vp_z="1 : 0.5 : 1"
- inkscape:vp_y="0 : 1000 : 0"
- inkscape:vp_x="0 : 0.5 : 1"
- sodipodi:type="inkscape:persp3d" />
</defs>
<sodipodi:namedview
id="base"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
- inkscape:zoom="0.7"
- inkscape:cx="433.67329"
- inkscape:cy="388.53198"
+ inkscape:zoom="0.49497475"
+ inkscape:cx="285.9796"
+ inkscape:cy="122.02939"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
- inkscape:window-width="977"
- inkscape:window-height="1124"
- inkscape:window-x="621"
- inkscape:window-y="25"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="1395"
+ inkscape:window-height="979"
+ inkscape:window-x="202"
+ inkscape:window-y="54"
inkscape:window-maximized="0" />
<metadata
id="metadata7">
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
- transform="translate(-66.071426,-270.00504)">
+ transform="translate(-323.28574,125.8521)">
<image
- y="270.00504"
- x="66.071426"
- id="image2840"
- height="559"
- width="645"
- xlink:href="file:///home/schnorr/Desktop/Screenshot.png" />
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
- x="299.89056"
- y="787.12109"
- id="text3662-9"><tspan
- sodipodi:role="line"
- id="tspan3664-7"
- x="299.89056"
- y="787.12109"
- style="font-size:16px">Host</tspan></text>
+ y="-125.8521"
+ x="323.28574"
+ id="image2993"
+ xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABCYAAALBCAYAAACEOqA/AAAABHNCSVQICAgIfAhkiAAAIABJREFU
+eJzs3Xd4HPdh5//PzGzDohMEQQAURBFFlGiVSLIKRVOierHVaMaOUy53cXI5Jz/JsZ12yhM7yZXk
+Euccxxfbjy/P+ZxYTkzJlGVJF9lqlGhLVrNsiRYEVoEESRAkGhfbZ+b3x2JAgARIlF3MYvf9eh6K
+xGLnO18Uln3rO98xLqw413VcV64kmVJkeVTRuqjMiCWZkmGZAgAAAAAAmA/XdiRHclK24sNxJQfi
+kiMZkkzDUCAXJVxZVUHVr2mQETS1bFn9xADGaUMa0z04bzMOZZz1GfMfe3YnXtj4cxpq2s9yHiYw
+84ELmNo8DjvDIPMYf3aHGAUe/9QD5jSreYw/u5EW8C0xi6cs4Df+rMafx0ELGn+Oz8jvt/HpRxZk
+/FMHWtBneRbjn/5AYcdf4PfNWc5RkN/Vcx5/HufL71/Tp59zluO7E/+Z9j0LNvP4Cz+fO+Mb+Tnf
+wsY/84gF/JRPGsqd7sE8jj/No3k4xxmHOO2dC/y+mfNwZ35C/r/V3TO8lY/xpzt0FgPM4xwzHzLN
+e/I6/jTPnOP48/qUzuEPkIWPf+Z3zvtbYtbfCvM8w4K+1Rb+fTq3Wc/w7DMMsrA/Dt0Cjz/dQKf/
+HTVYNaTKFdUa2Xtc2bGMHFcKOHIVqApqWVejKqsqJUmpVDpf0wEAAAAAAJAkVVZWSpWS0WVoaNdx
+ZWNpmTKl+jXLJ6IEAAAAAABAIVVVV6l+zTLJlMzI8qiMYH4XfQIAAAAAAJyJETQVaahQoLK+MreU
+AgAAAAAAYJFUVlbKXpaVWde8zO+5AAAAAACAMmRWWDLjY3G/5wEAAAAAAMqQYRkK2I7t9zyKgCvH
+dpS1XQWCAZlsuQEAAAAA8IHrujIMQ1k7q2QiKdd1T3u/60qO68x6TNMwZIz/mO84hmHINA0Zhilj
+/G130v27w5GIgoHArOc02fyOKim2kidsRVa0qr05ooFd+xQ38xknXGUTcZ1I2FIwrOqqsAKEDwAA
+AADAKVzXleO4SqWTsrPZiRf+JwOAIcPIRYGAaU0JDZN/7Y3l/ewd7zjunMc5dUzXdeVKchxX3lMN
+w1A6lVImk1I4FJFlWXP6uBcpTLjKplJKpLLK2rZsR5JhyLIsBUMhRSJBn16sO0qOBvT+z/69fqdD
+knr0+Y//td4OBRQ+7bmu0idGNZrOvWVVVKkuaunUabvZpIZHUrIlyQypKhJS6y2/q7+4q0P2W9/V
+F762Q8OhEHECAAAAADDBixLJRFy2kwsA5523Wq2tLWpoWK76+joNDQ3r+PFj6us7pH379o/HBckw
+jYmVFrmfp47Z2tqijo521dXXq7qqSrFYTMPDI9qzZ48OHDg4MY45aZzp5nZvV7OubK3X6roqNVVX
+qP9EQvuHY3qlb0jbeg7LdA0lk0lFIpWaS5soeJhw7bRiMVOrPnCP7rqmS+ed26aW2oCc1Akd7d2j
+t15/SU+/uFPH3UARv1h3lY45Ov+Br+oPLw9Jkvq3/ak+9diQqibFCTebVKzuQ/r7r96pFZI0/Iz+
+8x+8oQ/ec4maI5Ku+qBufeIF/d9+V1XF+8ECAAAAABaR98I/EY/LcR1VRKO6/rrrtGxZnWzbkW07
+isVOKBwO65xzztHq1efqggsu0PPbtysRj8twJU363+bOeNiIRMK69toNampaIcfJjZNMJhQKhdXc
+vFKrVrXo2LHjevHFHYrH4+MrK06PEi2VIf3VdReopTok17ElJyN3LKWmgKmmFRW6emWVPtzVpD/c
+/o4OjaWVTMZVWRmd9cdv5uWzOAPXTioeuki//Lm/0V/9x9t07cVr1FKbayFmuForOy/VzR/9D/rw
+akOJjHuW0fxkKBhKqfvZnUqNP9K0/v1qcjKyJ03bTrtquur9uSghafClH+q9xFG90Z3IPTD6rn42
+IAXZxAIAAAAAMM4wDKXSSTmuq8rKKt12662qrKpQLBZTPDGmZDKudDr3gj+eGFMsFlNlVYVuu/VW
+RaOVck7ZJsJ1pUgkoltuuUX1y2o1NpYbJ5VKnDZOdU2VbrnlFkUiFdOM42p1VVhfvnGtmqy0MsMD
+yo4OKhsblh0fVTY2rOzooDLDA2qy0vryjWu1KhqU6zpKJZKz/vgLt2LCzWoss1of+/P/pDu8V+o6
+od6du7Tv8KhU06jWNeerI/pTPdWdVjhcIcmVnU4rmcoqY9uy7dy1K4ZpKhStVHXYlJtJ6kQ8o4zt
+yNsDxLSCikQjqgiZ423HVTaZVDxtK5u15YxXH9MyFQiFVVkRlDVjG8jqxOCIThhG7rmVYQUNyQiG
+lH53u95M/oKuikhqukrrmx7Xd0elgCXJtZVyGnXN+oksoR++2CcjGtIP/8f9emNlg5yjR5WpqlGV
+t6Rl/IuVSGWVdVwZpqVgePL8XKVGR3UiI0mmKmqrVBkwJNdWbDimpCMFq2tUGzIkOUoMn9CYLUmW
+quqrFClodgIAAAAA5EPWtmVnbUmu1q+/Sq6bVSKePbmnw/gPY9ImltlsRsFgSOvXX6mnn35uYrWD
+99xr1l8t05QS8cRZxskqGAxqw4arJ8bxLuUwJP3pFc0KjB1XKnWWO3qOjSgYqdTnLl+pj7/Qq6xr
+T7NFwvQCkuScmkXywE5m1HDrr56MEkMv6e//4uva0e8oEDAkx5HtmIoui8qwQgq4jrJpV5XnX6e7
+N1yi961p1oqGOkUDUma0T6/889/pK6+MKNzxQX36l69W16pGVQclKaPh3rf0g3/9Fz328zFFIqac
+tLT82o/oP67v1OrWJjVWhyQldfzgAe3+yTP61nde12CwQhHL1dR1Gp36rb/9qj5dE5LSw9rz6r/p
+/3z9WR0wIwqbhkKpXfr+G3FdtT4qaYU+sL5J2x4dlF1hSnZaTtOV+sDK8aGO/kjP90mGE9C1f/61
+SXtY/A+9pahCbkaxVJ0uv/ff655rzteaFWGNDRxUz4//n77x8BsaDoelhKGL/+B/61MXWZKO6dE/
+elAPHw/IzbTo3335Qd1Sa+unn39Af/WOq0C6Ujf89y/q19skubv1vz7xV3olW6EwcQIAAAAAilo6
+lZLrulq1qlWRSETxeEK5TS9dOY4zsYGlt2GlaZoyDEOZTFYVFVG1rmpV38E+GUbuec3NK1VVWTk+
+jvTcc9uVzWanPffNN9+oTCYzZRwpt1rizpYKrXDHlBkZlCQdHcvoWCKjC5efvEyjZzChqqClluqQ
+7OSoVlYu0x2tUT15KDHr1lCgMOEoZS/Xzde1jr+d0I//8Vv64VBA1VWnXK+SzMgwDTmOo3QypF+4
+96P6YMfU0YI1K1WZiCsjWwqco4vPa1Tw5HtV13aZtvz+BTrn83+kL+7MKJAOq23jDXr/lHEialjV
+qYZVnbrq2pf1+Qf/UT/NhjS1TBiqrsntIaFQndqv/aj+S2e9/vgPHtaRUFhmKK2eH7ym0fUbVSOp
+6dqr1Pyd7+q4E5JSrlauv0Zelzj47I902DAVmOYKFdfOKOacq4/9tz/SB1tOPl7VeK4u++Bv67JL
+v6c/+JPH1G9J7/5ov3RRu6TluvDcqLJHxmTUdenCWkmydO5FTTJ/elDp4Cpd3Dw+UO8r+nnMlBV1
+TluKAwAAAAAoLt5r8uaWZiUSCdm2LUlyXWdilYPn5GoHc+LYlpbmiaAguWppWalkMjeOd+jNN99w
+2jjPPfeCMpmMDCN3rlWtLVPCxHUrwsrGh+Vkc3eB6E+6+tvXDuvTv9CoCxsi6hlK6a/fOKrfvbxV
+Kytyz8nGh3X9iko90Rf3OUy4trLBJq1r8h44oB/1xBW0QuO3J5HkZBVPZOVIMqyQohHjlNULw3rp
+4Sf09omg6hrDeq83o5BpyT3ysh5/8j2N9A1oOGmovmujPnJrpyKq0NWb369vvPmCRqcsGBnU9oce
+V4/ToAuvv03XrrKkZVfrt3/9NT3wpZ8pqdCk547qle88oZ+N1eqSO+7Q+xskrdikD1/8uP7mrawq
+Apbsvdu1Y3Cj7lgmqfFqbWx9TP/Sn5XcZn1gQ+P4OL16+qUBmVZA7mlRylU2Zar9Y785HiUcHdj+
+iB59c1TLr7pbv3T1cmnVB/Vb1+/Qnz47qPQ7r6pX7WqTdM6lLQrt6Faw/WJ5yafuwi7VOfs1tPJC
+rR6vNYdf2akh01DEcUSXAAAAAIDi5sWHqspKpdOpicdOfY53140cW4ZhyLazqoxGJ12qIVVWViqd
+Tk8ZI5FITBnHY9vZ8Z9tRcfHyT1POiciOfExuePN4H0rlun3P7RRf/297dp8XrW27YvpgTs26CJz
+SE7suCTJcWytilbKdWffGgoUJhy54SpVeJcRxAc1knYl05FjSHJsZZffpD/+vRu0MiAldn5dn/3H
+HjlTPu9H9aN/e0avpS0ZriszOH7XjpHX9S//bMvx7sX60jvKnP83+vhqSfWrVG/ZGspMHueYXnv2
+eb2WNvSD595R9guf0XXVUuVVd+jy//umto9Mfu4RvfjkD/RaUnrx4Ap99Y+vUEQhndOxTMZPjsgx
+DQWcA3r6+aO6474Vkhq04bpV+vY/7Zd73gZtXD7+4b/7jF4+LllhR9nTVky4ygY79aFNDbk3+76j
+v/7akzqUlfRGv5Zf8J91c62h9itXK/z0oFLH39bLR6S2lVJF+1o1mvtUe8W5J/dJbblY7dGntavz
+fC2TJB3Xq68fkzu+CgUAAAAAsEQYmrjkwjAM/ehHP57xqbm9KHIxIhyZfG9Od/wyj8zEONLU0DH5
+17lVFbm3w+Gp40SVVjp9chNLZ+SIzq9O68NXvk9f3/Ez/cr692mdO6Ds8MCkj8FQxElLcucWJk4t
+MQvmSkqPKeUqt1tGdJlqQ5KTcXO3AXFducF6rWppVIMkHYtIjnvqXUkkKbdh5fhGkK5tK1O5Tnf9
+u9u16bI1aqkJSbJle08OVyo0w6aWphWQldyrF99N67orQpIadN4yS8+PTPdcU5nBQxqWtFJSdWOl
+LMeR45oyAo4Ob39a++77mM6TtOyajVr9L33SpqvGw0BabzzxhoYtU+EZPq9m7Tlq9RZ1tH5YX/zn
+D5/2HKOhVfXm6+p3B/TKK8f1i3c1SCvWqaPhp2o7PyIpoaPJCq2InKsrzqlV9hfGr+MYflM/Omwr
+YFnK95cVAAAAAFA4iXhSrpt7hWsYpq6/fuPEvhIeb1VEIpGQ4zoyxo+bLJlMyTByL8hN82SYmDyO
+x3Gc8SsbXKVSqUnvMXQikVLYtuW6JwNDd2+fvvPzhD52zUV69LWfa/UFUV1QezJoGKapWCIpbyPO
+2SjQHhOujMwx7TomrWuUpHO0vqtSL/80IdOSTlkaMemoaR5zxldZuK4ywfP1m3/xe7px2fg77aRi
+SVNVlVOrzkzjSIayKS9j2MrMNA/XlWunJ4JH7gvpynUcuTJkHHtV33v7I7r/fZZUe6Vu/4Wd0tU1
+uSefeFWPvzUmy7DkONN9TK5cJ3sypshWMpE57XmZmCPTcCVldfiVNzV4141aplVa//4r1LhM0pFn
+9S8/uVr3396gC699vwLtuRusjP7kxzqYlQyDyzgAAAAAYKkwDENDQ8Oqrq6U6zoyTVPxeFymefIO
+Gt4KiVxMyP0wDFMnTpw4eScNw9DIyOj4OK5M05S3eiH365xcNHBzd7J0HBmGoVhsVN6KAcOQdo2m
+tC7gyLFzwWL3Cel/dqf0/930fr0vktD5t12tv/5/L+mBtSGtrckdZxph7YmlZRjW3FZM5J8hyzmi
+l14Z1D13LpNUoav+wxZd9blv6KVjtixDmvPNIlxHwbYNuno8Shx9/L/q09/craQb1ZV/+r/0+xee
+8WDZjiPXWKEL1lSMPzas9wbt6RZpnJVpjeqNJ36ixPuuUIUqdO1v/XtpfNhj259WT8aUOeNn1pAz
+clD9aaktJGnwOf3XTz+kdxJT04RhmgqFTJmGq8zBl/Tq8I26tS6kSz58qyRp6Kdv6CcvV2v09o1q
+uOEeXSNJOqHXX+hVxjKm7JwBAAAAACheXnQ4dmxAlZUVsm1bhuHINB1ZlnlamLDtk2EiEAjo6NGj
+M45jmoZcV3r55VenPXc2m5HjuBPjeIsqDMPQi8ccrWvK3cBBkqIVdbr/pot0QbZPmUNH1F7VqM/c
+epXCA7vk2sO5A62InhuYfnXGTAoUJiTTsvXe49/Wjzf9tq6KSlr2AX3qCxfq3Tfe0Z6+IWUbLlTd
+HMd0XWdiZUH16nW67MKsDowEtSJ6pqPadMdHPqi699JquPQW3TN+xYO7+0d6Jzbt1SNnZZiGEu88
+qWcGr9AHl0mq8GLHfj32/QNyLOOM4cXM7NP3Xz6h92+slpbdpD/7yxX6/ovv6FDMVaRmudrOcfTM
+V/9VPVlJMmRlDmj7qyO69eZaKWxIGtNbrxxSrPfH+ll8ozZEx68LOfGmntuXVsC0znB2AAAAAEAx
+MZQLAcePD6ph+XKFQ8HcKgbTkGVb42EityGl67qyHVuuk1sNMTg0rMHBodzKimnGydquLr30ElmW
+OX41QO6KAMfJraKIJxIyDUNj8bgGBo5NWXnx3EhQd9RZWmVacrIJtURSUnK/sieOSJLskT6121kp
+nJGTSMsMVOj1E5a2nwhMXEIyGwULEzIMBWKv6cv/7VvSJ39JVy2XZDbo/Cs26Pwr5jOeqeyBl/Tj
+oWt1Y71U8b579Hvvu2cWB0Z0wc2bdcGUxw7pu1/foeOmKdOe4bAzT0bB7AH92+Pv6YO/du7Eo6nX
+v6cdxyUreIZDJZlmUm899H/03Nr7tWmFZDZdrNs+fPGkZwzo6NateqdfMg3JtLJ67/lXNHjzzbl9
+LJLdenF/SlZ6v7a/ldSGqyKSpOEfv6C9GVPmWc4PAAAAACgiRi4quK60b+9+tbevzq1+sF3Zhn3a
+ignXdWWYprLprA4eODDx/snj7N/3ntasOU+GKcViJybdZjQXDBzHkevm/se74xrat3f/+FxOTss1
+Df1NX1R/1hJXjWHIHjt62tTtkV5JkhmIaMQ29dWjlXNaLSHN44qKuTAsQ9nep/W3n/oD/dW3d+j1
+d/t0LH7y1ibxwUN69/UX9K9PHZB9tv/JbxgKpt/RP3727/XtH+7W0bFJ9+HMxjV4aI/eePFtDbrG
+KasgUjrw3nElJSk1pN63n9dX/+i/6Fvv2Qou4KM3LFcDOx7X6xN7gwzr2Ud3asw69fwzfCyJn+kr
+v//H+vyjr2jne8cUcyTJVWrkiHa98aYOpMyTXxzTkH1gu57NRSll3vmRdidNWVZSPdvfUW4Kx/X8
+0+8pa81nDQgAAAAAwF+GDFPKZNLq6dmjEydicpzcHhDpdEapVFrpdGZ8TwhXJ0ZPaPfu3cpmbZnm
+5A0yc+Ok0ynt2rVbsUnjZDIZpdNppdPpib0l0qm09u7dq1QqJdMwNPkVrWEYOuKE9Jm+FXp1rEq5
+TSNtuW5WrpOW62Yl2ZJp6dWxKn2mb4WG3cCcw4Sx4dqrFuHeDa7srCPbkRxn0uaUhiHTMGRZhgLj
+L6jtdFZpR5IMhcKWTnud7brKZB3Z9tRNLg0jt9toMGjKyVTp2j/7n/qdDknq0ec//t/1Uiz3PNM0
+ZQVMBSZFiWnP6TpKpsYvHbEsVQSn+cS6rtKpk3cFCYQC08aOmT+m3Ocla+d2VXUnfU4CQfOUj91V
+JmXnbj9qWoqExkuYYyuZds94fgAAAABAccttRmnIlSPXyW1lUF1drarqSkUiERmGKdd1lEwmFTsx
+Nr7hpSnD0MQ+Et6qCkOSI3dinLq6WlVWVSocDkuuIRmu0um0xmJjGh7O3arSMAwZpiG5Oi0seKs0
+rgyd0HXREbUHY6q2Ujphh7UnU6Xt8Vq9kq6esiJjLgp3KccUhqyApdnsfGCFAt4+kjMMZSgYtBSc
+0+UKM0SOM53TMBWJnOVVvmEoFDn7p3Dmj2n88zKrr4KhYDigUz9sw7RUEZnN8QAAAACAYnUyKpgy
+TFeuayoWiykWi0257ab3wt80zYlfu5NiwnTjjIyManh4VFPvG2mcvBWpkdsn0Z0mSnhjGoahVzM1
+emW4WrnpuFPGmMueEqdapDABAAAAAADOZPLlGLkX/a7kunInxQIj90TJkGbaSGDqOCdjxWm3fzBO
+bpip09877bjzWRFxNot0KcfiO+slIQAAAAAAwHclu2LirJeEAAAAAAAA37FVIgAAAAAA8A1hAgAA
+AAAA+IYwAQAAAAAAfEOYAAAAAAAAviFMAAAAAAAA3wQk6f4HPu33PAAAAAAAQJn54t99nhUTAAAA
+AADAP4QJAAAAAADgG8IEAAAAAADwTWC6B7ds2bLY8wAAAAAAACVu69atpz3GigkAAAAAAOAbwgQA
+AAAAAPANYQIAAAAAAPiGMAEAAAAAAHxDmAAAAAAAAL4hTAAAAAAAAN8QJgAAAAAAgG8IEwAAAAAA
+wDeECQAAAAAA4BvCBAAAAAAA8A1hAgAAAAAA+IYwAQAAAAAAfEOYAAAAAAAAviFMAAAAAAAA3xAm
+AAAAAACAbwgTAAAAAADAN4QJAAAAAADgG8IEAAAAAADwDWECAAAAAAD4hjABAAAAAAB8Q5gAAAAA
+AAC+IUwAAAAAAADfECYAAAAAAIBvCBMAAAAAAMA3hAkAAAAAAOAbwgQAAAAAAPANYQIAAAAAAPiG
+MAEAAAAAAHxDmAAAAAAAAL4hTAAAAAAAAN8E/J4A5i81MCDZtt/TKAnhlSv9ngIAAAAAlCXCxBKW
+XLdOVYODkmH4PZUlzcpmJdf1exoAAAAAUJYIE0uYlc3KYsUEAAAAAGAJY48JAAAAAADgG8IEAAAA
+AADwDWECAAAAAAD4xrRM2gQAAAAAAFh8lmnKNC3L73kAAAAAAIAyZFqWTIswAQAAAAAAfGARJgAA
+AAAAgF9yYYI9JgAAAAAAgA8s02TFBAAAAAAA8AeXcgAAAAAAAN9YlsVdOQAAAAAAgD/G78rBHhMA
+AAAAAGDxWZYp0zJZMQEAAAAAABafZbLHBAAAAAAA8AmbXwIAAAAAAN9Y7DEBAAAAAAD8YlmmTJM9
+JgAAAAAAgA9M9pgAAAAAAAB+YY8JAAAAAADgG/aYAAAAAAAAvrEsU6bFHhMAAAAAAMAHFntMAAAA
+AAAAv1iWJdMkTAAAAAAAAB+Y7DEBAAAAAAD8wh4TAAAAAADAN+wxAQAAAAAAfDN+u1DCBAAAAAAA
+WHwWe0wAAAAAAAC/WJYp02SPCQAAAAAA4AOTPSYAAAAAAIBf2GMCAAAAAAD4hj0mAAAAAACAbyzL
+lGmxxwQAAAAAAPCBxR4TAAAAAADAL+wxAQAAAAAAfGNZlkyTPSYAAAAAAIAPTPaYAAAAAAAAfmGP
+CQAAAAAA4Bv2mAAAAAAAAL4ZDxPsMQEAAAAAABafxR4TAAAAAADAL5ZpyTS5lAMAAAAAAPjAZI8J
+AAAAAADgF8uyxAYTAAAAAADAN4QJAAAAAADgG8IEAAAAAADwDWECAAAAAAD4hjABAAAAAAB8Q5gA
+AAAAAAC+IUwAAAAAAADfECYAAAAAAIBvCBMAAAAAAMA3hAkAAAAAAOAbwgQAAAAAAPANYQIAAAAA
+APiGMAEAAAAAAHxDmAAAAAAAAL4hTAAAAAAAAN8QJgAAAAAAgG8IEwAAAAAAwDeECQAAAAAA4BvC
+BAAAAAAA8A1hAgAAAAAA+IYwAQAAAAAAfEOYAAAAAAAAviFMAAAAAAAA3xAmAAAAAACAbwgTAAAA
+AADAN4QJAAAAAADgG8IEAAAAAADwDWECAAAAAAD4hjABAAAAAAB8Q5gAAAAAAAC+IUwAAIqG67py
+HEeu6/o9FQAAACwSwgQAoGjYtq0HH3xQY2NjchzH7+kAAABgERAmAABFI5PJ6C//8i/1mc98RkND
+Q8QJAACAMkCYAAAUDS9EfO9739NnP/tZDQ8PEycAAABKXMDvCQAAcKoXXnhB11xzjSoqKvTggw+q
+pqZGpklLBwAAKEWECQBA0VmzZo22b9+uDRs2KBqN6lOf+pSqq6uJEwAAACWIMAEAKDq2beuCCy7Q
+s88+q+uvv14VFRX6xCc+oaqqKuIEAABAiSFMLGGjHR2y33vP72nMmyvJ8HsSkuxAQMv8ngSAKbLZ
+rCzL0iWXXKKnnnpKN998syKRiH7jN35DVVVVMoxi+NMDAAAA+UCYWMJaXnnF7yksSH9/v2zbVktL
+i99TAVBkbNuWbdsKBAK68sor9cQTT+jOO+9UJBLRr/zKr6iyspI4AQAAUCIIE/BNOBzW8ePH/Z4G
+gCKUzWYVCAQUCOT+mtqwYYMeeeQRbd68WZFIRFu2bFE0GiVOAAAAlAAu1IVvotGoEomE39MAUIRs
+21Y2m1U2m5147KabbtJDDz2kT37yk3rssccUj8fluq6PswQAAEA+ECbgm1AoJNd1p7zwAAApt2LC
+u5xjsjvvvFNf+9rX9IlPfELf//73lUwmiRMAAABLHGECvopEIorH435PA0CRmbxi4tR4uWXLFn3x
+i1/Uxz/+cT377LNKpVLECQAAgCWMPSbgq4qKCqVSKb+nAaDIeKslvB+maeqSSy7R3r17J56TSCT0
+a7/2a/re976nK6+8cmI/CgAAACwt/CsOvgqHw+wzAeA03u1Cjx8/rh/+8If69V//dX384x/XV77y
+Ff3u7/6uamtrZVmWQqGQKioqJjbLBAAAwNLDv+Lgq4qKCg0ODvo9DQBFxrZt9fX1afPmzRoeHtbd
+d9+t3/zN39Sf//mfKxAI6IYbbpi4ZWgwGFQoFPJ7ygAAAJgn9piAryKRCJtfAjjNwYMHdffdd+vi
+iy/W2rVr9aUvfUnhcFj333+/HnroIbmuq+rqatXU1KiiokKmyV9nAAAASxX/koOvAoGADMNQMpn0
+eyoAishHP/pRXXPNNbr//vv14IMP6otf/KJGRkb0wAMP6M0339Qbb7zB/jQAAAAlgjAB37EBJoDJ
+DMPQTTfdpN/5nd/RZZddpk2bNunSSy/Vl770JVVXV+vSSy/Vjh07lEjCrYgrAAAgAElEQVQkuBsH
+AABACWCPCfguFAopHo+rtrbW76kA8Jlpmvrc5z6n22+/Xe3t7aqrq5PruvqTP/kT3XvvvXrhhRe0
+c+dObd68WYZh+D1dAAAA5AFhAr6LRqMaGhryexoAikAoFNInP/lJWZY1Ze+I9evX65vf/Kaefvpp
+feQjH9EHPvABRaNR4gQAAEAJIEzAd+FwWOl02u9pACgCpmmqqqpKhmFMiQ7hcFg333yz1q9fL8Mw
+VFlZqWAw6ONMAQAAkC+ECfguEokok8n4PQ0AReDUIDH58XA4rHA47MOsAAAAUEhsfgnfmaY5sc8E
+AAAAAKC8ECZQFMLhsBKJhN/TAAAAAAAsMsIEigK3DAUAAACA8kSYQFFgxQQAAAAAlCfCBIpCNBol
+TAAAAABAGSJMoCiEQiG5rqtsNuv3VAAAAAAAi4gwgaIRiUS4MwcAAAAAlBnCBIoGG2ACAAAAQPkh
+TKBosAEmAAAAAJQfwgSKBismAAAAAKD8ECZQNCKRiJLJpN/TAAAAAAAsIsIEikYgEJBlWcQJAAAA
+ACgjhAkUFS7nAAAAAIDyQphAUQmFQtwyFAAAAADKCGECRSUajbJiAgAAAADKCGECRSUcDrPHBAAA
+AACUEcIEikokElEmk5HjOH5PBQAAAACwCAgTKCqmaSoUCrFqAgAAAADKBGECRSccDiuRSPg9DQAA
+AADAIiBMoOhwy1AAAAAAKB+ECRQdVkwAAAAAQPkgTKDocMtQAAAAACgfhAkUnVAoJNu2lc1m/Z4K
+AAAAAKDACBMoSpFIRPF43O9pAAAAAAAKjDCBosQGmAAAAABQHggTKEpsgAkAAAAA5YEwgaJUWVnJ
+igkAAAAAKAOECRSlUCikZDLp9zQAAAAAAAVGmEBRCgQCCgaDxAkAAAAAKHGECRStUCjE5RwAAAAA
+UOIIEyhaFRUVE7cMzWaz6u3t9XlGAAAAAIB8I0ygaEWjUaVSKWWzWT3yyCN6/vnn1d3d7fe0AAAA
+AAB5FPB7AsBMwuGwxsbG9Mgjj+jyyy/Xeeedp23btkmS1q5d6/PsAAAAAAD5wIoJFC3TNPX666/r
+8ssvV0dHhyzL0r333qu3336blRMAAAAAUCIIEyhK2WxW27Zt0/r169XR0THxOHECAAAAAEoLYQJF
+x9tTwlspcSovTrz11lt69913fZghAAAAACBfCBMoKmeLEh7LsnTffffpZz/7GXECAAAAAJYwwgSK
+xmyjhIc4AQAAAABLH2ECRWGuUcJDnAAAAACApY0wAd/NN0p4iBMAAAAAsHQRJuCrhUYJD3ECAAAA
+AJYmwgR8k68o4SFOAAAAAMDSQ5iAL/IdJTzECQAAAABYWggTWHSFihIe4gQAAAAALB2ECSwq27b1
+8MMPFyxKeIgTAAAAALA0ECawaGzb1tatW3XFFVcUNEp4iBMAAAAAUPwIE1gUix0lPMQJAAAAAChu
+hAkUnF9RwkOcAAAAAIDiRZhAQfkdJTzECQAAAAAoToQJFFQ8HlcikVBDQ4PfU5FlWWpqatKhQ4f8
+ngoAAAAAYBxhAgVVXV2tzZs367vf/a6GhoZ8ncuOHTuUyWS0adMmX+cBAAAAADiJMIGCq6mp0X33
+3edrnNixY4dSqZRuvPFGX84PAAAAAJgeYQKLws84QZQAAAAAgOJFmMCi8SNOECUAAAAAoLgRJrCo
+FjNOECUAAAAAoPgRJrDoFiNOECUAAAAAYGkgTMAXqVRK69at07Zt2/IeJ4gSAAAAALB0BPyewHQG
+V62Slcn4PY2yMnbuuWp55ZWCnycej6uvr0+maeqiiy7S+eefr+985zu6++67VV9fv+DxiRIAAAAA
+sLQUZZhY1tfn9xTKTqFDkOM4Onz4sIaGhtTc3KyGhgZJUiQS0X333ZeXOEGUAAAAAIClh0s5UHCj
+o6Pq6elRJpPR2rVrJ6KEJx97ThAlAAAAAGBpIkygYLLZrPbv369Dhw6ppaVFq1evViAw/SKdhcSJ
+F198UbFYjCgBAAAAAEsQYQIFcfz4cXV3dysYDKqrq0s1NTVnPWY+cWLHjh0aGxvT6tWr5TjOQqcN
+AAAAAFhkRbnHBJauZDKpvr4+OY6jNWvWKBqNzul4L07MZs8J7/KN2267TXv27NHQ0NBpl4kAAAAA
+wGy5rqtUKuX3NIpCOByWYRiLci7CBPLm8OHDOn78uJqamtTY2DjvcWYTJ07dU6K5uVl79+5VbW3t
+jJeLAAAAAMCZjI6O6u/+7u8UDof9noqvUqmUHnjgAdXW1i7K+XgFhwWLxWI6ePCgQqGQ1q5dm5cw
+cKY4Md1Gl9FoVDU1Nerv71dra+uCzw8AAACgPNXU1OiTn/yk39Pw1Re+8IVFPR97TGDeHMdRb2+v
+ent71dzcrDVr1uR1tcJ0e06c6e4bK1eu1NDQkNLpdN7mAAAAAAAoLFZMYF6GhoZ0+PBh1dTUqKur
+q2CXT0xeOdHa2irTNGe8+0YoFFJDQ4MOHTqk1atXF2Q+AAAAAID8YsUE5iSZTGrPnj3q7+9XW1ub
+Vq1aVfA9Hbw4UVlZedZbgjY1NSkejysWixV0TgAAAACA/CBMYFYcx1F/f792796tqqoqdXV1qaqq
+atHOX1NTo/Xr15/1eaZpqqmpSX19fYswKwAAAADAQhEmcFaxWEw9PT2KxWLq6OhQU1OTTLN4v3W8
+W4aOjIz4PBMAAAAAwNmwxwRmlM1mdeTIEY2Ojqq5uXna23YWq9bWVvX29qq6urqoIwoAAAAAlDte
+sWFaIyMj6unpkeM4Wrt27ZKKEpJUVVWlaDSqgYEBv6cCAAAAADgDVkxgimw2q97eXqXTabW1tS3q
+PhL5tnLlSu3evVv19fUKhUJ+TwcAAAAAMA1WTGDCwMCAuru7VVFRobVr1y7pKCFJkUhEy5Yt05Ej
+R/yeCgAAAABgBoQJSJJcSaOjo+ro6FBzc7Pf08mblStXKhaLKR6P+z0VAAAAAMA0CBOQJBmS2tvb
+FYlE/J5KXpmmqcbGRh0+fNjvqQAAAAAApkGYQMlraGhQJpPR8PCw31MBAAAAAJyCMIGSZ5qmVq5c
+qSNHjshxHL+nAwAAAACYhDCBslBXV6dgMKjjx4/P+phMJqPe3t4CzgoAAAAAQJhA2WhubtbAwMCs
+Vk1kMhl9+9vf1vbt27V79+5FmB0AAAAAlKeA3xMAFks0GlVNTY2OHDmilpaWGZ/nRYkNGzaotbVV
+jzzyiCSpo6NjsaYKAAAAAGWDFRMoKytXrtTg4KCSyeS0758cJc4991wFAgFt3rxZr7/+OisnAAAA
+AKAACBMoK4FAQI2NjTp06NBp7zs1Skw+hjgBAAAAAIVBmEDZaWxsVDKZVCwWm3hspijhIU4AAAAA
+QGEQJlB2TNNUc3Oz+vr6JJ09SniIEwAAAACQf4QJlKX6+nqZpqn+/v5ZRQkPcQIAAAAA8oswgbK1
+fPlyPfnkk7OOEh7iBAAAAADkD2ECZSmTyejJJ5/UDTfcMKco4SFOAAAAAEB+ECZQdma7p8TZECcA
+AAAAYOEIEygr+YoSnilxYteuPMwQAAAAAMoLYQJlI99RwhMIBLT5uuu04vrrdfirX83buAAAAABQ
+DggTKAuFihKSpGPHFLjhBtUcPqz6P/xD4gQAAAAAzAFhAiWv0FFCGzdK3d2S6yoyMkKcAAAAAIA5
+IEygpC1mlPAQJwAAAABg9ggTKFl+RAkPcQIAAAAAZocwgZLkZ5TwECcAAAAA4OwIEyg5xRAlPMQJ
+AAAAADgzwgRKSjFFCQ9xAgAAAABmRphAySjGKOEhTgAAAADA9AgTKAkFjxIf+MC8o4SHOAEAAAAA
+pwv4PQHgTBzHkSSl0+kpb2ezWUmS67pyHEevvvqqmpubCxcl3n13QVHCExkZUf2nP63ML/2SgjU1
+eZggAAAAACxthAmckRcCHMeZiAGnxgHbtice935Mfr8XD7zHJ7996mO2bcs0zYljTTO3qCcYDEqS
+DMOQJAUCgYm3TdNUV1eXXnrpJe3cuVPr1q3Lzwef5yghSYn6eo1985taTpQAAAAAAEmECYxzJXV3
+dyubzcodfxHuOM5EGDAMY0oMkE7GAcuy5DjOlLcnv980zYkf3vGnPub9OhAITBlrLtra2rRt2zZJ
+WnicKGSUuP32vIwHAAAAAKWAMAFJkiFp9erVCgQCU2KBH+Z7XtM0de+99y48ThAlAAAAAGDRsPkl
+JkQikYkw4VeUWCgvTnR3d2vnzp1zH4AoAQAAAACLamm++gTOYN5xgigBAAAAAIuOMIGSNOc4QZQA
+AAAAAF8QJlCyZh0niBIAAAAA4BvCBEraWeNEIaJEXR1RAgAAAABmiTCBkjdjnChAlIjX1engP/yD
+lt16a17GAwAAAIBSR5hAWTgtThRopcSJf/5nBa6+Wnv27FEymczLuAAAAABQyggTKBtenNj72mtK
+XXll/i/feOghNd15p8477zzV1dVp9+7dGhoaysv4AAAAAFCqCBMoK6Zp6s6PfUwj1dVyw+G8jOlF
+icl7SjQ2NmrNmjXq7+/X/v375ThOXs4FAAAAAKWGMIGyYwaDWv7aaxro6pIbiSxorOmihCcajaqr
+q0umaaqnp0fxeHxB5wIAAACAUkSYQFnKR5w4U5SYOI9pqq2tTU1NTdq7d68GBgbmO2UAAAAAKEmE
+CZSthcSJ2USJyerr69XR0aHBwUHt27dP2Wx2PlMGAAAAUECZTEZunvahw+wRJlDW5hMn5holPJFI
+RJ2dnQqFQuru7tbo6Oh8pgwAAACgAPr7+7V3716/p1GWCBMoe3OJE/ONEhPnMk21trZq9erVOnjw
+oPr6+tgYEwAAAPDR6Oiouru7FYvFtHr1ahmG4feUyg5hAtDs4sRCo8RkVVVV6urqUjKZ1J49e5RM
+Jhc8ZjFJJBL69re/rZGREb+nAgAAAEwrnU5r//79OnjwoJqbm9Xe3q5wnu7ch7khTADjzhQn8hkl
+PIFAQO3t7aqrq9Pu3bs1NDSUt7H9lEgk9PDDD+uiiy7So48+SpwAAABA0RkYGFBPT4+CwaDWrl2r
+2tpav6dU1ggTwCTTxYlEXZ3GvvWtvEaJyRobG7VmzRr19/ert7d3SV/a4UWJm2++WRdccIHuuusu
+4gQAAEAZKtbN3mOxmN59912Njo6qo6NDra2tMk1eFvuNrwBwislxIlFTk4sSt91W0HNGo1F1dXVJ
+knp6ehSPxwt6vkKYHCVWrlwpKXc3EuIEAABAeUkmk/rGN76hF154we+pTHAcRwcPHtT+/fu1YsUK
+tbe3KzLHO/OhcAgTwDS8OJH6wQ8KHiUmzmmaamtrU3Nzs/bu3av+/v5FOW8+TBclPMQJAACA8pFM
+JrV161bdfvvtsm27KOLE8ePH1d3dLUm68MILVV9f7/OMcCrCBDADMxhU3ZVXLvp5a2tr1dHRoeHh
+Ye3bt69ol8F5zhQlPMQJAACA0udFiZtuuknNzc3atGmTr3EiHo9r165dGhwc1OrVq7Vq1Sou2yhS
+fFWAIhSJRNTZ2alQKKTu7m6Njo76PaVpzSZKeIgTAAAApevUKOHxI044jqNDhw5p7969qqurU2dn
+p6LR6KKdH3NHmACKlGmaam1tVVtbmw4ePKi+vr6i2hhzLlHCQ5wAAAAoPTNFCc9ixomhoSF1d3cr
+m81q7dq1amxsLPg5sXCECaDI1dTUqKurS8lkUnv27FEymfR7SvOKEh7iBAphbGxM27ZtK4rfHwAA
+lJOzRQlPoeOE92/lo0ePqq2tTW1tbQoEAgU5F/KPMAEsAYFAQO3t7aqrq9Pu3bs1NDTk21wWEiU8
+xAnk09jYmB5++GG1t7dr69atxAkAABbJbKOEpxBxwnEcHT58WLt371ZVVZU6OztVVVWVt/GxOEhI
+wBLS2NioyspK9fb26sSJE4u+gU8+ooRncpy45557VFtbm6dZopx4UeKOO+5QY2Ojli9frq1bt2rL
+li3cAgwAgAKaa5TwbNq0Sc8995xeeOEFbdy4cUFzGB0d1cGDBxWNRtXV1aVQKLSg8TxjY2P6p3/6
+p7yMtVSNjY0t6vmMR/71m65tBKc8uGXLlkWdxGkMw9/zl6FYfb2qBgf9ngZmybsPczweV1tb26Js
+5pPPKDHZ0NCQHnvsMeIE5uzUKOE5dOiQnnnmGeIEAAAFMt8oMdlzzz0ny7LmFSfS6bQOHTqkeDyu
+VatWqaamZl5zmE42m1Vvb2/exlvKCnU5zNatW6e8bbkZwgRyCBNL08jIiA4cOKDGxkY1NTUV7DyF
+ihIe4gTmaqYo4SFOAABQGPmIEp65xgnHcTQwMKCBgQE1NDSoqamJ238uQdOFCb6KwBJWW1urrq4u
+DQ8Pa9++fcpms3k/R6GjhMSeE5ibs0UJSWppadGNN97InhMAAORRPqOENLc9J2KxmHbt2qV4PK6O
+jg41NzcTJUoIX0lgiQuFQurs7FQoFFJ3d7dGR0fzNvZiRAkPcQKzMZso4SFOAACQP/mOEp6zxQnv
+0ore3l6tWLFC5513HqshSxBhAigBpmmqtbVVbW1tOnjwoPr6+uQ4zoLGXMwo4SFO4EzmEiU8xAkA
+ABauUFHCM1OcGBgYUHd3twKBgNauXav6+vq8nxvFgTABlJCamhp1dXVN3Md5vi/E/IgSHuIEpjOf
+KOEhTgAAMH+FjhKeyXEiHo9r165dGh4e1po1a9TS0sJlGyWOry5QYgKBgNrb21VXV6fdu3draGho
+Tsf7GSU8xAlMtpAo4SFOAAAwd4sVJTxenPjBD36gZcuWqbOzc1HuPgf/ESaAEtXY2KiOjg719/er
+t7d3Vpd2FEOU8BAnIOUnSniIEwAAzN5iRwnPpk2bVFNTo507dy7aOeE/wgRQwiKRiLq6uiRJ3d3d
+isfjMz63mKKEx4sT27ZtI06UoXxGCQ9xAgCAs/MrSnjmcrcOlAbCBFDiTNNUW1ubmpubtXfvXvX3
+95/2nGKMEp76+nrdc889xIkyU4go4SFOAAAwM7+jhIc4UV4IE0CZqK+vV1dXl0ZHR7Vnzx5ls1lJ
+xR0lPHV1dcSJMlLIKOEhTgAAcLpiiRIe4kT5IEwAZSQUCqm9vV3RaFTd3d06evRo0UcJD3GiPCxG
+lPAQJwAAOKnYooSHOFEeCBNAmTFNU83NzVqxYoWeeOKJJRElPMSJ0raYUcJDnAAAoHijhIc4UfoI
+E0AZSiQSeuqpp3T77bcvmSjhIU6UJj+ihIc4AQAoZ8UeJTzEidJGmADKzFLYU+JsiBOlxc8o4SFO
+AADK0VKJEh7iROkiTABlpBSihIc4URqKIUp4iBMAgHLz05/+VI2NjUsiSnjWr1+vt99+W6Ojo35P
+BXlEmADKRClFCQ9xYmkrpijhIU4AAMrJVVddJcMw9PLLL/s9lVlJpVJ6+OGH9aEPfUg1NTV+Twd5
+RJgAykApRgkPcWJpKsYo4SFOAADKya233qqRkZGijxNelNi4caPOOeccv6eDPCNMACWulKOEhzix
+tBRzlPAQJwAA5aTY4wRRovQRJoASVg5RwkOcWBqWQpTwECcAAOWkWOMEUaI8ECaAElVOUcJDnChu
+SylKeIgTAIByUmxxgihRPgJ+T2Am2UDRTq3kGK4rOxj0exrIo3KMEp7JceLee+9VbW2t31OClmaU
+8EyOE1u2bFEkEvF7SgAAFMytt96qp556Si+//LKuvvpq3+ZBlCgvxiP/+k3XNqa+KN2yZYtP08lJ
+HTni6/nLkmUpvMReLGB65RwlJhseHtajjz5KnCgCSzlKTHbo0CE988wzxAkAKCM9PT1qbGxUfX29
+31NZdE899ZRqa2t9iRNEidK2devWKW9bbqY4wwSA+SFKTDV8/Lj6P/xhtXzpS6pet87v6ZSlUokS
+HuIEAJSPd955R2+99ZYSiYTuuusu4sQiIUqUvunCBHtMACWCKHEK21bdr/6qul5+Wcb11+vEzp1+
+z6jslFqUkNhzAgDKxTvvvKOf//zn2rx5s+6991499thjGhoa8ntai+6KK67QgQMHFm3PCaJE+SJM
+ACWAKHEK25Y+9CFp+3YZyaSqjh0jTiyyUowSHuIEAJQ2L0rcc889sixLNTU1ZRcnksmkdu3apWPH
+junOO+9clA0xiRLljTABLHFEiVNMihKKxyceJk4snlKOEh7iBACUplOjhKdc4oTjOOrv79fu3btV
+U1Ojzs5ORaPRgt+tgygBwgSwhBElTjFDlPAQJwqvHKKEhzgBAKVlpijhKfU4EY/HtWvXLo2Ojqqj
+o0NNTU0yzZMvFwsVJ4gSkAgTwJJFlDjFWaKEhzhROOUUJTzECQAoDWeLEp5SjBOO4+jw4cPau3ev
+li9frs7Ozhk3eM53nCBKwEOYAJaoWCwmx3FUU1Pj91T8N8so4Qmk04oTJvKqHKOEhzgBAEvbbKOE
+p5TiRCwWU09PjxKJhLq6utTQ0HDWY/IVJ4gSmIwwASxRjY2Nuu222/TII48oPosX4yVrjlEiWVur
+kS9/WU2/+IuLMLnyUM5RwkOcAIClaa5RwrPU40Q2m9XBgwfV29ur5uZmrVmzRqFQaNbHLzROECVw
+KsIEsIQ1NTXplltuKd84MZ8o8Q//oKaPfWwRJlceiBInEScAYGmZb5TwLNU4MTw8rJ6eHjmOo7Vr
+16q2tnZe48w3ThAlMB3CBLDElW2cIEr4jihxOuIEACwNC40SnqUUJ7LZrPbv369Dhw5p1apVamtr
+m7K55XzMNU4QJTATwgRQAsouThAlfEeUmBlxAgCKW76ihGcpxImhoSH19PQoFApp7dq1ed2jbLZx
+giiBMyFMACWibOIEUcJ3RImzI04AQHHKd5TwFGucSKfT2rdvn44eParVq1erpaVlwaskpnO2OEGU
+wNkQJoASUvJxgijhO6LE7BEnAKC4FCpKeIotTgwMDKinp0fRaFSdnZ2KRqMFPd9McYIogdkgTAAl
+pmTjBFHCd0SJuSNOAEBxKHSU8BRDnIjH49q1a5eGh4fV0dGhpqamgqySmM6pcYIogdkiTAAlqOTi
+xDyixO7PfU6ZjRs1MjKibDa7CJMsbUSJ+SNOAIC/FitKePyME4cPH9bevXtVV1enzs5ORSKRRT2/
+dDJOvPjii0QJzJr1kS2bP+caU3+Drlu3zqfpAMiXqqoqrVy5Uo8//rg6OzsVDAb9ntL8zHOlxMpf
+/mVls1kNDw/r8OHDGh0dVTqdlmEYCgQCMgxjESZfGogSC1ddXa3GxkY9/vjjOv/88xUIBPyeEgCU
+hcWOEp5wOKw1a9bou9/9rs4991xVVFQU9HyxWEz79u2T4zhqb29XdXV1Qc93Nh0dHdqzZ48uu+yy
+/5+9O4+Pq673P/6eNZNtsidd0hC6JW1pgcrmgoAgKihCW4qAiIgCisq9CArXH4u4gLKKyuKCynZZ
+Cm1ZpCzlSkERKQi0lLa0pbRps+/b7PP7oz0hTbNnZs6Zmdfz8egfSZMznySTSeaV7/keogT2s2HD
+hn1etitCmABSWdLHiQmcvuFyuZSTk6PCwkKVlJTI7XbL7/erublZdXV16urqUjgclsPh4EniMIgS
+sUOcAIDEMitKGBIRJyKRiHbt2qWGhgZNmjQpbptbjseBBx6ovLw8s8eABREmgDSUtHEihntK2Gw2
+ZWRkyOv1qri4WEVFRXK5XOrq6lJ9fb0aGxvV29urSCQih8Nhyi8vVkSUiD3iBAAkhtlRwhDPONHR
+0aHt27fL6XSqsrJS2dnZMTs2EE+ECSBNJV2ciPNGl3a7XR6PR3l5eSopKVFeXp7C4bDa29tVW1ur
+1tZWBQIBRaNRuVyutDztgygRP8QJAIgvq0QJQ6zjRCgUUk1NjVpaWjRlyhSVlpZaZpUEMBqECSCN
+JU2cMOHqGw6HQ9nZ2SooKFBJSYkyMzMVCATU3t6u3bt3q6urS8FgUJLSYn8KokT8EScgSeFwWMFg
+kK8/EENWixKGWMWJ1tZWffjhh/J4PKqsrDRlc0tgoggTQJqzfJywwCVBbTab3G63cnNz+/ancDgc
+CgQCampqUn19vXp6ehQMBlNyfwqiROIQJ9JbOBzWsmXLtH79er7+QIxYNUoYJhInQqGQtm/frs7O
+Tk2bNk3FxcUp/4cSpC7CBADrxgkLRInB2Gw2eTwe5ebmqri4WPn5+ZL2XCO8trZWLS0t6unpUSQS
+kcvlSuqllESJxCNOpCcjSixcuFDTp0/XqlWr+PoDE2T1KGEYT5xobGzUhx9+KK/Xq8rKSrnd7gRM
+CsQPYQKAJAvGCYtGicE4HA5lZWUpLy9PpaWlysrKUjQaVUdHh+rq6tTa2iq/3y+73Z5Up30QJcxj
+xIknn3ySJ6dpoH+UmDVrlvLy8pSfn0+cACYgWaKEYbRxwufz6YMPPlBPT48qKytVWFiY4EmB+CBM
+AOhjmTiRRFFiMC6Xq29/iuLiYmVmZioYDKqpqUkNDQ3q7OxUMBiU3W43PwANgShhPuJEehgYJQzE
+CWD8ki1KGIaLE5FIRA0NDaqpqVFxcbEqKios+zsEMB6ECQD7MD1OJHmUGKj//hTFxcUqLCyUy+VS
+b2+vGhsbVVdXp56eHoXDYTmdTkv8AkWUsA7iRGobKkoYiBPA2CVrlDAMFid6enr0wQcfKBQKafr0
+6fJ6vWaPCcQcYQLAfkyLEykWJQZjXJbU6/Xusz9FZ2dn3/4UPp9P0WhUTqcz4ftTECWshziRmkaK
+EgbiBDB6yR4lDP3jRGZmplpbWzVp0iRNmTIlqT8uYDiECQCDSnicSIMoMRhjf4r8/Py+/SlCoZDa
+2tpUX1+v1tZWBYNB2Wy2uO9PQZSwLuJEahltlDAQJ4CRpUqUMBhx4rnnntPChQtVVFRk9khAXBEm
+AAwpYXEiTaPEYFwul3JyclRYWNi3P4XP51Nzc3Pf/hThcDjmlyUlSlgfcSI1jDVKGIgTwNBSLUoY
+MjIyNGPGDD3xxBNjvpQokGwIEwCGFfc4QZQYkrE/hXHaR2FhodVflJsAACAASURBVBwOh7q7u/v2
+p/D5fIpEInI4HOP+ZYwokTyIE8ltvFHCQJwA9peqUcIwnkuJAsmIMAFgRHGLE0SJMbHb7crMzFRe
+Xp6Ki4vl9XoViUTU2dmp3bt3q62tTYFAQOFwWG63e1SnfRAlkg9xIjlNNEoYiBPAR1I9ShiIE0gH
+hAkAo2LEiSeffDI2cYIoMWFOp1PZ2dnKz89XSUmJMjMzFQqF1N7ert27d6urq0t+v3/I/SmIEsmL
+OJFcYhUlDMQJIH2ihIE4gVRHmAAwajGLE0SJmDNO+zD2pygpKZHD4VAwGFRTU5Pq6urU3d2tUCgk
+l8sln89HlEhyxInkEOsoYSBOIJ2lW5QwECeQyggTAMZkwnGCKJEQNptNHo9Hubm5Ki4uVkFBQd/+
+FB988IFeeOEFffGLXyRKJDnihLXFK0oYiBNIR+kaJQzECaSqwcKE3aRZACSJsrIyfe5zn9OyZcvU
+M4q40IcoYRq3262CggIVFRVp/fr1OuWUU4gSKWLKlCk64YQT9Mgjj8jn85k9DvaKd5QwVFRU6Oij
+j9ayZcvk9/vjdjuAFaR7lDB4vV6ddtppeuKJJ9Ta2mr2OEDcECYAjGjMcYIoYTr2lEhdxAlrSVSU
+MBAnkA6IEvsiTiAdECYAjMqo4wRRwnREidRHnLCGREcJA3ECqYwoMTjiBFIdYQLAqI0YJ4gSpiNK
+pA/ihLnMihIG4kTyikajZo9gWUSJ4REnkMoIEwDGZMg4MY4osfOGG4gSMUSUSD/ECXOYHSUMxInk
+4/P59Je//EXr1q0zexTLIUqMDnECqYowAWDM9osT44gSjbfeKh1/fAKmTQ9EifRFnEgsq0QJA3Ei
+eRiXbj7hhBO0detW4kQ/RImxIU4gFREmAIyLEScee+QRhU86acynb0w991wFAgGFQqEETJvaiBIg
+TiSG1aKEgThhfUaUOO644zRt2jSdcsopxIm9iBLjQ5xAqiFMABi3srIyfWHuXAVee23Me0rY7XZl
+ZWWps7MzAZOmLqIEDMSJ+LJqlDAQJ6yrf5SYOnWqJMlutxMnRJSYKOIEUglhAsCEFB9xhHoee0w9
+BQXDvt1gG13m5OQQJiaAKIGBiBPxYfUoYSBOWM9gUcKQ7nGCKBEbxAmkCsIEgAkrOv549T766JBx
+Yqirb2RnZ6urqysRI6YcogSGQpyIrWSJEgbihHUMFyUM6RoniBKxRZxAKiBMAIiJoeLEcJcEzcrK
+UiQS4cnTGBElMBLiRGwkW5QwECfMN5ooYUjHOPHWW29p/vz5RIkY8nq9Ki0t1fvvv2/2KMC4ECYA
+xMzAODFclDB4vV5O5xgDogRGqy9OPPywfM3NZo+TdJI1ShiIE+YZS5QwGHFiy5YtaREnlixZov/8
+5z/aunWr2aOkjBdffFE5OTk64ogjzB4FGBfCBICYMuJEb07OiFFCknJzcwkTo0SUwFhNmTxZSx95
+RP4FC+Srrzd7nKSR7FHCQJxIvPFECYPdbteXv/zltIgTLpdLixYt0tq1a4kTMfDiiy/K5XLp6KOP
+NnsUYNwIEwBiruj44+VuaBgxSkh79pno7u5WJBJJwGTJiyiBMYtGpSVL5FmzRnl1dfJ97GPEiVFI
+lShhIE4kzkSihIE4gbEiSiBVECYAxIUjM3NUb+d2u+VyudQzisuNpiuiBMZsb5TQc89JXV1SJKL8
+2lrixAhSLUoYiBPxF4soYSBOYLSIEkglhAkApmOfieHV1tYqIyNDhYWFZo+CZDAwShgiEXk6O9X+
+yivmzWZhqRolDMSJ+IlllDAQJzASogRSDWECgOlycnK4bOgwZs6cqYMPPlgrVqxQOBw2exxY2VBR
+Qns2o2277TaVLV5s0nDWlepRwkCciL14RAkDcQJDIUogFREmAJguJydHPp+PfSaGMWfOHM2dO5c4
+gaGNFCVuvVWTzjvPpOGsK12ihIE4ETvxjBIG4gQGIkogVREmAJjObrfL4/Goo6PD7FEsjTiBIREl
+xiXdooSBODFxiYgSBuIEDEQJpDLCBABL8Hq9nM4xCsQJ7IcoMS7pGiUMxInxS2SUMBAnQJRAqiNM
+ALCE3NxcdXd3mz1GUiBOoM8IUaLxl79U4dlnmzScdaV7lDAQJ8bOjChhSJc4EYlE1NvbqyOPPFL/
++Mc/iBMiSiA9OM44ffG1UZtjn1fOmzfPpHEApCuXy6W6ujoVFhbKbqeZjqSkpEQ2m00vv/yyqqqq
++JyloxGixLarrlL4xBPV3Nysuro6NTc3q6WlRe3t7ero6FBPT496enrk9/sVCAQUiURks9lS/r5E
+lNhXXl6e8vPztWrVKlVVVcnpdJo9kmWZGSUMNptNVVVVevXVVxUKhVRWVmbKHLEWiUTU1tam+vp6
+7dq1Sz6fTzk5OVqwYIFeeuklZWZmpu2VqYgSSEUbNmzY52W7IuKnDwDLyMnJUXt7u4qKisweJSnM
+mTNHkrRixQqdeuqpcjgcI7wHUsYoTt+YO+D0jUAgoFAopFAopHA43Pey3+/vezkYDCocDstut8vl
+csnhcMjlcsnpdMrhcMjtdsvpdMrlcslut8vtdidVyCBKDK7/yoklS5YoIyPD7JEsxwpRwmCsnFi5
+cqUkaf78+abOM16hUKgvlHZ1dSkrK0ter1dTpkyR2+3ue7tFixbp8ccflyTNmDHDrHFNQZRAOiFM
+ALCM3NxcdXZ2EibGgDiRhsa5p4Tb7d7nl/3hRCKRvnARDAb7gkZ3d3ff66LRqILBoCT1RQwjXjgc
+Djmdzr6I4XQ6TY8YRInhESeGZqUoYUjWOBEIBNTa2qqOjg75fD7l5uYqPz9f06ZNG3K1jrHnRLrF
+CaIE0g1hAoBl5Obmqr6+3uwxkg5xIo0kaKNL40o5o9E/YoTDYYVCIQUCAfn9fnV3d/etwggGg4pE
+In0RwwgYg63EMEJGrBAlRoc4sT8rRglDssSJrq4utbe3q7OzU6FQSPn5+SorK5PX6x31MdItThAl
+kI4IEwAsw3gi0tPTo6ysLJOnSS7EiTRg0atvjCViSB+dUhIIBPpChhExjJeNkGFEjP6nkgx82Yga
+QyFKjE1FRYWOOeQQ7TrhBE1dvlwZxcVmj2QaK0cJg1XjREdHhzo6OtTW1ian06nc3FyVl5crJydn
+3MdMlzhBlEC6IkwAsBSv16vOzk7CxDgQJ1KYRaPEeBgxYTTf42PZF6N/sDBOH7HZbHrppZd0+OGH
+EyVGq71d5UuXKrp5s9oOO0xauzYt40QyRAmDFeJEKBRSZ2dn38oIj8cjr9er2bNnx3T1U6rHCaIE
+0hlhAoCl5ObmqqmpKWV2Gk804kQKSqEoMVYT2RfD7/dr9erVOuKII4gSo9XeLn3iE9LGjbJFIiqo
+qVFrGsaJZIoSBjPiRCAQUGdnp9ra2tTT06OcnJxBN6+MtVSNE0QJpLvk2UobQFrIzc2Vz+cze4yk
+NmfOHM2dO1crVqxQOBw2exxMRBpHibEyTikxnhy99NJLRImx6BclFInseV04rIKaGvUcdpj8TU3m
+zpcgyRglDEac2LJli9atWxeX2/D5fKqvr9emTZu0efNmdXZ2qrCwUPPmzdOBBx6ooqKiuEYJgxEn
+1q5dq61bt8b99uKNKAEQJgBYjLHxXUdHh9mjJDUjTixfvpw4kayIEuPCnhLjMFiUMKRRnEjmKGGI
+R5zo6urSrl27tHHjRm3btk3BYFCTJ0/W3LlzVVlZqYKCAlOuuJMqcYIoAexBmABgOcY+E5iYOXPm
+6KCDDiJOJCOixLg9++yzmj59OlFitIaLEoZwWPk7d6rt+OMTO1sCpUKUMMQiTnR0dKimpkbr16/X
+rl27ZLfbVVFRoblz56q8vFxer9fUy/8akj1OECWAj5j/iAIAA+Tm5qprwJMxjE91dTVxItkQJSbk
+U5/6lDZs2KCWlhazR7G+0USJvXry85Xx+98naLDESqUoYRhrnAiFQmptbdX27du1bt061dfXy+Vy
+aebMmaqqqtLkyZMtuyl1ssYJogSwL8IEAMvJysrq24kfE0ecSCJEiQnzer067bTT9OSTTxInhtPe
+Ln3yk6OKEt2FhQr+7W/KP/LIBA2XOKkYJQwjxYlAIKDGxkZt3bpVmzZtUktLi3JzczVnzhzNmjVL
+ZWVlY7oUsJmSLU4QJYD9Oc44ffG1Udu+u7bPmzfPpHEAQLLZbOrs7JTD4VBmZqbZ46SE4uJi2e12
+rVmzRlVVVZZYgosBiBIxk5GRoenTp2vlypWqqKjgcWQgI0q89x5RIkWjhMFms6mqqkqvvvqqQqGQ
+8vLy1NzcrNraWjU0NMjhcKiwsFBTp05VUVGRsrKykvbng8PhUFVVlVavXq3MzEwVFhaaPdKgiBKA
+tGHDhn1etitCmABgTeFwWN3d3crLyzN7lJRBnLAwokTMESeGMMYoUXvvvbJVV/ddijUcDstmsyX9
+40c6RAmDESf++c9/qra2VkVFRSopKdGUKVOUn58vj8cjm81m9pgxYfU4QZQA9iBMAEgaDodD9fX1
+KikpMXuUlEKcsCCiRNwQJwYYY5RoX7ZMWYceqkgkIp/Pp87OTrW2tqq+vl4NDQ1qbW1Ve3u7urq6
+1NXVJb/f33canvHYYsUnvOkUJQxGnFi3bp2ysrI0bdo0S35tYsGqcYIoAXxksDDhNGkWABiWx+NR
+JBJRIBAY8Zro0Wg0ZX/Biofq6mpJ0vLly3XaaafJ4XCM8B6IG6JE3Bl7Tixfvlxf+tKXLPMkJeHG
+cfrGlGFO3zAenwOBgILBoEKhkPx+vzo7OxUOh+X3+xUOh+VwOJSRkSGHwyG32y2XyyWn0ymXyyW3
+2y23253QQJqOUcJg7DmxcuVKSdL8+fNNnih+jD0nHn/8cUnSjBkzTJ2HKAGMjDABwLK8Xq/a29uH
+XTXR0NCgxx9/XEuWLFFxcXECp0tu1dXV8vv9evzxx7Vo0SLihBmIEgmT9nEiDntK2O12eTyeYTdH
+jEQifastwuFwX8jw+/0KBoN9/6Q9TyT7/3M6ncrIyJDT6ZTT6RwxUI9GOkcJA3Ei8YgSwOhwKgcA
+y4pEImpvb1dBQcGg/9/Q0KBnnnlGp5xyiv72t7+pvLzcspczs5L29nbt2rVL0WhUWVlZeu211zit
+I9GIEgmXtqd1mLjRpbEXhdvtlsfjUXZ2trxer/Ly8lRYWKji4mKVlZWppKRE2dnZyszMlMPhUDgc
+VjAYVEdHh9ra2tTc3Kzdu3erpaVFbW1t6uzsVFdXl3p7exUMBhUOhxWJRGS324dcPUeU+IjNZlPV
+7NkKL1qkzpoaeY891uyR4sbs0zqIEsDgOJUDQFLJzs5WTU1N3y+c/RlR4vTTT1dWVpYWL16sxx57
+TCeffDIrJwYRiUTU3Nys5uZmORwOFRcXKy8vT3a7XV6vl9M6EokoYZq0WzmRJFffsNvto4rKA08d
+CQaDam9v74sTwWBQNput7zQRh8Mhl8ulaDSqF154Qccff3zaRwlJUjQq+5IlmvrOO/K/+652SZp6
+zTVmTxU3LpdLp556qh5++GFJiVs5QZQAxoYVEwAsy+FwqLW1VdnZ2XK5XH2vHxglJMntdmvmzJla
+uXIlKyf6CQQCqqur086dOxWNRjV58mRNnjxZmZmZfX9ZZEPMBCJKmC5tVk4kSZQYC2OfiszMTGVn
+Zys3N1f5+fkqKirqW31RWFionJwcud1uOZ1O9fT06MUXX9QJJ5xAlJD2PAYtWiQ9/7zU3S2n36+M
+N99UfSCQ0isnWltbVVxcrDfffDMhKyeIEsDwuCoHgKRjnI+cm5srafAoYSBOfKSnp0e1tbXatWuX
+PB6PysvLVVJSMuR52sSJBCBKWEbKx4kUjBKjZbfb5XK55PF4ZLfbtWrVKlZKGAZECUM6xIkPP/xQ
+FRUVOuigg+J+WgdRAhgZYQJA0olGo2ptbVVRUdGwUcKQ7nGira1Nu3btUnNzs3Jzc1VZWam8vDw5
+nSOfuUeciCOihOWkbJxI4yjRH3tKDDBElDCkcpxobGxUOBxWWVlZ3PecIEoAo0OYAJB0XC6Xamtr
+FY1GtWrVqmGjhCHd4kQkElFLS4t27Nih7u5uFRcXq7y8XLm5uWO+jCpxIg6IEpaVcnGCKCGJKLGf
+EaKEwen3y/Pmm6oLheQ95pgEDhg/kUhEO3bs0NSpU/tWDMYrThAlgNEbLEzwGycAS7Pb7fL5fKOO
+EoacnBwtXrxYTz/9tJqamuI8pTkCgYBqa2u1YcMGtbe3a8qUKaqqqlJBQcGEgkJ1dbUOOuggLV++
+XOFwOIYTpyGihOUZG2I++eSTamlpMXuc8SNK9HE6nXK5XOrt7TV7FPONMkoYIjab7JMmJWCwxGht
+bVVGRoZycnL2eb1xKdG1a9dq69atE74dogQwcYQJAJbW0NCgd999V0uXLh3zyodUjRM9PT3asWOH
+Nm3apHA4rNmzZ2v69Onyer0xuw3iRAwQJZJG0scJosQ+nE6nFi9erDfeeENbtmwxexzzjDFK+PLy
+1HrjjZr8rW8lYLj4i0Qiqq+vV1lZ2aD/H6s4QZQAYoMwAcCyjD0lxhMlDKkUJzo6OrR161Zt27ZN
+GRkZmjNnjsrLy4fc0HKiiBMTQJRIOkkbJ4gSg0r7OJHmUUKS2tvb5XK59lst0d9E4wRRAogdwgQA
+SxrNRpejlcxxIhKJqLm5WZs2bVJtba3y8/N10EEHqaysbFQbWk4UcWIciBJJK+niBFFiWGkbJ4gS
+kjTsaon+xhsniBJAbBEmAFhOLKOEIdniRCgUUm1trTZu3KiOjg5NnTpVVVVVKioqSvgsxIkxIEok
+vaSJE0SJUUm7OEGUkLRnbwmbzTbqUxzHGieIEkDsESYAWEo8ooQhGeKEz+fTjh079N577ykYDGr6
+9Ok68MADh12KmgjEiVEgSqQMy8cJosSYpE2cIEr0Ge1qif5GGyeIEkB8ECYAWEY8o4TBqnGi//4R
+LpdLc+bMUUVFhTwej9mj9SFODIMokXIsGyeIEuOS8nGCKNGno6NDNptN+fn5Y37fkeIEUQKIH8IE
+AEtIRJQwWClODNw/Yu7cuZo8eXJC9o8YD+LEIIgSKctycYIoMSEpGyeIEvuor6/XpAlc8nSoOEGU
+AOLLccbpi6+N2hz7vHLevHkmjQMgHSUyShjcbrdmzpyplStXqry8PGG3K+3ZP6K+vl47d+5UKBRS
+WVmZpk6dmtAZJqK4uFh2u11r1qxRVVWV7PY0btxEiZSXkZGh6dOna+XKlaqoqFBmZqY5gxAlYsJu
+t6u6ulqrV6+Wx+NRYWGh2SNNDFFiHx0dHers7NTUqVMndByHw6GqqiqtXr1amZmZeuutt4gSQAxt
+2LBhn5ftirBiAoC5zIgShkSvnPD5fKqpqdHGjRv79o+YMWPGqDfnshJWTogokUZMXzlBlIiplFk5
+QZTYT319vUpLS2NyLGPlxGuvvUaUABKAFRMATGNmlDAkYuVER0eHdu/ercbGRmVlZemAAw5QQUGB
+ZU/XGK20XjlBlEg7pq2cIErERdKvnCBK7Kejo0MdHR2aNm1azI7pcDg0b948VVZWxuyYAFgxAcBC
+rBAlDPFaOWHsH7F79255vV5VV1dbev+I8UjLlRNEibSV8JUTRIm4StqVE0SJQdXX16ukpCTmx02r
+6A6YiO80AAlnpShhiFWcMPaPWL9+vdra2jR58mRVV1erqKgoZX+5Sas4QZRIewmLE0SJhEi6OEGU
+GFRXV5fC4bAKCgrMHgXAOKXmb8kALMuKUcIwkTjRf/+I3t7epN4/YjzSIk4QJbBX3OMEUSKhkiZO
+ECWG1NjYqJKSkpT9AwCQDvjuBZAwVo4ShrHGiY6ODm3btk1btmyRw+HQ7NmzVVlZadmPL55SOk4Q
+JTBA3OIEUcIUlo8T44gSW6+8UrlnnpmA4czV09Oj3t5eFRUVmT0KgAkgTABIiGSIEoaR4kQkElFr
+a2vf/hF5eXmaO3euJk+eLLfbbcLE1pGScYIogSHEPE4QJUxl2TgxzpUSk775TW3fvl07duxQIBBI
+wKDmqKuri8veEgASizABIO6SKUoYBosTkUhE9fX12rhxo5qamlRWVpby+0eMR0rFCaIERhCzOEGU
+sATLxYkJnL5RVFSkuXPnyuFwaPPmzaqtrU3AwInV09Mjn8/HagkgBXC5UABxlYxRwmBcSnTFihVy
+uVxqbGyU0+nUlClTNGnSJHk8HrNHtKyUuJQoUQKjNOFLiRIlLMUylxKNwZ4SNptNXq9XXq9Xra2t
+qq2tldvtTpmfXzt37lReXp5yc3PNHgXAGHC5UAAJlcxRwpCTk6MlS5bo9ddfV1FRkSoqKpL2Y0m0
+pF45QZTAGI175QRRwpJMXzkR440uPR6Ppk+frvLyctXV1Wnr1q3q6emJ9dQJ5fP51NPTw2kcQIog
+TACIi1SIEgYjTjz33HMTupRoOkrKOEGUwDiNOU4QJSzNtDgRx6tveL1ezZo1S16vV9u3b1dNTY1C
+oVAspk64hoYGrsQBpBC+kwHEXCpFCcNELiWa7pIqThAlMEGjjhNEiaSQ8DiRgEuC2u12lZSUqLq6
+WpK0ceNG1dfXKzLC/dBKfD6fOjo62FsCSCGECQAxlYpRwkCcGL+kiBNECcTIiHGCKJFUEhYnEhAl
++rPb7SovL9fMmTPV1dWlzZs3q6OjY1zHipVwOKz3339/xLdraGhQUVGRnE5nAqYCkAhsfgkgZlI5
+ShiMDTFXrlyp8vLylP0448HSG2ISJRBjQ26ISZRISnHfEDPBUaI/p9OpwsJCOZ1O7dq1Sx0dHcrJ
+yZHD4Rj5nWMoHA7r8ccf186dO+X3+zVlypRB3y4QCGj37t064IADrPVzBMCoDbb5JWECQEykQ5Qw
+ECfGz5JxgiiBONkvTgQCRIkkFrc4YWKU6M/j8aikpETBYFA7d+5UKBRSdna2bDZbTG9nMEaUWLBg
+gY499li99dZb6ujoGDRO1NbWKjMzU/n5+XGfC0B8cFUOAHGRTlHCwGkd42ep0zqIEogz47SOZx95
+ROGjjiJKJLmYn9ZhkSjRX1lZmaqrqxUKhbRhwwY1NzfH7bakfaNEVVWVbDabTjrpJNXX12vt2rX7
+vG0gEFBbW5vKysriOhOAxCNMAJiQdIwSBuLE+FkiThAlkCBer1dfOvZY9TQ3EyVSQMzihAWjhMHp
+dKqiokLTp09XW1ubNm3aFJf9JwZGCcNQcaKpqanvtBMAqYUwAWDc0jlKGIgT42dqnCBKIMFyqqtl
+W7NGnSUlQ74NUSJ5TDhOWDhK9JeVlaUZM2aotLRUNTU12rZtmwKBQEyOPVSUMAyME6FQSC0tLSou
+Lo7J7QOwFsIEgHEhSnyEODF+psQJogRMMlycIEokn3HHiSSJEv0VFBSourpamZmZ2rx5s2prayd0
+edGRooShf5xYs2aN8vPz5Xa7x327AKyLMAFgzIgS+yNOjF9C4wRRAiYbLE4QJZLXmONEEkYJg91u
+1+TJkzV79mz5/X5t2LBBra2tYz7OaKOEwYgTvb29qqmpGc/oAJIAYQLAmDU2NionJ0cej8fsUSwl
+KytLWVlZhIlxSEicIErAIvrHCaJE8ht1nEjiKNGf2+1WZWWlKisr1dDQoE2bNqlrwGPqUMYaJQxG
+nGhubta7Dz883tEBWBhhAsCYzZs3T7Nnz9bKlSsntJQzlUQiES1fvlzz5s1TdXW12eMkpbjGCaIE
+LMaIE8FnniFKpAAjTqxdu3bwOJEiUaK/nJwcVVVVqbi4WNu3b9f27duH3X9ivFHCYLPZdNI772j2
++eer5pJLJjI6AAtynHH64mujNsc+r5w3b55J4wBIFmVlZQqFQnr11Vf7Lu+VrowoMWfOHM2dO9fs
+cZJacXGx7Ha71qxZo6qqKtntMejnRAlYlLu4WJ6pU80eAzFit9tVXV2tF154QR6PR4WFhXv+IwWj
+RH9ZWVkqLi6Wz+dTTU2NwuGwsrOz9/m9YKJRQpJ0/fWy3XSTHB0d8mzYoNq6Onm/8IUYfRQAEmnD
+hg37vGxXhDABYPyIE0SJeIhpnCBKAEig/eJEQUFKRwmDzWZTbm6u8vPz1dLSorq6Orndbnk8nphF
+Cd10k9TSIklyBALECSCJDRYmOJUDwITMnz9fM2fOTMvTOogS8ROT0zqIEgBM4HQ6tWTJEq19/XV1
+f+5zKR8l+nO73Zo+fbrKy8tVV1en999/X8uWLYtplOi7rc5Olf75z5zWAaQIwgSACUvHOEGUiL8J
+xQmiBAATOZ1OLfnyl9W7fbuiw+y7YEiFKNGf1+vVzJkztXbtWh1yyCExjxIG4gSQOggTAGIineIE
+USJxxhUniBIALMCZlaX8//xHzZWVirpcQ75dqkUJ6aM9JRYuXBi3KGEgTgCpgTABIGbSIU4QJRJv
+THGCKAHAQpzZ2cPGiVSOEvE4fWMoxAkg+REmAMRUKscJooR5RhUniBIALGioOEGUGMIYo4SBOAEk
+N8IEgJhLxThBlDDfsHGCKAHAwgbGCaLEEMYZJQzECSB5ESYAxEUqxQmihHUMGieIEgCSgBEnmmbO
+JEoMZoJRwkCcAJITYQJA3KRCnCBKWM8+cSIUIkoASBrO7GyVbNhAlBgoRlHCQJwAkg9hAkBcJXOc
+IEpYlxEndh97rPTss0QJADBJc3OzOjs7VVlZOb4DxDhKGNzd3cp9+GFFRnG5VgDmI0wAiLtkjBNE
+Ceurrq6WY9Ei+R2OfV5PlACAxCktLdVnP/tZLVu2TH6/f2zvHKcoIbtdbZMmyfP227K73bE9NoC4
+IEwASIhkihNEieQx5dJL1XzNNfJ7vZKIEgBghmnTpunTn/702OJEnKNE5ptvKqOsLLbHBhA3hAkA
+CZMMcYIokXz64kRWFlECAEwypjhBlAAwAGECQEJZOU4QJZLXlEsvVXTzZqIEAJhoVHGCKAFgEIQJ
+AAlnxThBlEh+nqlTzR4BANLesHGCKAFgCIQJAKawUpwglaGWHgAAIABJREFUSgAAEDuDxgmiBIBh
+ECYAmMYKcYIoAQBA7PWPE+Gf/SxuUaJ98mSiBJACCBMATGVmnCBKAAAQP9OmTdNxc+cqfMMNcYsS
+nrVriRJACiBMADCdGXGCKAEAQPxNOfRQtf3lL+rNz4/dQftHiUmTYndcAKYhTACwhETGCaIEAACJ
+U7pkiTr/8IfYxAmiBJCSCBMALCMRcYIoAQBA4sUkThAlgJRFmABgKfGME0QJAADMM6E4QZQAUhph
+AoDlzJ8/XwcccIBWrFgRszhBlAAAwHzjihN7Lwna9tRTRAkgRREmAFhSaWmpSktLY7JygigBAIB1
+jClO7F0p4XrtNXU6nQqFQvEfEEDCESYAWE4kElFbW5uOOOKICZ/WQZQAAMB6RhUn+p2+kV1ervz8
+fNXX1yduSAAJQ5gAYDmtra3KzMyU2+2e0J4TRAkAAKzLiBO+weLEIHtKTJo0Sa2trayaAFIQYQKA
+5TQ1NamoqKjv5fHECaIEAADWV7pkiToGxokhNrp0Op2smgBSFGECgKX09PQoHA7L6/Xu8/qxxAmi
+BAAAyWOfODHC1TdYNQGkJsIEAEtpaWlRQUGB7Pb9H55GEyeIEgAAJB8jTrSUlw97SVBWTQCpiTAB
+wDKMTS/7n8Yx0HBxgigBAEDyKl2yRIUffjjiJUFLS0tZNQGkGMIEAMvov+nlcAaLE0QJAADSg9vt
+Vn5+vhoaGsweBUCMECYAWEZLS8uwqyX66x8nQqEQUQIAgDRSWlqqlpYWVk0AKYIwAcASenp6FAwG
+99v0cjhGnLjjjjuIEgAApBFWTQCpxWn2AAAgDb/p5XDmz5+vyspK5ebmxmkyAABgRaWlpdq8ebNK
+S0vldPK0BkhmrJgAYLrRbHo5HKIEAADph1UTQOogTAAw3Wg3vQQAAOiPvSaA1ECYAGC6lpYWlZSU
+mD0GAABIMqyaAFIDYQKAqYxNL3NycsweBQAAJCFWTQDJjzABwFTj3fQSAABAYtUEkAp4JgDANBPd
+9BIAAEBi1QSQ7AgTAEzT2tqq7OxsNr0EAAATwqoJILkRJgCYpqWlRYWFhWaPAQAAUgCrJoDkRZgA
+YIqenh75/X7l5uaaPQoAAEgBrJoAkhdhAoApWlpaVFRUxKaXAAAgZlg1ASQnnhEASDg2vQQAAPHA
+qgkgOREmACQcm14CAIB4YdUEkHwIEwASjk0vAQBAvLBqAkg+hAkACcWmlwAAIN5YNQEkF8IEgIRi
+00sAABBvrJoAkgvPDAAkDJteAgCARGHVBJA8CBMAEqa9vZ1NLwEAQEKwagJIHoQJAAnT1NTEppcA
+ACBhjFUTgUDA7FEADIMwASAhfD4fm14CAICEMlZNNDU1mT0KgGEQJgAkRFNTE5teAgCAhGPVBGB9
+PEMAEHdsegkAAMzCqgnA+ggTAOKOTS8BAICZWDUBWBthAkDcseklAAAwE6smAGsjTACIK5/Pp2Aw
+yKaXAADAVKyaAKyLMAEgrlpaWpSXl8emlwAAwFSsmgCsi2cKAOImEomopaVFJSUlZo8CAADAqgnA
+oggTAOKmvb1dmZmZbHoJAAAsgVUTgDURJgDETVNTk4qLi80eAwAAoA+rJgDrIUwAiAufzye/38+m
+lwAAwFJYNQFYD2ECQFy0tLSooKCATS8BAIDlsGoCsBan2QMASE0tLS2aOXOm2WMAAIAU0dTUpI6O
+jpgdr7u7W2+//baKiopidsxkMWnSJGVlZZk9BtCHMAEg5lpbW5WZmSmPx2P2KAAAIEW89tpr2r59
+u7xeb8yOGYlE0m51Z11dnU499VTNmjXL7FGAPoQJADHHppcAACAejjzySB122GFmj5HUHnzwQbNH
+APZDmAAsKhKJaPv27WaPMWaBQEC1tbWy2+1qbW2NyTHLy8u55CgAAACQoggTgEUFAgE98MADqqys
+NHuUMYlGo5KkmpqamByvpqZG559/vkpLS2NyPAAAAADWQpgALMzlcumcc84xewxT3XnnnWaPAAAA
+ACCO0munFwAAAAAAYCmECQAAAAAAYBrCBAAAAAAAMA1hAgAAAAAAmIYwAQAAAAAATEOYAAAAAAAA
+piFMAAAAAAAA0xAmAAAAAACAaQgTAAAAAADANIQJAAAAAABgGsIEAAAAAAAwDWECAAAAAACYhjAB
+AAAAAABMQ5gAAAAAAACmIUwAAAAAAADTECYAAAAAAIBpCBMAAAAAAMA0hAkAAAAAAGAawgQAAAAA
+ADANYQIAAAAAAJiGMAEAAAAAAExDmAAAAAAAAKYhTAAAAAAAANMQJgAAAAAAgGkIEwAAAAAAwDSE
+CQAAAAAAYBrCBAAAAAAAMA1hAgAAAAAAmIYwAQAAAAAATEOYAAAAAAAApiFMAAAAAAAA0xAmAAAA
+AACAaQgTAAAAAADANIQJAAAAAABgGsIEAAAAAAAwDWECAAAAAACYhjABAAAAAABMQ5gAAAAAAACm
+IUwAAAAAAADTECYAAAAAAIBpCBMAAAAAAMA0hAkAAAAAAGAawgQAAAAAADANYQIAAAAAAJiGMAEA
+AAAAAExDmAAAAAAAAKYhTAAAAAAAANMQJgAAAAAAgGkIEwAAAAAAwDSECQAAAAAAYBrCBAAAAAAA
+MA1hAgAAAAAAmIYwAQAAAAAATEOYAAAAAAAApiFMAAAAAAAA0xAmAAAAAACAaQgTAAAAAADANIQJ
+AAAAAABgGsIEAAAAAAAwDWECAAAAAACYhjABAAAAAABMQ5gAAAAAAACmIUwAAAAAAADTOM0eAMDQ
+/H6/fvKTn5g9BgAAAADEDWECsCiPx6NrrrnG7DEAAAAAIK44lQMAAAAAAJiGMAEAAAAAAExDmAAA
+AAAAAKYhTAAAAAAAANMQJgAAAAAAgGm4KgcAAAAAy3P4/XJ0dEitrWaPktTc3d2yhUJmjwHsgzAB
+AAAAwPLm3X23yl98UXLyFGYiFgcCqr3rLmnOHLNHAfrwXQ0AAADA8uzB4J6/9PPX/gkJZmebPQKw
+H/aYAAAAAAAApiFMAAAAAAAA0xAmAAAAAACAaQgTAAAAAJAm7MGg2SMA+2HzSwAAAACWFznxRNXY
++bvqREVtNmXPnm32GMA+CBMAAAAALG/aFVdIV1xh9hgA4oDkCAAAAAAATEOYAAAAAAAApiFMAAAA
+AAAA0xAmAAAAAACAaQgTAAAAAADANIQJAAAAAABgGsIEAAAAAAAwDWECAAAAAACYhjABAAAAAABM
+Q5gAAAAAAACmIUwAAAAAAADTECYAAAAAAIBpCBMAAAAAAMA0hAkAAAAAAGAawgQAAAAAADANYQIA
+AAAAAJiGMAEAAAAAAExDmAAAAAAAAKYhTAAAAAAAANMQJgAAAAAAgGkIEwAAAAAAwDSECQAAAAAA
+YBrCBAAAAAAAMA1hAgAAAAAAmIYwAQAAAAAATEOYAAAAAAAApiFMAAAAAAAA0xAmAAAAAACAaQgT
+AAAAAADANIQJAAAAAABgGsIEAAAAAAAwDWECAAAAAACYhjABAAAAAABMQ5gAAAAAAACmIUwAAAAA
+AADTECYAAAAAAIBpCBMAAAAAAMA0hAkAAAAAAGAawgQAAAAAADANYQIAAAAAAJiGMAEAAAAAAExD
+mAAAAAAAAKYhTAAAAAAAANMQJgAAAAAAgGkIEwAAAAAAwDSECQAAAAAAYBrCBAAAAAAAMA1hAgAA
+AAAAmIYwAQAAAAAATEOYAAAAAAAApiFMAAAAAAAA0xAmAAAAAACAaQgTAAAAAADANIQJAAAAAABg
+mjGFid5/f08zF/xcGwPxGWa444d3/VFHTVmq1Z0D/qP9KZ1YdIyWtcRnJgAAAAAAED/Osbyxe8bX
+dPP1mZoypveyzvEBAAAAAIC1DL5iwr9F911whMpcNtkyynXsFc+pISwFtt6rH1y5UrtDkn/d/9OC
+hZfr1os/qamZGSo96r/02N/v1lcXFMph82rBRStUG5YU7db635+jQ4rdcpcdqQtuu1pHzL5Yr/VI
+vrd+pIOO/Knu/dHRmlp9qf65/qPjK9Ku129drLkFTjkL5+mMO9erJzpgzsB7uvHkr+r5ljX66oLT
+9MDzP9zneGt7uvTW7WfooEKHbDaHChecrbs39O59Z5+2PPhtHTXFI5ujQPPPulPru6OSQmpY/ROd
+PDtfLs9UHb7kKj1ZE4zfVwAAAAAAgDQ2aJhofvpiff+lo/Xn9xq0a80l8v/uu/rte/793q7nP3/W
+i7N+pX9vfU7faPm1lix+Rkf9fr12rL5QPX++To/XhNX71s+06Ecb9enb/qXt//qZiu79pV7v+ugY
+vW/drlsal+r3D1yqeR7jtVF1vvJDnfb/tuiTN/yf3n7iEhWs+K3WdQ0YwD1Hlz99vz5b+Gnd/85y
+LS6x7XO86sb7deH/rNOXV+xSR+N6/fGT/9RV172qLkn+DTdp0ffe1kn/+77qN/xVJ7xxuc79wzb5
+dj+kr33lr8r//nK9u+5+fS3jfp295C5to00AAAAAABBzg540EZVN4Y5d2tEQ0HEfv0yvNF+iqMut
+4Ov7vp2rYrGuvOCTmprVq8VfqNRDoSv1jaOmKKt3iT5X/oyau7u14f5HFTz7Ad3w1YXKUlQ/vO7z
+uuuifreVdZR+evP3dHKB1Ptv47U9evuvT8v1reW6/cLDlamjddvdb+jpU1pH/ID6Hy/afbL++NqX
+NHPuJGX0hpSX41DP+90Ky6/tDz2g1lPu1qXHTFOOpurah+/RigaXGlffpbWH3qiNFx+nYps06/ab
+9dRBd2td9/c0PX+cn2UAAAAAADCoQVdMFH/xj3rke07dd+Ys5Xhn6XM/+F9t2u88Csme6VWmXZJs
+crhc8ng9ew/okMtukxRU09YOFVaXya09b5c5eaaK+uUQV/EsTfEMPHJIrTu7VbpgqjL2viZjyjxN
+co/8Ae17vHa9/ruv67Bpk1Wx4DT9/JUWRSRJQTVuaVfhnEl7j29X3iFLde6JZWrfUq/m58/VrKJC
+FRYWqmjWeVrT3aVdraGRbxwAAAAAAIzJoGGi/YOdKjjjLr38Ybc61/1GC1/8rs69Z7vG/tTcqcKK
+HLVsbpBxJoSvfpta+h3IZnfIbhv4fi4VVeaqaf1uGSeQBGrfU/0orgby0fHC2nnfN/RfrxylO97Y
+oZotr+uxyw9WpjHXAblq29KoPYeMqu1ft+vqP2xW5gFFKjj+Hm1ublFLS4ta6rfpP2v+rLMq2JET
+AAAAAIBYGzRMND59kY496Wd6ua5b/rBDngyn3Jku7dcPRpSpuWefItsDV+i3r9aqbecLuumaVWqN
+SMMfLEvzv3ayAn+4QJf+6R9679V79IPv/nXPppiSFKrTK8uW6eXava+I+tThiww4RlSRUEhRd76K
+cqT2zX/Tr29+Ve2BHgWjHs1ecqoyll2p21+pUdP7T+sX3/yxnmzL0JTPnKf5b/9c1z+5WU1Nm/TE
+FcfoyPOfVOPAwwMAAAAAgAkbNEzM+OYfdd3c5Tplcq4K55yjZw65UX88q1yOcdxA9hHXadm1Rbrn
+8+Wa/InrFTjrTM3w5ipjhMqR+4kb9Ph10/XSZZ/WQSfdorazfqEvlO4dt/cd3fz1r+lXb/dIWfN0
++sd36FvVn9fDDf3rgVMVZ92qS3Jv0mGlVTrt5k362DU/06ffvVBn3rtL7oVX6bFfHqhHFx2okupz
+9ezCW/TAxbOUWXme7r/n83r7ko+ppGS+Lvzn0brj4e9qlmscHzwAAAAAABiW7bGHH4iGbfs+6z79
+9NNjdPioujc9rSd2HKzFn50mt6SOZxdr9v8cq3+99j1VcnYEAAAAAABp49FHH93nZUc0OPiKidix
+ydbytH545sV6cFOXwr2b9dBtr6jolM9oClECAAAAAIC0F+cwIWUd+TPd//2grjusUDmTP6M786/V
+w5fN1SgusAEAAAAAAFJc/Nct2It0zNXPaNvVcb8lAAAAAACQZAYNEwPP+QAAAAAAAIiHuJ/KAQAA
+AAAAMBTCBAAAAAAAMA1hAgAAAAAAmIYwAQAAAAAATEOYAAAAAAAApiFMAAAAAAAA0xAmAAAAAACA
+aZxmDwAAAAAk0umnLzV7hLRms0mPPPLIsG/D18h6RvN1A8aLMAEAAIC08+ijPMEyw6OPPirp0VG+
+LV8jqxjL1w0YD07lAAAAAAAApmHFBLDX0qUsGYwVlvlZC/ft2OG+nXy4/8cO938AQLwQJoB+rr32
+2lG/7YknnqiPf/zj2rx5szZv3rzf/2/dulWzZ8/WjBkzBv1/SXr33Xf1v//7v5o0aZI+/vGPj3ds
+SxnL5xCJY+bX5ZprrpEkPfXUU4P+//PPP69f//rXQ/6/tGcJ6b333mvqx8F9O3ml69cult97AADE
+E2ECmKBgMKje3t79Xt/Z2Sm/3z/k/xvvC6SLob4P2trahv1/ABPD9x4AwOoIEwAAAEB/vnd1+0U/
+0Ss9A17vPlw//sPlOjgzQXP0rtOtF/5Ur/qMV7hUULFAnznnAi05uEAOSYEt9+gHd+XrihsWaWos
+frPveUM/++4TOuH2n+ionNG/W6Rlta668h195bb/1vxEfH78m3T3xVdpdUf/V3pUctCJOu+7Z+qw
+Qkfsb3Ooz43/Pf3u29fopa6PXuXIrdARiy7SBSfNVLZt7De1z9c18NHtLqyL8dcbsAjuzoijqHq3
+v6j77nlc/9jUqF57jiYfdJzO+MYZ+sRk95iOFPMfujBZVN0bH9evb39Mb7Vk6oCPn6HvX/RZTcsY
+8JM73Ko3Hvqt7nl2nRp9TuXNOFpnfvcb+sxUp2pXXqZLHtg14Li5Ou66O/Ttavfojj9akS69//z9
+emDVG3p/V7uUd4AWfuGr+saXD1aBQwrueEg/uiWo7914jg50je8mkGJCu7Xy6tvku+jnOqPCuneK
+W7+2dNgnPPHGY3uqGeVju4Kqf/V+/fbeF7SpzaNph52m73z7ZM3IGudjdLw4C3T8VTfoq9P7/c5i
+c8qTqChhcBXpxB/9QmdWuhXqadC215br97+6Wr5rb9K5szLkLDtGXzvLrXg8D7c8R54+deUvdf7s
+DEkRBVq36Lk7btJv/jRPd1x+qLITOYuzUMdfdf2e+0u4R3VvPqKb77pNy+feqq+O45eDob6uaf31
+Rkrj1wDEj2+j7r3+r9r+yfN1xbcPVUVGs95a+TvddYNDk286a0xP4HgQTjG97+ovN/9NtiU/1Z0L
+A1pz+y/1q2WzdMvZB6r/3cK38T7d8YJTZ/38Hn3C26J3lv1Kv73rXzr0umNUeuLV+t1RAUX2vm2o
+dpVu+UtIx1S4R3380fFr28NX6doXS7X4git00fRc+T/8hx783U26yf0r/eSLk2PyKUHqaNzwil59
++Sk9sqVXXzJ7mJGM8IQn3nhsTzGjfOwN7nxCN961TrO/fq0unt6qv//lLv3qvhm67cI5SvRz/uHZ
+5crMVnb2wD+mGGFltTa1ODX5kJP1ze8s0vw8h4LbH9AVv/folHlv68HXZ+iHvzpL2f/+i35z39/1
+fptb0z5xpv7ros9qWkZE7ese1x1/fFrvNGWocuFntOTrS/SxosG+GfrNkX2gDv3ixbqs5VJd9/Ba
+ffnHn1RW/Uu698F8XbFgmkprBt7+V1W8aajbCarulQGzfXOu/nPDb/ROV4/eu+xG2X51qao/HOL9
+oz3a+rc79dtlr6tWU3T45xbIH03Al2Uftr2fmz2PV9nZB+uE4yv05LP16glL2Y7w0J/nYJ3W3PNr
+3ff3rWq3FWruyd/Wf595sPLsQ7xPXp2euL7/5+YyHentH9P631+yNeNTp+i45f/UO7t6FD0wSw1D
+3GeGmiPT+LrOteuNfrd70QWFeuTBAl2xYJqmOoe5L+54SFfe6dCpH9uqR5/6j+qds3Xy9y7X2Qd7
+ZR/yY0/01w/4CHc/xE249X1t7qnQ50/5tOZM9iq78EB98swLddIBvWruiajzH9fq4ptXacUtl+q8
+r5ypb/7od/q/XX5FJQW3P6Af/M9jeumBq3Xhf/1Vm3a+pHsffF0t4T1/nb7sykf1yrIbdMnXz9BX
+vnmV7nu7Y88T1KhfO1/4jS4//0yd+a3/0d1PP6wrL/mTyZ8JDOTb9qLe8nxGX/nMDBWVzNEXzjxc
+gX+9rJ37bLkRlb34OF10+QU6ZlqOMjy58uZ45PHmyCXJkVmgkrIylZWVqazYrveeXq+q87+iOVm2
+UR5/dKKt/9JfnwnppCt/oEWHz9CkolIdsPBUffc7Ryq0br2aw3vfMNKl9Q9dp++cs1RnnP/jfvdJ
+n7Y/c6t+cN4ZWrr0DJ132e16viYgaf/7+Vb/4Pff9/2SFFb7ukd1/SVf15lnX6grb35Yb/TdOKyk
+buN72hXKU4HLYn/9HZTxi3S28kr2PuH5XEQvPrxWbVENf//d8ZAu+9F9evpPV+nCs8/SN3/8F732
+7vP6zWXn6Yyl5+qyP7yu1kh02Mf6UD2P7alkdI+9YTW9+U+1LThX5xw3S5MOOEKLv/EFZb35gj7w
+mzX52IRrV+nWu9/VrG/drHv+cJ0WZa/WrXf9e8/3jKTA9r/pqY6P68Lvf1GT6p/QTfd8qEMvuV1/
+uPViLdh2n377QoOCrf/Ub297SdlfuFy33PR9fdq1Rrff8pzqR/WwnqHyIxcoa/dGNQ7yc63/7U/t
+Gvp2gjWDzPZ3l06+8ntakDNH37vpch0eHur9o+rdeL9ufKhO1Wddoxt/dJJyXl+lHb7950mkqL9W
+b/27SSWHzlG+Q4oM+XmOqvPNP+nPG+boO7f+UXf/5CQFn71Hq3YFh34f21Sd0u9zs2+UGCiszi2v
+6vXGLJWXeBQZ8j4z9Bx9nEPf7kj3xeD2VXqm+2j94PY7dc3nA3r+vpfUMMzHDpiJMIG4cRTO0YKC
+rXrgdw/qhf9sV2sgKnlm6SuXnq/D8mySImp67SG9UXyafnzjlTptykb96aYn+n6B6f+DddqAP3MP
+/kArBbY/ppse2K25X/+F7vj5V5S7ZqW2mvxDEvvrqa9VsGS2Svd+Xd1lVSrs2qnWfX4m2uQum6/D
+5xbJ/++f6KxzLtQ1T+Vr6TkHK2ef3wWi6njzfj3p+LJOn5ctm6KjPP4oZ936irbnf0pHT+v/FzOb
+cj/2Xf3yys+qbO8ft4J1L+uV8PG64nd36ycnhfTC/S+rMSxFml/W7x/cocN+eLf++qdbdFHVJj28
+bLOMu2X/+3nZ7qHvv0P/cjX2jwnxNX/Rt/Ttby7S/KT809O+T3hGuv/6P/i71k/6qq7/7f/TcV1/
+0803v6VZF96iO68+Qf7/W6Z/N0c00mN9fzy2J7fRPfZGFAqEZbN/9P1hdzil7no1+xL+5/bhhZu1
+6n++qqVLl+79d65++o9WNb75sprnn63FC8uU463Q0WctUsnG1Xp/734U0YzZ+srXvqCF071q+ecr
+6j7sbH1xbpHypnxMp//3d3RSuUMd657TtgPP0Xmfm6fJk+fp8+d9TbMb3xz1E3tHdpEyehvVNcjP
+gI9uv1C+9UPdTlB1/xhstv6LqaPDzBnQh39/U47jL9J5n52jadUn6OsXfEYFiV6LHW7T/119Tt/X
+6Ixz/lt3N52g755WIdew8+9590hvi5raQ8qe+SX99J6btaTcOf6vTbip3/3lTJ1/9QpFj79AS2c6
+1TTCfWb/OUazvjM84nGVNVenn/5JVXgLNfPwQ1QUbFNvZCK3CcQPp3IgfjJm6Zzrr9fs1f+nf624
+VQ/c1Kas6Qt17Clf0amHl0qKyjnpZF1w5tGa5pJmfutb2vJff9a/6k7Vaer3gzVbCmwZcGzjgTZL
+Ch5+iIpeblNvJKial19V6FPf11lHH6gMRfXlpYfo+T+Y8LFjWL3tfjmzM/rKqN2dLXekWx3+qDTI
+OcbZR1yjB+9r1NuPXK/bbnlKc39x6kfnowd36tlHa3TEty9WgV2SwmM+/nB8bZ2K5JQqd4RHS3vu
+x3TW0r33ycMOUeErHfJFJFvuQl30i//f3p3HR1XdfRz/zJqdbCQhAmEThKKCoA+yWQRaBFqoC0uN
+ywPCIy5gENy6aEDQUjeWBAQsKALVhKUqWrCCRVSWWkAQiUUoW4BAAmTPzGRmnj+SQDCZSQghk8D3
+/XrxejH3nnvP7+b+5sy5Z+6505UmzcKw2J0E+huxnbDhAkyUz3MHB5Z4yt/znauZAzoSYoAmox5g
++6TPOFw0kJg6nUQrV7qSC54fyXNWnb/mxt34Tf/riPCzc2vnKL52/Ybb24bjZ7+VzpE7ybW58NbW
+/+anlattb9Cq1/ZaiO50I9Y1f2XVlkb8smkmX7z7Ien2aAodbqAe3WlkCqP35OcZ3qLsgs2INSyA
+08sLCbomgrLhamNQDI1NuWTml4wSmEKaEG4BcJJzooDglmGlU1kMBLbszs9xcCQlm9xdSUwY/Wbp
+XlzYXXHcnO+EoKrnNjnzs7AFRBFcSdHz9ReTe8JTPXYCK40NKDhYWtb79sYsG6E9I85N0zGHNyfM
+nFZl7LXKFEK3hETua1NyNoqzf+Cj2X9h8cY+TBkQ4SV+FyFdxzHx2HJWzprAW3nhXN/nHv43vgd4
+3MYJ3h4Gagqn9+Q/MryFBYPBjH9oOI38jICd/Sc95YynOG4jpsqDd5Lncb+luRgcTWjZCTIYMBig
+5MuVyuus8bO4RGqBBibksnE77TiscXQfMoruQ8DtyObA5lTmzppC7guvMwwwhzcjrKzBtEbSPKSA
+Y7kuCCz/wVpR5Q1tMbkZhQTfEFaa2Aas4U0INunWtPomINQPZ4Ht3PMhXPZ87MZAQqzlPxBd5Kf/
+yAm/lrRubMXoF0WnQYOI/fRz9ucPpWloSdmifWv4p7k/L8SVfSybqrn/6vEPDcKQe5LcYi7IR3fh
+Qb7cmE6LPj2IpSQnG12Qk2Xf/BWwf93bzPzmCAWWSGIb5eH2P7+fCzqQHvPXW+ewep1Ykeq64ILH
+7T1/DdYArEYAA0azGYvJWnpRasRU7u3msa3/ydThO77NAAATKklEQVR9te0NW3XbXr+2I3lyxHyS
+5z/NB65YbvlFb5rtO05kYH27y8hEUEQ0MTHlE9WJPTqAgrQz2GmBBXAVnOS0M5jIIBMUgMFgLM1d
+E8FR/uSfyKGYWEy4yd+3ljWHO9InKoSgG37LrD90p5EBcOaSfiSPsMbVac9tHN26i8Km8efuTinv
+fP1mgj3WYyWz0tiu587uZXvyvv3RKH9yD5/GQThWoPjMUc4W1/iPXUMmAiOiiYkpfSZOTASDe68k
+8bsM7AOivcRvpOBEFkE9xjL1zsexn9pJ6stvkLyhPZO9nRuvd01Uli8ly4M95oyRgpOVxdGBF9pW
+feye91saa6XdHrfHOqcPjK6TBx+LVKa+fQLIFaRg+ys8+sIGskp7KAZLKG1638OQFrn8cCAXF1B8
+ttyHmOM0R3P9aRxckpbnP1grUelyE8GN/ck7nk3ZnY327IxKb3MU3wqMicWcuZ/M0usKR+Z+zgY1
+J+KCDpaT01/N49UVB7CXLnE7Cik2BxNkLksAG4e+/IHGfW/hfF/OUM39V09A6x40P7OJjYfLT352
+k7vrPd5a9R2FZUlaaU66yPpiLm+ntWPMjHm8OedlJg9pecG12AUdWI/5W9Y5fJRZixazePFiFr+V
+xJ+nPEqvanViRaqr7IKnA9GWqvO3ury19RdQ296gVbftddtthHQbx6uL3yflnZlMuMWJLexaohrE
+neQmIm/qSfjuZazecZL83CN8tXwVJ6/rR7sKT+60EHvr/2DZspxP0rLIPb6d1W/+lX/nWwi/vg9x
+B1ex+t/Hyc09xjfLEvndvG/IqXQ2iwtHYT75+fnknDrIzo+TeW2diX4juhLqdbzdQCOP9XiKzVxy
+Yep2UOjAy/Z+xP28C8XrF/DOhh9I/8/nLFm0kTM+f18asQZbcRbacHk9fhc52xeS+NJK9p61Uewy
+YrGYMFvNhFZ1btyO0rt7qstbzniKw1SxOaxQ78XkYnkXUadIHdLAhFw2AW1uo82xZcx5fzP/Sc/i
+zKnD7P50KamHGtOlfShGwHH8Qxa+9zUHju7mk4UL2RbQnVtja9ozsdKs180YNi1j7X/OkJ+1m49S
+dpJfz6asCvi37k+Xos9ZvTWDooIjbFzxL6w9+tDcCjjPkrZlC3vPGInu2gXztvdZm5ZFfvYhvkz9
+hDPtb6N12Qeu7TBffudP147hFzRmXvd/kYwRPXjgFwY+fukVUrbu43jWKQ7t+BvJC3YRPeAOWnn9
+4QI3bqcLtzmQEH8oOL6DT9b8h4JiG84Keeklfw3eOlcXf0wi53m74LmY/PXu0tp6te0NRfXadifF
+GZ8yfeIUVu7OIDtjNx8s2Uqj23tzTYMYmABz7EAmjm1P2vyJjBrzB1Lz+pLwSDfCKulVW1vdzaT7
+otny6uM8lJDMt60eZMKAJlijbmfCo505tPgZHnpoEgt+aM+YiXcQW9lYsyOLT6c8zKhRoxjz2O+Y
++7mDPk9P5d5rq/7lHJOXejzFZvJrTvd2mbyZMJ2t1j4etw+4Lp7JI6L5/t3nefLlNRT0+i03eR8p
+qQMmGjWLxn3gS77PcXs5fhMx/cYxotk2ZvzfA4xKSGJny/sY1ysSi7dzU+5v83V29RshzznjOY4L
+0slDvReTi+X/RtWqU6SOaSqHXDbGiJ6Mfy6PJe8uYerqLOz4Ed6iE30Tfs/dLSwUHjUQ2HowHY+n
+MGXSCcxxvRk1+U5aWKCmdwL6XTuCScPmMXv6ON4P7MjAX/Ui5h/+VW9YQzk5OWRmZpKbm1thncPh
+oKioiJycnErXl5W5KgV04MHJg5iT/Cyj5hpp1m0kk++OK5mnaj/ER8lJuCYt5JlOd5MwbAFzX3mM
+ZXkBNO36a558pHvpsyTAkf41Ox2tefynvzXobf8XyxDAdfdN4/mIJSxf+jKrM/Iwh7eg8+BneG5I
+C6yA57NoonGvBxm0dSbPjllHu94D+dU9I+kwawEzv2jP0y0uLO0pfy2Gss7lSZIXPcNDpxyEte3r
+uRNbD6Wnp3t8HwBe3ydyGZVe8HwKgJmwuE70f3oqd5de8HjN31bVrcRzW1/dFrCu2/YrSZ2+96rd
+tg8h4f4cli6cTGqGP9fdMYZJg6+pX51S/45MeGte5esMVpr0fIhpPR+qsMrSMp6Zs8qX9Seu32PM
+6PfYT0qaiewaz/Nd473HEXADE5ekMNFLEeu1o5nzaumLn9bvrR6PsUXT77kF9Ct76XH7QNoMnsQb
+g8stq8vfSPa7jofnL6iwOKDTU7yzqOyV5+M3BLRhyFOzGFJhjZe/meknf5tzsXTgsQVzPcfqJWc8
+xlH+vP7knPTpWrbcSy7GjeT1mR5eWzwdu4jvGFa+v8ztNDSQIWq5grjJ/eoFJmwYSvIfuxJYS/u0
+HdvON5kt6XZjJGag8NvXmLD8ZyycMbDKrYcPH05iYmK1a+vRoweHDx9m7dq1Hsu0bt0agAMHDngs
+s2fPHuLi4ujevbvHMg1JYmIiKSkpvg6jAfKcvy+9NJCoSxiAuNjcrm1jx47liSee8Fqmf//+fPbZ
+Z17LrFixwqfHodyuidpo6+u2bb+S1OZ7D7ii8n/YsOGkpl45x9OQpKamkpqaWmU+6RzVL9U9byI1
+YXI76tfgtMilMUDedpbO+gzHi09wW+MzfPVxGiE3D78stSUlJfHhhx96j6j0+QNut/fb/eLi4mot
+LmmoPOdveAO5K8KTpk2bVllm5cqVVb5P5GpVt237lUTvPRERaSg0MCFXFL+2Ixk/cA5znx3FQmMo
+TW8axsQhzaq9fW1/q1bdzt66detYt25drdYtDY+n/K2Nhrq+f2Nc3fdKfT8OuTzqW9t+JdGghIiI
+1AeayiEiIiIiVxVNE/AdTeVomDSVQy4nTeUQERERkatSamqqr0OQKugciVw9NDAhIiIiIleVkkdA
+6aK3PtM5Erm6aCqHiIiIiIiIiPiEye3A6OsgREREREREROTqpYEJEREREREREfEZDUyIiIiIiIiI
+iM9oYEJEREREREREfEYDEyIiIiIiIiLiMxqYEBERERERERGf0cCEiIiIiIiIiPiM2dcBiIiI7y1b
+tszXITRo8fHxvg7hqqccvjTKYRER8SUNTIiICPv27aN///4VloeEhDB06FAcDgc7duyosN7lcpGd
+nU3fvn0rXQ/gdDrZv38/+/bto23btrUeu6/Nnj1bF3X1gHK45pTDIiLiaxqYEBERvv/+e4YPH15h
+eWRkJLfccgtFRUVkZGRUWG+z2Thy5AhdunSpdD1AUVERmzZtYtu2bXTr1q3WYxcB5bCIiEhDpoEJ
+ERGpktvtpri4uMLy4uJibDabx/VQ8m2ziK8ph0VEROovPfxSRKSesh94hwkT/8ohB9h/XMT4yatI
+r/y66ZzCnTMY+8Jmct11E6NIeY7D7/Fkwrv811FLOyz4N9NGv8CWvFran4iIiNRLumNCRKQBMMf8
+nAfutRJh8nUkUjNu8tNWMWv2SnaeDqBF9xFMGPcLmvsZLqKcg1Obl5G0ZD17s4oJbd2bEY+Mpl8L
+f87tpfgYHzw/k6Jx0xkRZyndZSH7P5nPvFVbOJxrJLRNb0Y+Npq+zfwwOE+x9rnHWHSwXAjXPsL8
+6bcT/tPQ5Crnwxwux3F4Jb9LTGNk0u/pGggoh0VErggamBARaQCKMzayZHkYz97YnOhj7/HcPBO/
+6bqf1DU7yDC3Y/D4p4jv1KjcFm5sh9bw0rR/0iZhCg90DPZZ7AIU7uHt1z7BcM+LzOti54vZM/jz
+ira8Ht8KSzXLGTO/IPnNrTR5+E9Mal/Et3+bx4JXU2nz+v20Mts5tXcbmzetIeXHQn5dbpfF6R+T
+lJJBj4mv8fs4Jz+ueYM5yf/kxmkDiCo+y5H8pgz949P0jy65idJoDSX0Ui7oXHl8995UXvn0O7Ks
+bfnVhGeI79QIIw4yNi8lacl6fjhtJrbzYMY8ehc3hJrAcYIvFs3i3X/uJ9sQwc8GP8LEYY3Z+PIc
+duUVsHfyK9z65lOXEJRcMh/m8DmOI6xJep9DBTeeX3Y5clhEROqcpnKIiDRAjoNr+Xt+bybNnscL
+d9j5x7sbOVluGnxx5iaS/rSWxqN/T7wGJXyu6MAGdvr3ZWTfNkRGdWDgb2/BvmUTRxzVL1d0aBuH
+on/FXd2a0iiiDb2H3cO1Od+xL9sF7iJOpO0lvTiUcMuFV2QGvxACLRb8AgIJCAgk0N+KJTAQixHc
+9iyO2SJp1zqWmJgYYmJiiAr3v6TOgePEJr509uPZ5PlMGVTMZ0s3ccoJzuNreWP+HtqOfY1FC6dy
+V9B63nhzG2fdbnK3/4XF33fg0TfeYv6UQTjWLWJtRjRDnhvPjcEdGP+qBiV8zZc5XMJB+t8X8HnI
+zUT5nV96OXJYRETqnu6YEBFpiAJ/xrBhPYkLBMctnYncdJZCF4QCrpydvDP1c3a1eZa5t0ag2R++
+V5BxHEdUd6JLv1q2xlxHRN5Wzjjg/NfNbq/lWnYcxyt/CCLSVFr28C6Om2OIDjCCsRE33DWWG2w/
+MH/PvAvqNkX1YcyAT3nmjw+zFMDSgVGvdifMAMXZRznrzOHrt6awPC0Tvza3MWL0ndwUUfPugTGk
+K/cOL83NmzsT8WUORS4nmds3kXVDPHd3iSEI6H3vXXzy1Hr2FdxKe8BVeJrM7GI6tvs1Ly4ahNtk
+hsIahyG1zJc5DOBIX8v89U24/4k2rJj6zbnlzsuQwyIiUvc0oCwi0gCZgqMJLbsYMBgwlPuCsSj9
+a05f342g3av58pR+TaA+KMy2YQ7yO/eha7QGYXXlk2Mr/5RSp9dyRv9wGodaMbhtHNu8hBf//C9a
+x8dzfZD3uu3/XcXsj00MmjCD+fOmMa73WVLmruO4E5yF+fiFRdC0x/089buH6WNaz2szPq7yIave
+mIKjaVQhN53knSwk6JoIrGXHFhRDY1MumfkuQrqOY+JAE1/MmsD9D05g+pKvOGbXE1zrE1/mMMXH
++XTBP4i87z5uanRh1/Vy5LCIiNQ9DSeLiDREXuZP+7cdzcQxN3OgcCLzlu2gR8LNhGi+tU8FhPrh
+LLDhKn3tsudjNwYSYi1/YkxVlnPl7uWj5Nn87VQH7nnuNe5o36iKO2IcpH/1NTmdxjKiVysCgNvj
+7+Wr8SvYnnkHg9s+yJ/eOF86dvT9bJ7wId+d+TVNo2r43UWluWYiODqAgrQz2GmBBXAVnOS0M5jI
+ICMFJ7MI6jGWqXc+jv3UTlJffoPkDR2Y/vOahSC1z3c57CTjswWsDbuXF7uGYjx94Vq/y5HDIiJS
+59Rii4hcYYxmf8zGEDrfO4zYne/wt/02X4d01QuMicWcuZ/M0vn4jsz9nA1qTsQFTw00eC3nLtpP
+yrSZ7Gz7ODP/PIHBVV7QlezTGmAGp4vz32u7cGHG3+wiO20TG787Q9l9NQaTBYvRiLnWewcmIm/q
+SfjuZazecZL83CN8tXwVJ6/rR7sAFznbF5L40kr2nrVR7DJisZgwW00lYxxuB4UO3T3ha77LYRtH
+/5VGxuZZPBL/W+Iff4v/FuxixtgnWLyvsA5zWERELic12yIiVyhTdB9GDzDyj0XrL2k/LpeL3Nxc
+bDZbhX92ux23213puvJlrnb+rfvTpehzVm/NoKjgCBtX/Atrjz40twLOs6Rt2cLeM04v5dzkfpvC
+uqLe3NW9MUWZGWRkZJBx6gyFLm81m4m++VZCd73Du+vTOJ6+m7VvL+XANb34WZgZY+4W3p4xkw++
+PUZ2djpbU9/jQLPbuD6s9rsH5tiBTBzbnrT5Exk15g+k5vUl4ZFuhBlNxPQbx4hm25jxfw8wKiGJ
+nS3vY1yvSIx+zeneLpM3E6ZfUt3K4UvnuxwO4MaEZObNSyIpKYlZU0fS3L89j8xIZHgrvzrNYRER
+uXw0lUNEpJ6ytn6Q2WW3KF87mjmvlv4/biSvzzxfzlL+dednWNi5bI0fre6dxZJLiMHlcpGZmcnq
+1as5ePBghfVOp5PCwkK2b9/Onj17Kt2H0+mkuPgqn/Ad0IEHJw9iTvKzjJprpFm3kUy+O67kmYH2
+Q3yUnIRr0kI6dPZUzsHhvYfIP76DFxM+LLfjVoyZ+zK/bOz5IswSdydPPZzPvKXTeCLLQNT1A3j8
+yQHEmgzQ9WGeHJLMwtcTeK8wgCadBzH+yV8SU8Mnploqy83Xy15ZadLzIab1fKjCdoaANgx5ahZD
+KqyJpt9zC+hXs3AA5XCt8VkOG7CERBBZ+srlDsZstNIoIpwgM7WewyIi4huGle8vczsNlqpLiojI
+FWv48OEkJiZWWG40GsnIyGDDhg0etzWbzTRr1qzSi74yTqeTrKwsxo8fXwvR1i+JiYmkpKT4Ooyr
+nnK45pTDIiLiSya3Q3dMiIhIiWPHjlVY5nQ6OXr0KHFxcV63dblcVZZp165dpXWI1BblsIiISMOk
+OyZERIQPPvjA1yE0aEOHDvV1CFc95fClUQ6LiIivmNwODUyIiIiIiIiIiG+Y3A79KoeIiIiIiIiI
++I4GJkRERERERETEZzQwISIiIiIiIiI+Y4aSOR0iIiIiIiIiInXt/wG2jwXZkqxE2QAAAABJRU5E
+rkJggg==
+"
+ height="705"
+ width="1062" />
<path
+ inkscape:connector-curvature="0"
style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
- d="m 297.14643,783.42128 c -42.50577,2.24797 -82.41658,5.96208 -119.17548,23.35351"
- id="path3756-3"
+ d="m 607.21033,-17.207119 c -35.78105,-2.8028 -28.19512,-2.62422 -82.34995,0.12"
+ id="path3756-3-5-2-3"
sodipodi:nodetypes="cc" />
<text
xml:space="preserve"
style="font-size:40px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
- x="313.53159"
- y="700.992"
- id="text3662-9-5"><tspan
+ x="614.21716"
+ y="-31.795189"
+ id="text3662-9-5-6-4"><tspan
sodipodi:role="line"
- id="tspan3664-7-0"
- x="313.53159"
- y="700.992"
- style="font-size:16px">Link</tspan></text>
- <path
- style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
- d="m 309.28326,695.63445 c -42.50577,2.24797 -47.56632,-1.10899 -96.44705,-0.89015"
- id="path3756-3-5"
- sodipodi:nodetypes="cc" />
+ id="tspan3664-7-0-0-9"
+ x="614.21716"
+ y="-31.795189"
+ style="font-size:16px">Red Color indicating <tspan
+ style="font-weight:bold;-inkscape-font-specification:Andale Mono Bold"
+ id="tspan3304">host</tspan></tspan><tspan
+ sodipodi:role="line"
+ x="614.21716"
+ y="-11.795189"
+ style="font-size:16px"
+ id="tspan3241-9">utilization by "compute"</tspan><tspan
+ sodipodi:role="line"
+ x="614.21716"
+ y="8.2048111"
+ style="font-size:16px"
+ id="tspan3243-0">task category</tspan></text>
<path
+ inkscape:connector-curvature="0"
style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
- d="m 303.10481,610.86601 c -42.50577,2.24797 -47.56632,-1.10899 -96.44705,-0.89015"
+ d="m 1084.6049,379.41314 c 56.0657,-3.46632 36.7194,-26.82327 43.5529,-53.74729"
id="path3756-3-5-2"
sodipodi:nodetypes="cc" />
<text
xml:space="preserve"
style="font-size:40px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
- x="307.61954"
- y="596.80054"
+ x="883.40533"
+ y="362.49051"
id="text3662-9-5-6"><tspan
sodipodi:role="line"
id="tspan3664-7-0-0"
- x="307.61954"
- y="596.80054"
+ x="883.40533"
+ y="362.49051"
style="font-size:16px">Red Color indicating <tspan
style="font-weight:bold;-inkscape-font-specification:Andale Mono Bold"
id="tspan3306">link</tspan></tspan><tspan
sodipodi:role="line"
- x="307.61954"
- y="616.80054"
+ x="883.40533"
+ y="382.49051"
style="font-size:16px"
id="tspan3241">utilization by "compute"</tspan><tspan
sodipodi:role="line"
- x="307.61954"
- y="636.80054"
+ x="883.40533"
+ y="402.49051"
style="font-size:16px"
id="tspan3243">task category</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:40px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
+ x="618.97284"
+ y="474.01813"
+ id="text3662-9"><tspan
+ sodipodi:role="line"
+ id="tspan3664-7"
+ x="618.97284"
+ y="474.01813"
+ style="font-size:16px">Hosts are represented</tspan><tspan
+ sodipodi:role="line"
+ x="618.97284"
+ y="494.01813"
+ style="font-size:16px"
+ id="tspan3274">by squares</tspan></text>
<path
+ inkscape:connector-curvature="0"
style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
- d="m 329.3688,317.92177 c 35.78105,-2.8028 28.19512,-2.62422 82.34995,0.12"
- id="path3756-3-5-2-3"
+ d="m 616.2287,473.17545 c -42.50578,2.24797 -50.98802,-5.46649 -36.31835,-38.07506"
+ id="path3756-3"
sodipodi:nodetypes="cc" />
<text
xml:space="preserve"
style="font-size:40px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
- x="131.80853"
- y="310.47656"
- id="text3662-9-5-6-4"><tspan
- sodipodi:role="line"
- id="tspan3664-7-0-0-9"
- x="131.80853"
- y="310.47656"
- style="font-size:16px">Red Color indicating <tspan
- style="font-weight:bold;-inkscape-font-specification:Andale Mono Bold"
- id="tspan3304">host</tspan></tspan><tspan
+ x="856.8996"
+ y="105.03188"
+ id="text3662-9-5"><tspan
sodipodi:role="line"
- x="131.80853"
- y="330.47656"
- style="font-size:16px"
- id="tspan3241-9">utilization by "compute"</tspan><tspan
+ id="tspan3664-7-0"
+ x="856.8996"
+ y="105.03188"
+ style="font-size:16px">Links are</tspan><tspan
sodipodi:role="line"
- x="131.80853"
- y="350.47656"
+ x="856.8996"
+ y="125.03188"
style="font-size:16px"
- id="tspan3243-0">task category</tspan></text>
+ id="tspan3272">represented by diamonds</tspan></text>
+ <path
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+ d="m 845.50838,112.53148 c -42.50578,2.24797 -18.9949,48.89101 -47.87563,80.53842"
+ id="path3756-3-5"
+ sodipodi:nodetypes="cc" />
</g>
</svg>
+++ /dev/null
-# Command line : \r
-# Windows -> tesh --directory=%EXAMPLE_DIR% --file=SimGrid.tesh --log="log.thresh:info tesh.fmt:%m%n" --keep-going-unit\r
-# Linux -> tesh --directory=$EXAMPLE_DIR --file=SimGrid.tesh --log="log.thresh:info tesh.fmt:%m%n" --keep-going-unit\r
-D Examples (MSG API functionnality checking (for C, C++ and Java))\r
-\r
-$ cd ${EXAMPLE_DIR:=.}\r
-\r
-$ cd msg\r
-\r
-# Masterslave OK\r
-! include masterslave/masterslave.tesh\r
-\r
-# Outputs difference : __FILE__ difference between Windows and Linux\r
-#! include masterslave/masterslave_bypass.tesh\r
-\r
-# Outputs difference\r
-#! include masterslave/masterslave_failure.tesh\r
-\r
-! include masterslave/masterslave_forwarder.tesh\r
-\r
-# Parllel task (not work)\r
-#! include parallel_task/parallel_task.tesh\r
-\r
-# Priority\r
-! include priority/priority.tesh\r
-\r
-# Properties\r
-! include properties/msg_prop.tesh\r
-\r
-# Send/Receive\r
-\r
-! include sendrecv/sendrecv_CLM03.tesh\r
-\r
-# format d'affichage de %g\r
-! include sendrecv/sendrecv_KCCFLN05.tesh\r
-\r
-\r
-! include sendrecv/sendrecv_Reno.tesh\r
-\r
-! include sendrecv/sendrecv_Vegas.tesh\r
-\r
-# Suspend\r
-! include suspend/suspend.tesh\r
-\r
-\r
-##############################################################################################\r
-# java examples\r
-\r
-$ cd ${MSG4JAVA_CLASSPATH}\simgrid\msg\r
-\r
-$ javac *.java\r
-> Note: Some input files use unchecked or unsafe operations.\r
-> Note: Recompile with -Xlint:unchecked for details.\r
-\r
-$ cd ${EXAMPLE_DIR:=.}\r
-\r
-$ cd java/basic\r
-\r
-! include basic.tesh\r
-\r
-$ cd ..\r
-\r
-$ cd comm_time\r
-\r
-! include comm_time.tesh\r
-\r
-$ cd ..\r
-\r
-$ cd ping_pong\r
-\r
-! include ping_pong.tesh\r
-\r
-$ cd ..\r
-\r
-$ cd suspend\r
-\r
-! include suspend.tesh\r
-\r
-$ cd ..\r
-\r
-$ cd autoDestination\r
-\r
-! include autoDestination.tesh\r
-\r
-$ cd ..\r
-\r
-$ cd explicitDestination\r
-\r
-! include explicitDestination.tesh\r
-\r
-##########################################################################################\r
-# cxx examples \r
-\r
-$ cd ..\r
-\r
-$ cd ..\r
-\r
-$ cd cxx/basic\r
-\r
-! include basic.tesh\r
-\r
-$ cd ..\r
-\r
-$ cd comm_time\r
-\r
-! include comm_time.tesh\r
-\r
-$ cd ..\r
-\r
-$ cd ping_pong\r
-\r
-! include ping_pong.tesh\r
-\r
-$ cd ..\r
-\r
-$ cd suspend\r
-\r
-! include suspend.tesh\r
-\r
-$ cd ..\r
-\r
-$ cd autoDestination\r
-\r
-! include autoDestination.tesh\r
-\r
-$ cd ..\r
-\r
-$ cd explicitDestination\r
-\r
-! include explicitDestination.tesh\r
-\r
-\r
-\r
-\r
-\r
-\r
#! ./tesh
! output sort
-$ $SG_TEST_EXENV ./all2all_simulator$EXEEXT ${srcdir:=.}/../../msg/small_platform.xml ${srcdir:=.}/all2all.xml "--log=root.fmt:[%10.6r]%e(%.2i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ./all2all_simulator$EXEEXT ${srcdir:=.}/../../msg/small_platform.xml ${srcdir:=.}/all2all.xml --cfg=network/crosstraffic:0 "--log=root.fmt:[%10.6r]%e(%.2i:%P@%h)%e%m%n"
+> [ 0.000000] (00:@) Configuration change: Set 'network/crosstraffic' to '0'
> [ 0.000000] (01:sender@Tremblay) Launch current node
> [ 0.000000] (03:sender@Jupiter) Launch current node
> [ 0.000000] (05:sender@Fafard) Launch current node
#! ./tesh
-! output sort
-$ $SG_TEST_EXENV ./all2all_simulator$EXEEXT ${srcdir:=.}/../../msg/small_platform.xml ${srcdir:=.}/all2all.xml "--log=root.fmt:[%10.6r]%e(%.2i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ./all2all_simulator$EXEEXT ${srcdir:=.}/../../msg/small_platform.xml ${srcdir:=.}/all2all.xml --cfg=network/crosstraffic:0 "--log=root.fmt:[%10.6r]%e(%.2i:%P@%h)%e%m%n"
+> [ 0.000000] (00:@) Configuration change: Set 'network/crosstraffic' to '0'
> [ 0.000000] (01:sender@Tremblay) Launch current node
> [ 0.000000] (03:sender@Jupiter) Launch current node
> [ 0.000000] (05:sender@Fafard) Launch current node
> [ 0.000000] (07:sender@Ginette) Launch current node
> [ 0.000000] (09:sender@Bourassa) Launch current node
-> [ 0.000156] (02:receiver@Tremblay) Listening on port 4000 (expecting 4 messages)
> [ 0.000156] (04:receiver@Jupiter) Listening on port 4000 (expecting 4 messages)
-> [ 0.000156] (06:receiver@Fafard) Listening on port 4000 (expecting 4 messages)
-> [ 0.000156] (08:receiver@Ginette) Listening on port 4000 (expecting 4 messages)
+> [ 0.000156] (02:receiver@Tremblay) Listening on port 4000 (expecting 4 messages)
> [ 0.000156] (10:receiver@Bourassa) Listening on port 4000 (expecting 4 messages)
-> [ 0.015510] (01:sender@Tremblay) Sent Data from Tremblay to Jupiter
-> [ 0.015510] (02:receiver@Tremblay) Got Data from Jupiter:5000 (still 3 to go)
-> [ 0.015510] (03:sender@Jupiter) Sent Data from Jupiter to Tremblay
-> [ 0.015510] (04:receiver@Jupiter) Got Data from Tremblay:5000 (still 3 to go)
-> [ 0.036162] (02:receiver@Tremblay) Got Data from Fafard:5000 (still 2 to go)
-> [ 0.036162] (05:sender@Fafard) Sent Data from Fafard to Tremblay
-> [ 0.036318] (01:sender@Tremblay) Sent Data from Tremblay to Fafard
-> [ 0.036318] (06:receiver@Fafard) Got Data from Tremblay:5000 (still 3 to go)
-> [ 0.049459] (02:receiver@Tremblay) Got Data from Ginette:5000 (still 1 to go)
-> [ 0.049459] (07:sender@Ginette) Sent Data from Ginette to Tremblay
-> [ 0.049771] (01:sender@Tremblay) Sent Data from Tremblay to Ginette
-> [ 0.049771] (08:receiver@Ginette) Got Data from Tremblay:5000 (still 3 to go)
-> [ 0.069896] (02:receiver@Tremblay) Got Data from Bourassa:5000 (still 0 to go)
-> [ 0.069896] (02:receiver@Tremblay) Exiting GRAS
-> [ 0.069896] (09:sender@Bourassa) Sent Data from Bourassa to Tremblay
-> [ 0.070364] (01:sender@Tremblay) Sent Data from Tremblay to Bourassa
-> [ 0.070364] (01:sender@Tremblay) Exiting GRAS
-> [ 0.070364] (10:receiver@Bourassa) Got Data from Tremblay:5000 (still 3 to go)
-> [ 0.072245] (03:sender@Jupiter) Sent Data from Jupiter to Fafard
-> [ 0.072245] (04:receiver@Jupiter) Got Data from Fafard:5000 (still 2 to go)
-> [ 0.072245] (05:sender@Fafard) Sent Data from Fafard to Jupiter
-> [ 0.072245] (06:receiver@Fafard) Got Data from Jupiter:5000 (still 2 to go)
-> [ 0.085572] (05:sender@Fafard) Sent Data from Fafard to Ginette
-> [ 0.085572] (08:receiver@Ginette) Got Data from Fafard:5000 (still 2 to go)
-> [ 0.091209] (05:sender@Fafard) Sent Data from Fafard to Bourassa
-> [ 0.091209] (05:sender@Fafard) Exiting GRAS
-> [ 0.091209] (10:receiver@Bourassa) Got Data from Fafard:5000 (still 2 to go)
-> [ 0.100817] (04:receiver@Jupiter) Got Data from Ginette:5000 (still 1 to go)
-> [ 0.100817] (07:sender@Ginette) Sent Data from Ginette to Jupiter
-> [ 0.114144] (03:sender@Jupiter) Sent Data from Jupiter to Ginette
-> [ 0.114144] (06:receiver@Fafard) Got Data from Ginette:5000 (still 1 to go)
-> [ 0.114144] (07:sender@Ginette) Sent Data from Ginette to Fafard
-> [ 0.114144] (08:receiver@Ginette) Got Data from Jupiter:5000 (still 1 to go)
-> [ 0.136528] (04:receiver@Jupiter) Got Data from Bourassa:5000 (still 0 to go)
-> [ 0.136528] (04:receiver@Jupiter) Exiting GRAS
-> [ 0.136528] (09:sender@Bourassa) Sent Data from Bourassa to Jupiter
-> [ 0.142165] (06:receiver@Fafard) Got Data from Bourassa:5000 (still 0 to go)
-> [ 0.142165] (06:receiver@Fafard) Exiting GRAS
-> [ 0.142165] (09:sender@Bourassa) Sent Data from Bourassa to Fafard
-> [ 0.150011] (03:sender@Jupiter) Sent Data from Jupiter to Bourassa
-> [ 0.150011] (03:sender@Jupiter) Exiting GRAS
-> [ 0.150011] (10:receiver@Bourassa) Got Data from Jupiter:5000 (still 1 to go)
-> [ 0.155272] (08:receiver@Ginette) Got Data from Bourassa:5000 (still 0 to go)
-> [ 0.155272] (08:receiver@Ginette) Exiting GRAS
-> [ 0.155272] (09:sender@Bourassa) Sent Data from Bourassa to Ginette
-> [ 0.155272] (09:sender@Bourassa) Exiting GRAS
-> [ 0.162962] (07:sender@Ginette) Sent Data from Ginette to Bourassa
-> [ 0.162962] (07:sender@Ginette) Exiting GRAS
-> [ 0.162962] (10:receiver@Bourassa) Got Data from Ginette:5000 (still 0 to go)
-> [ 0.162962] (10:receiver@Bourassa) Exiting GRAS
+> [ 0.000156] (08:receiver@Ginette) Listening on port 4000 (expecting 4 messages)
+> [ 0.000156] (06:receiver@Fafard) Listening on port 4000 (expecting 4 messages)
+> [ 0.015433] (01:sender@Tremblay) Sent Data from Tremblay to Jupiter
+> [ 0.015433] (04:receiver@Jupiter) Got Data from Tremblay:5000 (still 3 to go)
+> [ 0.020592] (09:sender@Bourassa) Sent Data from Bourassa to Tremblay
+> [ 0.020592] (02:receiver@Tremblay) Got Data from Bourassa:5000 (still 3 to go)
+> [ 0.033889] (07:sender@Ginette) Sent Data from Ginette to Tremblay
+> [ 0.033889] (02:receiver@Tremblay) Got Data from Ginette:5000 (still 2 to go)
+> [ 0.036241] (01:sender@Tremblay) Sent Data from Tremblay to Fafard
+> [ 0.036241] (06:receiver@Fafard) Got Data from Tremblay:5000 (still 3 to go)
+> [ 0.049694] (01:sender@Tremblay) Sent Data from Tremblay to Ginette
+> [ 0.049694] (08:receiver@Ginette) Got Data from Tremblay:5000 (still 3 to go)
+> [ 0.054541] (05:sender@Fafard) Sent Data from Fafard to Tremblay
+> [ 0.054541] (02:receiver@Tremblay) Got Data from Fafard:5000 (still 1 to go)
+> [ 0.056459] (09:sender@Bourassa) Sent Data from Bourassa to Jupiter
+> [ 0.056459] (04:receiver@Jupiter) Got Data from Bourassa:5000 (still 2 to go)
+> [ 0.062096] (09:sender@Bourassa) Sent Data from Bourassa to Fafard
+> [ 0.062096] (06:receiver@Fafard) Got Data from Bourassa:5000 (still 2 to go)
+> [ 0.069818] (03:sender@Jupiter) Sent Data from Jupiter to Tremblay
+> [ 0.069818] (02:receiver@Tremblay) Got Data from Jupiter:5000 (still 0 to go)
+> [ 0.069818] (02:receiver@Tremblay) Exiting GRAS
+> [ 0.070286] (01:sender@Tremblay) Sent Data from Tremblay to Bourassa
+> [ 0.070286] (01:sender@Tremblay) Exiting GRAS
+> [ 0.070286] (10:receiver@Bourassa) Got Data from Tremblay:5000 (still 3 to go)
+> [ 0.075203] (09:sender@Bourassa) Sent Data from Bourassa to Ginette
+> [ 0.075203] (09:sender@Bourassa) Exiting GRAS
+> [ 0.075203] (08:receiver@Ginette) Got Data from Bourassa:5000 (still 2 to go)
+> [ 0.085031] (07:sender@Ginette) Sent Data from Ginette to Jupiter
+> [ 0.085031] (04:receiver@Jupiter) Got Data from Ginette:5000 (still 1 to go)
+> [ 0.105901] (03:sender@Jupiter) Sent Data from Jupiter to Fafard
+> [ 0.105901] (06:receiver@Fafard) Got Data from Jupiter:5000 (still 1 to go)
+> [ 0.119072] (07:sender@Ginette) Sent Data from Ginette to Fafard
+> [ 0.119072] (06:receiver@Fafard) Got Data from Ginette:5000 (still 0 to go)
+> [ 0.119072] (06:receiver@Fafard) Exiting GRAS
+> [ 0.120957] (05:sender@Fafard) Sent Data from Fafard to Jupiter
+> [ 0.120957] (04:receiver@Jupiter) Got Data from Fafard:5000 (still 0 to go)
+> [ 0.120957] (04:receiver@Jupiter) Exiting GRAS
+> [ 0.132180] (07:sender@Ginette) Sent Data from Ginette to Bourassa
+> [ 0.132180] (07:sender@Ginette) Exiting GRAS
+> [ 0.132180] (10:receiver@Bourassa) Got Data from Ginette:5000 (still 2 to go)
+> [ 0.134629] (03:sender@Jupiter) Sent Data from Jupiter to Ginette
+> [ 0.134629] (08:receiver@Ginette) Got Data from Jupiter:5000 (still 1 to go)
+> [ 0.147800] (05:sender@Fafard) Sent Data from Fafard to Ginette
+> [ 0.147800] (08:receiver@Ginette) Got Data from Fafard:5000 (still 0 to go)
+> [ 0.147800] (08:receiver@Ginette) Exiting GRAS
+> [ 0.170496] (03:sender@Jupiter) Sent Data from Jupiter to Bourassa
+> [ 0.170496] (03:sender@Jupiter) Exiting GRAS
+> [ 0.170496] (10:receiver@Bourassa) Got Data from Jupiter:5000 (still 1 to go)
+> [ 0.175976] (05:sender@Fafard) Sent Data from Fafard to Bourassa
+> [ 0.175976] (05:sender@Fafard) Exiting GRAS
+> [ 0.175976] (10:receiver@Bourassa) Got Data from Fafard:5000 (still 0 to go)
+> [ 0.175976] (10:receiver@Bourassa) Exiting GRAS
! output sort 20
-$ $SG_TEST_EXENV ./pmm_simulator$EXEEXT ${srcdir:=.}/../../msg/msg_platform.xml ${srcdir:=.}/pmm.xml "--log=root.fmt:[%10.6r]%e(%2i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ./pmm_simulator$EXEEXT ${srcdir:=.}/../../msg/msg_platform.xml ${srcdir:=.}/pmm.xml --cfg=network/crosstraffic:0 "--log=root.fmt:[%10.6r]%e(%2i:%P@%h)%e%m%n"
+> [ 0.000000] ( 0:@) Configuration change: Set 'network/crosstraffic' to '0'
> [ 0.000156] ( 1:master@Jacquelin) Wait for peers for 2 sec
> [ 0.000156] ( 2:slave@Boivin) Sensor 0 starting
> [ 0.000156] ( 3:slave@Jean_Yves) Sensor 1 starting
#! ./tesh
! output sort 20
-$ $SG_TEST_EXENV ./pmm_simulator$EXEEXT ${srcdir:=.}/../../msg/msg_platform.xml ${srcdir:=.}/pmm.xml "--log=root.fmt:[%10.6r]%e(%2i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ./pmm_simulator$EXEEXT ${srcdir:=.}/../../msg/msg_platform.xml ${srcdir:=.}/pmm.xml --cfg=network/crosstraffic:0 "--log=root.fmt:[%10.6r]%e(%2i:%P@%h)%e%m%n"
+> [ 0.000000] ( 0:@) Configuration change: Set 'network/crosstraffic' to '0'
> [ 0.000156] ( 1:master@Jacquelin) Wait for peers for 2 sec
> [ 0.000156] ( 2:slave@Boivin) Sensor 0 starting
> [ 0.000156] ( 3:slave@Jean_Yves) Sensor 1 starting
> [ 0.000156] ( 8:slave@McGee) Sensor 6 starting
> [ 0.000156] ( 9:slave@Gatien) Sensor 7 starting
> [ 0.000156] (10:slave@Laroche) Sensor 8 starting
-> [ 2.416827] ( 1:master@Jacquelin) Got only 2 pals (of 9). Wait 2 more seconds
-> [ 4.476356] ( 1:master@Jacquelin) Got only 3 pals (of 9). Wait 2 more seconds
-> [ 6.476356] ( 1:master@Jacquelin) Got only 5 pals (of 9). Wait 2 more seconds
-> [ 9.355676] ( 1:master@Jacquelin) Got only 6 pals (of 9). Wait 2 more seconds
-> [ 12.524103] ( 1:master@Jacquelin) Good. Got 9 pals
-> [ 12.525507] ( 1:master@Jacquelin) XXXXXXXXXXXXXXXXXXXXXX begin Multiplication
-> [ 14.369401] ( 2:slave@Boivin) Receive my pos (0,0) and assignment
-> [ 15.113688] ( 3:slave@Jean_Yves) Receive my pos (0,1) and assignment
-> [ 16.572089] ( 4:slave@TeX) Receive my pos (0,2) and assignment
-> [ 16.576641] ( 5:slave@Geoff) Receive my pos (1,0) and assignment
-> [ 18.043825] ( 6:slave@Disney) Receive my pos (1,1) and assignment
-> [ 20.492624] ( 7:slave@iRMX) Receive my pos (1,2) and assignment
-> [ 21.207375] ( 8:slave@McGee) Receive my pos (2,0) and assignment
-> [ 22.232792] ( 9:slave@Gatien) Receive my pos (2,1) and assignment
-> [ 23.681469] (10:slave@Laroche) Receive my pos (2,2) and assignment
-> [ 73.318369] ( 1:master@Jacquelin) XXXXXXXXXXXXXXXXXXXXXX Ok, the result matches expectations
-> [ 74.156986] ( 2:slave@Boivin) Exiting GRAS
-> [ 74.896643] ( 3:slave@Jean_Yves) Exiting GRAS
-> [ 76.345754] ( 4:slave@TeX) Exiting GRAS
-> [ 76.350179] ( 5:slave@Geoff) Exiting GRAS
-> [ 77.808044] ( 6:slave@Disney) Exiting GRAS
-> [ 80.241254] ( 7:slave@iRMX) Exiting GRAS
-> [ 80.951545] ( 8:slave@McGee) Exiting GRAS
-> [ 81.970467] ( 9:slave@Gatien) Exiting GRAS
-> [ 83.410027] ( 1:master@Jacquelin) Exiting GRAS
-> [ 83.410027] (10:slave@Laroche) Exiting GRAS
+> [ 2.879230] ( 1:master@Jacquelin) Got only 1 pals (of 9). Wait 2 more seconds
+> [ 5.915815] ( 1:master@Jacquelin) Got only 4 pals (of 9). Wait 2 more seconds
+> [ 7.915815] ( 1:master@Jacquelin) Got only 6 pals (of 9). Wait 2 more seconds
+> [ 10.795135] ( 1:master@Jacquelin) Got only 7 pals (of 9). Wait 2 more seconds
+> [ 12.795135] ( 1:master@Jacquelin) Good. Got 9 pals
+> [ 12.796539] ( 1:master@Jacquelin) XXXXXXXXXXXXXXXXXXXXXX begin Multiplication
+> [ 15.245272] (10:slave@Laroche) Receive my pos (0,0) and assignment
+> [ 16.089198] ( 2:slave@Boivin) Receive my pos (0,1) and assignment
+> [ 16.833513] ( 3:slave@Jean_Yves) Receive my pos (0,2) and assignment
+> [ 18.291859] ( 4:slave@TeX) Receive my pos (1,0) and assignment
+> [ 18.296410] ( 5:slave@Geoff) Receive my pos (1,1) and assignment
+> [ 19.763580] ( 6:slave@Disney) Receive my pos (1,2) and assignment
+> [ 22.212379] ( 7:slave@iRMX) Receive my pos (2,0) and assignment
+> [ 22.927110] ( 8:slave@McGee) Receive my pos (2,1) and assignment
+> [ 23.952497] ( 9:slave@Gatien) Receive my pos (2,2) and assignment
+> [ 73.553493] ( 1:master@Jacquelin) XXXXXXXXXXXXXXXXXXXXXX Ok, the result matches expectations
+> [ 74.993053] (10:slave@Laroche) Exiting GRAS
+> [ 75.831669] ( 2:slave@Boivin) Exiting GRAS
+> [ 76.571327] ( 3:slave@Jean_Yves) Exiting GRAS
+> [ 78.020437] ( 4:slave@TeX) Exiting GRAS
+> [ 78.024862] ( 5:slave@Geoff) Exiting GRAS
+> [ 79.482727] ( 6:slave@Disney) Exiting GRAS
+> [ 81.915937] ( 7:slave@iRMX) Exiting GRAS
+> [ 82.626229] ( 8:slave@McGee) Exiting GRAS
+> [ 83.645150] ( 1:master@Jacquelin) Exiting GRAS
+> [ 83.645150] ( 9:slave@Gatien) Exiting GRAS
p Testing the Chord implementation with MSG
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/chord$EXEEXT -nb_bits=6 ${srcdir:=.}/../msg_platform.xml ${srcdir:=.}/chord.xml --log=msg_chord.thres:verbose "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/chord$EXEEXT -nb_bits=6 ${srcdir:=.}/../msg_platform.xml ${srcdir:=.}/chord.xml --cfg=network/crosstraffic:0 --log=msg_chord.thres:verbose "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'network/crosstraffic' to '0'
> [ 0.000000] (1:node@Gatien) Joining the ring with id 48, knowing node 1
> [ 0.000000] (2:node@McGee) Joining the ring with id 42, knowing node 1
> [ 0.000000] (3:node@iRMX) Joining the ring with id 38, knowing node 1
> [547.073600] (3:node@iRMX) Predecessor: 32
> [649.756626] (0:@) Messages created: 838
> [649.756626] (0:@) Simulated time: 649.757
+
+! output sort
+$ $SG_TEST_EXENV ${bindir:=.}/chord$EXEEXT ${srcdir:=.}/../../platforms/One_cluster.xml ${srcdir:=.}/chord10.xml --cfg=network/crosstraffic:0 --log=msg_chord.thres:verbose "--log=root.fmt:[%11.6r]%e(%i:%P@%h)%e%m%n" --cfg=network/model:Constant
+> [ 0.000000] (0:@) Configuration change: Set 'network/crosstraffic' to '0'
+> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'Constant'
+> [ 0.000000] (0:@) Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [ 0.000000] (10:node@c-9.me) Joining the ring with id 2015253, knowing node 1319738
+> [ 0.000000] (1:node@c-0.me) My finger table:
+> [ 0.000000] (1:node@c-0.me) Start | Succ
+> [ 0.000000] (1:node@c-0.me) 43 | 42
+> [ 0.000000] (1:node@c-0.me) 44 | 42
+> [ 0.000000] (1:node@c-0.me) 46 | 42
+> [ 0.000000] (1:node@c-0.me) 50 | 42
+> [ 0.000000] (1:node@c-0.me) 58 | 42
+> [ 0.000000] (1:node@c-0.me) 74 | 42
+> [ 0.000000] (1:node@c-0.me) 106 | 42
+> [ 0.000000] (1:node@c-0.me) 170 | 42
+> [ 0.000000] (1:node@c-0.me) 298 | 42
+> [ 0.000000] (1:node@c-0.me) 554 | 42
+> [ 0.000000] (1:node@c-0.me) 1066 | 42
+> [ 0.000000] (1:node@c-0.me) 2090 | 42
+> [ 0.000000] (1:node@c-0.me) 4138 | 42
+> [ 0.000000] (1:node@c-0.me) 8234 | 42
+> [ 0.000000] (1:node@c-0.me) 16426 | 42
+> [ 0.000000] (1:node@c-0.me) 32810 | 42
+> [ 0.000000] (1:node@c-0.me) 65578 | 42
+> [ 0.000000] (1:node@c-0.me) 131114 | 42
+> [ 0.000000] (1:node@c-0.me) 262186 | 42
+> [ 0.000000] (1:node@c-0.me) 524330 | 42
+> [ 0.000000] (1:node@c-0.me) 1048618 | 42
+> [ 0.000000] (1:node@c-0.me) 2097194 | 42
+> [ 0.000000] (1:node@c-0.me) 4194346 | 42
+> [ 0.000000] (1:node@c-0.me) 8388650 | 42
+> [ 0.000000] (1:node@c-0.me) Predecessor: -1
+> [ 0.000000] (2:node@c-1.me) Joining the ring with id 366680, knowing node 42
+> [ 0.000000] (3:node@c-2.me) Joining the ring with id 533744, knowing node 366680
+> [ 0.000000] (4:node@c-3.me) Joining the ring with id 1319738, knowing node 42
+> [ 0.000000] (5:node@c-4.me) Joining the ring with id 16509405, knowing node 366680
+> [ 0.000000] (6:node@c-5.me) Joining the ring with id 10874876, knowing node 533744
+> [ 0.000000] (7:node@c-6.me) Joining the ring with id 16728096, knowing node 1319738
+> [ 0.000000] (8:node@c-7.me) Joining the ring with id 10004760, knowing node 16509405
+> [ 0.000000] (9:node@c-8.me) Joining the ring with id 6518808, knowing node 42
+> [ 4.000000] (3:node@c-2.me) My finger table:
+> [ 4.000000] (3:node@c-2.me) Start | Succ
+> [ 4.000000] (3:node@c-2.me) 533745 | 366680
+> [ 4.000000] (3:node@c-2.me) 533746 | 533744
+> [ 4.000000] (3:node@c-2.me) 533748 | 533744
+> [ 4.000000] (3:node@c-2.me) 533752 | 533744
+> [ 4.000000] (3:node@c-2.me) 533760 | 533744
+> [ 4.000000] (3:node@c-2.me) 533776 | 533744
+> [ 4.000000] (3:node@c-2.me) 533808 | 533744
+> [ 4.000000] (3:node@c-2.me) 533872 | 533744
+> [ 4.000000] (3:node@c-2.me) 534000 | 533744
+> [ 4.000000] (3:node@c-2.me) 534256 | 533744
+> [ 4.000000] (3:node@c-2.me) 534768 | 533744
+> [ 4.000000] (3:node@c-2.me) 535792 | 533744
+> [ 4.000000] (3:node@c-2.me) 537840 | 533744
+> [ 4.000000] (3:node@c-2.me) 541936 | 533744
+> [ 4.000000] (3:node@c-2.me) 550128 | 533744
+> [ 4.000000] (3:node@c-2.me) 566512 | 533744
+> [ 4.000000] (3:node@c-2.me) 599280 | 533744
+> [ 4.000000] (3:node@c-2.me) 664816 | 533744
+> [ 4.000000] (3:node@c-2.me) 795888 | 533744
+> [ 4.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 4.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 4.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 4.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 4.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 4.000000] (3:node@c-2.me) Predecessor: -1
+> [ 4.000000] (6:node@c-5.me) My finger table:
+> [ 4.000000] (6:node@c-5.me) Start | Succ
+> [ 4.000000] (6:node@c-5.me) 10874877 | 533744
+> [ 4.000000] (6:node@c-5.me) 10874878 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10874880 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10874884 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10874892 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10874908 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 4.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 4.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 4.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 4.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 4.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 4.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 4.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 4.000000] (6:node@c-5.me) Predecessor: -1
+> [ 5.000000] (5:node@c-4.me) My finger table:
+> [ 5.000000] (5:node@c-4.me) Start | Succ
+> [ 5.000000] (5:node@c-4.me) 16509406 | 366680
+> [ 5.000000] (5:node@c-4.me) 16509407 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509409 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509413 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509421 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 5.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 5.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 5.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 5.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 5.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 5.000000] (5:node@c-4.me) Predecessor: -1
+> [ 5.000000] (8:node@c-7.me) My finger table:
+> [ 5.000000] (8:node@c-7.me) Start | Succ
+> [ 5.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 5.000000] (8:node@c-7.me) 10004762 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004764 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004768 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 5.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 5.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 5.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 5.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 5.000000] (8:node@c-7.me) Predecessor: -1
+> [ 6.000000] (2:node@c-1.me) My finger table:
+> [ 6.000000] (2:node@c-1.me) Start | Succ
+> [ 6.000000] (2:node@c-1.me) 366681 | 42
+> [ 6.000000] (2:node@c-1.me) 366682 | 366680
+> [ 6.000000] (2:node@c-1.me) 366684 | 366680
+> [ 6.000000] (2:node@c-1.me) 366688 | 366680
+> [ 6.000000] (2:node@c-1.me) 366696 | 366680
+> [ 6.000000] (2:node@c-1.me) 366712 | 366680
+> [ 6.000000] (2:node@c-1.me) 366744 | 366680
+> [ 6.000000] (2:node@c-1.me) 366808 | 366680
+> [ 6.000000] (2:node@c-1.me) 366936 | 366680
+> [ 6.000000] (2:node@c-1.me) 367192 | 366680
+> [ 6.000000] (2:node@c-1.me) 367704 | 366680
+> [ 6.000000] (2:node@c-1.me) 368728 | 366680
+> [ 6.000000] (2:node@c-1.me) 370776 | 366680
+> [ 6.000000] (2:node@c-1.me) 374872 | 366680
+> [ 6.000000] (2:node@c-1.me) 383064 | 366680
+> [ 6.000000] (2:node@c-1.me) 399448 | 366680
+> [ 6.000000] (2:node@c-1.me) 432216 | 366680
+> [ 6.000000] (2:node@c-1.me) 497752 | 366680
+> [ 6.000000] (2:node@c-1.me) 628824 | 366680
+> [ 6.000000] (2:node@c-1.me) 890968 | 366680
+> [ 6.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 6.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 6.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 6.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 6.000000] (2:node@c-1.me) Predecessor: -1
+> [ 8.000000] (7:node@c-6.me) My finger table:
+> [ 8.000000] (7:node@c-6.me) Start | Succ
+> [ 8.000000] (7:node@c-6.me) 16728097 | 1319738
+> [ 8.000000] (7:node@c-6.me) 16728098 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728100 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 8.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 8.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 8.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 8.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 8.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 8.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 8.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 8.000000] (7:node@c-6.me) Predecessor: -1
+> [ 9.000000] (10:node@c-9.me) My finger table:
+> [ 9.000000] (10:node@c-9.me) Start | Succ
+> [ 9.000000] (10:node@c-9.me) 2015254 | 1319738
+> [ 9.000000] (10:node@c-9.me) 2015255 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015257 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015261 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015269 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 9.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 9.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 9.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 9.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 9.000000] (10:node@c-9.me) Predecessor: -1
+> [ 11.000000] (4:node@c-3.me) My finger table:
+> [ 11.000000] (4:node@c-3.me) Start | Succ
+> [ 11.000000] (4:node@c-3.me) 1319739 | 42
+> [ 11.000000] (4:node@c-3.me) 1319740 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319742 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 11.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 11.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 11.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 11.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 11.000000] (4:node@c-3.me) Predecessor: -1
+> [ 16.000000] (9:node@c-8.me) My finger table:
+> [ 16.000000] (9:node@c-8.me) Start | Succ
+> [ 16.000000] (9:node@c-8.me) 6518809 | 42
+> [ 16.000000] (9:node@c-8.me) 6518810 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518812 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 16.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 16.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 16.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 16.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 16.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 16.000000] (9:node@c-8.me) Predecessor: -1
+> [ 24.000000] (5:node@c-4.me) My finger table:
+> [ 24.000000] (5:node@c-4.me) Start | Succ
+> [ 24.000000] (5:node@c-4.me) 16509406 | 366680
+> [ 24.000000] (5:node@c-4.me) 16509407 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509409 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509413 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509421 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 24.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 24.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 24.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 24.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 24.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 24.000000] (5:node@c-4.me) Predecessor: 10004760
+> [ 26.000000] (4:node@c-3.me) My finger table:
+> [ 26.000000] (4:node@c-3.me) Start | Succ
+> [ 26.000000] (4:node@c-3.me) 1319739 | 42
+> [ 26.000000] (4:node@c-3.me) 1319740 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319742 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 26.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 26.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 26.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 26.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 26.000000] (4:node@c-3.me) Predecessor: 16728096
+> [ 33.000000] (2:node@c-1.me) My finger table:
+> [ 33.000000] (2:node@c-1.me) Start | Succ
+> [ 33.000000] (2:node@c-1.me) 366681 | 42
+> [ 33.000000] (2:node@c-1.me) 366682 | 366680
+> [ 33.000000] (2:node@c-1.me) 366684 | 366680
+> [ 33.000000] (2:node@c-1.me) 366688 | 366680
+> [ 33.000000] (2:node@c-1.me) 366696 | 366680
+> [ 33.000000] (2:node@c-1.me) 366712 | 366680
+> [ 33.000000] (2:node@c-1.me) 366744 | 366680
+> [ 33.000000] (2:node@c-1.me) 366808 | 366680
+> [ 33.000000] (2:node@c-1.me) 366936 | 366680
+> [ 33.000000] (2:node@c-1.me) 367192 | 366680
+> [ 33.000000] (2:node@c-1.me) 367704 | 366680
+> [ 33.000000] (2:node@c-1.me) 368728 | 366680
+> [ 33.000000] (2:node@c-1.me) 370776 | 366680
+> [ 33.000000] (2:node@c-1.me) 374872 | 366680
+> [ 33.000000] (2:node@c-1.me) 383064 | 366680
+> [ 33.000000] (2:node@c-1.me) 399448 | 366680
+> [ 33.000000] (2:node@c-1.me) 432216 | 366680
+> [ 33.000000] (2:node@c-1.me) 497752 | 366680
+> [ 33.000000] (2:node@c-1.me) 628824 | 366680
+> [ 33.000000] (2:node@c-1.me) 890968 | 366680
+> [ 33.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 33.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 33.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 33.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 33.000000] (2:node@c-1.me) Predecessor: 16509405
+> [ 38.000000] (3:node@c-2.me) My finger table:
+> [ 38.000000] (3:node@c-2.me) Start | Succ
+> [ 38.000000] (3:node@c-2.me) 533745 | 366680
+> [ 38.000000] (3:node@c-2.me) 533746 | 533744
+> [ 38.000000] (3:node@c-2.me) 533748 | 533744
+> [ 38.000000] (3:node@c-2.me) 533752 | 533744
+> [ 38.000000] (3:node@c-2.me) 533760 | 533744
+> [ 38.000000] (3:node@c-2.me) 533776 | 533744
+> [ 38.000000] (3:node@c-2.me) 533808 | 533744
+> [ 38.000000] (3:node@c-2.me) 533872 | 533744
+> [ 38.000000] (3:node@c-2.me) 534000 | 533744
+> [ 38.000000] (3:node@c-2.me) 534256 | 533744
+> [ 38.000000] (3:node@c-2.me) 534768 | 533744
+> [ 38.000000] (3:node@c-2.me) 535792 | 533744
+> [ 38.000000] (3:node@c-2.me) 537840 | 533744
+> [ 38.000000] (3:node@c-2.me) 541936 | 533744
+> [ 38.000000] (3:node@c-2.me) 550128 | 533744
+> [ 38.000000] (3:node@c-2.me) 566512 | 533744
+> [ 38.000000] (3:node@c-2.me) 599280 | 533744
+> [ 38.000000] (3:node@c-2.me) 664816 | 533744
+> [ 38.000000] (3:node@c-2.me) 795888 | 533744
+> [ 38.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 38.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 38.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 38.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 38.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 38.000000] (3:node@c-2.me) Predecessor: 10874876
+> [ 50.000000] (1:node@c-0.me) My finger table:
+> [ 50.000000] (1:node@c-0.me) Start | Succ
+> [ 50.000000] (1:node@c-0.me) 43 | 42
+> [ 50.000000] (1:node@c-0.me) 44 | 42
+> [ 50.000000] (1:node@c-0.me) 46 | 42
+> [ 50.000000] (1:node@c-0.me) 50 | 42
+> [ 50.000000] (1:node@c-0.me) 58 | 42
+> [ 50.000000] (1:node@c-0.me) 74 | 42
+> [ 50.000000] (1:node@c-0.me) 106 | 42
+> [ 50.000000] (1:node@c-0.me) 170 | 42
+> [ 50.000000] (1:node@c-0.me) 298 | 42
+> [ 50.000000] (1:node@c-0.me) 554 | 42
+> [ 50.000000] (1:node@c-0.me) 1066 | 42
+> [ 50.000000] (1:node@c-0.me) 2090 | 42
+> [ 50.000000] (1:node@c-0.me) 4138 | 42
+> [ 50.000000] (1:node@c-0.me) 8234 | 42
+> [ 50.000000] (1:node@c-0.me) 16426 | 42
+> [ 50.000000] (1:node@c-0.me) 32810 | 42
+> [ 50.000000] (1:node@c-0.me) 65578 | 42
+> [ 50.000000] (1:node@c-0.me) 131114 | 42
+> [ 50.000000] (1:node@c-0.me) 262186 | 42
+> [ 50.000000] (1:node@c-0.me) 524330 | 42
+> [ 50.000000] (1:node@c-0.me) 1048618 | 42
+> [ 50.000000] (1:node@c-0.me) 2097194 | 42
+> [ 50.000000] (1:node@c-0.me) 4194346 | 42
+> [ 50.000000] (1:node@c-0.me) 8388650 | 42
+> [ 50.000000] (1:node@c-0.me) Predecessor: 366680
+> [ 60.000000] (1:node@c-0.me) My finger table:
+> [ 60.000000] (1:node@c-0.me) Start | Succ
+> [ 60.000000] (1:node@c-0.me) 43 | 42
+> [ 60.000000] (1:node@c-0.me) 44 | 42
+> [ 60.000000] (1:node@c-0.me) 46 | 42
+> [ 60.000000] (1:node@c-0.me) 50 | 42
+> [ 60.000000] (1:node@c-0.me) 58 | 42
+> [ 60.000000] (1:node@c-0.me) 74 | 42
+> [ 60.000000] (1:node@c-0.me) 106 | 42
+> [ 60.000000] (1:node@c-0.me) 170 | 42
+> [ 60.000000] (1:node@c-0.me) 298 | 42
+> [ 60.000000] (1:node@c-0.me) 554 | 42
+> [ 60.000000] (1:node@c-0.me) 1066 | 42
+> [ 60.000000] (1:node@c-0.me) 2090 | 42
+> [ 60.000000] (1:node@c-0.me) 4138 | 42
+> [ 60.000000] (1:node@c-0.me) 8234 | 42
+> [ 60.000000] (1:node@c-0.me) 16426 | 42
+> [ 60.000000] (1:node@c-0.me) 32810 | 42
+> [ 60.000000] (1:node@c-0.me) 65578 | 42
+> [ 60.000000] (1:node@c-0.me) 131114 | 42
+> [ 60.000000] (1:node@c-0.me) 262186 | 42
+> [ 60.000000] (1:node@c-0.me) 524330 | 42
+> [ 60.000000] (1:node@c-0.me) 1048618 | 42
+> [ 60.000000] (1:node@c-0.me) 2097194 | 42
+> [ 60.000000] (1:node@c-0.me) 4194346 | 42
+> [ 60.000000] (1:node@c-0.me) 8388650 | 42
+> [ 60.000000] (1:node@c-0.me) Predecessor: 1319738
+> [ 70.000000] (1:node@c-0.me) My finger table:
+> [ 70.000000] (1:node@c-0.me) Start | Succ
+> [ 70.000000] (1:node@c-0.me) 43 | 1319738
+> [ 70.000000] (1:node@c-0.me) 44 | 42
+> [ 70.000000] (1:node@c-0.me) 46 | 42
+> [ 70.000000] (1:node@c-0.me) 50 | 42
+> [ 70.000000] (1:node@c-0.me) 58 | 42
+> [ 70.000000] (1:node@c-0.me) 74 | 42
+> [ 70.000000] (1:node@c-0.me) 106 | 42
+> [ 70.000000] (1:node@c-0.me) 170 | 42
+> [ 70.000000] (1:node@c-0.me) 298 | 42
+> [ 70.000000] (1:node@c-0.me) 554 | 42
+> [ 70.000000] (1:node@c-0.me) 1066 | 42
+> [ 70.000000] (1:node@c-0.me) 2090 | 42
+> [ 70.000000] (1:node@c-0.me) 4138 | 42
+> [ 70.000000] (1:node@c-0.me) 8234 | 42
+> [ 70.000000] (1:node@c-0.me) 16426 | 42
+> [ 70.000000] (1:node@c-0.me) 32810 | 42
+> [ 70.000000] (1:node@c-0.me) 65578 | 42
+> [ 70.000000] (1:node@c-0.me) 131114 | 42
+> [ 70.000000] (1:node@c-0.me) 262186 | 42
+> [ 70.000000] (1:node@c-0.me) 524330 | 42
+> [ 70.000000] (1:node@c-0.me) 1048618 | 42
+> [ 70.000000] (1:node@c-0.me) 2097194 | 42
+> [ 70.000000] (1:node@c-0.me) 4194346 | 42
+> [ 70.000000] (1:node@c-0.me) 8388650 | 42
+> [ 70.000000] (1:node@c-0.me) Predecessor: 6518808
+> [ 85.000000] (4:node@c-3.me) My finger table:
+> [ 85.000000] (4:node@c-3.me) Start | Succ
+> [ 85.000000] (4:node@c-3.me) 1319739 | 6518808
+> [ 85.000000] (4:node@c-3.me) 1319740 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319742 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 85.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 85.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 85.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 85.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 85.000000] (4:node@c-3.me) Predecessor: 42
+> [ 90.000000] (7:node@c-6.me) My finger table:
+> [ 90.000000] (7:node@c-6.me) Start | Succ
+> [ 90.000000] (7:node@c-6.me) 16728097 | 1319738
+> [ 90.000000] (7:node@c-6.me) 16728098 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728100 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 90.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 90.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 90.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 90.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 90.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 90.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 90.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 90.000000] (7:node@c-6.me) Predecessor: 2015253
+> [ 107.000000] (8:node@c-7.me) My finger table:
+> [ 107.000000] (8:node@c-7.me) Start | Succ
+> [ 107.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 107.000000] (8:node@c-7.me) 10004762 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004764 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004768 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 107.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 107.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 107.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 107.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 107.000000] (8:node@c-7.me) Predecessor: 533744
+> [ 109.000000] (9:node@c-8.me) My finger table:
+> [ 109.000000] (9:node@c-8.me) Start | Succ
+> [ 109.000000] (9:node@c-8.me) 6518809 | 42
+> [ 109.000000] (9:node@c-8.me) 6518810 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518812 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 109.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 109.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 109.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 109.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 109.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 109.000000] (9:node@c-8.me) Predecessor: 366680
+> [ 110.000000] (9:node@c-8.me) My finger table:
+> [ 110.000000] (9:node@c-8.me) Start | Succ
+> [ 110.000000] (9:node@c-8.me) 6518809 | 42
+> [ 110.000000] (9:node@c-8.me) 6518810 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518812 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 110.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 110.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 110.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 110.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 110.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 110.000000] (9:node@c-8.me) Predecessor: 1319738
+> [ 145.000000] (1:node@c-0.me) My finger table:
+> [ 145.000000] (1:node@c-0.me) Start | Succ
+> [ 145.000000] (1:node@c-0.me) 43 | 1319738
+> [ 145.000000] (1:node@c-0.me) 44 | 42
+> [ 145.000000] (1:node@c-0.me) 46 | 42
+> [ 145.000000] (1:node@c-0.me) 50 | 42
+> [ 145.000000] (1:node@c-0.me) 58 | 42
+> [ 145.000000] (1:node@c-0.me) 74 | 42
+> [ 145.000000] (1:node@c-0.me) 106 | 42
+> [ 145.000000] (1:node@c-0.me) 170 | 42
+> [ 145.000000] (1:node@c-0.me) 298 | 42
+> [ 145.000000] (1:node@c-0.me) 554 | 42
+> [ 145.000000] (1:node@c-0.me) 1066 | 42
+> [ 145.000000] (1:node@c-0.me) 2090 | 42
+> [ 145.000000] (1:node@c-0.me) 4138 | 42
+> [ 145.000000] (1:node@c-0.me) 8234 | 42
+> [ 145.000000] (1:node@c-0.me) 16426 | 42
+> [ 145.000000] (1:node@c-0.me) 32810 | 42
+> [ 145.000000] (1:node@c-0.me) 65578 | 42
+> [ 145.000000] (1:node@c-0.me) 131114 | 42
+> [ 145.000000] (1:node@c-0.me) 262186 | 42
+> [ 145.000000] (1:node@c-0.me) 524330 | 42
+> [ 145.000000] (1:node@c-0.me) 1048618 | 42
+> [ 145.000000] (1:node@c-0.me) 2097194 | 42
+> [ 145.000000] (1:node@c-0.me) 4194346 | 42
+> [ 145.000000] (1:node@c-0.me) 8388650 | 42
+> [ 145.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 157.000000] (4:node@c-3.me) My finger table:
+> [ 157.000000] (4:node@c-3.me) Start | Succ
+> [ 157.000000] (4:node@c-3.me) 1319739 | 6518808
+> [ 157.000000] (4:node@c-3.me) 1319740 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319742 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 157.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 157.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 157.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 157.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 157.000000] (4:node@c-3.me) Predecessor: 366680
+> [ 184.000000] (7:node@c-6.me) My finger table:
+> [ 184.000000] (7:node@c-6.me) Start | Succ
+> [ 184.000000] (7:node@c-6.me) 16728097 | 42
+> [ 184.000000] (7:node@c-6.me) 16728098 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728100 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 184.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 184.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 184.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 184.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 184.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 184.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 184.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 184.000000] (7:node@c-6.me) Predecessor: 6518808
+> [ 205.000000] (2:node@c-1.me) My finger table:
+> [ 205.000000] (2:node@c-1.me) Start | Succ
+> [ 205.000000] (2:node@c-1.me) 366681 | 1319738
+> [ 205.000000] (2:node@c-1.me) 366682 | 366680
+> [ 205.000000] (2:node@c-1.me) 366684 | 366680
+> [ 205.000000] (2:node@c-1.me) 366688 | 366680
+> [ 205.000000] (2:node@c-1.me) 366696 | 366680
+> [ 205.000000] (2:node@c-1.me) 366712 | 366680
+> [ 205.000000] (2:node@c-1.me) 366744 | 366680
+> [ 205.000000] (2:node@c-1.me) 366808 | 366680
+> [ 205.000000] (2:node@c-1.me) 366936 | 366680
+> [ 205.000000] (2:node@c-1.me) 367192 | 366680
+> [ 205.000000] (2:node@c-1.me) 367704 | 366680
+> [ 205.000000] (2:node@c-1.me) 368728 | 366680
+> [ 205.000000] (2:node@c-1.me) 370776 | 366680
+> [ 205.000000] (2:node@c-1.me) 374872 | 366680
+> [ 205.000000] (2:node@c-1.me) 383064 | 366680
+> [ 205.000000] (2:node@c-1.me) 399448 | 366680
+> [ 205.000000] (2:node@c-1.me) 432216 | 366680
+> [ 205.000000] (2:node@c-1.me) 497752 | 366680
+> [ 205.000000] (2:node@c-1.me) 628824 | 366680
+> [ 205.000000] (2:node@c-1.me) 890968 | 366680
+> [ 205.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 205.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 205.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 205.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 205.000000] (2:node@c-1.me) Predecessor: 42
+> [ 219.000000] (9:node@c-8.me) My finger table:
+> [ 219.000000] (9:node@c-8.me) Start | Succ
+> [ 219.000000] (9:node@c-8.me) 6518809 | 16728096
+> [ 219.000000] (9:node@c-8.me) 6518810 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518812 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 219.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 219.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 219.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 219.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 219.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 219.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 245.000000] (6:node@c-5.me) My finger table:
+> [ 245.000000] (6:node@c-5.me) Start | Succ
+> [ 245.000000] (6:node@c-5.me) 10874877 | 533744
+> [ 245.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 245.000000] (6:node@c-5.me) 10874880 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10874884 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10874892 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10874908 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 245.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 245.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 245.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 245.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 245.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 245.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 245.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 245.000000] (6:node@c-5.me) Predecessor: -1
+> [ 246.000000] (1:node@c-0.me) My finger table:
+> [ 246.000000] (1:node@c-0.me) Start | Succ
+> [ 246.000000] (1:node@c-0.me) 43 | 366680
+> [ 246.000000] (1:node@c-0.me) 44 | 366680
+> [ 246.000000] (1:node@c-0.me) 46 | 42
+> [ 246.000000] (1:node@c-0.me) 50 | 42
+> [ 246.000000] (1:node@c-0.me) 58 | 42
+> [ 246.000000] (1:node@c-0.me) 74 | 42
+> [ 246.000000] (1:node@c-0.me) 106 | 42
+> [ 246.000000] (1:node@c-0.me) 170 | 42
+> [ 246.000000] (1:node@c-0.me) 298 | 42
+> [ 246.000000] (1:node@c-0.me) 554 | 42
+> [ 246.000000] (1:node@c-0.me) 1066 | 42
+> [ 246.000000] (1:node@c-0.me) 2090 | 42
+> [ 246.000000] (1:node@c-0.me) 4138 | 42
+> [ 246.000000] (1:node@c-0.me) 8234 | 42
+> [ 246.000000] (1:node@c-0.me) 16426 | 42
+> [ 246.000000] (1:node@c-0.me) 32810 | 42
+> [ 246.000000] (1:node@c-0.me) 65578 | 42
+> [ 246.000000] (1:node@c-0.me) 131114 | 42
+> [ 246.000000] (1:node@c-0.me) 262186 | 42
+> [ 246.000000] (1:node@c-0.me) 524330 | 42
+> [ 246.000000] (1:node@c-0.me) 1048618 | 42
+> [ 246.000000] (1:node@c-0.me) 2097194 | 42
+> [ 246.000000] (1:node@c-0.me) 4194346 | 42
+> [ 246.000000] (1:node@c-0.me) 8388650 | 42
+> [ 246.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 248.000000] (7:node@c-6.me) My finger table:
+> [ 248.000000] (7:node@c-6.me) Start | Succ
+> [ 248.000000] (7:node@c-6.me) 16728097 | 42
+> [ 248.000000] (7:node@c-6.me) 16728098 | 42
+> [ 248.000000] (7:node@c-6.me) 16728100 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 248.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 248.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 248.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 248.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 248.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 248.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 248.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 248.000000] (7:node@c-6.me) Predecessor: 6518808
+> [ 254.000000] (3:node@c-2.me) My finger table:
+> [ 254.000000] (3:node@c-2.me) Start | Succ
+> [ 254.000000] (3:node@c-2.me) 533745 | 10004760
+> [ 254.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 254.000000] (3:node@c-2.me) 533748 | 533744
+> [ 254.000000] (3:node@c-2.me) 533752 | 533744
+> [ 254.000000] (3:node@c-2.me) 533760 | 533744
+> [ 254.000000] (3:node@c-2.me) 533776 | 533744
+> [ 254.000000] (3:node@c-2.me) 533808 | 533744
+> [ 254.000000] (3:node@c-2.me) 533872 | 533744
+> [ 254.000000] (3:node@c-2.me) 534000 | 533744
+> [ 254.000000] (3:node@c-2.me) 534256 | 533744
+> [ 254.000000] (3:node@c-2.me) 534768 | 533744
+> [ 254.000000] (3:node@c-2.me) 535792 | 533744
+> [ 254.000000] (3:node@c-2.me) 537840 | 533744
+> [ 254.000000] (3:node@c-2.me) 541936 | 533744
+> [ 254.000000] (3:node@c-2.me) 550128 | 533744
+> [ 254.000000] (3:node@c-2.me) 566512 | 533744
+> [ 254.000000] (3:node@c-2.me) 599280 | 533744
+> [ 254.000000] (3:node@c-2.me) 664816 | 533744
+> [ 254.000000] (3:node@c-2.me) 795888 | 533744
+> [ 254.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 254.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 254.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 254.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 254.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 254.000000] (3:node@c-2.me) Predecessor: 10874876
+> [ 261.000000] (2:node@c-1.me) My finger table:
+> [ 261.000000] (2:node@c-1.me) Start | Succ
+> [ 261.000000] (2:node@c-1.me) 366681 | 1319738
+> [ 261.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 261.000000] (2:node@c-1.me) 366684 | 366680
+> [ 261.000000] (2:node@c-1.me) 366688 | 366680
+> [ 261.000000] (2:node@c-1.me) 366696 | 366680
+> [ 261.000000] (2:node@c-1.me) 366712 | 366680
+> [ 261.000000] (2:node@c-1.me) 366744 | 366680
+> [ 261.000000] (2:node@c-1.me) 366808 | 366680
+> [ 261.000000] (2:node@c-1.me) 366936 | 366680
+> [ 261.000000] (2:node@c-1.me) 367192 | 366680
+> [ 261.000000] (2:node@c-1.me) 367704 | 366680
+> [ 261.000000] (2:node@c-1.me) 368728 | 366680
+> [ 261.000000] (2:node@c-1.me) 370776 | 366680
+> [ 261.000000] (2:node@c-1.me) 374872 | 366680
+> [ 261.000000] (2:node@c-1.me) 383064 | 366680
+> [ 261.000000] (2:node@c-1.me) 399448 | 366680
+> [ 261.000000] (2:node@c-1.me) 432216 | 366680
+> [ 261.000000] (2:node@c-1.me) 497752 | 366680
+> [ 261.000000] (2:node@c-1.me) 628824 | 366680
+> [ 261.000000] (2:node@c-1.me) 890968 | 366680
+> [ 261.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 261.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 261.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 261.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 261.000000] (2:node@c-1.me) Predecessor: 42
+> [ 264.000000] (10:node@c-9.me) My finger table:
+> [ 264.000000] (10:node@c-9.me) Start | Succ
+> [ 264.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 264.000000] (10:node@c-9.me) 2015255 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015257 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015261 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015269 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 264.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 264.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 264.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 264.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 264.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 267.000000] (8:node@c-7.me) My finger table:
+> [ 267.000000] (8:node@c-7.me) Start | Succ
+> [ 267.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 267.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 267.000000] (8:node@c-7.me) 10004764 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10004768 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 267.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 267.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 267.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 267.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 267.000000] (8:node@c-7.me) Predecessor: 533744
+> [ 268.000000] (4:node@c-3.me) My finger table:
+> [ 268.000000] (4:node@c-3.me) Start | Succ
+> [ 268.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 268.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 268.000000] (4:node@c-3.me) 1319742 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 268.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 268.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 268.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 268.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 268.000000] (4:node@c-3.me) Predecessor: 366680
+> [ 271.000000] (5:node@c-4.me) My finger table:
+> [ 271.000000] (5:node@c-4.me) Start | Succ
+> [ 271.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 271.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 271.000000] (5:node@c-4.me) 16509409 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509413 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509421 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 271.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 271.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 271.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 271.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 271.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 271.000000] (5:node@c-4.me) Predecessor: 10004760
+> [ 274.000000] (9:node@c-8.me) My finger table:
+> [ 274.000000] (9:node@c-8.me) Start | Succ
+> [ 274.000000] (9:node@c-8.me) 6518809 | 16728096
+> [ 274.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 274.000000] (9:node@c-8.me) 6518812 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 274.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 274.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 274.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 274.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 274.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 274.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 281.000000] (10:node@c-9.me) My finger table:
+> [ 281.000000] (10:node@c-9.me) Start | Succ
+> [ 281.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 281.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 281.000000] (10:node@c-9.me) 2015257 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015261 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015269 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 281.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 281.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 281.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 281.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 281.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 326.000000] (7:node@c-6.me) My finger table:
+> [ 326.000000] (7:node@c-6.me) Start | Succ
+> [ 326.000000] (7:node@c-6.me) 16728097 | 42
+> [ 326.000000] (7:node@c-6.me) 16728098 | 42
+> [ 326.000000] (7:node@c-6.me) 16728100 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 326.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 326.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 326.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 326.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 326.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 326.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 326.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 326.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 367.000000] (1:node@c-0.me) My finger table:
+> [ 367.000000] (1:node@c-0.me) Start | Succ
+> [ 367.000000] (1:node@c-0.me) 43 | 366680
+> [ 367.000000] (1:node@c-0.me) 44 | 366680
+> [ 367.000000] (1:node@c-0.me) 46 | 366680
+> [ 367.000000] (1:node@c-0.me) 50 | 42
+> [ 367.000000] (1:node@c-0.me) 58 | 42
+> [ 367.000000] (1:node@c-0.me) 74 | 42
+> [ 367.000000] (1:node@c-0.me) 106 | 42
+> [ 367.000000] (1:node@c-0.me) 170 | 42
+> [ 367.000000] (1:node@c-0.me) 298 | 42
+> [ 367.000000] (1:node@c-0.me) 554 | 42
+> [ 367.000000] (1:node@c-0.me) 1066 | 42
+> [ 367.000000] (1:node@c-0.me) 2090 | 42
+> [ 367.000000] (1:node@c-0.me) 4138 | 42
+> [ 367.000000] (1:node@c-0.me) 8234 | 42
+> [ 367.000000] (1:node@c-0.me) 16426 | 42
+> [ 367.000000] (1:node@c-0.me) 32810 | 42
+> [ 367.000000] (1:node@c-0.me) 65578 | 42
+> [ 367.000000] (1:node@c-0.me) 131114 | 42
+> [ 367.000000] (1:node@c-0.me) 262186 | 42
+> [ 367.000000] (1:node@c-0.me) 524330 | 42
+> [ 367.000000] (1:node@c-0.me) 1048618 | 42
+> [ 367.000000] (1:node@c-0.me) 2097194 | 42
+> [ 367.000000] (1:node@c-0.me) 4194346 | 42
+> [ 367.000000] (1:node@c-0.me) 8388650 | 42
+> [ 367.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 367.000000] (6:node@c-5.me) My finger table:
+> [ 367.000000] (6:node@c-5.me) Start | Succ
+> [ 367.000000] (6:node@c-5.me) 10874877 | 533744
+> [ 367.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 367.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 367.000000] (6:node@c-5.me) 10874884 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10874892 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10874908 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 367.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 367.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 367.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 367.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 367.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 367.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 367.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 367.000000] (6:node@c-5.me) Predecessor: -1
+> [ 380.000000] (3:node@c-2.me) My finger table:
+> [ 380.000000] (3:node@c-2.me) Start | Succ
+> [ 380.000000] (3:node@c-2.me) 533745 | 10004760
+> [ 380.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 380.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 380.000000] (3:node@c-2.me) 533752 | 533744
+> [ 380.000000] (3:node@c-2.me) 533760 | 533744
+> [ 380.000000] (3:node@c-2.me) 533776 | 533744
+> [ 380.000000] (3:node@c-2.me) 533808 | 533744
+> [ 380.000000] (3:node@c-2.me) 533872 | 533744
+> [ 380.000000] (3:node@c-2.me) 534000 | 533744
+> [ 380.000000] (3:node@c-2.me) 534256 | 533744
+> [ 380.000000] (3:node@c-2.me) 534768 | 533744
+> [ 380.000000] (3:node@c-2.me) 535792 | 533744
+> [ 380.000000] (3:node@c-2.me) 537840 | 533744
+> [ 380.000000] (3:node@c-2.me) 541936 | 533744
+> [ 380.000000] (3:node@c-2.me) 550128 | 533744
+> [ 380.000000] (3:node@c-2.me) 566512 | 533744
+> [ 380.000000] (3:node@c-2.me) 599280 | 533744
+> [ 380.000000] (3:node@c-2.me) 664816 | 533744
+> [ 380.000000] (3:node@c-2.me) 795888 | 533744
+> [ 380.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 380.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 380.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 380.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 380.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 380.000000] (3:node@c-2.me) Predecessor: 10874876
+> [ 382.000000] (2:node@c-1.me) My finger table:
+> [ 382.000000] (2:node@c-1.me) Start | Succ
+> [ 382.000000] (2:node@c-1.me) 366681 | 1319738
+> [ 382.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 382.000000] (2:node@c-1.me) 366684 | 1319738
+> [ 382.000000] (2:node@c-1.me) 366688 | 366680
+> [ 382.000000] (2:node@c-1.me) 366696 | 366680
+> [ 382.000000] (2:node@c-1.me) 366712 | 366680
+> [ 382.000000] (2:node@c-1.me) 366744 | 366680
+> [ 382.000000] (2:node@c-1.me) 366808 | 366680
+> [ 382.000000] (2:node@c-1.me) 366936 | 366680
+> [ 382.000000] (2:node@c-1.me) 367192 | 366680
+> [ 382.000000] (2:node@c-1.me) 367704 | 366680
+> [ 382.000000] (2:node@c-1.me) 368728 | 366680
+> [ 382.000000] (2:node@c-1.me) 370776 | 366680
+> [ 382.000000] (2:node@c-1.me) 374872 | 366680
+> [ 382.000000] (2:node@c-1.me) 383064 | 366680
+> [ 382.000000] (2:node@c-1.me) 399448 | 366680
+> [ 382.000000] (2:node@c-1.me) 432216 | 366680
+> [ 382.000000] (2:node@c-1.me) 497752 | 366680
+> [ 382.000000] (2:node@c-1.me) 628824 | 366680
+> [ 382.000000] (2:node@c-1.me) 890968 | 366680
+> [ 382.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 382.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 382.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 382.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 382.000000] (2:node@c-1.me) Predecessor: 42
+> [ 383.000000] (7:node@c-6.me) My finger table:
+> [ 383.000000] (7:node@c-6.me) Start | Succ
+> [ 383.000000] (7:node@c-6.me) 16728097 | 42
+> [ 383.000000] (7:node@c-6.me) 16728098 | 42
+> [ 383.000000] (7:node@c-6.me) 16728100 | 42
+> [ 383.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 383.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 383.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 383.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 383.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 383.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 383.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 383.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 383.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 393.000000] (8:node@c-7.me) My finger table:
+> [ 393.000000] (8:node@c-7.me) Start | Succ
+> [ 393.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 393.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 393.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 393.000000] (8:node@c-7.me) 10004768 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 393.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 393.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 393.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 393.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 393.000000] (8:node@c-7.me) Predecessor: 533744
+> [ 399.000000] (9:node@c-8.me) My finger table:
+> [ 399.000000] (9:node@c-8.me) Start | Succ
+> [ 399.000000] (9:node@c-8.me) 6518809 | 10004760
+> [ 399.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 399.000000] (9:node@c-8.me) 6518812 | 10004760
+> [ 399.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 399.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 399.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 399.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 399.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 399.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 399.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 403.000000] (8:node@c-7.me) My finger table:
+> [ 403.000000] (8:node@c-7.me) Start | Succ
+> [ 403.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 403.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 403.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 403.000000] (8:node@c-7.me) 10004768 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 403.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 403.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 403.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 403.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 403.000000] (8:node@c-7.me) Predecessor: 6518808
+> [ 407.000000] (5:node@c-4.me) My finger table:
+> [ 407.000000] (5:node@c-4.me) Start | Succ
+> [ 407.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 407.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 407.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 407.000000] (5:node@c-4.me) 16509413 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509421 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 407.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 407.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 407.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 407.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 407.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 407.000000] (5:node@c-4.me) Predecessor: 10004760
+> [ 410.000000] (10:node@c-9.me) My finger table:
+> [ 410.000000] (10:node@c-9.me) Start | Succ
+> [ 410.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 410.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 410.000000] (10:node@c-9.me) 2015257 | 6518808
+> [ 410.000000] (10:node@c-9.me) 2015261 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015269 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 410.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 410.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 410.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 410.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 410.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 416.000000] (4:node@c-3.me) My finger table:
+> [ 416.000000] (4:node@c-3.me) Start | Succ
+> [ 416.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 416.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 416.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 416.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 416.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 416.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 416.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 416.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 416.000000] (4:node@c-3.me) Predecessor: 366680
+> [ 489.000000] (1:node@c-0.me) My finger table:
+> [ 489.000000] (1:node@c-0.me) Start | Succ
+> [ 489.000000] (1:node@c-0.me) 43 | 366680
+> [ 489.000000] (1:node@c-0.me) 44 | 366680
+> [ 489.000000] (1:node@c-0.me) 46 | 366680
+> [ 489.000000] (1:node@c-0.me) 50 | 366680
+> [ 489.000000] (1:node@c-0.me) 58 | 42
+> [ 489.000000] (1:node@c-0.me) 74 | 42
+> [ 489.000000] (1:node@c-0.me) 106 | 42
+> [ 489.000000] (1:node@c-0.me) 170 | 42
+> [ 489.000000] (1:node@c-0.me) 298 | 42
+> [ 489.000000] (1:node@c-0.me) 554 | 42
+> [ 489.000000] (1:node@c-0.me) 1066 | 42
+> [ 489.000000] (1:node@c-0.me) 2090 | 42
+> [ 489.000000] (1:node@c-0.me) 4138 | 42
+> [ 489.000000] (1:node@c-0.me) 8234 | 42
+> [ 489.000000] (1:node@c-0.me) 16426 | 42
+> [ 489.000000] (1:node@c-0.me) 32810 | 42
+> [ 489.000000] (1:node@c-0.me) 65578 | 42
+> [ 489.000000] (1:node@c-0.me) 131114 | 42
+> [ 489.000000] (1:node@c-0.me) 262186 | 42
+> [ 489.000000] (1:node@c-0.me) 524330 | 42
+> [ 489.000000] (1:node@c-0.me) 1048618 | 42
+> [ 489.000000] (1:node@c-0.me) 2097194 | 42
+> [ 489.000000] (1:node@c-0.me) 4194346 | 42
+> [ 489.000000] (1:node@c-0.me) 8388650 | 42
+> [ 489.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 492.000000] (6:node@c-5.me) My finger table:
+> [ 492.000000] (6:node@c-5.me) Start | Succ
+> [ 492.000000] (6:node@c-5.me) 10874877 | 533744
+> [ 492.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 492.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 492.000000] (6:node@c-5.me) 10874884 | 533744
+> [ 492.000000] (6:node@c-5.me) 10874892 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10874908 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 492.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 492.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 492.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 492.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 492.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 492.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 492.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 492.000000] (6:node@c-5.me) Predecessor: -1
+> [ 503.000000] (3:node@c-2.me) My finger table:
+> [ 503.000000] (3:node@c-2.me) Start | Succ
+> [ 503.000000] (3:node@c-2.me) 533745 | 1319738
+> [ 503.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 503.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 503.000000] (3:node@c-2.me) 533752 | 1319738
+> [ 503.000000] (3:node@c-2.me) 533760 | 533744
+> [ 503.000000] (3:node@c-2.me) 533776 | 533744
+> [ 503.000000] (3:node@c-2.me) 533808 | 533744
+> [ 503.000000] (3:node@c-2.me) 533872 | 533744
+> [ 503.000000] (3:node@c-2.me) 534000 | 533744
+> [ 503.000000] (3:node@c-2.me) 534256 | 533744
+> [ 503.000000] (3:node@c-2.me) 534768 | 533744
+> [ 503.000000] (3:node@c-2.me) 535792 | 533744
+> [ 503.000000] (3:node@c-2.me) 537840 | 533744
+> [ 503.000000] (3:node@c-2.me) 541936 | 533744
+> [ 503.000000] (3:node@c-2.me) 550128 | 533744
+> [ 503.000000] (3:node@c-2.me) 566512 | 533744
+> [ 503.000000] (3:node@c-2.me) 599280 | 533744
+> [ 503.000000] (3:node@c-2.me) 664816 | 533744
+> [ 503.000000] (3:node@c-2.me) 795888 | 533744
+> [ 503.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 503.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 503.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 503.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 503.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 503.000000] (3:node@c-2.me) Predecessor: 10874876
+> [ 512.000000] (7:node@c-6.me) My finger table:
+> [ 512.000000] (7:node@c-6.me) Start | Succ
+> [ 512.000000] (7:node@c-6.me) 16728097 | 42
+> [ 512.000000] (7:node@c-6.me) 16728098 | 42
+> [ 512.000000] (7:node@c-6.me) 16728100 | 42
+> [ 512.000000] (7:node@c-6.me) 16728104 | 42
+> [ 512.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 512.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 512.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 512.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 512.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 512.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 512.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 512.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 512.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 519.000000] (9:node@c-8.me) My finger table:
+> [ 519.000000] (9:node@c-8.me) Start | Succ
+> [ 519.000000] (9:node@c-8.me) 6518809 | 10004760
+> [ 519.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 519.000000] (9:node@c-8.me) 6518812 | 10004760
+> [ 519.000000] (9:node@c-8.me) 6518816 | 10004760
+> [ 519.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 519.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 519.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 519.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 519.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 519.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 519.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 520.000000] (2:node@c-1.me) My finger table:
+> [ 520.000000] (2:node@c-1.me) Start | Succ
+> [ 520.000000] (2:node@c-1.me) 366681 | 1319738
+> [ 520.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 520.000000] (2:node@c-1.me) 366684 | 1319738
+> [ 520.000000] (2:node@c-1.me) 366688 | 1319738
+> [ 520.000000] (2:node@c-1.me) 366696 | 366680
+> [ 520.000000] (2:node@c-1.me) 366712 | 366680
+> [ 520.000000] (2:node@c-1.me) 366744 | 366680
+> [ 520.000000] (2:node@c-1.me) 366808 | 366680
+> [ 520.000000] (2:node@c-1.me) 366936 | 366680
+> [ 520.000000] (2:node@c-1.me) 367192 | 366680
+> [ 520.000000] (2:node@c-1.me) 367704 | 366680
+> [ 520.000000] (2:node@c-1.me) 368728 | 366680
+> [ 520.000000] (2:node@c-1.me) 370776 | 366680
+> [ 520.000000] (2:node@c-1.me) 374872 | 366680
+> [ 520.000000] (2:node@c-1.me) 383064 | 366680
+> [ 520.000000] (2:node@c-1.me) 399448 | 366680
+> [ 520.000000] (2:node@c-1.me) 432216 | 366680
+> [ 520.000000] (2:node@c-1.me) 497752 | 366680
+> [ 520.000000] (2:node@c-1.me) 628824 | 366680
+> [ 520.000000] (2:node@c-1.me) 890968 | 366680
+> [ 520.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 520.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 520.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 520.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 520.000000] (2:node@c-1.me) Predecessor: 42
+> [ 529.000000] (8:node@c-7.me) My finger table:
+> [ 529.000000] (8:node@c-7.me) Start | Succ
+> [ 529.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 529.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 529.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 529.000000] (8:node@c-7.me) 10004768 | 16509405
+> [ 529.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 529.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 529.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 529.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 529.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 529.000000] (8:node@c-7.me) Predecessor: 6518808
+> [ 530.000000] (10:node@c-9.me) My finger table:
+> [ 530.000000] (10:node@c-9.me) Start | Succ
+> [ 530.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 530.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 530.000000] (10:node@c-9.me) 2015257 | 6518808
+> [ 530.000000] (10:node@c-9.me) 2015261 | 6518808
+> [ 530.000000] (10:node@c-9.me) 2015269 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 530.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 530.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 530.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 530.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 530.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 536.000000] (4:node@c-3.me) My finger table:
+> [ 536.000000] (4:node@c-3.me) Start | Succ
+> [ 536.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 536.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 536.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 536.000000] (4:node@c-3.me) 1319746 | 2015253
+> [ 536.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 536.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 536.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 536.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 536.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 536.000000] (4:node@c-3.me) Predecessor: 366680
+> [ 544.000000] (4:node@c-3.me) My finger table:
+> [ 544.000000] (4:node@c-3.me) Start | Succ
+> [ 544.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 544.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 544.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 544.000000] (4:node@c-3.me) 1319746 | 2015253
+> [ 544.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 544.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 544.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 544.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 544.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 544.000000] (4:node@c-3.me) Predecessor: 533744
+> [ 547.000000] (5:node@c-4.me) My finger table:
+> [ 547.000000] (5:node@c-4.me) Start | Succ
+> [ 547.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 547.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 547.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 547.000000] (5:node@c-4.me) 16509413 | 16728096
+> [ 547.000000] (5:node@c-4.me) 16509421 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 547.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 547.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 547.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 547.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 547.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 547.000000] (5:node@c-4.me) Predecessor: 10004760
+> [ 600.000000] (3:node@c-2.me) My finger table:
+> [ 600.000000] (3:node@c-2.me) Start | Succ
+> [ 600.000000] (3:node@c-2.me) 533745 | 1319738
+> [ 600.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 600.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 600.000000] (3:node@c-2.me) 533752 | 1319738
+> [ 600.000000] (3:node@c-2.me) 533760 | 533744
+> [ 600.000000] (3:node@c-2.me) 533776 | 533744
+> [ 600.000000] (3:node@c-2.me) 533808 | 533744
+> [ 600.000000] (3:node@c-2.me) 533872 | 533744
+> [ 600.000000] (3:node@c-2.me) 534000 | 533744
+> [ 600.000000] (3:node@c-2.me) 534256 | 533744
+> [ 600.000000] (3:node@c-2.me) 534768 | 533744
+> [ 600.000000] (3:node@c-2.me) 535792 | 533744
+> [ 600.000000] (3:node@c-2.me) 537840 | 533744
+> [ 600.000000] (3:node@c-2.me) 541936 | 533744
+> [ 600.000000] (3:node@c-2.me) 550128 | 533744
+> [ 600.000000] (3:node@c-2.me) 566512 | 533744
+> [ 600.000000] (3:node@c-2.me) 599280 | 533744
+> [ 600.000000] (3:node@c-2.me) 664816 | 533744
+> [ 600.000000] (3:node@c-2.me) 795888 | 533744
+> [ 600.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 600.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 600.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 600.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 600.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 600.000000] (3:node@c-2.me) Predecessor: 366680
+> [ 612.000000] (6:node@c-5.me) My finger table:
+> [ 612.000000] (6:node@c-5.me) Start | Succ
+> [ 612.000000] (6:node@c-5.me) 10874877 | 366680
+> [ 612.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 612.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 612.000000] (6:node@c-5.me) 10874884 | 533744
+> [ 612.000000] (6:node@c-5.me) 10874892 | 366680
+> [ 612.000000] (6:node@c-5.me) 10874908 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 612.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 612.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 612.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 612.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 612.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 612.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 612.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 612.000000] (6:node@c-5.me) Predecessor: -1
+> [ 614.000000] (1:node@c-0.me) My finger table:
+> [ 614.000000] (1:node@c-0.me) Start | Succ
+> [ 614.000000] (1:node@c-0.me) 43 | 366680
+> [ 614.000000] (1:node@c-0.me) 44 | 366680
+> [ 614.000000] (1:node@c-0.me) 46 | 366680
+> [ 614.000000] (1:node@c-0.me) 50 | 366680
+> [ 614.000000] (1:node@c-0.me) 58 | 366680
+> [ 614.000000] (1:node@c-0.me) 74 | 42
+> [ 614.000000] (1:node@c-0.me) 106 | 42
+> [ 614.000000] (1:node@c-0.me) 170 | 42
+> [ 614.000000] (1:node@c-0.me) 298 | 42
+> [ 614.000000] (1:node@c-0.me) 554 | 42
+> [ 614.000000] (1:node@c-0.me) 1066 | 42
+> [ 614.000000] (1:node@c-0.me) 2090 | 42
+> [ 614.000000] (1:node@c-0.me) 4138 | 42
+> [ 614.000000] (1:node@c-0.me) 8234 | 42
+> [ 614.000000] (1:node@c-0.me) 16426 | 42
+> [ 614.000000] (1:node@c-0.me) 32810 | 42
+> [ 614.000000] (1:node@c-0.me) 65578 | 42
+> [ 614.000000] (1:node@c-0.me) 131114 | 42
+> [ 614.000000] (1:node@c-0.me) 262186 | 42
+> [ 614.000000] (1:node@c-0.me) 524330 | 42
+> [ 614.000000] (1:node@c-0.me) 1048618 | 42
+> [ 614.000000] (1:node@c-0.me) 2097194 | 42
+> [ 614.000000] (1:node@c-0.me) 4194346 | 42
+> [ 614.000000] (1:node@c-0.me) 8388650 | 42
+> [ 614.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 632.000000] (3:node@c-2.me) My finger table:
+> [ 632.000000] (3:node@c-2.me) Start | Succ
+> [ 632.000000] (3:node@c-2.me) 533745 | 1319738
+> [ 632.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 632.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 632.000000] (3:node@c-2.me) 533752 | 1319738
+> [ 632.000000] (3:node@c-2.me) 533760 | 1319738
+> [ 632.000000] (3:node@c-2.me) 533776 | 533744
+> [ 632.000000] (3:node@c-2.me) 533808 | 533744
+> [ 632.000000] (3:node@c-2.me) 533872 | 533744
+> [ 632.000000] (3:node@c-2.me) 534000 | 533744
+> [ 632.000000] (3:node@c-2.me) 534256 | 533744
+> [ 632.000000] (3:node@c-2.me) 534768 | 533744
+> [ 632.000000] (3:node@c-2.me) 535792 | 533744
+> [ 632.000000] (3:node@c-2.me) 537840 | 533744
+> [ 632.000000] (3:node@c-2.me) 541936 | 533744
+> [ 632.000000] (3:node@c-2.me) 550128 | 533744
+> [ 632.000000] (3:node@c-2.me) 566512 | 533744
+> [ 632.000000] (3:node@c-2.me) 599280 | 533744
+> [ 632.000000] (3:node@c-2.me) 664816 | 533744
+> [ 632.000000] (3:node@c-2.me) 795888 | 533744
+> [ 632.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 632.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 632.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 632.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 632.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 632.000000] (3:node@c-2.me) Predecessor: 366680
+> [ 638.000000] (7:node@c-6.me) My finger table:
+> [ 638.000000] (7:node@c-6.me) Start | Succ
+> [ 638.000000] (7:node@c-6.me) 16728097 | 42
+> [ 638.000000] (7:node@c-6.me) 16728098 | 42
+> [ 638.000000] (7:node@c-6.me) 16728100 | 42
+> [ 638.000000] (7:node@c-6.me) 16728104 | 42
+> [ 638.000000] (7:node@c-6.me) 16728112 | 42
+> [ 638.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 638.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 638.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 638.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 638.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 638.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 638.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 638.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 638.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 643.000000] (2:node@c-1.me) My finger table:
+> [ 643.000000] (2:node@c-1.me) Start | Succ
+> [ 643.000000] (2:node@c-1.me) 366681 | 533744
+> [ 643.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 643.000000] (2:node@c-1.me) 366684 | 1319738
+> [ 643.000000] (2:node@c-1.me) 366688 | 1319738
+> [ 643.000000] (2:node@c-1.me) 366696 | 533744
+> [ 643.000000] (2:node@c-1.me) 366712 | 366680
+> [ 643.000000] (2:node@c-1.me) 366744 | 366680
+> [ 643.000000] (2:node@c-1.me) 366808 | 366680
+> [ 643.000000] (2:node@c-1.me) 366936 | 366680
+> [ 643.000000] (2:node@c-1.me) 367192 | 366680
+> [ 643.000000] (2:node@c-1.me) 367704 | 366680
+> [ 643.000000] (2:node@c-1.me) 368728 | 366680
+> [ 643.000000] (2:node@c-1.me) 370776 | 366680
+> [ 643.000000] (2:node@c-1.me) 374872 | 366680
+> [ 643.000000] (2:node@c-1.me) 383064 | 366680
+> [ 643.000000] (2:node@c-1.me) 399448 | 366680
+> [ 643.000000] (2:node@c-1.me) 432216 | 366680
+> [ 643.000000] (2:node@c-1.me) 497752 | 366680
+> [ 643.000000] (2:node@c-1.me) 628824 | 366680
+> [ 643.000000] (2:node@c-1.me) 890968 | 366680
+> [ 643.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 643.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 643.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 643.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 643.000000] (2:node@c-1.me) Predecessor: 42
+> [ 662.000000] (4:node@c-3.me) My finger table:
+> [ 662.000000] (4:node@c-3.me) Start | Succ
+> [ 662.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 662.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 662.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 662.000000] (4:node@c-3.me) 1319746 | 2015253
+> [ 662.000000] (4:node@c-3.me) 1319754 | 2015253
+> [ 662.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 662.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 662.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 662.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 662.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 662.000000] (4:node@c-3.me) Predecessor: 533744
+> [ 663.000000] (8:node@c-7.me) My finger table:
+> [ 663.000000] (8:node@c-7.me) Start | Succ
+> [ 663.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 663.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 663.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 663.000000] (8:node@c-7.me) 10004768 | 16509405
+> [ 663.000000] (8:node@c-7.me) 10004776 | 16509405
+> [ 663.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 663.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 663.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 663.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 663.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 663.000000] (8:node@c-7.me) Predecessor: 6518808
+> [ 666.000000] (10:node@c-9.me) My finger table:
+> [ 666.000000] (10:node@c-9.me) Start | Succ
+> [ 666.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 666.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 666.000000] (10:node@c-9.me) 2015257 | 6518808
+> [ 666.000000] (10:node@c-9.me) 2015261 | 6518808
+> [ 666.000000] (10:node@c-9.me) 2015269 | 6518808
+> [ 666.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 666.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 666.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 666.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 666.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 666.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 669.000000] (9:node@c-8.me) My finger table:
+> [ 669.000000] (9:node@c-8.me) Start | Succ
+> [ 669.000000] (9:node@c-8.me) 6518809 | 10004760
+> [ 669.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 669.000000] (9:node@c-8.me) 6518812 | 10004760
+> [ 669.000000] (9:node@c-8.me) 6518816 | 10004760
+> [ 669.000000] (9:node@c-8.me) 6518824 | 10004760
+> [ 669.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 669.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 669.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 669.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 669.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 669.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 669.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 693.000000] (5:node@c-4.me) My finger table:
+> [ 693.000000] (5:node@c-4.me) Start | Succ
+> [ 693.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 693.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 693.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 693.000000] (5:node@c-4.me) 16509413 | 16728096
+> [ 693.000000] (5:node@c-4.me) 16509421 | 16728096
+> [ 693.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 693.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 693.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 693.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 693.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 693.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 693.000000] (5:node@c-4.me) Predecessor: 10004760
+> [ 729.000000] (5:node@c-4.me) My finger table:
+> [ 729.000000] (5:node@c-4.me) Start | Succ
+> [ 729.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 729.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 729.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 729.000000] (5:node@c-4.me) 16509413 | 16728096
+> [ 729.000000] (5:node@c-4.me) 16509421 | 16728096
+> [ 729.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 729.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 729.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 729.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 729.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 729.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 729.000000] (5:node@c-4.me) Predecessor: 10874876
+> [ 734.000000] (6:node@c-5.me) My finger table:
+> [ 734.000000] (6:node@c-5.me) Start | Succ
+> [ 734.000000] (6:node@c-5.me) 10874877 | 16509405
+> [ 734.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 734.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 734.000000] (6:node@c-5.me) 10874884 | 533744
+> [ 734.000000] (6:node@c-5.me) 10874892 | 366680
+> [ 734.000000] (6:node@c-5.me) 10874908 | 16509405
+> [ 734.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 734.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 734.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 734.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 734.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 734.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 734.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 734.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 734.000000] (6:node@c-5.me) Predecessor: -1
+> [ 740.000000] (1:node@c-0.me) My finger table:
+> [ 740.000000] (1:node@c-0.me) Start | Succ
+> [ 740.000000] (1:node@c-0.me) 43 | 366680
+> [ 740.000000] (1:node@c-0.me) 44 | 366680
+> [ 740.000000] (1:node@c-0.me) 46 | 366680
+> [ 740.000000] (1:node@c-0.me) 50 | 366680
+> [ 740.000000] (1:node@c-0.me) 58 | 366680
+> [ 740.000000] (1:node@c-0.me) 74 | 366680
+> [ 740.000000] (1:node@c-0.me) 106 | 42
+> [ 740.000000] (1:node@c-0.me) 170 | 42
+> [ 740.000000] (1:node@c-0.me) 298 | 42
+> [ 740.000000] (1:node@c-0.me) 554 | 42
+> [ 740.000000] (1:node@c-0.me) 1066 | 42
+> [ 740.000000] (1:node@c-0.me) 2090 | 42
+> [ 740.000000] (1:node@c-0.me) 4138 | 42
+> [ 740.000000] (1:node@c-0.me) 8234 | 42
+> [ 740.000000] (1:node@c-0.me) 16426 | 42
+> [ 740.000000] (1:node@c-0.me) 32810 | 42
+> [ 740.000000] (1:node@c-0.me) 65578 | 42
+> [ 740.000000] (1:node@c-0.me) 131114 | 42
+> [ 740.000000] (1:node@c-0.me) 262186 | 42
+> [ 740.000000] (1:node@c-0.me) 524330 | 42
+> [ 740.000000] (1:node@c-0.me) 1048618 | 42
+> [ 740.000000] (1:node@c-0.me) 2097194 | 42
+> [ 740.000000] (1:node@c-0.me) 4194346 | 42
+> [ 740.000000] (1:node@c-0.me) 8388650 | 42
+> [ 740.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 753.000000] (3:node@c-2.me) My finger table:
+> [ 753.000000] (3:node@c-2.me) Start | Succ
+> [ 753.000000] (3:node@c-2.me) 533745 | 1319738
+> [ 753.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 753.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 753.000000] (3:node@c-2.me) 533752 | 1319738
+> [ 753.000000] (3:node@c-2.me) 533760 | 1319738
+> [ 753.000000] (3:node@c-2.me) 533776 | 1319738
+> [ 753.000000] (3:node@c-2.me) 533808 | 533744
+> [ 753.000000] (3:node@c-2.me) 533872 | 533744
+> [ 753.000000] (3:node@c-2.me) 534000 | 533744
+> [ 753.000000] (3:node@c-2.me) 534256 | 533744
+> [ 753.000000] (3:node@c-2.me) 534768 | 533744
+> [ 753.000000] (3:node@c-2.me) 535792 | 533744
+> [ 753.000000] (3:node@c-2.me) 537840 | 533744
+> [ 753.000000] (3:node@c-2.me) 541936 | 533744
+> [ 753.000000] (3:node@c-2.me) 550128 | 533744
+> [ 753.000000] (3:node@c-2.me) 566512 | 533744
+> [ 753.000000] (3:node@c-2.me) 599280 | 533744
+> [ 753.000000] (3:node@c-2.me) 664816 | 533744
+> [ 753.000000] (3:node@c-2.me) 795888 | 533744
+> [ 753.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 753.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 753.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 753.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 753.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 753.000000] (3:node@c-2.me) Predecessor: 366680
+> [ 765.000000] (2:node@c-1.me) My finger table:
+> [ 765.000000] (2:node@c-1.me) Start | Succ
+> [ 765.000000] (2:node@c-1.me) 366681 | 533744
+> [ 765.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 765.000000] (2:node@c-1.me) 366684 | 1319738
+> [ 765.000000] (2:node@c-1.me) 366688 | 1319738
+> [ 765.000000] (2:node@c-1.me) 366696 | 533744
+> [ 765.000000] (2:node@c-1.me) 366712 | 533744
+> [ 765.000000] (2:node@c-1.me) 366744 | 366680
+> [ 765.000000] (2:node@c-1.me) 366808 | 366680
+> [ 765.000000] (2:node@c-1.me) 366936 | 366680
+> [ 765.000000] (2:node@c-1.me) 367192 | 366680
+> [ 765.000000] (2:node@c-1.me) 367704 | 366680
+> [ 765.000000] (2:node@c-1.me) 368728 | 366680
+> [ 765.000000] (2:node@c-1.me) 370776 | 366680
+> [ 765.000000] (2:node@c-1.me) 374872 | 366680
+> [ 765.000000] (2:node@c-1.me) 383064 | 366680
+> [ 765.000000] (2:node@c-1.me) 399448 | 366680
+> [ 765.000000] (2:node@c-1.me) 432216 | 366680
+> [ 765.000000] (2:node@c-1.me) 497752 | 366680
+> [ 765.000000] (2:node@c-1.me) 628824 | 366680
+> [ 765.000000] (2:node@c-1.me) 890968 | 366680
+> [ 765.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 765.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 765.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 765.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 765.000000] (2:node@c-1.me) Predecessor: 42
+> [ 766.000000] (6:node@c-5.me) My finger table:
+> [ 766.000000] (6:node@c-5.me) Start | Succ
+> [ 766.000000] (6:node@c-5.me) 10874877 | 16509405
+> [ 766.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 766.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 766.000000] (6:node@c-5.me) 10874884 | 533744
+> [ 766.000000] (6:node@c-5.me) 10874892 | 366680
+> [ 766.000000] (6:node@c-5.me) 10874908 | 16509405
+> [ 766.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 766.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 766.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 766.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 766.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 766.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 766.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 766.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 766.000000] (6:node@c-5.me) Predecessor: 10004760
+> [ 777.000000] (7:node@c-6.me) My finger table:
+> [ 777.000000] (7:node@c-6.me) Start | Succ
+> [ 777.000000] (7:node@c-6.me) 16728097 | 42
+> [ 777.000000] (7:node@c-6.me) 16728098 | 42
+> [ 777.000000] (7:node@c-6.me) 16728100 | 42
+> [ 777.000000] (7:node@c-6.me) 16728104 | 42
+> [ 777.000000] (7:node@c-6.me) 16728112 | 42
+> [ 777.000000] (7:node@c-6.me) 16728128 | 42
+> [ 777.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 777.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 777.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 777.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 777.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 777.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 777.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 777.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 777.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 800.000000] (8:node@c-7.me) My finger table:
+> [ 800.000000] (8:node@c-7.me) Start | Succ
+> [ 800.000000] (8:node@c-7.me) 10004761 | 10874876
+> [ 800.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 800.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 800.000000] (8:node@c-7.me) 10004768 | 16509405
+> [ 800.000000] (8:node@c-7.me) 10004776 | 16509405
+> [ 800.000000] (8:node@c-7.me) 10004792 | 10874876
+> [ 800.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 800.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 800.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 800.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 800.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 800.000000] (8:node@c-7.me) Predecessor: 6518808
+> [ 806.000000] (9:node@c-8.me) My finger table:
+> [ 806.000000] (9:node@c-8.me) Start | Succ
+> [ 806.000000] (9:node@c-8.me) 6518809 | 10004760
+> [ 806.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 806.000000] (9:node@c-8.me) 6518812 | 10004760
+> [ 806.000000] (9:node@c-8.me) 6518816 | 10004760
+> [ 806.000000] (9:node@c-8.me) 6518824 | 10004760
+> [ 806.000000] (9:node@c-8.me) 6518840 | 10004760
+> [ 806.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 806.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 806.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 806.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 806.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 806.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 806.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 812.000000] (10:node@c-9.me) My finger table:
+> [ 812.000000] (10:node@c-9.me) Start | Succ
+> [ 812.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015257 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015261 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015269 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015285 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 812.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 812.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 812.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 812.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 812.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 812.000000] (4:node@c-3.me) My finger table:
+> [ 812.000000] (4:node@c-3.me) Start | Succ
+> [ 812.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319746 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319754 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319770 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 812.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 812.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 812.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 812.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 812.000000] (4:node@c-3.me) Predecessor: 533744
+> [ 833.000000] (5:node@c-4.me) My finger table:
+> [ 833.000000] (5:node@c-4.me) Start | Succ
+> [ 833.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509413 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509421 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509437 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 833.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 833.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 833.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 833.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 833.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 833.000000] (5:node@c-4.me) Predecessor: 10874876
+> [ 861.000000] (1:node@c-0.me) My finger table:
+> [ 861.000000] (1:node@c-0.me) Start | Succ
+> [ 861.000000] (1:node@c-0.me) 43 | 366680
+> [ 861.000000] (1:node@c-0.me) 44 | 366680
+> [ 861.000000] (1:node@c-0.me) 46 | 366680
+> [ 861.000000] (1:node@c-0.me) 50 | 366680
+> [ 861.000000] (1:node@c-0.me) 58 | 366680
+> [ 861.000000] (1:node@c-0.me) 74 | 366680
+> [ 861.000000] (1:node@c-0.me) 106 | 366680
+> [ 861.000000] (1:node@c-0.me) 170 | 42
+> [ 861.000000] (1:node@c-0.me) 298 | 42
+> [ 861.000000] (1:node@c-0.me) 554 | 42
+> [ 861.000000] (1:node@c-0.me) 1066 | 42
+> [ 861.000000] (1:node@c-0.me) 2090 | 42
+> [ 861.000000] (1:node@c-0.me) 4138 | 42
+> [ 861.000000] (1:node@c-0.me) 8234 | 42
+> [ 861.000000] (1:node@c-0.me) 16426 | 42
+> [ 861.000000] (1:node@c-0.me) 32810 | 42
+> [ 861.000000] (1:node@c-0.me) 65578 | 42
+> [ 861.000000] (1:node@c-0.me) 131114 | 42
+> [ 861.000000] (1:node@c-0.me) 262186 | 42
+> [ 861.000000] (1:node@c-0.me) 524330 | 42
+> [ 861.000000] (1:node@c-0.me) 1048618 | 42
+> [ 861.000000] (1:node@c-0.me) 2097194 | 42
+> [ 861.000000] (1:node@c-0.me) 4194346 | 42
+> [ 861.000000] (1:node@c-0.me) 8388650 | 42
+> [ 861.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 884.000000] (3:node@c-2.me) My finger table:
+> [ 884.000000] (3:node@c-2.me) Start | Succ
+> [ 884.000000] (3:node@c-2.me) 533745 | 1319738
+> [ 884.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 884.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 884.000000] (3:node@c-2.me) 533752 | 1319738
+> [ 884.000000] (3:node@c-2.me) 533760 | 1319738
+> [ 884.000000] (3:node@c-2.me) 533776 | 1319738
+> [ 884.000000] (3:node@c-2.me) 533808 | 1319738
+> [ 884.000000] (3:node@c-2.me) 533872 | 533744
+> [ 884.000000] (3:node@c-2.me) 534000 | 533744
+> [ 884.000000] (3:node@c-2.me) 534256 | 533744
+> [ 884.000000] (3:node@c-2.me) 534768 | 533744
+> [ 884.000000] (3:node@c-2.me) 535792 | 533744
+> [ 884.000000] (3:node@c-2.me) 537840 | 533744
+> [ 884.000000] (3:node@c-2.me) 541936 | 533744
+> [ 884.000000] (3:node@c-2.me) 550128 | 533744
+> [ 884.000000] (3:node@c-2.me) 566512 | 533744
+> [ 884.000000] (3:node@c-2.me) 599280 | 533744
+> [ 884.000000] (3:node@c-2.me) 664816 | 533744
+> [ 884.000000] (3:node@c-2.me) 795888 | 533744
+> [ 884.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 884.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 884.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 884.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 884.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 884.000000] (3:node@c-2.me) Predecessor: 366680
+> [ 887.000000] (6:node@c-5.me) My finger table:
+> [ 887.000000] (6:node@c-5.me) Start | Succ
+> [ 887.000000] (6:node@c-5.me) 10874877 | 16509405
+> [ 887.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 887.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 887.000000] (6:node@c-5.me) 10874884 | 533744
+> [ 887.000000] (6:node@c-5.me) 10874892 | 366680
+> [ 887.000000] (6:node@c-5.me) 10874908 | 16509405
+> [ 887.000000] (6:node@c-5.me) 10874940 | 16509405
+> [ 887.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 887.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 887.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 887.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 887.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 887.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 887.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 887.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 887.000000] (6:node@c-5.me) Predecessor: 10004760
+> [ 908.000000] (2:node@c-1.me) My finger table:
+> [ 908.000000] (2:node@c-1.me) Start | Succ
+> [ 908.000000] (2:node@c-1.me) 366681 | 533744
+> [ 908.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 908.000000] (2:node@c-1.me) 366684 | 1319738
+> [ 908.000000] (2:node@c-1.me) 366688 | 1319738
+> [ 908.000000] (2:node@c-1.me) 366696 | 533744
+> [ 908.000000] (2:node@c-1.me) 366712 | 533744
+> [ 908.000000] (2:node@c-1.me) 366744 | 533744
+> [ 908.000000] (2:node@c-1.me) 366808 | 366680
+> [ 908.000000] (2:node@c-1.me) 366936 | 366680
+> [ 908.000000] (2:node@c-1.me) 367192 | 366680
+> [ 908.000000] (2:node@c-1.me) 367704 | 366680
+> [ 908.000000] (2:node@c-1.me) 368728 | 366680
+> [ 908.000000] (2:node@c-1.me) 370776 | 366680
+> [ 908.000000] (2:node@c-1.me) 374872 | 366680
+> [ 908.000000] (2:node@c-1.me) 383064 | 366680
+> [ 908.000000] (2:node@c-1.me) 399448 | 366680
+> [ 908.000000] (2:node@c-1.me) 432216 | 366680
+> [ 908.000000] (2:node@c-1.me) 497752 | 366680
+> [ 908.000000] (2:node@c-1.me) 628824 | 366680
+> [ 908.000000] (2:node@c-1.me) 890968 | 366680
+> [ 908.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 908.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 908.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 908.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 908.000000] (2:node@c-1.me) Predecessor: 42
+> [ 915.000000] (7:node@c-6.me) My finger table:
+> [ 915.000000] (7:node@c-6.me) Start | Succ
+> [ 915.000000] (7:node@c-6.me) 16728097 | 42
+> [ 915.000000] (7:node@c-6.me) 16728098 | 42
+> [ 915.000000] (7:node@c-6.me) 16728100 | 42
+> [ 915.000000] (7:node@c-6.me) 16728104 | 42
+> [ 915.000000] (7:node@c-6.me) 16728112 | 42
+> [ 915.000000] (7:node@c-6.me) 16728128 | 42
+> [ 915.000000] (7:node@c-6.me) 16728160 | 42
+> [ 915.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 915.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 915.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 915.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 915.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 915.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 915.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 915.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 915.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 920.000000] (8:node@c-7.me) My finger table:
+> [ 920.000000] (8:node@c-7.me) Start | Succ
+> [ 920.000000] (8:node@c-7.me) 10004761 | 10874876
+> [ 920.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 920.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 920.000000] (8:node@c-7.me) 10004768 | 16509405
+> [ 920.000000] (8:node@c-7.me) 10004776 | 16509405
+> [ 920.000000] (8:node@c-7.me) 10004792 | 10874876
+> [ 920.000000] (8:node@c-7.me) 10004824 | 10874876
+> [ 920.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 920.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 920.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 920.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 920.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 920.000000] (8:node@c-7.me) Predecessor: 6518808
+> [ 932.000000] (10:node@c-9.me) My finger table:
+> [ 932.000000] (10:node@c-9.me) Start | Succ
+> [ 932.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015257 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015261 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015269 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015285 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015317 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 932.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 932.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 932.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 932.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 932.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 950.000000] (4:node@c-3.me) My finger table:
+> [ 950.000000] (4:node@c-3.me) Start | Succ
+> [ 950.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319746 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319754 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319770 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319802 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 950.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 950.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 950.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 950.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 950.000000] (4:node@c-3.me) Predecessor: 533744
+> [ 955.000000] (9:node@c-8.me) My finger table:
+> [ 955.000000] (9:node@c-8.me) Start | Succ
+> [ 955.000000] (9:node@c-8.me) 6518809 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 955.000000] (9:node@c-8.me) 6518812 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518816 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518824 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518840 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518872 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 955.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 955.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 955.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 955.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 955.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 955.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 972.000000] (5:node@c-4.me) My finger table:
+> [ 972.000000] (5:node@c-4.me) Start | Succ
+> [ 972.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509413 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509421 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509437 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509469 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 972.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 972.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 972.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 972.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 972.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 972.000000] (5:node@c-4.me) Predecessor: 10874876
+> [ 984.000000] (1:node@c-0.me) My finger table:
+> [ 984.000000] (1:node@c-0.me) Start | Succ
+> [ 984.000000] (1:node@c-0.me) 43 | 366680
+> [ 984.000000] (1:node@c-0.me) 44 | 366680
+> [ 984.000000] (1:node@c-0.me) 46 | 366680
+> [ 984.000000] (1:node@c-0.me) 50 | 366680
+> [ 984.000000] (1:node@c-0.me) 58 | 366680
+> [ 984.000000] (1:node@c-0.me) 74 | 366680
+> [ 984.000000] (1:node@c-0.me) 106 | 366680
+> [ 984.000000] (1:node@c-0.me) 170 | 366680
+> [ 984.000000] (1:node@c-0.me) 298 | 42
+> [ 984.000000] (1:node@c-0.me) 554 | 42
+> [ 984.000000] (1:node@c-0.me) 1066 | 42
+> [ 984.000000] (1:node@c-0.me) 2090 | 42
+> [ 984.000000] (1:node@c-0.me) 4138 | 42
+> [ 984.000000] (1:node@c-0.me) 8234 | 42
+> [ 984.000000] (1:node@c-0.me) 16426 | 42
+> [ 984.000000] (1:node@c-0.me) 32810 | 42
+> [ 984.000000] (1:node@c-0.me) 65578 | 42
+> [ 984.000000] (1:node@c-0.me) 131114 | 42
+> [ 984.000000] (1:node@c-0.me) 262186 | 42
+> [ 984.000000] (1:node@c-0.me) 524330 | 42
+> [ 984.000000] (1:node@c-0.me) 1048618 | 42
+> [ 984.000000] (1:node@c-0.me) 2097194 | 42
+> [ 984.000000] (1:node@c-0.me) 4194346 | 42
+> [ 984.000000] (1:node@c-0.me) 8388650 | 42
+> [ 984.000000] (1:node@c-0.me) Predecessor: 16728096
+> [1053.000000] (0:@) Messages created: 1972
+> [1053.000000] (0:@) Simulated time: 1053
--- /dev/null
+<?xml version='1.0'?>
+<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid.dtd">
+<platform version="3">
+ <process host="c-0.me" function="node"><argument value="42"/><argument value="6000000"/></process>
+ <process host="c-1.me" function="node"><argument value="366680" /><argument value="42" /><argument value="10" /><argument value="6000000" /></process>
+ <process host="c-2.me" function="node"><argument value="533744" /><argument value="366680" /><argument value="20" /><argument value="6000000" /></process>
+ <process host="c-3.me" function="node"><argument value="1319738" /><argument value="42" /><argument value="30" /><argument value="6000000" /></process>
+ <process host="c-4.me" function="node"><argument value="16509405" /><argument value="366680" /><argument value="40" /><argument value="6000000" /></process>
+ <process host="c-5.me" function="node"><argument value="10874876" /><argument value="533744" /><argument value="50" /><argument value="6000000" /></process>
+ <process host="c-6.me" function="node"><argument value="16728096" /><argument value="1319738" /><argument value="60" /><argument value="6000000" /></process>
+ <process host="c-7.me" function="node"><argument value="10004760" /><argument value="16509405" /><argument value="70" /><argument value="6000000" /></process>
+ <process host="c-8.me" function="node"><argument value="6518808" /><argument value="42" /><argument value="80" /><argument value="6000000" /></process>
+ <process host="c-9.me" function="node"><argument value="2015253" /><argument value="1319738" /><argument value="90" /><argument value="6000000" /></process>
+</platform>
--- /dev/null
+<?xml version='1.0'?>
+<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid.dtd">
+<platform version="3">
+ <process host="c-0.me" function="node"><argument value="42"/><argument value="6000000"/></process>
+ <process host="c-1.me" function="node"><argument value="366680" /><argument value="42" /><argument value="10" /><argument value="6000000" /></process>
+ <process host="c-2.me" function="node"><argument value="533744" /><argument value="366680" /><argument value="20" /><argument value="6000000" /></process>
+ <process host="c-3.me" function="node"><argument value="1319738" /><argument value="42" /><argument value="30" /><argument value="6000000" /></process>
+ <process host="c-4.me" function="node"><argument value="16509405" /><argument value="366680" /><argument value="40" /><argument value="6000000" /></process>
+ <process host="c-5.me" function="node"><argument value="10874876" /><argument value="533744" /><argument value="50" /><argument value="6000000" /></process>
+ <process host="c-6.me" function="node"><argument value="16728096" /><argument value="1319738" /><argument value="60" /><argument value="6000000" /></process>
+ <process host="c-7.me" function="node"><argument value="10004760" /><argument value="16509405" /><argument value="70" /><argument value="6000000" /></process>
+ <process host="c-8.me" function="node"><argument value="6518808" /><argument value="42" /><argument value="80" /><argument value="6000000" /></process>
+ <process host="c-9.me" function="node"><argument value="2015253" /><argument value="1319738" /><argument value="90" /><argument value="6000000" /></process>
+ <process host="c-10.me" function="node"><argument value="10480191" /><argument value="42" /><argument value="100" /><argument value="6000000" /></process>
+ <process host="c-11.me" function="node"><argument value="13505621" /><argument value="10004760" /><argument value="110" /><argument value="6000000" /></process>
+ <process host="c-12.me" function="node"><argument value="13281914" /><argument value="10480191" /><argument value="120" /><argument value="6000000" /></process>
+ <process host="c-13.me" function="node"><argument value="13493864" /><argument value="6518808" /><argument value="130" /><argument value="6000000" /></process>
+ <process host="c-14.me" function="node"><argument value="15853741" /><argument value="6518808" /><argument value="140" /><argument value="6000000" /></process>
+ <process host="c-15.me" function="node"><argument value="12334717" /><argument value="2015253" /><argument value="150" /><argument value="6000000" /></process>
+ <process host="c-16.me" function="node"><argument value="13082922" /><argument value="2015253" /><argument value="160" /><argument value="6000000" /></process>
+ <process host="c-17.me" function="node"><argument value="11008018" /><argument value="13082922" /><argument value="170" /><argument value="6000000" /></process>
+ <process host="c-18.me" function="node"><argument value="14292368" /><argument value="13505621" /><argument value="180" /><argument value="6000000" /></process>
+ <process host="c-19.me" function="node"><argument value="13213873" /><argument value="16509405" /><argument value="190" /><argument value="6000000" /></process>
+ <process host="c-20.me" function="node"><argument value="16742003" /><argument value="11008018" /><argument value="200" /><argument value="6000000" /></process>
+ <process host="c-21.me" function="node"><argument value="8868836" /><argument value="6518808" /><argument value="210" /><argument value="6000000" /></process>
+ <process host="c-22.me" function="node"><argument value="596034" /><argument value="12334717" /><argument value="220" /><argument value="6000000" /></process>
+ <process host="c-23.me" function="node"><argument value="12957732" /><argument value="16728096" /><argument value="230" /><argument value="6000000" /></process>
+ <process host="c-24.me" function="node"><argument value="14183557" /><argument value="12334717" /><argument value="240" /><argument value="6000000" /></process>
+ <process host="c-25.me" function="node"><argument value="9710252" /><argument value="8868836" /><argument value="250" /><argument value="6000000" /></process>
+ <process host="c-26.me" function="node"><argument value="9592804" /><argument value="596034" /><argument value="260" /><argument value="6000000" /></process>
+ <process host="c-27.me" function="node"><argument value="10836171" /><argument value="366680" /><argument value="270" /><argument value="6000000" /></process>
+ <process host="c-28.me" function="node"><argument value="2610930" /><argument value="10836171" /><argument value="280" /><argument value="6000000" /></process>
+ <process host="c-29.me" function="node"><argument value="3938816" /><argument value="596034" /><argument value="290" /><argument value="6000000" /></process>
+ <process host="c-30.me" function="node"><argument value="9207700" /><argument value="1319738" /><argument value="300" /><argument value="6000000" /></process>
+ <process host="c-31.me" function="node"><argument value="11822289" /><argument value="10004760" /><argument value="310" /><argument value="6000000" /></process>
+ <process host="c-32.me" function="node"><argument value="6617885" /><argument value="10004760" /><argument value="320" /><argument value="6000000" /></process>
+ <process host="c-33.me" function="node"><argument value="10798069" /><argument value="366680" /><argument value="330" /><argument value="6000000" /></process>
+ <process host="c-34.me" function="node"><argument value="16224350" /><argument value="10836171" /><argument value="340" /><argument value="6000000" /></process>
+ <process host="c-35.me" function="node"><argument value="116769" /><argument value="13082922" /><argument value="350" /><argument value="6000000" /></process>
+ <process host="c-36.me" function="node"><argument value="8131023" /><argument value="2610930" /><argument value="360" /><argument value="6000000" /></process>
+ <process host="c-37.me" function="node"><argument value="15470236" /><argument value="116769" /><argument value="370" /><argument value="6000000" /></process>
+ <process host="c-38.me" function="node"><argument value="10364630" /><argument value="3938816" /><argument value="380" /><argument value="6000000" /></process>
+ <process host="c-39.me" function="node"><argument value="2379364" /><argument value="8868836" /><argument value="390" /><argument value="6000000" /></process>
+ <process host="c-40.me" function="node"><argument value="5944675" /><argument value="13281914" /><argument value="400" /><argument value="6000000" /></process>
+ <process host="c-41.me" function="node"><argument value="2772317" /><argument value="16224350" /><argument value="410" /><argument value="6000000" /></process>
+ <process host="c-42.me" function="node"><argument value="7514751" /><argument value="42" /><argument value="420" /><argument value="6000000" /></process>
+ <process host="c-43.me" function="node"><argument value="11363703" /><argument value="13505621" /><argument value="430" /><argument value="6000000" /></process>
+ <process host="c-44.me" function="node"><argument value="5864403" /><argument value="42" /><argument value="440" /><argument value="6000000" /></process>
+ <process host="c-45.me" function="node"><argument value="9509382" /><argument value="12957732" /><argument value="450" /><argument value="6000000" /></process>
+ <process host="c-46.me" function="node"><argument value="3107460" /><argument value="10364630" /><argument value="460" /><argument value="6000000" /></process>
+ <process host="c-47.me" function="node"><argument value="13568572" /><argument value="42" /><argument value="470" /><argument value="6000000" /></process>
+ <process host="c-48.me" function="node"><argument value="15651936" /><argument value="9710252" /><argument value="480" /><argument value="6000000" /></process>
+ <process host="c-49.me" function="node"><argument value="10558377" /><argument value="3938816" /><argument value="490" /><argument value="6000000" /></process>
+ <process host="c-50.me" function="node"><argument value="4285545" /><argument value="13493864" /><argument value="500" /><argument value="6000000" /></process>
+ <process host="c-51.me" function="node"><argument value="7467879" /><argument value="10480191" /><argument value="510" /><argument value="6000000" /></process>
+ <process host="c-52.me" function="node"><argument value="11019172" /><argument value="14183557" /><argument value="520" /><argument value="6000000" /></process>
+ <process host="c-53.me" function="node"><argument value="2358899" /><argument value="16728096" /><argument value="530" /><argument value="6000000" /></process>
+ <process host="c-54.me" function="node"><argument value="16134431" /><argument value="11822289" /><argument value="540" /><argument value="6000000" /></process>
+ <process host="c-55.me" function="node"><argument value="5244089" /><argument value="2015253" /><argument value="550" /><argument value="6000000" /></process>
+ <process host="c-56.me" function="node"><argument value="15499692" /><argument value="5944675" /><argument value="560" /><argument value="6000000" /></process>
+ <process host="c-57.me" function="node"><argument value="13922187" /><argument value="366680" /><argument value="570" /><argument value="6000000" /></process>
+ <process host="c-58.me" function="node"><argument value="12086592" /><argument value="2379364" /><argument value="580" /><argument value="6000000" /></process>
+ <process host="c-59.me" function="node"><argument value="10579841" /><argument value="8131023" /><argument value="590" /><argument value="6000000" /></process>
+ <process host="c-60.me" function="node"><argument value="14893867" /><argument value="10798069" /><argument value="600" /><argument value="6000000" /></process>
+ <process host="c-61.me" function="node"><argument value="11273607" /><argument value="12086592" /><argument value="610" /><argument value="6000000" /></process>
+ <process host="c-62.me" function="node"><argument value="810112" /><argument value="10558377" /><argument value="620" /><argument value="6000000" /></process>
+ <process host="c-63.me" function="node"><argument value="10874581" /><argument value="14893867" /><argument value="630" /><argument value="6000000" /></process>
+ <process host="c-64.me" function="node"><argument value="4339906" /><argument value="2379364" /><argument value="640" /><argument value="6000000" /></process>
+ <process host="c-65.me" function="node"><argument value="5230199" /><argument value="15651936" /><argument value="650" /><argument value="6000000" /></process>
+ <process host="c-66.me" function="node"><argument value="3459719" /><argument value="15651936" /><argument value="660" /><argument value="6000000" /></process>
+ <process host="c-67.me" function="node"><argument value="342511" /><argument value="11363703" /><argument value="670" /><argument value="6000000" /></process>
+ <process host="c-68.me" function="node"><argument value="12540825" /><argument value="13082922" /><argument value="680" /><argument value="6000000" /></process>
+ <process host="c-69.me" function="node"><argument value="3915035" /><argument value="16742003" /><argument value="690" /><argument value="6000000" /></process>
+ <process host="c-70.me" function="node"><argument value="9756331" /><argument value="14893867" /><argument value="700" /><argument value="6000000" /></process>
+ <process host="c-71.me" function="node"><argument value="1057" /><argument value="16224350" /><argument value="710" /><argument value="6000000" /></process>
+ <process host="c-72.me" function="node"><argument value="14905830" /><argument value="14292368" /><argument value="720" /><argument value="6000000" /></process>
+ <process host="c-73.me" function="node"><argument value="15011862" /><argument value="116769" /><argument value="730" /><argument value="6000000" /></process>
+ <process host="c-74.me" function="node"><argument value="16561708" /><argument value="2358899" /><argument value="740" /><argument value="6000000" /></process>
+ <process host="c-75.me" function="node"><argument value="15398543" /><argument value="13213873" /><argument value="750" /><argument value="6000000" /></process>
+ <process host="c-76.me" function="node"><argument value="182864" /><argument value="533744" /><argument value="760" /><argument value="6000000" /></process>
+ <process host="c-77.me" function="node"><argument value="6530186" /><argument value="13568572" /><argument value="770" /><argument value="6000000" /></process>
+ <process host="c-78.me" function="node"><argument value="11363165" /><argument value="13213873" /><argument value="780" /><argument value="6000000" /></process>
+ <process host="c-79.me" function="node"><argument value="8636303" /><argument value="15499692" /><argument value="790" /><argument value="6000000" /></process>
+ <process host="c-80.me" function="node"><argument value="11606104" /><argument value="5244089" /><argument value="800" /><argument value="6000000" /></process>
+ <process host="c-81.me" function="node"><argument value="7750053" /><argument value="13505621" /><argument value="810" /><argument value="6000000" /></process>
+ <process host="c-82.me" function="node"><argument value="15166832" /><argument value="3938816" /><argument value="820" /><argument value="6000000" /></process>
+ <process host="c-83.me" function="node"><argument value="4096877" /><argument value="11363703" /><argument value="830" /><argument value="6000000" /></process>
+ <process host="c-84.me" function="node"><argument value="15838695" /><argument value="1319738" /><argument value="840" /><argument value="6000000" /></process>
+ <process host="c-85.me" function="node"><argument value="16057285" /><argument value="182864" /><argument value="850" /><argument value="6000000" /></process>
+ <process host="c-86.me" function="node"><argument value="11161393" /><argument value="15166832" /><argument value="860" /><argument value="6000000" /></process>
+ <process host="c-87.me" function="node"><argument value="7283581" /><argument value="16224350" /><argument value="870" /><argument value="6000000" /></process>
+ <process host="c-88.me" function="node"><argument value="4769647" /><argument value="7750053" /><argument value="880" /><argument value="6000000" /></process>
+ <process host="c-89.me" function="node"><argument value="3395518" /><argument value="596034" /><argument value="890" /><argument value="6000000" /></process>
+ <process host="c-90.me" function="node"><argument value="11275302" /><argument value="10836171" /><argument value="900" /><argument value="6000000" /></process>
+ <process host="c-91.me" function="node"><argument value="1607535" /><argument value="15398543" /><argument value="910" /><argument value="6000000" /></process>
+ <process host="c-92.me" function="node"><argument value="9038828" /><argument value="1057" /><argument value="920" /><argument value="6000000" /></process>
+ <process host="c-93.me" function="node"><argument value="3254523" /><argument value="11363165" /><argument value="930" /><argument value="6000000" /></process>
+ <process host="c-94.me" function="node"><argument value="10826610" /><argument value="366680" /><argument value="940" /><argument value="6000000" /></process>
+ <process host="c-95.me" function="node"><argument value="2013580" /><argument value="11363165" /><argument value="950" /><argument value="6000000" /></process>
+ <process host="c-96.me" function="node"><argument value="4796981" /><argument value="14893867" /><argument value="960" /><argument value="6000000" /></process>
+ <process host="c-97.me" function="node"><argument value="5518537" /><argument value="4285545" /><argument value="970" /><argument value="6000000" /></process>
+ <process host="c-98.me" function="node"><argument value="15089786" /><argument value="15166832" /><argument value="980" /><argument value="6000000" /></process>
+ <process host="c-99.me" function="node"><argument value="8611178" /><argument value="3107460" /><argument value="990" /><argument value="6000000" /></process>
+</platform>
--- /dev/null
+#! ./tesh
+
+p Testing the Chord implementation with MSG
+
+! output sort
+$ $SG_TEST_EXENV ${bindir:=.}/chord$EXEEXT -nb_bits=6 ${srcdir:=.}/../msg_platform.xml ${srcdir:=.}/chord.xml --log=msg_chord.thres:verbose "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (1:node@Gatien) Joining the ring with id 48, knowing node 1
+> [ 0.000000] (2:node@McGee) Joining the ring with id 42, knowing node 1
+> [ 0.000000] (3:node@iRMX) Joining the ring with id 38, knowing node 1
+> [ 0.000000] (4:node@Geoff) Joining the ring with id 32, knowing node 1
+> [ 0.000000] (5:node@TeX) Joining the ring with id 21, knowing node 1
+> [ 0.000000] (6:node@Jean_Yves) Joining the ring with id 14, knowing node 1
+> [ 0.000000] (7:node@Boivin) Joining the ring with id 8, knowing node 1
+> [ 0.000000] (8:node@Jacquelin) My finger table:
+> [ 0.000000] (8:node@Jacquelin) Start | Succ
+> [ 0.000000] (8:node@Jacquelin) 2 | 1
+> [ 0.000000] (8:node@Jacquelin) 3 | 1
+> [ 0.000000] (8:node@Jacquelin) 5 | 1
+> [ 0.000000] (8:node@Jacquelin) 9 | 1
+> [ 0.000000] (8:node@Jacquelin) 17 | 1
+> [ 0.000000] (8:node@Jacquelin) 33 | 1
+> [ 0.000000] (8:node@Jacquelin) Predecessor: -1
+> [ 6.018864] (1:node@Gatien) My finger table:
+> [ 6.018864] (1:node@Gatien) Start | Succ
+> [ 6.018864] (1:node@Gatien) 49 | 1
+> [ 6.018864] (1:node@Gatien) 50 | 48
+> [ 6.018864] (1:node@Gatien) 52 | 48
+> [ 6.018864] (1:node@Gatien) 56 | 48
+> [ 6.018864] (1:node@Gatien) 0 | 48
+> [ 6.018864] (1:node@Gatien) 16 | 48
+> [ 6.018864] (1:node@Gatien) Predecessor: -1
+> [ 10.710204] (2:node@McGee) My finger table:
+> [ 10.710204] (2:node@McGee) Start | Succ
+> [ 10.710204] (2:node@McGee) 43 | 1
+> [ 10.710204] (2:node@McGee) 44 | 42
+> [ 10.710204] (2:node@McGee) 46 | 42
+> [ 10.710204] (2:node@McGee) 50 | 42
+> [ 10.710204] (2:node@McGee) 58 | 42
+> [ 10.710204] (2:node@McGee) 10 | 42
+> [ 10.710204] (2:node@McGee) Predecessor: -1
+> [ 17.433288] (3:node@iRMX) My finger table:
+> [ 17.433288] (3:node@iRMX) Start | Succ
+> [ 17.433288] (3:node@iRMX) 39 | 1
+> [ 17.433288] (3:node@iRMX) 40 | 38
+> [ 17.433288] (3:node@iRMX) 42 | 38
+> [ 17.433288] (3:node@iRMX) 46 | 38
+> [ 17.433288] (3:node@iRMX) 54 | 38
+> [ 17.433288] (3:node@iRMX) 6 | 38
+> [ 17.433288] (3:node@iRMX) Predecessor: -1
+> [ 20.004273] (4:node@Geoff) My finger table:
+> [ 20.004273] (4:node@Geoff) Start | Succ
+> [ 20.004273] (4:node@Geoff) 33 | 1
+> [ 20.004273] (4:node@Geoff) 34 | 32
+> [ 20.004273] (4:node@Geoff) 36 | 32
+> [ 20.004273] (4:node@Geoff) 40 | 32
+> [ 20.004273] (4:node@Geoff) 48 | 32
+> [ 20.004273] (4:node@Geoff) 0 | 32
+> [ 20.004273] (4:node@Geoff) Predecessor: -1
+> [ 26.449094] (5:node@TeX) My finger table:
+> [ 26.449094] (5:node@TeX) Start | Succ
+> [ 26.449094] (5:node@TeX) 22 | 1
+> [ 26.449094] (5:node@TeX) 23 | 21
+> [ 26.449094] (5:node@TeX) 25 | 21
+> [ 26.449094] (5:node@TeX) 29 | 21
+> [ 26.449094] (5:node@TeX) 37 | 21
+> [ 26.449094] (5:node@TeX) 53 | 21
+> [ 26.449094] (5:node@TeX) Predecessor: -1
+> [ 30.739573] (6:node@Jean_Yves) My finger table:
+> [ 30.739573] (6:node@Jean_Yves) Start | Succ
+> [ 30.739573] (6:node@Jean_Yves) 15 | 1
+> [ 30.739573] (6:node@Jean_Yves) 16 | 14
+> [ 30.739573] (6:node@Jean_Yves) 18 | 14
+> [ 30.739573] (6:node@Jean_Yves) 22 | 14
+> [ 30.739573] (6:node@Jean_Yves) 30 | 14
+> [ 30.739573] (6:node@Jean_Yves) 46 | 14
+> [ 30.739573] (6:node@Jean_Yves) Predecessor: -1
+> [ 35.838541] (7:node@Boivin) My finger table:
+> [ 35.838541] (7:node@Boivin) Start | Succ
+> [ 35.838541] (7:node@Boivin) 9 | 1
+> [ 35.838541] (7:node@Boivin) 10 | 8
+> [ 35.838541] (7:node@Boivin) 12 | 8
+> [ 35.838541] (7:node@Boivin) 16 | 8
+> [ 35.838541] (7:node@Boivin) 24 | 8
+> [ 35.838541] (7:node@Boivin) 40 | 8
+> [ 35.838541] (7:node@Boivin) Predecessor: -1
+> [ 75.000000] (8:node@Jacquelin) My finger table:
+> [ 75.000000] (8:node@Jacquelin) Start | Succ
+> [ 75.000000] (8:node@Jacquelin) 2 | 1
+> [ 75.000000] (8:node@Jacquelin) 3 | 1
+> [ 75.000000] (8:node@Jacquelin) 5 | 1
+> [ 75.000000] (8:node@Jacquelin) 9 | 1
+> [ 75.000000] (8:node@Jacquelin) 17 | 1
+> [ 75.000000] (8:node@Jacquelin) 33 | 1
+> [ 75.000000] (8:node@Jacquelin) Predecessor: 32
+> [ 80.000000] (8:node@Jacquelin) My finger table:
+> [ 80.000000] (8:node@Jacquelin) Start | Succ
+> [ 80.000000] (8:node@Jacquelin) 2 | 1
+> [ 80.000000] (8:node@Jacquelin) 3 | 1
+> [ 80.000000] (8:node@Jacquelin) 5 | 1
+> [ 80.000000] (8:node@Jacquelin) 9 | 1
+> [ 80.000000] (8:node@Jacquelin) 17 | 1
+> [ 80.000000] (8:node@Jacquelin) 33 | 1
+> [ 80.000000] (8:node@Jacquelin) Predecessor: 42
+> [ 81.018864] (8:node@Jacquelin) My finger table:
+> [ 81.018864] (8:node@Jacquelin) Start | Succ
+> [ 81.018864] (8:node@Jacquelin) 2 | 42
+> [ 81.018864] (8:node@Jacquelin) 3 | 1
+> [ 81.018864] (8:node@Jacquelin) 5 | 1
+> [ 81.018864] (8:node@Jacquelin) 9 | 1
+> [ 81.018864] (8:node@Jacquelin) 17 | 1
+> [ 81.018864] (8:node@Jacquelin) 33 | 1
+> [ 81.018864] (8:node@Jacquelin) Predecessor: 48
+> [113.891588] (2:node@McGee) My finger table:
+> [113.891588] (2:node@McGee) Start | Succ
+> [113.891588] (2:node@McGee) 43 | 48
+> [113.891588] (2:node@McGee) 44 | 42
+> [113.891588] (2:node@McGee) 46 | 42
+> [113.891588] (2:node@McGee) 50 | 42
+> [113.891588] (2:node@McGee) 58 | 42
+> [113.891588] (2:node@McGee) 10 | 42
+> [113.891588] (2:node@McGee) Predecessor: 1
+> [135.929316] (1:node@Gatien) My finger table:
+> [135.929316] (1:node@Gatien) Start | Succ
+> [135.929316] (1:node@Gatien) 49 | 1
+> [135.929316] (1:node@Gatien) 50 | 48
+> [135.929316] (1:node@Gatien) 52 | 48
+> [135.929316] (1:node@Gatien) 56 | 48
+> [135.929316] (1:node@Gatien) 0 | 48
+> [135.929316] (1:node@Gatien) 16 | 48
+> [135.929316] (1:node@Gatien) Predecessor: 42
+> [187.822192] (2:node@McGee) My finger table:
+> [187.822192] (2:node@McGee) Start | Succ
+> [187.822192] (2:node@McGee) 43 | 48
+> [187.822192] (2:node@McGee) 44 | 42
+> [187.822192] (2:node@McGee) 46 | 42
+> [187.822192] (2:node@McGee) 50 | 42
+> [187.822192] (2:node@McGee) 58 | 42
+> [187.822192] (2:node@McGee) 10 | 42
+> [187.822192] (2:node@McGee) Predecessor: 14
+> [196.476362] (2:node@McGee) My finger table:
+> [196.476362] (2:node@McGee) Start | Succ
+> [196.476362] (2:node@McGee) 43 | 48
+> [196.476362] (2:node@McGee) 44 | 42
+> [196.476362] (2:node@McGee) 46 | 42
+> [196.476362] (2:node@McGee) 50 | 42
+> [196.476362] (2:node@McGee) 58 | 42
+> [196.476362] (2:node@McGee) 10 | 42
+> [196.476362] (2:node@McGee) Predecessor: 38
+> [239.187724] (3:node@iRMX) My finger table:
+> [239.187724] (3:node@iRMX) Start | Succ
+> [239.187724] (3:node@iRMX) 39 | 42
+> [239.187724] (3:node@iRMX) 40 | 38
+> [239.187724] (3:node@iRMX) 42 | 38
+> [239.187724] (3:node@iRMX) 46 | 38
+> [239.187724] (3:node@iRMX) 54 | 38
+> [239.187724] (3:node@iRMX) 6 | 38
+> [239.187724] (3:node@iRMX) Predecessor: 32
+> [246.101146] (1:node@Gatien) My finger table:
+> [246.101146] (1:node@Gatien) Start | Succ
+> [246.101146] (1:node@Gatien) 49 | 1
+> [246.101146] (1:node@Gatien) 50 | 1
+> [246.101146] (1:node@Gatien) 52 | 48
+> [246.101146] (1:node@Gatien) 56 | 48
+> [246.101146] (1:node@Gatien) 0 | 48
+> [246.101146] (1:node@Gatien) 16 | 48
+> [246.101146] (1:node@Gatien) Predecessor: 42
+> [253.395755] (7:node@Boivin) My finger table:
+> [253.395755] (7:node@Boivin) Start | Succ
+> [253.395755] (7:node@Boivin) 9 | 32
+> [253.395755] (7:node@Boivin) 10 | 32
+> [253.395755] (7:node@Boivin) 12 | 8
+> [253.395755] (7:node@Boivin) 16 | 8
+> [253.395755] (7:node@Boivin) 24 | 8
+> [253.395755] (7:node@Boivin) 40 | 8
+> [253.395755] (7:node@Boivin) Predecessor: -1
+> [259.532923] (8:node@Jacquelin) My finger table:
+> [259.532923] (8:node@Jacquelin) Start | Succ
+> [259.532923] (8:node@Jacquelin) 2 | 32
+> [259.532923] (8:node@Jacquelin) 3 | 32
+> [259.532923] (8:node@Jacquelin) 5 | 1
+> [259.532923] (8:node@Jacquelin) 9 | 1
+> [259.532923] (8:node@Jacquelin) 17 | 1
+> [259.532923] (8:node@Jacquelin) 33 | 1
+> [259.532923] (8:node@Jacquelin) Predecessor: 48
+> [261.420507] (2:node@McGee) My finger table:
+> [261.420507] (2:node@McGee) Start | Succ
+> [261.420507] (2:node@McGee) 43 | 48
+> [261.420507] (2:node@McGee) 44 | 48
+> [261.420507] (2:node@McGee) 46 | 42
+> [261.420507] (2:node@McGee) 50 | 42
+> [261.420507] (2:node@McGee) 58 | 42
+> [261.420507] (2:node@McGee) 10 | 42
+> [261.420507] (2:node@McGee) Predecessor: 38
+> [262.822956] (6:node@Jean_Yves) My finger table:
+> [262.822956] (6:node@Jean_Yves) Start | Succ
+> [262.822956] (6:node@Jean_Yves) 15 | 32
+> [262.822956] (6:node@Jean_Yves) 16 | 32
+> [262.822956] (6:node@Jean_Yves) 18 | 14
+> [262.822956] (6:node@Jean_Yves) 22 | 14
+> [262.822956] (6:node@Jean_Yves) 30 | 14
+> [262.822956] (6:node@Jean_Yves) 46 | 14
+> [262.822956] (6:node@Jean_Yves) Predecessor: -1
+> [268.142943] (3:node@iRMX) My finger table:
+> [268.142943] (3:node@iRMX) Start | Succ
+> [268.142943] (3:node@iRMX) 39 | 42
+> [268.142943] (3:node@iRMX) 40 | 42
+> [268.142943] (3:node@iRMX) 42 | 38
+> [268.142943] (3:node@iRMX) 46 | 38
+> [268.142943] (3:node@iRMX) 54 | 38
+> [268.142943] (3:node@iRMX) 6 | 38
+> [268.142943] (3:node@iRMX) Predecessor: 32
+> [271.623232] (5:node@TeX) My finger table:
+> [271.623232] (5:node@TeX) Start | Succ
+> [271.623232] (5:node@TeX) 22 | 32
+> [271.623232] (5:node@TeX) 23 | 32
+> [271.623232] (5:node@TeX) 25 | 21
+> [271.623232] (5:node@TeX) 29 | 21
+> [271.623232] (5:node@TeX) 37 | 21
+> [271.623232] (5:node@TeX) 53 | 21
+> [271.623232] (5:node@TeX) Predecessor: -1
+> [279.057601] (4:node@Geoff) My finger table:
+> [279.057601] (4:node@Geoff) Start | Succ
+> [279.057601] (4:node@Geoff) 33 | 38
+> [279.057601] (4:node@Geoff) 34 | 32
+> [279.057601] (4:node@Geoff) 36 | 32
+> [279.057601] (4:node@Geoff) 40 | 32
+> [279.057601] (4:node@Geoff) 48 | 32
+> [279.057601] (4:node@Geoff) 0 | 32
+> [279.057601] (4:node@Geoff) Predecessor: 8
+> [281.486620] (4:node@Geoff) My finger table:
+> [281.486620] (4:node@Geoff) Start | Succ
+> [281.486620] (4:node@Geoff) 33 | 38
+> [281.486620] (4:node@Geoff) 34 | 38
+> [281.486620] (4:node@Geoff) 36 | 32
+> [281.486620] (4:node@Geoff) 40 | 32
+> [281.486620] (4:node@Geoff) 48 | 32
+> [281.486620] (4:node@Geoff) 0 | 32
+> [281.486620] (4:node@Geoff) Predecessor: 8
+> [292.924103] (4:node@Geoff) My finger table:
+> [292.924103] (4:node@Geoff) Start | Succ
+> [292.924103] (4:node@Geoff) 33 | 38
+> [292.924103] (4:node@Geoff) 34 | 38
+> [292.924103] (4:node@Geoff) 36 | 32
+> [292.924103] (4:node@Geoff) 40 | 32
+> [292.924103] (4:node@Geoff) 48 | 32
+> [292.924103] (4:node@Geoff) 0 | 32
+> [292.924103] (4:node@Geoff) Predecessor: 21
+> [338.206033] (5:node@TeX) My finger table:
+> [338.206033] (5:node@TeX) Start | Succ
+> [338.206033] (5:node@TeX) 22 | 32
+> [338.206033] (5:node@TeX) 23 | 32
+> [338.206033] (5:node@TeX) 25 | 21
+> [338.206033] (5:node@TeX) 29 | 21
+> [338.206033] (5:node@TeX) 37 | 21
+> [338.206033] (5:node@TeX) 53 | 21
+> [338.206033] (5:node@TeX) Predecessor: 14
+> [369.341419] (1:node@Gatien) My finger table:
+> [369.341419] (1:node@Gatien) Start | Succ
+> [369.341419] (1:node@Gatien) 49 | 1
+> [369.341419] (1:node@Gatien) 50 | 1
+> [369.341419] (1:node@Gatien) 52 | 1
+> [369.341419] (1:node@Gatien) 56 | 48
+> [369.341419] (1:node@Gatien) 0 | 48
+> [369.341419] (1:node@Gatien) 16 | 48
+> [369.341419] (1:node@Gatien) Predecessor: 42
+> [379.982438] (8:node@Jacquelin) My finger table:
+> [379.982438] (8:node@Jacquelin) Start | Succ
+> [379.982438] (8:node@Jacquelin) 2 | 14
+> [379.982438] (8:node@Jacquelin) 3 | 32
+> [379.982438] (8:node@Jacquelin) 5 | 14
+> [379.982438] (8:node@Jacquelin) 9 | 1
+> [379.982438] (8:node@Jacquelin) 17 | 1
+> [379.982438] (8:node@Jacquelin) 33 | 1
+> [379.982438] (8:node@Jacquelin) Predecessor: 48
+> [384.034401] (2:node@McGee) My finger table:
+> [384.034401] (2:node@McGee) Start | Succ
+> [384.034401] (2:node@McGee) 43 | 48
+> [384.034401] (2:node@McGee) 44 | 48
+> [384.034401] (2:node@McGee) 46 | 48
+> [384.034401] (2:node@McGee) 50 | 42
+> [384.034401] (2:node@McGee) 58 | 42
+> [384.034401] (2:node@McGee) 10 | 42
+> [384.034401] (2:node@McGee) Predecessor: 38
+> [386.415281] (6:node@Jean_Yves) My finger table:
+> [386.415281] (6:node@Jean_Yves) Start | Succ
+> [386.415281] (6:node@Jean_Yves) 15 | 21
+> [386.415281] (6:node@Jean_Yves) 16 | 32
+> [386.415281] (6:node@Jean_Yves) 18 | 14
+> [386.415281] (6:node@Jean_Yves) 22 | 14
+> [386.415281] (6:node@Jean_Yves) 30 | 14
+> [386.415281] (6:node@Jean_Yves) 46 | 14
+> [386.415281] (6:node@Jean_Yves) Predecessor: 1
+> [386.415281] (6:node@Jean_Yves) My finger table:
+> [386.415281] (6:node@Jean_Yves) Start | Succ
+> [386.415281] (6:node@Jean_Yves) 15 | 21
+> [386.415281] (6:node@Jean_Yves) 16 | 32
+> [386.415281] (6:node@Jean_Yves) 18 | 21
+> [386.415281] (6:node@Jean_Yves) 22 | 14
+> [386.415281] (6:node@Jean_Yves) 30 | 14
+> [386.415281] (6:node@Jean_Yves) 46 | 14
+> [386.415281] (6:node@Jean_Yves) Predecessor: 1
+> [390.149558] (7:node@Boivin) My finger table:
+> [390.149558] (7:node@Boivin) Start | Succ
+> [390.149558] (7:node@Boivin) 9 | 14
+> [390.149558] (7:node@Boivin) 10 | 32
+> [390.149558] (7:node@Boivin) 12 | 14
+> [390.149558] (7:node@Boivin) 16 | 8
+> [390.149558] (7:node@Boivin) 24 | 8
+> [390.149558] (7:node@Boivin) 40 | 8
+> [390.149558] (7:node@Boivin) Predecessor: -1
+> [392.283873] (6:node@Jean_Yves) My finger table:
+> [392.283873] (6:node@Jean_Yves) Start | Succ
+> [392.283873] (6:node@Jean_Yves) 15 | 21
+> [392.283873] (6:node@Jean_Yves) 16 | 32
+> [392.283873] (6:node@Jean_Yves) 18 | 21
+> [392.283873] (6:node@Jean_Yves) 22 | 14
+> [392.283873] (6:node@Jean_Yves) 30 | 14
+> [392.283873] (6:node@Jean_Yves) 46 | 14
+> [392.283873] (6:node@Jean_Yves) Predecessor: 8
+> [404.370688] (4:node@Geoff) My finger table:
+> [404.370688] (4:node@Geoff) Start | Succ
+> [404.370688] (4:node@Geoff) 33 | 38
+> [404.370688] (4:node@Geoff) 34 | 38
+> [404.370688] (4:node@Geoff) 36 | 38
+> [404.370688] (4:node@Geoff) 40 | 32
+> [404.370688] (4:node@Geoff) 48 | 32
+> [404.370688] (4:node@Geoff) 0 | 32
+> [404.370688] (4:node@Geoff) Predecessor: 21
+> [405.070973] (3:node@iRMX) My finger table:
+> [405.070973] (3:node@iRMX) Start | Succ
+> [405.070973] (3:node@iRMX) 39 | 42
+> [405.070973] (3:node@iRMX) 40 | 42
+> [405.070973] (3:node@iRMX) 42 | 42
+> [405.070973] (3:node@iRMX) 46 | 38
+> [405.070973] (3:node@iRMX) 54 | 38
+> [405.070973] (3:node@iRMX) 6 | 38
+> [405.070973] (3:node@iRMX) Predecessor: 32
+> [418.336053] (5:node@TeX) My finger table:
+> [418.336053] (5:node@TeX) Start | Succ
+> [418.336053] (5:node@TeX) 22 | 32
+> [418.336053] (5:node@TeX) 23 | 32
+> [418.336053] (5:node@TeX) 25 | 32
+> [418.336053] (5:node@TeX) 29 | 21
+> [418.336053] (5:node@TeX) 37 | 21
+> [418.336053] (5:node@TeX) 53 | 21
+> [418.336053] (5:node@TeX) Predecessor: 14
+> [448.762028] (7:node@Boivin) My finger table:
+> [448.762028] (7:node@Boivin) Start | Succ
+> [448.762028] (7:node@Boivin) 9 | 14
+> [448.762028] (7:node@Boivin) 10 | 32
+> [448.762028] (7:node@Boivin) 12 | 14
+> [448.762028] (7:node@Boivin) 16 | 8
+> [448.762028] (7:node@Boivin) 24 | 8
+> [448.762028] (7:node@Boivin) 40 | 8
+> [448.762028] (7:node@Boivin) Predecessor: 1
+> [490.780260] (1:node@Gatien) My finger table:
+> [490.780260] (1:node@Gatien) Start | Succ
+> [490.780260] (1:node@Gatien) 49 | 1
+> [490.780260] (1:node@Gatien) 50 | 1
+> [490.780260] (1:node@Gatien) 52 | 1
+> [490.780260] (1:node@Gatien) 56 | 1
+> [490.780260] (1:node@Gatien) 0 | 48
+> [490.780260] (1:node@Gatien) 16 | 48
+> [490.780260] (1:node@Gatien) Predecessor: 42
+> [505.134970] (8:node@Jacquelin) My finger table:
+> [505.134970] (8:node@Jacquelin) Start | Succ
+> [505.134970] (8:node@Jacquelin) 2 | 8
+> [505.134970] (8:node@Jacquelin) 3 | 32
+> [505.134970] (8:node@Jacquelin) 5 | 14
+> [505.134970] (8:node@Jacquelin) 9 | 14
+> [505.134970] (8:node@Jacquelin) 17 | 1
+> [505.134970] (8:node@Jacquelin) 33 | 1
+> [505.134970] (8:node@Jacquelin) Predecessor: 48
+> [519.547951] (6:node@Jean_Yves) My finger table:
+> [519.547951] (6:node@Jean_Yves) Start | Succ
+> [519.547951] (6:node@Jean_Yves) 15 | 21
+> [519.547951] (6:node@Jean_Yves) 16 | 32
+> [519.547951] (6:node@Jean_Yves) 18 | 21
+> [519.547951] (6:node@Jean_Yves) 22 | 32
+> [519.547951] (6:node@Jean_Yves) 30 | 14
+> [519.547951] (6:node@Jean_Yves) 46 | 14
+> [519.547951] (6:node@Jean_Yves) Predecessor: 8
+> [523.456710] (2:node@McGee) My finger table:
+> [523.456710] (2:node@McGee) Start | Succ
+> [523.456710] (2:node@McGee) 43 | 48
+> [523.456710] (2:node@McGee) 44 | 48
+> [523.456710] (2:node@McGee) 46 | 48
+> [523.456710] (2:node@McGee) 50 | 1
+> [523.456710] (2:node@McGee) 58 | 42
+> [523.456710] (2:node@McGee) 10 | 42
+> [523.456710] (2:node@McGee) Predecessor: 38
+> [541.506553] (5:node@TeX) My finger table:
+> [541.506553] (5:node@TeX) Start | Succ
+> [541.506553] (5:node@TeX) 22 | 32
+> [541.506553] (5:node@TeX) 23 | 32
+> [541.506553] (5:node@TeX) 25 | 32
+> [541.506553] (5:node@TeX) 29 | 32
+> [541.506553] (5:node@TeX) 37 | 21
+> [541.506553] (5:node@TeX) 53 | 21
+> [541.506553] (5:node@TeX) Predecessor: 14
+> [542.490747] (4:node@Geoff) My finger table:
+> [542.490747] (4:node@Geoff) Start | Succ
+> [542.490747] (4:node@Geoff) 33 | 38
+> [542.490747] (4:node@Geoff) 34 | 38
+> [542.490747] (4:node@Geoff) 36 | 38
+> [542.490747] (4:node@Geoff) 40 | 42
+> [542.490747] (4:node@Geoff) 48 | 32
+> [542.490747] (4:node@Geoff) 0 | 32
+> [542.490747] (4:node@Geoff) Predecessor: 21
+> [543.696191] (7:node@Boivin) My finger table:
+> [543.696191] (7:node@Boivin) Start | Succ
+> [543.696191] (7:node@Boivin) 9 | 14
+> [543.696191] (7:node@Boivin) 10 | 32
+> [543.696191] (7:node@Boivin) 12 | 14
+> [543.696191] (7:node@Boivin) 16 | 21
+> [543.696191] (7:node@Boivin) 24 | 8
+> [543.696191] (7:node@Boivin) 40 | 8
+> [543.696191] (7:node@Boivin) Predecessor: 1
+> [547.073601] (3:node@iRMX) My finger table:
+> [547.073601] (3:node@iRMX) Start | Succ
+> [547.073601] (3:node@iRMX) 39 | 42
+> [547.073601] (3:node@iRMX) 40 | 42
+> [547.073601] (3:node@iRMX) 42 | 42
+> [547.073601] (3:node@iRMX) 46 | 48
+> [547.073601] (3:node@iRMX) 54 | 38
+> [547.073601] (3:node@iRMX) 6 | 38
+> [547.073601] (3:node@iRMX) Predecessor: 32
+> [649.756627] (0:@) Messages created: 838
+> [649.756627] (0:@) Simulated time: 649.757
+
+! output sort
+$ $SG_TEST_EXENV ${bindir:=.}/chord$EXEEXT ${srcdir:=.}/../../platforms/One_cluster.xml ${srcdir:=.}/chord10.xml --log=msg_chord.thres:verbose "--log=root.fmt:[%11.6r]%e(%i:%P@%h)%e%m%n" --cfg=network/model:Constant
+> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'Constant'
+> [ 0.000000] (0:@) Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [ 0.000000] (1:node@c-0.me) My finger table:
+> [ 0.000000] (1:node@c-0.me) Start | Succ
+> [ 0.000000] (1:node@c-0.me) 43 | 42
+> [ 0.000000] (1:node@c-0.me) 44 | 42
+> [ 0.000000] (1:node@c-0.me) 46 | 42
+> [ 0.000000] (1:node@c-0.me) 50 | 42
+> [ 0.000000] (1:node@c-0.me) 58 | 42
+> [ 0.000000] (1:node@c-0.me) 74 | 42
+> [ 0.000000] (1:node@c-0.me) 106 | 42
+> [ 0.000000] (1:node@c-0.me) 170 | 42
+> [ 0.000000] (1:node@c-0.me) 298 | 42
+> [ 0.000000] (1:node@c-0.me) 554 | 42
+> [ 0.000000] (1:node@c-0.me) 1066 | 42
+> [ 0.000000] (1:node@c-0.me) 2090 | 42
+> [ 0.000000] (1:node@c-0.me) 4138 | 42
+> [ 0.000000] (1:node@c-0.me) 8234 | 42
+> [ 0.000000] (1:node@c-0.me) 16426 | 42
+> [ 0.000000] (1:node@c-0.me) 32810 | 42
+> [ 0.000000] (1:node@c-0.me) 65578 | 42
+> [ 0.000000] (1:node@c-0.me) 131114 | 42
+> [ 0.000000] (1:node@c-0.me) 262186 | 42
+> [ 0.000000] (1:node@c-0.me) 524330 | 42
+> [ 0.000000] (1:node@c-0.me) 1048618 | 42
+> [ 0.000000] (1:node@c-0.me) 2097194 | 42
+> [ 0.000000] (1:node@c-0.me) 4194346 | 42
+> [ 0.000000] (1:node@c-0.me) 8388650 | 42
+> [ 0.000000] (1:node@c-0.me) Predecessor: -1
+> [ 0.000000] (2:node@c-1.me) Joining the ring with id 366680, knowing node 42
+> [ 0.000000] (3:node@c-2.me) Joining the ring with id 533744, knowing node 366680
+> [ 0.000000] (4:node@c-3.me) Joining the ring with id 1319738, knowing node 42
+> [ 0.000000] (5:node@c-4.me) Joining the ring with id 16509405, knowing node 366680
+> [ 0.000000] (6:node@c-5.me) Joining the ring with id 10874876, knowing node 533744
+> [ 0.000000] (7:node@c-6.me) Joining the ring with id 16728096, knowing node 1319738
+> [ 0.000000] (8:node@c-7.me) Joining the ring with id 10004760, knowing node 16509405
+> [ 0.000000] (9:node@c-8.me) Joining the ring with id 6518808, knowing node 42
+> [ 0.000000] (10:node@c-9.me) Joining the ring with id 2015253, knowing node 1319738
+> [ 4.000000] (6:node@c-5.me) My finger table:
+> [ 4.000000] (6:node@c-5.me) Start | Succ
+> [ 4.000000] (6:node@c-5.me) 10874877 | 533744
+> [ 4.000000] (6:node@c-5.me) 10874878 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10874880 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10874884 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10874892 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10874908 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 4.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 4.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 4.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 4.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 4.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 4.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 4.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 4.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 4.000000] (6:node@c-5.me) Predecessor: -1
+> [ 4.000000] (3:node@c-2.me) My finger table:
+> [ 4.000000] (3:node@c-2.me) Start | Succ
+> [ 4.000000] (3:node@c-2.me) 533745 | 366680
+> [ 4.000000] (3:node@c-2.me) 533746 | 533744
+> [ 4.000000] (3:node@c-2.me) 533748 | 533744
+> [ 4.000000] (3:node@c-2.me) 533752 | 533744
+> [ 4.000000] (3:node@c-2.me) 533760 | 533744
+> [ 4.000000] (3:node@c-2.me) 533776 | 533744
+> [ 4.000000] (3:node@c-2.me) 533808 | 533744
+> [ 4.000000] (3:node@c-2.me) 533872 | 533744
+> [ 4.000000] (3:node@c-2.me) 534000 | 533744
+> [ 4.000000] (3:node@c-2.me) 534256 | 533744
+> [ 4.000000] (3:node@c-2.me) 534768 | 533744
+> [ 4.000000] (3:node@c-2.me) 535792 | 533744
+> [ 4.000000] (3:node@c-2.me) 537840 | 533744
+> [ 4.000000] (3:node@c-2.me) 541936 | 533744
+> [ 4.000000] (3:node@c-2.me) 550128 | 533744
+> [ 4.000000] (3:node@c-2.me) 566512 | 533744
+> [ 4.000000] (3:node@c-2.me) 599280 | 533744
+> [ 4.000000] (3:node@c-2.me) 664816 | 533744
+> [ 4.000000] (3:node@c-2.me) 795888 | 533744
+> [ 4.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 4.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 4.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 4.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 4.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 4.000000] (3:node@c-2.me) Predecessor: -1
+> [ 5.000000] (8:node@c-7.me) My finger table:
+> [ 5.000000] (8:node@c-7.me) Start | Succ
+> [ 5.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 5.000000] (8:node@c-7.me) 10004762 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004764 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004768 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 5.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 5.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 5.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 5.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 5.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 5.000000] (8:node@c-7.me) Predecessor: -1
+> [ 5.000000] (5:node@c-4.me) My finger table:
+> [ 5.000000] (5:node@c-4.me) Start | Succ
+> [ 5.000000] (5:node@c-4.me) 16509406 | 366680
+> [ 5.000000] (5:node@c-4.me) 16509407 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509409 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509413 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509421 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 5.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 5.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 5.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 5.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 5.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 5.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 5.000000] (5:node@c-4.me) Predecessor: -1
+> [ 6.000000] (2:node@c-1.me) My finger table:
+> [ 6.000000] (2:node@c-1.me) Start | Succ
+> [ 6.000000] (2:node@c-1.me) 366681 | 42
+> [ 6.000000] (2:node@c-1.me) 366682 | 366680
+> [ 6.000000] (2:node@c-1.me) 366684 | 366680
+> [ 6.000000] (2:node@c-1.me) 366688 | 366680
+> [ 6.000000] (2:node@c-1.me) 366696 | 366680
+> [ 6.000000] (2:node@c-1.me) 366712 | 366680
+> [ 6.000000] (2:node@c-1.me) 366744 | 366680
+> [ 6.000000] (2:node@c-1.me) 366808 | 366680
+> [ 6.000000] (2:node@c-1.me) 366936 | 366680
+> [ 6.000000] (2:node@c-1.me) 367192 | 366680
+> [ 6.000000] (2:node@c-1.me) 367704 | 366680
+> [ 6.000000] (2:node@c-1.me) 368728 | 366680
+> [ 6.000000] (2:node@c-1.me) 370776 | 366680
+> [ 6.000000] (2:node@c-1.me) 374872 | 366680
+> [ 6.000000] (2:node@c-1.me) 383064 | 366680
+> [ 6.000000] (2:node@c-1.me) 399448 | 366680
+> [ 6.000000] (2:node@c-1.me) 432216 | 366680
+> [ 6.000000] (2:node@c-1.me) 497752 | 366680
+> [ 6.000000] (2:node@c-1.me) 628824 | 366680
+> [ 6.000000] (2:node@c-1.me) 890968 | 366680
+> [ 6.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 6.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 6.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 6.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 6.000000] (2:node@c-1.me) Predecessor: -1
+> [ 8.000000] (7:node@c-6.me) My finger table:
+> [ 8.000000] (7:node@c-6.me) Start | Succ
+> [ 8.000000] (7:node@c-6.me) 16728097 | 1319738
+> [ 8.000000] (7:node@c-6.me) 16728098 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728100 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 8.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 8.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 8.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 8.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 8.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 8.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 8.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 8.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 8.000000] (7:node@c-6.me) Predecessor: -1
+> [ 9.000000] (10:node@c-9.me) My finger table:
+> [ 9.000000] (10:node@c-9.me) Start | Succ
+> [ 9.000000] (10:node@c-9.me) 2015254 | 1319738
+> [ 9.000000] (10:node@c-9.me) 2015255 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015257 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015261 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015269 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 9.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 9.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 9.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 9.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 9.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 9.000000] (10:node@c-9.me) Predecessor: -1
+> [ 11.000000] (4:node@c-3.me) My finger table:
+> [ 11.000000] (4:node@c-3.me) Start | Succ
+> [ 11.000000] (4:node@c-3.me) 1319739 | 42
+> [ 11.000000] (4:node@c-3.me) 1319740 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319742 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 11.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 11.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 11.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 11.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 11.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 11.000000] (4:node@c-3.me) Predecessor: -1
+> [ 16.000000] (9:node@c-8.me) My finger table:
+> [ 16.000000] (9:node@c-8.me) Start | Succ
+> [ 16.000000] (9:node@c-8.me) 6518809 | 42
+> [ 16.000000] (9:node@c-8.me) 6518810 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518812 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 16.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 16.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 16.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 16.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 16.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 16.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 16.000000] (9:node@c-8.me) Predecessor: -1
+> [ 24.000000] (5:node@c-4.me) My finger table:
+> [ 24.000000] (5:node@c-4.me) Start | Succ
+> [ 24.000000] (5:node@c-4.me) 16509406 | 366680
+> [ 24.000000] (5:node@c-4.me) 16509407 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509409 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509413 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509421 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 24.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 24.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 24.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 24.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 24.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 24.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 24.000000] (5:node@c-4.me) Predecessor: 10004760
+> [ 26.000000] (4:node@c-3.me) My finger table:
+> [ 26.000000] (4:node@c-3.me) Start | Succ
+> [ 26.000000] (4:node@c-3.me) 1319739 | 42
+> [ 26.000000] (4:node@c-3.me) 1319740 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319742 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 26.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 26.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 26.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 26.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 26.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 26.000000] (4:node@c-3.me) Predecessor: 16728096
+> [ 33.000000] (2:node@c-1.me) My finger table:
+> [ 33.000000] (2:node@c-1.me) Start | Succ
+> [ 33.000000] (2:node@c-1.me) 366681 | 42
+> [ 33.000000] (2:node@c-1.me) 366682 | 366680
+> [ 33.000000] (2:node@c-1.me) 366684 | 366680
+> [ 33.000000] (2:node@c-1.me) 366688 | 366680
+> [ 33.000000] (2:node@c-1.me) 366696 | 366680
+> [ 33.000000] (2:node@c-1.me) 366712 | 366680
+> [ 33.000000] (2:node@c-1.me) 366744 | 366680
+> [ 33.000000] (2:node@c-1.me) 366808 | 366680
+> [ 33.000000] (2:node@c-1.me) 366936 | 366680
+> [ 33.000000] (2:node@c-1.me) 367192 | 366680
+> [ 33.000000] (2:node@c-1.me) 367704 | 366680
+> [ 33.000000] (2:node@c-1.me) 368728 | 366680
+> [ 33.000000] (2:node@c-1.me) 370776 | 366680
+> [ 33.000000] (2:node@c-1.me) 374872 | 366680
+> [ 33.000000] (2:node@c-1.me) 383064 | 366680
+> [ 33.000000] (2:node@c-1.me) 399448 | 366680
+> [ 33.000000] (2:node@c-1.me) 432216 | 366680
+> [ 33.000000] (2:node@c-1.me) 497752 | 366680
+> [ 33.000000] (2:node@c-1.me) 628824 | 366680
+> [ 33.000000] (2:node@c-1.me) 890968 | 366680
+> [ 33.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 33.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 33.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 33.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 33.000000] (2:node@c-1.me) Predecessor: 16509405
+> [ 38.000000] (3:node@c-2.me) My finger table:
+> [ 38.000000] (3:node@c-2.me) Start | Succ
+> [ 38.000000] (3:node@c-2.me) 533745 | 366680
+> [ 38.000000] (3:node@c-2.me) 533746 | 533744
+> [ 38.000000] (3:node@c-2.me) 533748 | 533744
+> [ 38.000000] (3:node@c-2.me) 533752 | 533744
+> [ 38.000000] (3:node@c-2.me) 533760 | 533744
+> [ 38.000000] (3:node@c-2.me) 533776 | 533744
+> [ 38.000000] (3:node@c-2.me) 533808 | 533744
+> [ 38.000000] (3:node@c-2.me) 533872 | 533744
+> [ 38.000000] (3:node@c-2.me) 534000 | 533744
+> [ 38.000000] (3:node@c-2.me) 534256 | 533744
+> [ 38.000000] (3:node@c-2.me) 534768 | 533744
+> [ 38.000000] (3:node@c-2.me) 535792 | 533744
+> [ 38.000000] (3:node@c-2.me) 537840 | 533744
+> [ 38.000000] (3:node@c-2.me) 541936 | 533744
+> [ 38.000000] (3:node@c-2.me) 550128 | 533744
+> [ 38.000000] (3:node@c-2.me) 566512 | 533744
+> [ 38.000000] (3:node@c-2.me) 599280 | 533744
+> [ 38.000000] (3:node@c-2.me) 664816 | 533744
+> [ 38.000000] (3:node@c-2.me) 795888 | 533744
+> [ 38.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 38.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 38.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 38.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 38.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 38.000000] (3:node@c-2.me) Predecessor: 10874876
+> [ 50.000000] (1:node@c-0.me) My finger table:
+> [ 50.000000] (1:node@c-0.me) Start | Succ
+> [ 50.000000] (1:node@c-0.me) 43 | 42
+> [ 50.000000] (1:node@c-0.me) 44 | 42
+> [ 50.000000] (1:node@c-0.me) 46 | 42
+> [ 50.000000] (1:node@c-0.me) 50 | 42
+> [ 50.000000] (1:node@c-0.me) 58 | 42
+> [ 50.000000] (1:node@c-0.me) 74 | 42
+> [ 50.000000] (1:node@c-0.me) 106 | 42
+> [ 50.000000] (1:node@c-0.me) 170 | 42
+> [ 50.000000] (1:node@c-0.me) 298 | 42
+> [ 50.000000] (1:node@c-0.me) 554 | 42
+> [ 50.000000] (1:node@c-0.me) 1066 | 42
+> [ 50.000000] (1:node@c-0.me) 2090 | 42
+> [ 50.000000] (1:node@c-0.me) 4138 | 42
+> [ 50.000000] (1:node@c-0.me) 8234 | 42
+> [ 50.000000] (1:node@c-0.me) 16426 | 42
+> [ 50.000000] (1:node@c-0.me) 32810 | 42
+> [ 50.000000] (1:node@c-0.me) 65578 | 42
+> [ 50.000000] (1:node@c-0.me) 131114 | 42
+> [ 50.000000] (1:node@c-0.me) 262186 | 42
+> [ 50.000000] (1:node@c-0.me) 524330 | 42
+> [ 50.000000] (1:node@c-0.me) 1048618 | 42
+> [ 50.000000] (1:node@c-0.me) 2097194 | 42
+> [ 50.000000] (1:node@c-0.me) 4194346 | 42
+> [ 50.000000] (1:node@c-0.me) 8388650 | 42
+> [ 50.000000] (1:node@c-0.me) Predecessor: 366680
+> [ 60.000000] (1:node@c-0.me) My finger table:
+> [ 60.000000] (1:node@c-0.me) Start | Succ
+> [ 60.000000] (1:node@c-0.me) 43 | 42
+> [ 60.000000] (1:node@c-0.me) 44 | 42
+> [ 60.000000] (1:node@c-0.me) 46 | 42
+> [ 60.000000] (1:node@c-0.me) 50 | 42
+> [ 60.000000] (1:node@c-0.me) 58 | 42
+> [ 60.000000] (1:node@c-0.me) 74 | 42
+> [ 60.000000] (1:node@c-0.me) 106 | 42
+> [ 60.000000] (1:node@c-0.me) 170 | 42
+> [ 60.000000] (1:node@c-0.me) 298 | 42
+> [ 60.000000] (1:node@c-0.me) 554 | 42
+> [ 60.000000] (1:node@c-0.me) 1066 | 42
+> [ 60.000000] (1:node@c-0.me) 2090 | 42
+> [ 60.000000] (1:node@c-0.me) 4138 | 42
+> [ 60.000000] (1:node@c-0.me) 8234 | 42
+> [ 60.000000] (1:node@c-0.me) 16426 | 42
+> [ 60.000000] (1:node@c-0.me) 32810 | 42
+> [ 60.000000] (1:node@c-0.me) 65578 | 42
+> [ 60.000000] (1:node@c-0.me) 131114 | 42
+> [ 60.000000] (1:node@c-0.me) 262186 | 42
+> [ 60.000000] (1:node@c-0.me) 524330 | 42
+> [ 60.000000] (1:node@c-0.me) 1048618 | 42
+> [ 60.000000] (1:node@c-0.me) 2097194 | 42
+> [ 60.000000] (1:node@c-0.me) 4194346 | 42
+> [ 60.000000] (1:node@c-0.me) 8388650 | 42
+> [ 60.000000] (1:node@c-0.me) Predecessor: 1319738
+> [ 70.000000] (1:node@c-0.me) My finger table:
+> [ 70.000000] (1:node@c-0.me) Start | Succ
+> [ 70.000000] (1:node@c-0.me) 43 | 1319738
+> [ 70.000000] (1:node@c-0.me) 44 | 42
+> [ 70.000000] (1:node@c-0.me) 46 | 42
+> [ 70.000000] (1:node@c-0.me) 50 | 42
+> [ 70.000000] (1:node@c-0.me) 58 | 42
+> [ 70.000000] (1:node@c-0.me) 74 | 42
+> [ 70.000000] (1:node@c-0.me) 106 | 42
+> [ 70.000000] (1:node@c-0.me) 170 | 42
+> [ 70.000000] (1:node@c-0.me) 298 | 42
+> [ 70.000000] (1:node@c-0.me) 554 | 42
+> [ 70.000000] (1:node@c-0.me) 1066 | 42
+> [ 70.000000] (1:node@c-0.me) 2090 | 42
+> [ 70.000000] (1:node@c-0.me) 4138 | 42
+> [ 70.000000] (1:node@c-0.me) 8234 | 42
+> [ 70.000000] (1:node@c-0.me) 16426 | 42
+> [ 70.000000] (1:node@c-0.me) 32810 | 42
+> [ 70.000000] (1:node@c-0.me) 65578 | 42
+> [ 70.000000] (1:node@c-0.me) 131114 | 42
+> [ 70.000000] (1:node@c-0.me) 262186 | 42
+> [ 70.000000] (1:node@c-0.me) 524330 | 42
+> [ 70.000000] (1:node@c-0.me) 1048618 | 42
+> [ 70.000000] (1:node@c-0.me) 2097194 | 42
+> [ 70.000000] (1:node@c-0.me) 4194346 | 42
+> [ 70.000000] (1:node@c-0.me) 8388650 | 42
+> [ 70.000000] (1:node@c-0.me) Predecessor: 6518808
+> [ 85.000000] (4:node@c-3.me) My finger table:
+> [ 85.000000] (4:node@c-3.me) Start | Succ
+> [ 85.000000] (4:node@c-3.me) 1319739 | 6518808
+> [ 85.000000] (4:node@c-3.me) 1319740 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319742 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 85.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 85.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 85.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 85.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 85.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 85.000000] (4:node@c-3.me) Predecessor: 42
+> [ 90.000000] (7:node@c-6.me) My finger table:
+> [ 90.000000] (7:node@c-6.me) Start | Succ
+> [ 90.000000] (7:node@c-6.me) 16728097 | 1319738
+> [ 90.000000] (7:node@c-6.me) 16728098 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728100 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 90.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 90.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 90.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 90.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 90.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 90.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 90.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 90.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 90.000000] (7:node@c-6.me) Predecessor: 2015253
+> [ 107.000000] (8:node@c-7.me) My finger table:
+> [ 107.000000] (8:node@c-7.me) Start | Succ
+> [ 107.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 107.000000] (8:node@c-7.me) 10004762 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004764 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004768 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 107.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 107.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 107.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 107.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 107.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 107.000000] (8:node@c-7.me) Predecessor: 533744
+> [ 109.000000] (9:node@c-8.me) My finger table:
+> [ 109.000000] (9:node@c-8.me) Start | Succ
+> [ 109.000000] (9:node@c-8.me) 6518809 | 42
+> [ 109.000000] (9:node@c-8.me) 6518810 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518812 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 109.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 109.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 109.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 109.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 109.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 109.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 109.000000] (9:node@c-8.me) Predecessor: 366680
+> [ 110.000000] (9:node@c-8.me) My finger table:
+> [ 110.000000] (9:node@c-8.me) Start | Succ
+> [ 110.000000] (9:node@c-8.me) 6518809 | 42
+> [ 110.000000] (9:node@c-8.me) 6518810 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518812 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 110.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 110.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 110.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 110.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 110.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 110.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 110.000000] (9:node@c-8.me) Predecessor: 1319738
+> [ 145.000000] (1:node@c-0.me) My finger table:
+> [ 145.000000] (1:node@c-0.me) Start | Succ
+> [ 145.000000] (1:node@c-0.me) 43 | 1319738
+> [ 145.000000] (1:node@c-0.me) 44 | 42
+> [ 145.000000] (1:node@c-0.me) 46 | 42
+> [ 145.000000] (1:node@c-0.me) 50 | 42
+> [ 145.000000] (1:node@c-0.me) 58 | 42
+> [ 145.000000] (1:node@c-0.me) 74 | 42
+> [ 145.000000] (1:node@c-0.me) 106 | 42
+> [ 145.000000] (1:node@c-0.me) 170 | 42
+> [ 145.000000] (1:node@c-0.me) 298 | 42
+> [ 145.000000] (1:node@c-0.me) 554 | 42
+> [ 145.000000] (1:node@c-0.me) 1066 | 42
+> [ 145.000000] (1:node@c-0.me) 2090 | 42
+> [ 145.000000] (1:node@c-0.me) 4138 | 42
+> [ 145.000000] (1:node@c-0.me) 8234 | 42
+> [ 145.000000] (1:node@c-0.me) 16426 | 42
+> [ 145.000000] (1:node@c-0.me) 32810 | 42
+> [ 145.000000] (1:node@c-0.me) 65578 | 42
+> [ 145.000000] (1:node@c-0.me) 131114 | 42
+> [ 145.000000] (1:node@c-0.me) 262186 | 42
+> [ 145.000000] (1:node@c-0.me) 524330 | 42
+> [ 145.000000] (1:node@c-0.me) 1048618 | 42
+> [ 145.000000] (1:node@c-0.me) 2097194 | 42
+> [ 145.000000] (1:node@c-0.me) 4194346 | 42
+> [ 145.000000] (1:node@c-0.me) 8388650 | 42
+> [ 145.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 157.000000] (4:node@c-3.me) My finger table:
+> [ 157.000000] (4:node@c-3.me) Start | Succ
+> [ 157.000000] (4:node@c-3.me) 1319739 | 6518808
+> [ 157.000000] (4:node@c-3.me) 1319740 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319742 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 157.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 157.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 157.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 157.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 157.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 157.000000] (4:node@c-3.me) Predecessor: 366680
+> [ 184.000000] (7:node@c-6.me) My finger table:
+> [ 184.000000] (7:node@c-6.me) Start | Succ
+> [ 184.000000] (7:node@c-6.me) 16728097 | 42
+> [ 184.000000] (7:node@c-6.me) 16728098 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728100 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 184.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 184.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 184.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 184.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 184.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 184.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 184.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 184.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 184.000000] (7:node@c-6.me) Predecessor: 6518808
+> [ 205.000000] (2:node@c-1.me) My finger table:
+> [ 205.000000] (2:node@c-1.me) Start | Succ
+> [ 205.000000] (2:node@c-1.me) 366681 | 1319738
+> [ 205.000000] (2:node@c-1.me) 366682 | 366680
+> [ 205.000000] (2:node@c-1.me) 366684 | 366680
+> [ 205.000000] (2:node@c-1.me) 366688 | 366680
+> [ 205.000000] (2:node@c-1.me) 366696 | 366680
+> [ 205.000000] (2:node@c-1.me) 366712 | 366680
+> [ 205.000000] (2:node@c-1.me) 366744 | 366680
+> [ 205.000000] (2:node@c-1.me) 366808 | 366680
+> [ 205.000000] (2:node@c-1.me) 366936 | 366680
+> [ 205.000000] (2:node@c-1.me) 367192 | 366680
+> [ 205.000000] (2:node@c-1.me) 367704 | 366680
+> [ 205.000000] (2:node@c-1.me) 368728 | 366680
+> [ 205.000000] (2:node@c-1.me) 370776 | 366680
+> [ 205.000000] (2:node@c-1.me) 374872 | 366680
+> [ 205.000000] (2:node@c-1.me) 383064 | 366680
+> [ 205.000000] (2:node@c-1.me) 399448 | 366680
+> [ 205.000000] (2:node@c-1.me) 432216 | 366680
+> [ 205.000000] (2:node@c-1.me) 497752 | 366680
+> [ 205.000000] (2:node@c-1.me) 628824 | 366680
+> [ 205.000000] (2:node@c-1.me) 890968 | 366680
+> [ 205.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 205.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 205.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 205.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 205.000000] (2:node@c-1.me) Predecessor: 42
+> [ 219.000000] (9:node@c-8.me) My finger table:
+> [ 219.000000] (9:node@c-8.me) Start | Succ
+> [ 219.000000] (9:node@c-8.me) 6518809 | 16728096
+> [ 219.000000] (9:node@c-8.me) 6518810 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518812 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 219.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 219.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 219.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 219.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 219.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 219.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 219.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 245.000000] (6:node@c-5.me) My finger table:
+> [ 245.000000] (6:node@c-5.me) Start | Succ
+> [ 245.000000] (6:node@c-5.me) 10874877 | 533744
+> [ 245.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 245.000000] (6:node@c-5.me) 10874880 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10874884 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10874892 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10874908 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 245.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 245.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 245.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 245.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 245.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 245.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 245.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 245.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 245.000000] (6:node@c-5.me) Predecessor: -1
+> [ 246.000000] (1:node@c-0.me) My finger table:
+> [ 246.000000] (1:node@c-0.me) Start | Succ
+> [ 246.000000] (1:node@c-0.me) 43 | 366680
+> [ 246.000000] (1:node@c-0.me) 44 | 366680
+> [ 246.000000] (1:node@c-0.me) 46 | 42
+> [ 246.000000] (1:node@c-0.me) 50 | 42
+> [ 246.000000] (1:node@c-0.me) 58 | 42
+> [ 246.000000] (1:node@c-0.me) 74 | 42
+> [ 246.000000] (1:node@c-0.me) 106 | 42
+> [ 246.000000] (1:node@c-0.me) 170 | 42
+> [ 246.000000] (1:node@c-0.me) 298 | 42
+> [ 246.000000] (1:node@c-0.me) 554 | 42
+> [ 246.000000] (1:node@c-0.me) 1066 | 42
+> [ 246.000000] (1:node@c-0.me) 2090 | 42
+> [ 246.000000] (1:node@c-0.me) 4138 | 42
+> [ 246.000000] (1:node@c-0.me) 8234 | 42
+> [ 246.000000] (1:node@c-0.me) 16426 | 42
+> [ 246.000000] (1:node@c-0.me) 32810 | 42
+> [ 246.000000] (1:node@c-0.me) 65578 | 42
+> [ 246.000000] (1:node@c-0.me) 131114 | 42
+> [ 246.000000] (1:node@c-0.me) 262186 | 42
+> [ 246.000000] (1:node@c-0.me) 524330 | 42
+> [ 246.000000] (1:node@c-0.me) 1048618 | 42
+> [ 246.000000] (1:node@c-0.me) 2097194 | 42
+> [ 246.000000] (1:node@c-0.me) 4194346 | 42
+> [ 246.000000] (1:node@c-0.me) 8388650 | 42
+> [ 246.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 248.000000] (7:node@c-6.me) My finger table:
+> [ 248.000000] (7:node@c-6.me) Start | Succ
+> [ 248.000000] (7:node@c-6.me) 16728097 | 42
+> [ 248.000000] (7:node@c-6.me) 16728098 | 42
+> [ 248.000000] (7:node@c-6.me) 16728100 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 248.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 248.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 248.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 248.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 248.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 248.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 248.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 248.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 248.000000] (7:node@c-6.me) Predecessor: 6518808
+> [ 254.000000] (3:node@c-2.me) My finger table:
+> [ 254.000000] (3:node@c-2.me) Start | Succ
+> [ 254.000000] (3:node@c-2.me) 533745 | 10004760
+> [ 254.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 254.000000] (3:node@c-2.me) 533748 | 533744
+> [ 254.000000] (3:node@c-2.me) 533752 | 533744
+> [ 254.000000] (3:node@c-2.me) 533760 | 533744
+> [ 254.000000] (3:node@c-2.me) 533776 | 533744
+> [ 254.000000] (3:node@c-2.me) 533808 | 533744
+> [ 254.000000] (3:node@c-2.me) 533872 | 533744
+> [ 254.000000] (3:node@c-2.me) 534000 | 533744
+> [ 254.000000] (3:node@c-2.me) 534256 | 533744
+> [ 254.000000] (3:node@c-2.me) 534768 | 533744
+> [ 254.000000] (3:node@c-2.me) 535792 | 533744
+> [ 254.000000] (3:node@c-2.me) 537840 | 533744
+> [ 254.000000] (3:node@c-2.me) 541936 | 533744
+> [ 254.000000] (3:node@c-2.me) 550128 | 533744
+> [ 254.000000] (3:node@c-2.me) 566512 | 533744
+> [ 254.000000] (3:node@c-2.me) 599280 | 533744
+> [ 254.000000] (3:node@c-2.me) 664816 | 533744
+> [ 254.000000] (3:node@c-2.me) 795888 | 533744
+> [ 254.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 254.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 254.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 254.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 254.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 254.000000] (3:node@c-2.me) Predecessor: 10874876
+> [ 261.000000] (2:node@c-1.me) My finger table:
+> [ 261.000000] (2:node@c-1.me) Start | Succ
+> [ 261.000000] (2:node@c-1.me) 366681 | 1319738
+> [ 261.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 261.000000] (2:node@c-1.me) 366684 | 366680
+> [ 261.000000] (2:node@c-1.me) 366688 | 366680
+> [ 261.000000] (2:node@c-1.me) 366696 | 366680
+> [ 261.000000] (2:node@c-1.me) 366712 | 366680
+> [ 261.000000] (2:node@c-1.me) 366744 | 366680
+> [ 261.000000] (2:node@c-1.me) 366808 | 366680
+> [ 261.000000] (2:node@c-1.me) 366936 | 366680
+> [ 261.000000] (2:node@c-1.me) 367192 | 366680
+> [ 261.000000] (2:node@c-1.me) 367704 | 366680
+> [ 261.000000] (2:node@c-1.me) 368728 | 366680
+> [ 261.000000] (2:node@c-1.me) 370776 | 366680
+> [ 261.000000] (2:node@c-1.me) 374872 | 366680
+> [ 261.000000] (2:node@c-1.me) 383064 | 366680
+> [ 261.000000] (2:node@c-1.me) 399448 | 366680
+> [ 261.000000] (2:node@c-1.me) 432216 | 366680
+> [ 261.000000] (2:node@c-1.me) 497752 | 366680
+> [ 261.000000] (2:node@c-1.me) 628824 | 366680
+> [ 261.000000] (2:node@c-1.me) 890968 | 366680
+> [ 261.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 261.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 261.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 261.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 261.000000] (2:node@c-1.me) Predecessor: 42
+> [ 264.000000] (10:node@c-9.me) My finger table:
+> [ 264.000000] (10:node@c-9.me) Start | Succ
+> [ 264.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 264.000000] (10:node@c-9.me) 2015255 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015257 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015261 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015269 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 264.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 264.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 264.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 264.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 264.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 264.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 267.000000] (8:node@c-7.me) My finger table:
+> [ 267.000000] (8:node@c-7.me) Start | Succ
+> [ 267.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 267.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 267.000000] (8:node@c-7.me) 10004764 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10004768 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 267.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 267.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 267.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 267.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 267.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 267.000000] (8:node@c-7.me) Predecessor: 533744
+> [ 268.000000] (4:node@c-3.me) My finger table:
+> [ 268.000000] (4:node@c-3.me) Start | Succ
+> [ 268.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 268.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 268.000000] (4:node@c-3.me) 1319742 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 268.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 268.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 268.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 268.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 268.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 268.000000] (4:node@c-3.me) Predecessor: 366680
+> [ 271.000000] (5:node@c-4.me) My finger table:
+> [ 271.000000] (5:node@c-4.me) Start | Succ
+> [ 271.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 271.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 271.000000] (5:node@c-4.me) 16509409 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509413 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509421 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 271.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 271.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 271.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 271.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 271.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 271.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 271.000000] (5:node@c-4.me) Predecessor: 10004760
+> [ 274.000000] (9:node@c-8.me) My finger table:
+> [ 274.000000] (9:node@c-8.me) Start | Succ
+> [ 274.000000] (9:node@c-8.me) 6518809 | 16728096
+> [ 274.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 274.000000] (9:node@c-8.me) 6518812 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 274.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 274.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 274.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 274.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 274.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 274.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 274.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 281.000000] (10:node@c-9.me) My finger table:
+> [ 281.000000] (10:node@c-9.me) Start | Succ
+> [ 281.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 281.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 281.000000] (10:node@c-9.me) 2015257 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015261 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015269 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 281.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 281.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 281.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 281.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 281.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 281.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 326.000000] (7:node@c-6.me) My finger table:
+> [ 326.000000] (7:node@c-6.me) Start | Succ
+> [ 326.000000] (7:node@c-6.me) 16728097 | 42
+> [ 326.000000] (7:node@c-6.me) 16728098 | 42
+> [ 326.000000] (7:node@c-6.me) 16728100 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 326.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 326.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 326.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 326.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 326.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 326.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 326.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 326.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 326.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 367.000000] (6:node@c-5.me) My finger table:
+> [ 367.000000] (6:node@c-5.me) Start | Succ
+> [ 367.000000] (6:node@c-5.me) 10874877 | 533744
+> [ 367.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 367.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 367.000000] (6:node@c-5.me) 10874884 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10874892 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10874908 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 367.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 367.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 367.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 367.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 367.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 367.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 367.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 367.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 367.000000] (6:node@c-5.me) Predecessor: -1
+> [ 367.000000] (1:node@c-0.me) My finger table:
+> [ 367.000000] (1:node@c-0.me) Start | Succ
+> [ 367.000000] (1:node@c-0.me) 43 | 366680
+> [ 367.000000] (1:node@c-0.me) 44 | 366680
+> [ 367.000000] (1:node@c-0.me) 46 | 366680
+> [ 367.000000] (1:node@c-0.me) 50 | 42
+> [ 367.000000] (1:node@c-0.me) 58 | 42
+> [ 367.000000] (1:node@c-0.me) 74 | 42
+> [ 367.000000] (1:node@c-0.me) 106 | 42
+> [ 367.000000] (1:node@c-0.me) 170 | 42
+> [ 367.000000] (1:node@c-0.me) 298 | 42
+> [ 367.000000] (1:node@c-0.me) 554 | 42
+> [ 367.000000] (1:node@c-0.me) 1066 | 42
+> [ 367.000000] (1:node@c-0.me) 2090 | 42
+> [ 367.000000] (1:node@c-0.me) 4138 | 42
+> [ 367.000000] (1:node@c-0.me) 8234 | 42
+> [ 367.000000] (1:node@c-0.me) 16426 | 42
+> [ 367.000000] (1:node@c-0.me) 32810 | 42
+> [ 367.000000] (1:node@c-0.me) 65578 | 42
+> [ 367.000000] (1:node@c-0.me) 131114 | 42
+> [ 367.000000] (1:node@c-0.me) 262186 | 42
+> [ 367.000000] (1:node@c-0.me) 524330 | 42
+> [ 367.000000] (1:node@c-0.me) 1048618 | 42
+> [ 367.000000] (1:node@c-0.me) 2097194 | 42
+> [ 367.000000] (1:node@c-0.me) 4194346 | 42
+> [ 367.000000] (1:node@c-0.me) 8388650 | 42
+> [ 367.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 380.000000] (3:node@c-2.me) My finger table:
+> [ 380.000000] (3:node@c-2.me) Start | Succ
+> [ 380.000000] (3:node@c-2.me) 533745 | 10004760
+> [ 380.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 380.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 380.000000] (3:node@c-2.me) 533752 | 533744
+> [ 380.000000] (3:node@c-2.me) 533760 | 533744
+> [ 380.000000] (3:node@c-2.me) 533776 | 533744
+> [ 380.000000] (3:node@c-2.me) 533808 | 533744
+> [ 380.000000] (3:node@c-2.me) 533872 | 533744
+> [ 380.000000] (3:node@c-2.me) 534000 | 533744
+> [ 380.000000] (3:node@c-2.me) 534256 | 533744
+> [ 380.000000] (3:node@c-2.me) 534768 | 533744
+> [ 380.000000] (3:node@c-2.me) 535792 | 533744
+> [ 380.000000] (3:node@c-2.me) 537840 | 533744
+> [ 380.000000] (3:node@c-2.me) 541936 | 533744
+> [ 380.000000] (3:node@c-2.me) 550128 | 533744
+> [ 380.000000] (3:node@c-2.me) 566512 | 533744
+> [ 380.000000] (3:node@c-2.me) 599280 | 533744
+> [ 380.000000] (3:node@c-2.me) 664816 | 533744
+> [ 380.000000] (3:node@c-2.me) 795888 | 533744
+> [ 380.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 380.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 380.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 380.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 380.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 380.000000] (3:node@c-2.me) Predecessor: 10874876
+> [ 382.000000] (2:node@c-1.me) My finger table:
+> [ 382.000000] (2:node@c-1.me) Start | Succ
+> [ 382.000000] (2:node@c-1.me) 366681 | 1319738
+> [ 382.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 382.000000] (2:node@c-1.me) 366684 | 1319738
+> [ 382.000000] (2:node@c-1.me) 366688 | 366680
+> [ 382.000000] (2:node@c-1.me) 366696 | 366680
+> [ 382.000000] (2:node@c-1.me) 366712 | 366680
+> [ 382.000000] (2:node@c-1.me) 366744 | 366680
+> [ 382.000000] (2:node@c-1.me) 366808 | 366680
+> [ 382.000000] (2:node@c-1.me) 366936 | 366680
+> [ 382.000000] (2:node@c-1.me) 367192 | 366680
+> [ 382.000000] (2:node@c-1.me) 367704 | 366680
+> [ 382.000000] (2:node@c-1.me) 368728 | 366680
+> [ 382.000000] (2:node@c-1.me) 370776 | 366680
+> [ 382.000000] (2:node@c-1.me) 374872 | 366680
+> [ 382.000000] (2:node@c-1.me) 383064 | 366680
+> [ 382.000000] (2:node@c-1.me) 399448 | 366680
+> [ 382.000000] (2:node@c-1.me) 432216 | 366680
+> [ 382.000000] (2:node@c-1.me) 497752 | 366680
+> [ 382.000000] (2:node@c-1.me) 628824 | 366680
+> [ 382.000000] (2:node@c-1.me) 890968 | 366680
+> [ 382.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 382.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 382.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 382.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 382.000000] (2:node@c-1.me) Predecessor: 42
+> [ 383.000000] (7:node@c-6.me) My finger table:
+> [ 383.000000] (7:node@c-6.me) Start | Succ
+> [ 383.000000] (7:node@c-6.me) 16728097 | 42
+> [ 383.000000] (7:node@c-6.me) 16728098 | 42
+> [ 383.000000] (7:node@c-6.me) 16728100 | 42
+> [ 383.000000] (7:node@c-6.me) 16728104 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 383.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 383.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 383.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 383.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 383.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 383.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 383.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 383.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 383.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 393.000000] (8:node@c-7.me) My finger table:
+> [ 393.000000] (8:node@c-7.me) Start | Succ
+> [ 393.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 393.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 393.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 393.000000] (8:node@c-7.me) 10004768 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 393.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 393.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 393.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 393.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 393.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 393.000000] (8:node@c-7.me) Predecessor: 533744
+> [ 399.000000] (9:node@c-8.me) My finger table:
+> [ 399.000000] (9:node@c-8.me) Start | Succ
+> [ 399.000000] (9:node@c-8.me) 6518809 | 10004760
+> [ 399.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 399.000000] (9:node@c-8.me) 6518812 | 10004760
+> [ 399.000000] (9:node@c-8.me) 6518816 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 399.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 399.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 399.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 399.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 399.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 399.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 399.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 403.000000] (8:node@c-7.me) My finger table:
+> [ 403.000000] (8:node@c-7.me) Start | Succ
+> [ 403.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 403.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 403.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 403.000000] (8:node@c-7.me) 10004768 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 403.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 403.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 403.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 403.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 403.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 403.000000] (8:node@c-7.me) Predecessor: 6518808
+> [ 407.000000] (5:node@c-4.me) My finger table:
+> [ 407.000000] (5:node@c-4.me) Start | Succ
+> [ 407.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 407.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 407.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 407.000000] (5:node@c-4.me) 16509413 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509421 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 407.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 407.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 407.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 407.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 407.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 407.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 407.000000] (5:node@c-4.me) Predecessor: 10004760
+> [ 410.000000] (10:node@c-9.me) My finger table:
+> [ 410.000000] (10:node@c-9.me) Start | Succ
+> [ 410.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 410.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 410.000000] (10:node@c-9.me) 2015257 | 6518808
+> [ 410.000000] (10:node@c-9.me) 2015261 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015269 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 410.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 410.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 410.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 410.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 410.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 410.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 416.000000] (4:node@c-3.me) My finger table:
+> [ 416.000000] (4:node@c-3.me) Start | Succ
+> [ 416.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 416.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 416.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 416.000000] (4:node@c-3.me) 1319746 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 416.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 416.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 416.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 416.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 416.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 416.000000] (4:node@c-3.me) Predecessor: 366680
+> [ 489.000000] (1:node@c-0.me) My finger table:
+> [ 489.000000] (1:node@c-0.me) Start | Succ
+> [ 489.000000] (1:node@c-0.me) 43 | 366680
+> [ 489.000000] (1:node@c-0.me) 44 | 366680
+> [ 489.000000] (1:node@c-0.me) 46 | 366680
+> [ 489.000000] (1:node@c-0.me) 50 | 366680
+> [ 489.000000] (1:node@c-0.me) 58 | 42
+> [ 489.000000] (1:node@c-0.me) 74 | 42
+> [ 489.000000] (1:node@c-0.me) 106 | 42
+> [ 489.000000] (1:node@c-0.me) 170 | 42
+> [ 489.000000] (1:node@c-0.me) 298 | 42
+> [ 489.000000] (1:node@c-0.me) 554 | 42
+> [ 489.000000] (1:node@c-0.me) 1066 | 42
+> [ 489.000000] (1:node@c-0.me) 2090 | 42
+> [ 489.000000] (1:node@c-0.me) 4138 | 42
+> [ 489.000000] (1:node@c-0.me) 8234 | 42
+> [ 489.000000] (1:node@c-0.me) 16426 | 42
+> [ 489.000000] (1:node@c-0.me) 32810 | 42
+> [ 489.000000] (1:node@c-0.me) 65578 | 42
+> [ 489.000000] (1:node@c-0.me) 131114 | 42
+> [ 489.000000] (1:node@c-0.me) 262186 | 42
+> [ 489.000000] (1:node@c-0.me) 524330 | 42
+> [ 489.000000] (1:node@c-0.me) 1048618 | 42
+> [ 489.000000] (1:node@c-0.me) 2097194 | 42
+> [ 489.000000] (1:node@c-0.me) 4194346 | 42
+> [ 489.000000] (1:node@c-0.me) 8388650 | 42
+> [ 489.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 492.000000] (6:node@c-5.me) My finger table:
+> [ 492.000000] (6:node@c-5.me) Start | Succ
+> [ 492.000000] (6:node@c-5.me) 10874877 | 533744
+> [ 492.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 492.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 492.000000] (6:node@c-5.me) 10874884 | 533744
+> [ 492.000000] (6:node@c-5.me) 10874892 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10874908 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 492.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 492.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 492.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 492.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 492.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 492.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 492.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 492.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 492.000000] (6:node@c-5.me) Predecessor: -1
+> [ 503.000000] (3:node@c-2.me) My finger table:
+> [ 503.000000] (3:node@c-2.me) Start | Succ
+> [ 503.000000] (3:node@c-2.me) 533745 | 1319738
+> [ 503.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 503.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 503.000000] (3:node@c-2.me) 533752 | 1319738
+> [ 503.000000] (3:node@c-2.me) 533760 | 533744
+> [ 503.000000] (3:node@c-2.me) 533776 | 533744
+> [ 503.000000] (3:node@c-2.me) 533808 | 533744
+> [ 503.000000] (3:node@c-2.me) 533872 | 533744
+> [ 503.000000] (3:node@c-2.me) 534000 | 533744
+> [ 503.000000] (3:node@c-2.me) 534256 | 533744
+> [ 503.000000] (3:node@c-2.me) 534768 | 533744
+> [ 503.000000] (3:node@c-2.me) 535792 | 533744
+> [ 503.000000] (3:node@c-2.me) 537840 | 533744
+> [ 503.000000] (3:node@c-2.me) 541936 | 533744
+> [ 503.000000] (3:node@c-2.me) 550128 | 533744
+> [ 503.000000] (3:node@c-2.me) 566512 | 533744
+> [ 503.000000] (3:node@c-2.me) 599280 | 533744
+> [ 503.000000] (3:node@c-2.me) 664816 | 533744
+> [ 503.000000] (3:node@c-2.me) 795888 | 533744
+> [ 503.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 503.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 503.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 503.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 503.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 503.000000] (3:node@c-2.me) Predecessor: 10874876
+> [ 512.000000] (7:node@c-6.me) My finger table:
+> [ 512.000000] (7:node@c-6.me) Start | Succ
+> [ 512.000000] (7:node@c-6.me) 16728097 | 42
+> [ 512.000000] (7:node@c-6.me) 16728098 | 42
+> [ 512.000000] (7:node@c-6.me) 16728100 | 42
+> [ 512.000000] (7:node@c-6.me) 16728104 | 42
+> [ 512.000000] (7:node@c-6.me) 16728112 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 512.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 512.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 512.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 512.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 512.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 512.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 512.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 512.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 512.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 519.000000] (9:node@c-8.me) My finger table:
+> [ 519.000000] (9:node@c-8.me) Start | Succ
+> [ 519.000000] (9:node@c-8.me) 6518809 | 10004760
+> [ 519.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 519.000000] (9:node@c-8.me) 6518812 | 10004760
+> [ 519.000000] (9:node@c-8.me) 6518816 | 10004760
+> [ 519.000000] (9:node@c-8.me) 6518824 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 519.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 519.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 519.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 519.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 519.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 519.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 519.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 520.000000] (2:node@c-1.me) My finger table:
+> [ 520.000000] (2:node@c-1.me) Start | Succ
+> [ 520.000000] (2:node@c-1.me) 366681 | 1319738
+> [ 520.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 520.000000] (2:node@c-1.me) 366684 | 1319738
+> [ 520.000000] (2:node@c-1.me) 366688 | 1319738
+> [ 520.000000] (2:node@c-1.me) 366696 | 366680
+> [ 520.000000] (2:node@c-1.me) 366712 | 366680
+> [ 520.000000] (2:node@c-1.me) 366744 | 366680
+> [ 520.000000] (2:node@c-1.me) 366808 | 366680
+> [ 520.000000] (2:node@c-1.me) 366936 | 366680
+> [ 520.000000] (2:node@c-1.me) 367192 | 366680
+> [ 520.000000] (2:node@c-1.me) 367704 | 366680
+> [ 520.000000] (2:node@c-1.me) 368728 | 366680
+> [ 520.000000] (2:node@c-1.me) 370776 | 366680
+> [ 520.000000] (2:node@c-1.me) 374872 | 366680
+> [ 520.000000] (2:node@c-1.me) 383064 | 366680
+> [ 520.000000] (2:node@c-1.me) 399448 | 366680
+> [ 520.000000] (2:node@c-1.me) 432216 | 366680
+> [ 520.000000] (2:node@c-1.me) 497752 | 366680
+> [ 520.000000] (2:node@c-1.me) 628824 | 366680
+> [ 520.000000] (2:node@c-1.me) 890968 | 366680
+> [ 520.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 520.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 520.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 520.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 520.000000] (2:node@c-1.me) Predecessor: 42
+> [ 529.000000] (8:node@c-7.me) My finger table:
+> [ 529.000000] (8:node@c-7.me) Start | Succ
+> [ 529.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 529.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 529.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 529.000000] (8:node@c-7.me) 10004768 | 16509405
+> [ 529.000000] (8:node@c-7.me) 10004776 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 529.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 529.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 529.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 529.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 529.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 529.000000] (8:node@c-7.me) Predecessor: 6518808
+> [ 530.000000] (10:node@c-9.me) My finger table:
+> [ 530.000000] (10:node@c-9.me) Start | Succ
+> [ 530.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 530.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 530.000000] (10:node@c-9.me) 2015257 | 6518808
+> [ 530.000000] (10:node@c-9.me) 2015261 | 6518808
+> [ 530.000000] (10:node@c-9.me) 2015269 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 530.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 530.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 530.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 530.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 530.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 530.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 536.000000] (4:node@c-3.me) My finger table:
+> [ 536.000000] (4:node@c-3.me) Start | Succ
+> [ 536.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 536.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 536.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 536.000000] (4:node@c-3.me) 1319746 | 2015253
+> [ 536.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 536.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 536.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 536.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 536.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 536.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 536.000000] (4:node@c-3.me) Predecessor: 366680
+> [ 544.000000] (4:node@c-3.me) My finger table:
+> [ 544.000000] (4:node@c-3.me) Start | Succ
+> [ 544.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 544.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 544.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 544.000000] (4:node@c-3.me) 1319746 | 2015253
+> [ 544.000000] (4:node@c-3.me) 1319754 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 544.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 544.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 544.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 544.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 544.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 544.000000] (4:node@c-3.me) Predecessor: 533744
+> [ 547.000000] (5:node@c-4.me) My finger table:
+> [ 547.000000] (5:node@c-4.me) Start | Succ
+> [ 547.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 547.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 547.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 547.000000] (5:node@c-4.me) 16509413 | 16728096
+> [ 547.000000] (5:node@c-4.me) 16509421 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 547.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 547.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 547.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 547.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 547.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 547.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 547.000000] (5:node@c-4.me) Predecessor: 10004760
+> [ 600.000000] (3:node@c-2.me) My finger table:
+> [ 600.000000] (3:node@c-2.me) Start | Succ
+> [ 600.000000] (3:node@c-2.me) 533745 | 1319738
+> [ 600.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 600.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 600.000000] (3:node@c-2.me) 533752 | 1319738
+> [ 600.000000] (3:node@c-2.me) 533760 | 533744
+> [ 600.000000] (3:node@c-2.me) 533776 | 533744
+> [ 600.000000] (3:node@c-2.me) 533808 | 533744
+> [ 600.000000] (3:node@c-2.me) 533872 | 533744
+> [ 600.000000] (3:node@c-2.me) 534000 | 533744
+> [ 600.000000] (3:node@c-2.me) 534256 | 533744
+> [ 600.000000] (3:node@c-2.me) 534768 | 533744
+> [ 600.000000] (3:node@c-2.me) 535792 | 533744
+> [ 600.000000] (3:node@c-2.me) 537840 | 533744
+> [ 600.000000] (3:node@c-2.me) 541936 | 533744
+> [ 600.000000] (3:node@c-2.me) 550128 | 533744
+> [ 600.000000] (3:node@c-2.me) 566512 | 533744
+> [ 600.000000] (3:node@c-2.me) 599280 | 533744
+> [ 600.000000] (3:node@c-2.me) 664816 | 533744
+> [ 600.000000] (3:node@c-2.me) 795888 | 533744
+> [ 600.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 600.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 600.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 600.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 600.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 600.000000] (3:node@c-2.me) Predecessor: 366680
+> [ 612.000000] (6:node@c-5.me) My finger table:
+> [ 612.000000] (6:node@c-5.me) Start | Succ
+> [ 612.000000] (6:node@c-5.me) 10874877 | 366680
+> [ 612.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 612.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 612.000000] (6:node@c-5.me) 10874884 | 533744
+> [ 612.000000] (6:node@c-5.me) 10874892 | 366680
+> [ 612.000000] (6:node@c-5.me) 10874908 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 612.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 612.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 612.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 612.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 612.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 612.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 612.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 612.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 612.000000] (6:node@c-5.me) Predecessor: -1
+> [ 614.000000] (1:node@c-0.me) My finger table:
+> [ 614.000000] (1:node@c-0.me) Start | Succ
+> [ 614.000000] (1:node@c-0.me) 43 | 366680
+> [ 614.000000] (1:node@c-0.me) 44 | 366680
+> [ 614.000000] (1:node@c-0.me) 46 | 366680
+> [ 614.000000] (1:node@c-0.me) 50 | 366680
+> [ 614.000000] (1:node@c-0.me) 58 | 366680
+> [ 614.000000] (1:node@c-0.me) 74 | 42
+> [ 614.000000] (1:node@c-0.me) 106 | 42
+> [ 614.000000] (1:node@c-0.me) 170 | 42
+> [ 614.000000] (1:node@c-0.me) 298 | 42
+> [ 614.000000] (1:node@c-0.me) 554 | 42
+> [ 614.000000] (1:node@c-0.me) 1066 | 42
+> [ 614.000000] (1:node@c-0.me) 2090 | 42
+> [ 614.000000] (1:node@c-0.me) 4138 | 42
+> [ 614.000000] (1:node@c-0.me) 8234 | 42
+> [ 614.000000] (1:node@c-0.me) 16426 | 42
+> [ 614.000000] (1:node@c-0.me) 32810 | 42
+> [ 614.000000] (1:node@c-0.me) 65578 | 42
+> [ 614.000000] (1:node@c-0.me) 131114 | 42
+> [ 614.000000] (1:node@c-0.me) 262186 | 42
+> [ 614.000000] (1:node@c-0.me) 524330 | 42
+> [ 614.000000] (1:node@c-0.me) 1048618 | 42
+> [ 614.000000] (1:node@c-0.me) 2097194 | 42
+> [ 614.000000] (1:node@c-0.me) 4194346 | 42
+> [ 614.000000] (1:node@c-0.me) 8388650 | 42
+> [ 614.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 632.000000] (3:node@c-2.me) My finger table:
+> [ 632.000000] (3:node@c-2.me) Start | Succ
+> [ 632.000000] (3:node@c-2.me) 533745 | 1319738
+> [ 632.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 632.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 632.000000] (3:node@c-2.me) 533752 | 1319738
+> [ 632.000000] (3:node@c-2.me) 533760 | 1319738
+> [ 632.000000] (3:node@c-2.me) 533776 | 533744
+> [ 632.000000] (3:node@c-2.me) 533808 | 533744
+> [ 632.000000] (3:node@c-2.me) 533872 | 533744
+> [ 632.000000] (3:node@c-2.me) 534000 | 533744
+> [ 632.000000] (3:node@c-2.me) 534256 | 533744
+> [ 632.000000] (3:node@c-2.me) 534768 | 533744
+> [ 632.000000] (3:node@c-2.me) 535792 | 533744
+> [ 632.000000] (3:node@c-2.me) 537840 | 533744
+> [ 632.000000] (3:node@c-2.me) 541936 | 533744
+> [ 632.000000] (3:node@c-2.me) 550128 | 533744
+> [ 632.000000] (3:node@c-2.me) 566512 | 533744
+> [ 632.000000] (3:node@c-2.me) 599280 | 533744
+> [ 632.000000] (3:node@c-2.me) 664816 | 533744
+> [ 632.000000] (3:node@c-2.me) 795888 | 533744
+> [ 632.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 632.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 632.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 632.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 632.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 632.000000] (3:node@c-2.me) Predecessor: 366680
+> [ 638.000000] (7:node@c-6.me) My finger table:
+> [ 638.000000] (7:node@c-6.me) Start | Succ
+> [ 638.000000] (7:node@c-6.me) 16728097 | 42
+> [ 638.000000] (7:node@c-6.me) 16728098 | 42
+> [ 638.000000] (7:node@c-6.me) 16728100 | 42
+> [ 638.000000] (7:node@c-6.me) 16728104 | 42
+> [ 638.000000] (7:node@c-6.me) 16728112 | 42
+> [ 638.000000] (7:node@c-6.me) 16728128 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 638.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 638.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 638.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 638.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 638.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 638.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 638.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 638.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 638.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 643.000000] (2:node@c-1.me) My finger table:
+> [ 643.000000] (2:node@c-1.me) Start | Succ
+> [ 643.000000] (2:node@c-1.me) 366681 | 533744
+> [ 643.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 643.000000] (2:node@c-1.me) 366684 | 1319738
+> [ 643.000000] (2:node@c-1.me) 366688 | 1319738
+> [ 643.000000] (2:node@c-1.me) 366696 | 533744
+> [ 643.000000] (2:node@c-1.me) 366712 | 366680
+> [ 643.000000] (2:node@c-1.me) 366744 | 366680
+> [ 643.000000] (2:node@c-1.me) 366808 | 366680
+> [ 643.000000] (2:node@c-1.me) 366936 | 366680
+> [ 643.000000] (2:node@c-1.me) 367192 | 366680
+> [ 643.000000] (2:node@c-1.me) 367704 | 366680
+> [ 643.000000] (2:node@c-1.me) 368728 | 366680
+> [ 643.000000] (2:node@c-1.me) 370776 | 366680
+> [ 643.000000] (2:node@c-1.me) 374872 | 366680
+> [ 643.000000] (2:node@c-1.me) 383064 | 366680
+> [ 643.000000] (2:node@c-1.me) 399448 | 366680
+> [ 643.000000] (2:node@c-1.me) 432216 | 366680
+> [ 643.000000] (2:node@c-1.me) 497752 | 366680
+> [ 643.000000] (2:node@c-1.me) 628824 | 366680
+> [ 643.000000] (2:node@c-1.me) 890968 | 366680
+> [ 643.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 643.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 643.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 643.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 643.000000] (2:node@c-1.me) Predecessor: 42
+> [ 662.000000] (4:node@c-3.me) My finger table:
+> [ 662.000000] (4:node@c-3.me) Start | Succ
+> [ 662.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 662.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 662.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 662.000000] (4:node@c-3.me) 1319746 | 2015253
+> [ 662.000000] (4:node@c-3.me) 1319754 | 2015253
+> [ 662.000000] (4:node@c-3.me) 1319770 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 662.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 662.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 662.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 662.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 662.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 662.000000] (4:node@c-3.me) Predecessor: 533744
+> [ 663.000000] (8:node@c-7.me) My finger table:
+> [ 663.000000] (8:node@c-7.me) Start | Succ
+> [ 663.000000] (8:node@c-7.me) 10004761 | 16509405
+> [ 663.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 663.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 663.000000] (8:node@c-7.me) 10004768 | 16509405
+> [ 663.000000] (8:node@c-7.me) 10004776 | 16509405
+> [ 663.000000] (8:node@c-7.me) 10004792 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 663.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 663.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 663.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 663.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 663.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 663.000000] (8:node@c-7.me) Predecessor: 6518808
+> [ 666.000000] (10:node@c-9.me) My finger table:
+> [ 666.000000] (10:node@c-9.me) Start | Succ
+> [ 666.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 666.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 666.000000] (10:node@c-9.me) 2015257 | 6518808
+> [ 666.000000] (10:node@c-9.me) 2015261 | 6518808
+> [ 666.000000] (10:node@c-9.me) 2015269 | 6518808
+> [ 666.000000] (10:node@c-9.me) 2015285 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 666.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 666.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 666.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 666.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 666.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 666.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 669.000000] (9:node@c-8.me) My finger table:
+> [ 669.000000] (9:node@c-8.me) Start | Succ
+> [ 669.000000] (9:node@c-8.me) 6518809 | 10004760
+> [ 669.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 669.000000] (9:node@c-8.me) 6518812 | 10004760
+> [ 669.000000] (9:node@c-8.me) 6518816 | 10004760
+> [ 669.000000] (9:node@c-8.me) 6518824 | 10004760
+> [ 669.000000] (9:node@c-8.me) 6518840 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 669.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 669.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 669.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 669.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 669.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 669.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 669.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 693.000000] (5:node@c-4.me) My finger table:
+> [ 693.000000] (5:node@c-4.me) Start | Succ
+> [ 693.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 693.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 693.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 693.000000] (5:node@c-4.me) 16509413 | 16728096
+> [ 693.000000] (5:node@c-4.me) 16509421 | 16728096
+> [ 693.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 693.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 693.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 693.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 693.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 693.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 693.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 693.000000] (5:node@c-4.me) Predecessor: 10004760
+> [ 729.000000] (5:node@c-4.me) My finger table:
+> [ 729.000000] (5:node@c-4.me) Start | Succ
+> [ 729.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 729.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 729.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 729.000000] (5:node@c-4.me) 16509413 | 16728096
+> [ 729.000000] (5:node@c-4.me) 16509421 | 16728096
+> [ 729.000000] (5:node@c-4.me) 16509437 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 729.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 729.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 729.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 729.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 729.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 729.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 729.000000] (5:node@c-4.me) Predecessor: 10874876
+> [ 734.000000] (6:node@c-5.me) My finger table:
+> [ 734.000000] (6:node@c-5.me) Start | Succ
+> [ 734.000000] (6:node@c-5.me) 10874877 | 16509405
+> [ 734.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 734.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 734.000000] (6:node@c-5.me) 10874884 | 533744
+> [ 734.000000] (6:node@c-5.me) 10874892 | 366680
+> [ 734.000000] (6:node@c-5.me) 10874908 | 16509405
+> [ 734.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 734.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 734.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 734.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 734.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 734.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 734.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 734.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 734.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 734.000000] (6:node@c-5.me) Predecessor: -1
+> [ 740.000000] (1:node@c-0.me) My finger table:
+> [ 740.000000] (1:node@c-0.me) Start | Succ
+> [ 740.000000] (1:node@c-0.me) 43 | 366680
+> [ 740.000000] (1:node@c-0.me) 44 | 366680
+> [ 740.000000] (1:node@c-0.me) 46 | 366680
+> [ 740.000000] (1:node@c-0.me) 50 | 366680
+> [ 740.000000] (1:node@c-0.me) 58 | 366680
+> [ 740.000000] (1:node@c-0.me) 74 | 366680
+> [ 740.000000] (1:node@c-0.me) 106 | 42
+> [ 740.000000] (1:node@c-0.me) 170 | 42
+> [ 740.000000] (1:node@c-0.me) 298 | 42
+> [ 740.000000] (1:node@c-0.me) 554 | 42
+> [ 740.000000] (1:node@c-0.me) 1066 | 42
+> [ 740.000000] (1:node@c-0.me) 2090 | 42
+> [ 740.000000] (1:node@c-0.me) 4138 | 42
+> [ 740.000000] (1:node@c-0.me) 8234 | 42
+> [ 740.000000] (1:node@c-0.me) 16426 | 42
+> [ 740.000000] (1:node@c-0.me) 32810 | 42
+> [ 740.000000] (1:node@c-0.me) 65578 | 42
+> [ 740.000000] (1:node@c-0.me) 131114 | 42
+> [ 740.000000] (1:node@c-0.me) 262186 | 42
+> [ 740.000000] (1:node@c-0.me) 524330 | 42
+> [ 740.000000] (1:node@c-0.me) 1048618 | 42
+> [ 740.000000] (1:node@c-0.me) 2097194 | 42
+> [ 740.000000] (1:node@c-0.me) 4194346 | 42
+> [ 740.000000] (1:node@c-0.me) 8388650 | 42
+> [ 740.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 753.000000] (3:node@c-2.me) My finger table:
+> [ 753.000000] (3:node@c-2.me) Start | Succ
+> [ 753.000000] (3:node@c-2.me) 533745 | 1319738
+> [ 753.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 753.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 753.000000] (3:node@c-2.me) 533752 | 1319738
+> [ 753.000000] (3:node@c-2.me) 533760 | 1319738
+> [ 753.000000] (3:node@c-2.me) 533776 | 1319738
+> [ 753.000000] (3:node@c-2.me) 533808 | 533744
+> [ 753.000000] (3:node@c-2.me) 533872 | 533744
+> [ 753.000000] (3:node@c-2.me) 534000 | 533744
+> [ 753.000000] (3:node@c-2.me) 534256 | 533744
+> [ 753.000000] (3:node@c-2.me) 534768 | 533744
+> [ 753.000000] (3:node@c-2.me) 535792 | 533744
+> [ 753.000000] (3:node@c-2.me) 537840 | 533744
+> [ 753.000000] (3:node@c-2.me) 541936 | 533744
+> [ 753.000000] (3:node@c-2.me) 550128 | 533744
+> [ 753.000000] (3:node@c-2.me) 566512 | 533744
+> [ 753.000000] (3:node@c-2.me) 599280 | 533744
+> [ 753.000000] (3:node@c-2.me) 664816 | 533744
+> [ 753.000000] (3:node@c-2.me) 795888 | 533744
+> [ 753.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 753.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 753.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 753.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 753.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 753.000000] (3:node@c-2.me) Predecessor: 366680
+> [ 765.000000] (2:node@c-1.me) My finger table:
+> [ 765.000000] (2:node@c-1.me) Start | Succ
+> [ 765.000000] (2:node@c-1.me) 366681 | 533744
+> [ 765.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 765.000000] (2:node@c-1.me) 366684 | 1319738
+> [ 765.000000] (2:node@c-1.me) 366688 | 1319738
+> [ 765.000000] (2:node@c-1.me) 366696 | 533744
+> [ 765.000000] (2:node@c-1.me) 366712 | 533744
+> [ 765.000000] (2:node@c-1.me) 366744 | 366680
+> [ 765.000000] (2:node@c-1.me) 366808 | 366680
+> [ 765.000000] (2:node@c-1.me) 366936 | 366680
+> [ 765.000000] (2:node@c-1.me) 367192 | 366680
+> [ 765.000000] (2:node@c-1.me) 367704 | 366680
+> [ 765.000000] (2:node@c-1.me) 368728 | 366680
+> [ 765.000000] (2:node@c-1.me) 370776 | 366680
+> [ 765.000000] (2:node@c-1.me) 374872 | 366680
+> [ 765.000000] (2:node@c-1.me) 383064 | 366680
+> [ 765.000000] (2:node@c-1.me) 399448 | 366680
+> [ 765.000000] (2:node@c-1.me) 432216 | 366680
+> [ 765.000000] (2:node@c-1.me) 497752 | 366680
+> [ 765.000000] (2:node@c-1.me) 628824 | 366680
+> [ 765.000000] (2:node@c-1.me) 890968 | 366680
+> [ 765.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 765.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 765.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 765.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 765.000000] (2:node@c-1.me) Predecessor: 42
+> [ 766.000000] (6:node@c-5.me) My finger table:
+> [ 766.000000] (6:node@c-5.me) Start | Succ
+> [ 766.000000] (6:node@c-5.me) 10874877 | 16509405
+> [ 766.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 766.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 766.000000] (6:node@c-5.me) 10874884 | 533744
+> [ 766.000000] (6:node@c-5.me) 10874892 | 366680
+> [ 766.000000] (6:node@c-5.me) 10874908 | 16509405
+> [ 766.000000] (6:node@c-5.me) 10874940 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 766.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 766.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 766.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 766.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 766.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 766.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 766.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 766.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 766.000000] (6:node@c-5.me) Predecessor: 10004760
+> [ 777.000000] (7:node@c-6.me) My finger table:
+> [ 777.000000] (7:node@c-6.me) Start | Succ
+> [ 777.000000] (7:node@c-6.me) 16728097 | 42
+> [ 777.000000] (7:node@c-6.me) 16728098 | 42
+> [ 777.000000] (7:node@c-6.me) 16728100 | 42
+> [ 777.000000] (7:node@c-6.me) 16728104 | 42
+> [ 777.000000] (7:node@c-6.me) 16728112 | 42
+> [ 777.000000] (7:node@c-6.me) 16728128 | 42
+> [ 777.000000] (7:node@c-6.me) 16728160 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 777.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 777.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 777.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 777.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 777.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 777.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 777.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 777.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 777.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 800.000000] (8:node@c-7.me) My finger table:
+> [ 800.000000] (8:node@c-7.me) Start | Succ
+> [ 800.000000] (8:node@c-7.me) 10004761 | 10874876
+> [ 800.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 800.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 800.000000] (8:node@c-7.me) 10004768 | 16509405
+> [ 800.000000] (8:node@c-7.me) 10004776 | 16509405
+> [ 800.000000] (8:node@c-7.me) 10004792 | 10874876
+> [ 800.000000] (8:node@c-7.me) 10004824 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 800.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 800.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 800.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 800.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 800.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 800.000000] (8:node@c-7.me) Predecessor: 6518808
+> [ 806.000000] (9:node@c-8.me) My finger table:
+> [ 806.000000] (9:node@c-8.me) Start | Succ
+> [ 806.000000] (9:node@c-8.me) 6518809 | 10004760
+> [ 806.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 806.000000] (9:node@c-8.me) 6518812 | 10004760
+> [ 806.000000] (9:node@c-8.me) 6518816 | 10004760
+> [ 806.000000] (9:node@c-8.me) 6518824 | 10004760
+> [ 806.000000] (9:node@c-8.me) 6518840 | 10004760
+> [ 806.000000] (9:node@c-8.me) 6518872 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 806.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 806.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 806.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 806.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 806.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 806.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 806.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 812.000000] (4:node@c-3.me) My finger table:
+> [ 812.000000] (4:node@c-3.me) Start | Succ
+> [ 812.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319746 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319754 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319770 | 2015253
+> [ 812.000000] (4:node@c-3.me) 1319802 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 812.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 812.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 812.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 812.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 812.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 812.000000] (4:node@c-3.me) Predecessor: 533744
+> [ 812.000000] (10:node@c-9.me) My finger table:
+> [ 812.000000] (10:node@c-9.me) Start | Succ
+> [ 812.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015257 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015261 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015269 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015285 | 6518808
+> [ 812.000000] (10:node@c-9.me) 2015317 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 812.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 812.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 812.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 812.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 812.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 812.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 833.000000] (5:node@c-4.me) My finger table:
+> [ 833.000000] (5:node@c-4.me) Start | Succ
+> [ 833.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509413 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509421 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509437 | 16728096
+> [ 833.000000] (5:node@c-4.me) 16509469 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 833.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 833.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 833.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 833.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 833.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 833.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 833.000000] (5:node@c-4.me) Predecessor: 10874876
+> [ 861.000000] (1:node@c-0.me) My finger table:
+> [ 861.000000] (1:node@c-0.me) Start | Succ
+> [ 861.000000] (1:node@c-0.me) 43 | 366680
+> [ 861.000000] (1:node@c-0.me) 44 | 366680
+> [ 861.000000] (1:node@c-0.me) 46 | 366680
+> [ 861.000000] (1:node@c-0.me) 50 | 366680
+> [ 861.000000] (1:node@c-0.me) 58 | 366680
+> [ 861.000000] (1:node@c-0.me) 74 | 366680
+> [ 861.000000] (1:node@c-0.me) 106 | 366680
+> [ 861.000000] (1:node@c-0.me) 170 | 42
+> [ 861.000000] (1:node@c-0.me) 298 | 42
+> [ 861.000000] (1:node@c-0.me) 554 | 42
+> [ 861.000000] (1:node@c-0.me) 1066 | 42
+> [ 861.000000] (1:node@c-0.me) 2090 | 42
+> [ 861.000000] (1:node@c-0.me) 4138 | 42
+> [ 861.000000] (1:node@c-0.me) 8234 | 42
+> [ 861.000000] (1:node@c-0.me) 16426 | 42
+> [ 861.000000] (1:node@c-0.me) 32810 | 42
+> [ 861.000000] (1:node@c-0.me) 65578 | 42
+> [ 861.000000] (1:node@c-0.me) 131114 | 42
+> [ 861.000000] (1:node@c-0.me) 262186 | 42
+> [ 861.000000] (1:node@c-0.me) 524330 | 42
+> [ 861.000000] (1:node@c-0.me) 1048618 | 42
+> [ 861.000000] (1:node@c-0.me) 2097194 | 42
+> [ 861.000000] (1:node@c-0.me) 4194346 | 42
+> [ 861.000000] (1:node@c-0.me) 8388650 | 42
+> [ 861.000000] (1:node@c-0.me) Predecessor: 16728096
+> [ 884.000000] (3:node@c-2.me) My finger table:
+> [ 884.000000] (3:node@c-2.me) Start | Succ
+> [ 884.000000] (3:node@c-2.me) 533745 | 1319738
+> [ 884.000000] (3:node@c-2.me) 533746 | 10004760
+> [ 884.000000] (3:node@c-2.me) 533748 | 10004760
+> [ 884.000000] (3:node@c-2.me) 533752 | 1319738
+> [ 884.000000] (3:node@c-2.me) 533760 | 1319738
+> [ 884.000000] (3:node@c-2.me) 533776 | 1319738
+> [ 884.000000] (3:node@c-2.me) 533808 | 1319738
+> [ 884.000000] (3:node@c-2.me) 533872 | 533744
+> [ 884.000000] (3:node@c-2.me) 534000 | 533744
+> [ 884.000000] (3:node@c-2.me) 534256 | 533744
+> [ 884.000000] (3:node@c-2.me) 534768 | 533744
+> [ 884.000000] (3:node@c-2.me) 535792 | 533744
+> [ 884.000000] (3:node@c-2.me) 537840 | 533744
+> [ 884.000000] (3:node@c-2.me) 541936 | 533744
+> [ 884.000000] (3:node@c-2.me) 550128 | 533744
+> [ 884.000000] (3:node@c-2.me) 566512 | 533744
+> [ 884.000000] (3:node@c-2.me) 599280 | 533744
+> [ 884.000000] (3:node@c-2.me) 664816 | 533744
+> [ 884.000000] (3:node@c-2.me) 795888 | 533744
+> [ 884.000000] (3:node@c-2.me) 1058032 | 533744
+> [ 884.000000] (3:node@c-2.me) 1582320 | 533744
+> [ 884.000000] (3:node@c-2.me) 2630896 | 533744
+> [ 884.000000] (3:node@c-2.me) 4728048 | 533744
+> [ 884.000000] (3:node@c-2.me) 8922352 | 533744
+> [ 884.000000] (3:node@c-2.me) Predecessor: 366680
+> [ 887.000000] (6:node@c-5.me) My finger table:
+> [ 887.000000] (6:node@c-5.me) Start | Succ
+> [ 887.000000] (6:node@c-5.me) 10874877 | 16509405
+> [ 887.000000] (6:node@c-5.me) 10874878 | 533744
+> [ 887.000000] (6:node@c-5.me) 10874880 | 533744
+> [ 887.000000] (6:node@c-5.me) 10874884 | 533744
+> [ 887.000000] (6:node@c-5.me) 10874892 | 366680
+> [ 887.000000] (6:node@c-5.me) 10874908 | 16509405
+> [ 887.000000] (6:node@c-5.me) 10874940 | 16509405
+> [ 887.000000] (6:node@c-5.me) 10875004 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10875132 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10875388 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10875900 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10876924 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10878972 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10883068 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10891260 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10907644 | 10874876
+> [ 887.000000] (6:node@c-5.me) 10940412 | 10874876
+> [ 887.000000] (6:node@c-5.me) 11005948 | 10874876
+> [ 887.000000] (6:node@c-5.me) 11137020 | 10874876
+> [ 887.000000] (6:node@c-5.me) 11399164 | 10874876
+> [ 887.000000] (6:node@c-5.me) 11923452 | 10874876
+> [ 887.000000] (6:node@c-5.me) 12972028 | 10874876
+> [ 887.000000] (6:node@c-5.me) 15069180 | 10874876
+> [ 887.000000] (6:node@c-5.me) 2486268 | 10874876
+> [ 887.000000] (6:node@c-5.me) Predecessor: 10004760
+> [ 908.000000] (2:node@c-1.me) My finger table:
+> [ 908.000000] (2:node@c-1.me) Start | Succ
+> [ 908.000000] (2:node@c-1.me) 366681 | 533744
+> [ 908.000000] (2:node@c-1.me) 366682 | 1319738
+> [ 908.000000] (2:node@c-1.me) 366684 | 1319738
+> [ 908.000000] (2:node@c-1.me) 366688 | 1319738
+> [ 908.000000] (2:node@c-1.me) 366696 | 533744
+> [ 908.000000] (2:node@c-1.me) 366712 | 533744
+> [ 908.000000] (2:node@c-1.me) 366744 | 533744
+> [ 908.000000] (2:node@c-1.me) 366808 | 366680
+> [ 908.000000] (2:node@c-1.me) 366936 | 366680
+> [ 908.000000] (2:node@c-1.me) 367192 | 366680
+> [ 908.000000] (2:node@c-1.me) 367704 | 366680
+> [ 908.000000] (2:node@c-1.me) 368728 | 366680
+> [ 908.000000] (2:node@c-1.me) 370776 | 366680
+> [ 908.000000] (2:node@c-1.me) 374872 | 366680
+> [ 908.000000] (2:node@c-1.me) 383064 | 366680
+> [ 908.000000] (2:node@c-1.me) 399448 | 366680
+> [ 908.000000] (2:node@c-1.me) 432216 | 366680
+> [ 908.000000] (2:node@c-1.me) 497752 | 366680
+> [ 908.000000] (2:node@c-1.me) 628824 | 366680
+> [ 908.000000] (2:node@c-1.me) 890968 | 366680
+> [ 908.000000] (2:node@c-1.me) 1415256 | 366680
+> [ 908.000000] (2:node@c-1.me) 2463832 | 366680
+> [ 908.000000] (2:node@c-1.me) 4560984 | 366680
+> [ 908.000000] (2:node@c-1.me) 8755288 | 366680
+> [ 908.000000] (2:node@c-1.me) Predecessor: 42
+> [ 915.000000] (7:node@c-6.me) My finger table:
+> [ 915.000000] (7:node@c-6.me) Start | Succ
+> [ 915.000000] (7:node@c-6.me) 16728097 | 42
+> [ 915.000000] (7:node@c-6.me) 16728098 | 42
+> [ 915.000000] (7:node@c-6.me) 16728100 | 42
+> [ 915.000000] (7:node@c-6.me) 16728104 | 42
+> [ 915.000000] (7:node@c-6.me) 16728112 | 42
+> [ 915.000000] (7:node@c-6.me) 16728128 | 42
+> [ 915.000000] (7:node@c-6.me) 16728160 | 42
+> [ 915.000000] (7:node@c-6.me) 16728224 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16728352 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16728608 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16729120 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16730144 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16732192 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16736288 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16744480 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16760864 | 16728096
+> [ 915.000000] (7:node@c-6.me) 16416 | 16728096
+> [ 915.000000] (7:node@c-6.me) 81952 | 16728096
+> [ 915.000000] (7:node@c-6.me) 213024 | 16728096
+> [ 915.000000] (7:node@c-6.me) 475168 | 16728096
+> [ 915.000000] (7:node@c-6.me) 999456 | 16728096
+> [ 915.000000] (7:node@c-6.me) 2048032 | 16728096
+> [ 915.000000] (7:node@c-6.me) 4145184 | 16728096
+> [ 915.000000] (7:node@c-6.me) 8339488 | 16728096
+> [ 915.000000] (7:node@c-6.me) Predecessor: 16509405
+> [ 920.000000] (8:node@c-7.me) My finger table:
+> [ 920.000000] (8:node@c-7.me) Start | Succ
+> [ 920.000000] (8:node@c-7.me) 10004761 | 10874876
+> [ 920.000000] (8:node@c-7.me) 10004762 | 16509405
+> [ 920.000000] (8:node@c-7.me) 10004764 | 16509405
+> [ 920.000000] (8:node@c-7.me) 10004768 | 16509405
+> [ 920.000000] (8:node@c-7.me) 10004776 | 16509405
+> [ 920.000000] (8:node@c-7.me) 10004792 | 10874876
+> [ 920.000000] (8:node@c-7.me) 10004824 | 10874876
+> [ 920.000000] (8:node@c-7.me) 10004888 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10005016 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10005272 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10005784 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10006808 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10008856 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10012952 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10021144 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10037528 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10070296 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10135832 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10266904 | 10004760
+> [ 920.000000] (8:node@c-7.me) 10529048 | 10004760
+> [ 920.000000] (8:node@c-7.me) 11053336 | 10004760
+> [ 920.000000] (8:node@c-7.me) 12101912 | 10004760
+> [ 920.000000] (8:node@c-7.me) 14199064 | 10004760
+> [ 920.000000] (8:node@c-7.me) 1616152 | 10004760
+> [ 920.000000] (8:node@c-7.me) Predecessor: 6518808
+> [ 932.000000] (10:node@c-9.me) My finger table:
+> [ 932.000000] (10:node@c-9.me) Start | Succ
+> [ 932.000000] (10:node@c-9.me) 2015254 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015255 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015257 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015261 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015269 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015285 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015317 | 6518808
+> [ 932.000000] (10:node@c-9.me) 2015381 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2015509 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2015765 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2016277 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2017301 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2019349 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2023445 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2031637 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2048021 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2080789 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2146325 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2277397 | 2015253
+> [ 932.000000] (10:node@c-9.me) 2539541 | 2015253
+> [ 932.000000] (10:node@c-9.me) 3063829 | 2015253
+> [ 932.000000] (10:node@c-9.me) 4112405 | 2015253
+> [ 932.000000] (10:node@c-9.me) 6209557 | 2015253
+> [ 932.000000] (10:node@c-9.me) 10403861 | 2015253
+> [ 932.000000] (10:node@c-9.me) Predecessor: 1319738
+> [ 950.000000] (4:node@c-3.me) My finger table:
+> [ 950.000000] (4:node@c-3.me) Start | Succ
+> [ 950.000000] (4:node@c-3.me) 1319739 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319740 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319742 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319746 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319754 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319770 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319802 | 2015253
+> [ 950.000000] (4:node@c-3.me) 1319866 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1319994 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1320250 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1320762 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1321786 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1323834 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1327930 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1336122 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1352506 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1385274 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1450810 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1581882 | 1319738
+> [ 950.000000] (4:node@c-3.me) 1844026 | 1319738
+> [ 950.000000] (4:node@c-3.me) 2368314 | 1319738
+> [ 950.000000] (4:node@c-3.me) 3416890 | 1319738
+> [ 950.000000] (4:node@c-3.me) 5514042 | 1319738
+> [ 950.000000] (4:node@c-3.me) 9708346 | 1319738
+> [ 950.000000] (4:node@c-3.me) Predecessor: 533744
+> [ 955.000000] (9:node@c-8.me) My finger table:
+> [ 955.000000] (9:node@c-8.me) Start | Succ
+> [ 955.000000] (9:node@c-8.me) 6518809 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518810 | 16728096
+> [ 955.000000] (9:node@c-8.me) 6518812 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518816 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518824 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518840 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518872 | 10004760
+> [ 955.000000] (9:node@c-8.me) 6518936 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6519064 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6519320 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6519832 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6520856 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6522904 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6527000 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6535192 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6551576 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6584344 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6649880 | 6518808
+> [ 955.000000] (9:node@c-8.me) 6780952 | 6518808
+> [ 955.000000] (9:node@c-8.me) 7043096 | 6518808
+> [ 955.000000] (9:node@c-8.me) 7567384 | 6518808
+> [ 955.000000] (9:node@c-8.me) 8615960 | 6518808
+> [ 955.000000] (9:node@c-8.me) 10713112 | 6518808
+> [ 955.000000] (9:node@c-8.me) 14907416 | 6518808
+> [ 955.000000] (9:node@c-8.me) Predecessor: 2015253
+> [ 972.000000] (5:node@c-4.me) My finger table:
+> [ 972.000000] (5:node@c-4.me) Start | Succ
+> [ 972.000000] (5:node@c-4.me) 16509406 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509407 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509409 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509413 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509421 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509437 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509469 | 16728096
+> [ 972.000000] (5:node@c-4.me) 16509533 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16509661 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16509917 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16510429 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16511453 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16513501 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16517597 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16525789 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16542173 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16574941 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16640477 | 16509405
+> [ 972.000000] (5:node@c-4.me) 16771549 | 16509405
+> [ 972.000000] (5:node@c-4.me) 256477 | 16509405
+> [ 972.000000] (5:node@c-4.me) 780765 | 16509405
+> [ 972.000000] (5:node@c-4.me) 1829341 | 16509405
+> [ 972.000000] (5:node@c-4.me) 3926493 | 16509405
+> [ 972.000000] (5:node@c-4.me) 8120797 | 16509405
+> [ 972.000000] (5:node@c-4.me) Predecessor: 10874876
+> [ 984.000000] (1:node@c-0.me) My finger table:
+> [ 984.000000] (1:node@c-0.me) Start | Succ
+> [ 984.000000] (1:node@c-0.me) 43 | 366680
+> [ 984.000000] (1:node@c-0.me) 44 | 366680
+> [ 984.000000] (1:node@c-0.me) 46 | 366680
+> [ 984.000000] (1:node@c-0.me) 50 | 366680
+> [ 984.000000] (1:node@c-0.me) 58 | 366680
+> [ 984.000000] (1:node@c-0.me) 74 | 366680
+> [ 984.000000] (1:node@c-0.me) 106 | 366680
+> [ 984.000000] (1:node@c-0.me) 170 | 366680
+> [ 984.000000] (1:node@c-0.me) 298 | 42
+> [ 984.000000] (1:node@c-0.me) 554 | 42
+> [ 984.000000] (1:node@c-0.me) 1066 | 42
+> [ 984.000000] (1:node@c-0.me) 2090 | 42
+> [ 984.000000] (1:node@c-0.me) 4138 | 42
+> [ 984.000000] (1:node@c-0.me) 8234 | 42
+> [ 984.000000] (1:node@c-0.me) 16426 | 42
+> [ 984.000000] (1:node@c-0.me) 32810 | 42
+> [ 984.000000] (1:node@c-0.me) 65578 | 42
+> [ 984.000000] (1:node@c-0.me) 131114 | 42
+> [ 984.000000] (1:node@c-0.me) 262186 | 42
+> [ 984.000000] (1:node@c-0.me) 524330 | 42
+> [ 984.000000] (1:node@c-0.me) 1048618 | 42
+> [ 984.000000] (1:node@c-0.me) 2097194 | 42
+> [ 984.000000] (1:node@c-0.me) 4194346 | 42
+> [ 984.000000] (1:node@c-0.me) 8388650 | 42
+> [ 984.000000] (1:node@c-0.me) Predecessor: 16728096
+> [1053.000000] (0:@) Messages created: 1972
+> [1053.000000] (0:@) Simulated time: 1053
#! ./tesh
-p Testing fullduplex TCP option DISABLED
+p Testing crosstraffic TCP option DISABLED
-$ gtnets/gtnets ${srcdir:=.}/gtnets/fullduplex-p.xml ${srcdir:=.}/gtnets/fullduplex-d.xml --cfg=fullduplex:0
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'fullduplex' to '0'
+$ gtnets/gtnets ${srcdir:=.}/gtnets/crosstraffic-p.xml ${srcdir:=.}/gtnets/crosstraffic-d.xml --cfg=network/crosstraffic:0
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/crosstraffic' to '0'
> [S1:slave:(8) 108.799652] [msg_test/INFO] ===> Estimated Bw of FLOW[1] : 3063.735285 ; message from S1 to C1 with remaining : 666666.666667
> [S1:slave:(8) 108.799652] [msg_test/INFO] ===> Estimated Bw of FLOW[2] : 3063.735285 ; message from S1 to C1 with remaining : 666666.666667
> [S1:slave:(8) 108.799652] [msg_test/INFO] ===> Estimated Bw of FLOW[3] : 3063.735285 ; message from S1 to C1 with remaining : 666666.666667
> [S1:slave:(8) 108.799652] [msg_test/INFO] ===> Estimated Bw of FLOW[4] : 9191.205854 ; message from C1 to S1 with remaining : 0.000000
-p Testing fullduplex TCP option ENABLED
+p Testing crosstraffic TCP option ENABLED
-$ gtnets/gtnets ${srcdir:=.}/gtnets/fullduplex-p.xml ${srcdir:=.}/gtnets/fullduplex-d.xml --cfg=fullduplex:1
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'fullduplex' to '1'
-> [C1:slave:(2) 331.625739] [msg_test/INFO] ===> Estimated Bw of FLOW[1] : 3015.447482 ; message from S1 to C1 with remaining : 0.000000
-> [C1:slave:(2) 331.625739] [msg_test/INFO] ===> Estimated Bw of FLOW[2] : 3015.447482 ; message from S1 to C1 with remaining : 0.000000
-> [C1:slave:(2) 331.625739] [msg_test/INFO] ===> Estimated Bw of FLOW[3] : 3015.447482 ; message from S1 to C1 with remaining : 0.000000
-> [C1:slave:(2) 331.625739] [msg_test/INFO] ===> Estimated Bw of FLOW[4] : 3015.447482 ; message from C1 to S1 with remaining : 0.000000
+$ gtnets/gtnets ${srcdir:=.}/gtnets/crosstraffic-p.xml ${srcdir:=.}/gtnets/crosstraffic-d.xml --cfg=network/crosstraffic:1
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/crosstraffic' to '1'
+> [C1:slave:(4) 331.625739] [msg_test/INFO] ===> Estimated Bw of FLOW[1] : 3015.447482 ; message from S1 to C1 with remaining : 0.000000
+> [C1:slave:(4) 331.625739] [msg_test/INFO] ===> Estimated Bw of FLOW[2] : 3015.447482 ; message from S1 to C1 with remaining : 0.000000
+> [C1:slave:(4) 331.625739] [msg_test/INFO] ===> Estimated Bw of FLOW[3] : 3015.447482 ; message from S1 to C1 with remaining : 0.000000
+> [C1:slave:(4) 331.625739] [msg_test/INFO] ===> Estimated Bw of FLOW[4] : 3015.447482 ; message from C1 to S1 with remaining : 0.000000
char *slavename = NULL;
double task_comm_size = 0;
m_task_t todo;
- m_host_t slave;
char id_alias[10];
//unique id to control statistics
int id = -1;
gl_data_size[id] = task_comm_size;
}
- { /* Process organisation */
- slave = MSG_get_host_by_name(slavename);
- }
-
count_finished++;
/* time measurement */
bool_printed = 1;
for (id = 0; id < NTASKS; id++) {
- if (gl_task_array[id] == NULL) {
- } else if (gl_task_array[id] == task) {
+ if (gl_task_array[id] == NULL) continue;
+ if (gl_task_array[id] == task) {
#ifdef HAVE_LATENCY_BOUND_TRACKING
limited_latency = MSG_task_is_latency_bounded(gl_task_array[id]);
if (limited_latency) {
("===> Estimated Bw of FLOW[%d] : %f ; message from %s to %s with remaining : %f",
id, gl_data_size[id] / elapsed_time, masternames[id],
slavenames[id], 0.0);
+ MSG_task_destroy(gl_task_array[id]);
+ gl_task_array[id]=NULL;
} else {
remaining =
MSG_task_get_remaining_communication(gl_task_array[id]);
("===> Estimated Bw of FLOW[%d] : %f ; message from %s to %s with remaining : %f",
id, (gl_data_size[id] - remaining) / elapsed_time,
masternames[id], slavenames[id], remaining);
+ if(remaining==0) {
+ MSG_task_destroy(gl_task_array[id]);
+ gl_task_array[id]=NULL;
+ }
}
-
}
+ bool_printed = 2;
}
char mark[100];
snprintf(mark, 100, "flow_%d_finished", trace_id);
TRACE_mark("endmark", mark);
- MSG_task_destroy(task);
+ if(bool_printed==2 && gl_task_array[trace_id]) MSG_task_destroy(gl_task_array[trace_id]);
return 0;
} /* end_of_slave */
p Testing a simple master/slave example application
! output sort
-$ $SG_TEST_EXENV masterslave/masterslave_forwarder$EXEEXT ${srcdir:=.}/small_platform_with_routers.xml ${srcdir:=.}/masterslave/deployment_masterslave.xml --trace "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV masterslave/masterslave_forwarder$EXEEXT ${srcdir:=.}/small_platform_with_routers.xml ${srcdir:=.}/masterslave/deployment_masterslave.xml --cfg=network/crosstraffic:0 --trace "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'network/crosstraffic' to '0'
> [ 0.000000] (1:master@Tremblay) Got 5 slaves and 20 tasks to process
> [ 0.000000] (1:master@Tremblay) Sending "Task_0" to "Jupiter"
> [ 0.165962] (1:master@Tremblay) Sent
p Testing a simple master/slave example application
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/masterslave_forwarder$EXEEXT small_platform.xml masterslave/deployment_masterslave.xml --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
-> [ 0.000000] (0:@) Switching workstation model to compound since you changed the network and/or cpu model(s)
+$ $SG_TEST_EXENV ${bindir:=.}/masterslave_forwarder$EXEEXT small_platform.xml masterslave/deployment_masterslave.xml --cfg=network/crosstraffic:0 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'network/crosstraffic' to '0'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (1:master@Tremblay) Got 5 slaves and 20 tasks to process
> [ 0.000000] (1:master@Tremblay) Sending "Task_0" to "Jupiter"
> [ 0.165962] (1:master@Tremblay) Sent
p Testing a master/slave example application with a forwarder module
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/masterslave_forwarder$EXEEXT msg_platform.xml masterslave/deployment_masterslave_forwarder.xml --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
-> [ 0.000000] (0:@) Switching workstation model to compound since you changed the network and/or cpu model(s)
+$ $SG_TEST_EXENV ${bindir:=.}/masterslave_forwarder$EXEEXT msg_platform.xml masterslave/deployment_masterslave_forwarder.xml --cfg=network/crosstraffic:0 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'network/crosstraffic' to '0'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (1:master@Jacquelin) Got 5 slaves and 20 tasks to process
> [ 0.000000] (1:master@Jacquelin) Sending "Task_0" to "iRMX"
> [ 4.772530] (1:master@Jacquelin) Sent
p Testing a simple master/slave example application handling failures
! output sort
-$ ${bindir:=.}/masterslave_failure$EXEEXT --log=xbt_cfg.thres:critical --log=no_loc small_platform_with_failures.xml masterslave/deployment_masterslave.xml --cfg=path:${srcdir} --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (0:@) Switching workstation model to compound since you changed the network and/or cpu model(s)
+$ ${bindir:=.}/masterslave_failure$EXEEXT --log=xbt_cfg.thres:critical --log=no_loc small_platform_with_failures.xml masterslave/deployment_masterslave.xml --cfg=network/crosstraffic:0 --cfg=path:${srcdir} --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Cannot launch process 'slave' on failed host 'Fafard'
> [ 0.000000] (1:master@Tremblay) Got 5 slave(s) :
> [ 0.000000] (1:master@Tremblay) Jupiter
p Testing the bypassing of the flexml parser
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/masterslave_bypass --log=no_loc --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+$ $SG_TEST_EXENV ${bindir:=.}/masterslave_bypass --log=no_loc --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (0:@) Bypassing the XML parser since surf_parse_open received a NULL pointer. If it is not what you want, go fix your code.
-> [ 0.000000] (0:@) Switching workstation model to compound since you changed the network and/or cpu model(s)
> [ 0.000000] (1:master@host A) Got 1 slave(s) :
> [ 0.000000] (1:master@host A) host B
> [ 0.000000] (1:master@host A) Got 20 task to process :
--- /dev/null
+#! ./tesh
+
+p Testing a simple master/slave example application
+
+$ $SG_TEST_EXENV ${bindir:=.}/masterslave_forwarder$EXEEXT small_platform.xml masterslave/deployment_masterslave.xml --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
+> [ 0.000000] (1:master@Tremblay) Got 5 slaves and 20 tasks to process
+> [ 0.000000] (1:master@Tremblay) Sending "Task_0" to "Jupiter"
+> [ 0.173500] (3:slave@Jupiter) Received "Task_0"
+> [ 0.173500] (3:slave@Jupiter) Processing "Task_0"
+> [ 0.173500] (1:master@Tremblay) Sent
+> [ 0.173500] (1:master@Tremblay) Sending "Task_1" to "Fafard"
+> [ 0.391653] (4:slave@Fafard) Received "Task_1"
+> [ 0.391653] (4:slave@Fafard) Processing "Task_1"
+> [ 0.391653] (1:master@Tremblay) Sent
+> [ 0.391653] (1:master@Tremblay) Sending "Task_2" to "Ginette"
+> [ 0.532113] (5:slave@Ginette) Received "Task_2"
+> [ 0.532113] (5:slave@Ginette) Processing "Task_2"
+> [ 0.532113] (1:master@Tremblay) Sent
+> [ 0.532113] (1:master@Tremblay) Sending "Task_3" to "Bourassa"
+> [ 0.747985] (6:slave@Bourassa) Received "Task_3"
+> [ 0.747985] (6:slave@Bourassa) Processing "Task_3"
+> [ 0.747985] (1:master@Tremblay) Sent
+> [ 0.747985] (1:master@Tremblay) Sending "Task_4" to "Tremblay"
+> [ 0.747985] (1:master@Tremblay) Hey ! It's me ! :)
+> [ 0.750324] (2:slave@Tremblay) Received "Task_4"
+> [ 0.750324] (2:slave@Tremblay) Processing "Task_4"
+> [ 0.750324] (1:master@Tremblay) Sent
+> [ 0.750324] (1:master@Tremblay) Sending "Task_5" to "Jupiter"
+> [ 0.828842] (3:slave@Jupiter) "Task_0" done
+> [ 1.002342] (1:master@Tremblay) Sent
+> [ 1.002342] (1:master@Tremblay) Sending "Task_6" to "Fafard"
+> [ 1.002342] (3:slave@Jupiter) Received "Task_5"
+> [ 1.002342] (3:slave@Jupiter) Processing "Task_5"
+> [ 1.046995] (4:slave@Fafard) "Task_1" done
+> [ 1.260034] (2:slave@Tremblay) "Task_4" done
+> [ 1.265149] (1:master@Tremblay) Sent
+> [ 1.265149] (1:master@Tremblay) Sending "Task_7" to "Ginette"
+> [ 1.265149] (4:slave@Fafard) Received "Task_6"
+> [ 1.265149] (4:slave@Fafard) Processing "Task_6"
+> [ 1.563211] (5:slave@Ginette) "Task_2" done
+> [ 1.657685] (3:slave@Jupiter) "Task_5" done
+> [ 1.703670] (1:master@Tremblay) Sent
+> [ 1.703670] (1:master@Tremblay) Sending "Task_8" to "Bourassa"
+> [ 1.703670] (5:slave@Ginette) Received "Task_7"
+> [ 1.703670] (5:slave@Ginette) Processing "Task_7"
+> [ 1.779083] (6:slave@Bourassa) "Task_3" done
+> [ 1.920491] (4:slave@Fafard) "Task_6" done
+> [ 1.994955] (1:master@Tremblay) Sent
+> [ 1.994955] (1:master@Tremblay) Sending "Task_9" to "Tremblay"
+> [ 1.994955] (1:master@Tremblay) Hey ! It's me ! :)
+> [ 1.994955] (6:slave@Bourassa) Received "Task_8"
+> [ 1.994955] (6:slave@Bourassa) Processing "Task_8"
+> [ 1.997294] (2:slave@Tremblay) Received "Task_9"
+> [ 1.997294] (2:slave@Tremblay) Processing "Task_9"
+> [ 1.997294] (1:master@Tremblay) Sent
+> [ 1.997294] (1:master@Tremblay) Sending "Task_10" to "Jupiter"
+> [ 2.170794] (3:slave@Jupiter) Received "Task_10"
+> [ 2.170794] (3:slave@Jupiter) Processing "Task_10"
+> [ 2.170794] (1:master@Tremblay) Sent
+> [ 2.170794] (1:master@Tremblay) Sending "Task_11" to "Fafard"
+> [ 2.388947] (4:slave@Fafard) Received "Task_11"
+> [ 2.388947] (4:slave@Fafard) Processing "Task_11"
+> [ 2.388947] (1:master@Tremblay) Sent
+> [ 2.388947] (1:master@Tremblay) Sending "Task_12" to "Ginette"
+> [ 2.507004] (2:slave@Tremblay) "Task_9" done
+> [ 2.734768] (5:slave@Ginette) "Task_7" done
+> [ 2.826136] (3:slave@Jupiter) "Task_10" done
+> [ 2.875228] (1:master@Tremblay) Sent
+> [ 2.875228] (1:master@Tremblay) Sending "Task_13" to "Bourassa"
+> [ 2.875228] (5:slave@Ginette) Received "Task_12"
+> [ 2.875228] (5:slave@Ginette) Processing "Task_12"
+> [ 3.026053] (6:slave@Bourassa) "Task_8" done
+> [ 3.044289] (4:slave@Fafard) "Task_11" done
+> [ 3.241925] (1:master@Tremblay) Sent
+> [ 3.241925] (1:master@Tremblay) Sending "Task_14" to "Tremblay"
+> [ 3.241925] (1:master@Tremblay) Hey ! It's me ! :)
+> [ 3.241925] (6:slave@Bourassa) Received "Task_13"
+> [ 3.241925] (6:slave@Bourassa) Processing "Task_13"
+> [ 3.244264] (2:slave@Tremblay) Received "Task_14"
+> [ 3.244264] (2:slave@Tremblay) Processing "Task_14"
+> [ 3.244264] (1:master@Tremblay) Sent
+> [ 3.244264] (1:master@Tremblay) Sending "Task_15" to "Jupiter"
+> [ 3.417764] (3:slave@Jupiter) Received "Task_15"
+> [ 3.417764] (3:slave@Jupiter) Processing "Task_15"
+> [ 3.417764] (1:master@Tremblay) Sent
+> [ 3.417764] (1:master@Tremblay) Sending "Task_16" to "Fafard"
+> [ 3.635917] (4:slave@Fafard) Received "Task_16"
+> [ 3.635917] (4:slave@Fafard) Processing "Task_16"
+> [ 3.635917] (1:master@Tremblay) Sent
+> [ 3.635917] (1:master@Tremblay) Sending "Task_17" to "Ginette"
+> [ 3.753974] (2:slave@Tremblay) "Task_14" done
+> [ 3.906326] (5:slave@Ginette) "Task_12" done
+> [ 4.046785] (1:master@Tremblay) Sent
+> [ 4.046785] (1:master@Tremblay) Sending "Task_18" to "Bourassa"
+> [ 4.046785] (5:slave@Ginette) Received "Task_17"
+> [ 4.046785] (5:slave@Ginette) Processing "Task_17"
+> [ 4.073106] (3:slave@Jupiter) "Task_15" done
+> [ 4.273023] (6:slave@Bourassa) "Task_13" done
+> [ 4.291259] (4:slave@Fafard) "Task_16" done
+> [ 4.488896] (1:master@Tremblay) Sent
+> [ 4.488896] (1:master@Tremblay) Sending "Task_19" to "Tremblay"
+> [ 4.488896] (1:master@Tremblay) Hey ! It's me ! :)
+> [ 4.488896] (6:slave@Bourassa) Received "Task_18"
+> [ 4.488896] (6:slave@Bourassa) Processing "Task_18"
+> [ 4.491234] (2:slave@Tremblay) Received "Task_19"
+> [ 4.491234] (2:slave@Tremblay) Processing "Task_19"
+> [ 4.491234] (1:master@Tremblay) Sent
+> [ 4.491234] (1:master@Tremblay) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 4.506434] (3:slave@Jupiter) Received "finalize"
+> [ 4.506434] (3:slave@Jupiter) I'm done. See you!
+> [ 4.526985] (4:slave@Fafard) Received "finalize"
+> [ 4.526985] (4:slave@Fafard) I'm done. See you!
+> [ 5.000944] (2:slave@Tremblay) "Task_19" done
+> [ 5.077883] (5:slave@Ginette) "Task_17" done
+> [ 5.091115] (5:slave@Ginette) Received "finalize"
+> [ 5.091115] (5:slave@Ginette) I'm done. See you!
+> [ 5.519994] (6:slave@Bourassa) "Task_18" done
+> [ 5.540329] (6:slave@Bourassa) Received "finalize"
+> [ 5.540329] (6:slave@Bourassa) I'm done. See you!
+> [ 5.540485] (2:slave@Tremblay) Received "finalize"
+> [ 5.540485] (2:slave@Tremblay) I'm done. See you!
+> [ 5.540485] (1:master@Tremblay) Goodbye now!
+> [ 5.540485] (0:@) Simulation time 5.54049
+
+p Testing a master/slave example application with a forwarder module
+
+$ $SG_TEST_EXENV ${bindir:=.}/masterslave_forwarder$EXEEXT msg_platform.xml masterslave/deployment_masterslave_forwarder.xml --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
+> [ 0.000000] (1:master@Jacquelin) Got 5 slaves and 20 tasks to process
+> [ 0.000000] (1:master@Jacquelin) Sending "Task_0" to "iRMX"
+> [ 4.772530] (4:slave@iRMX) Received "Task_0"
+> [ 4.772530] (4:slave@iRMX) Processing "Task_0"
+> [ 4.772530] (1:master@Jacquelin) Sent
+> [ 4.772530] (1:master@Jacquelin) Sending "Task_1" to "Casavant"
+> [ 4.845345] (4:slave@iRMX) "Task_0" done
+> [ 6.900351] (3:forwarder@Casavant) Received "Task_1"
+> [ 6.900351] (3:forwarder@Casavant) Sending "Task_1" to "Robert"
+> [ 6.900351] (1:master@Jacquelin) Sent
+> [ 6.900351] (1:master@Jacquelin) Sending "Task_2" to "Bousquet"
+> [ 8.491392] (5:slave@Bousquet) Received "Task_2"
+> [ 8.491392] (5:slave@Bousquet) Processing "Task_2"
+> [ 8.491392] (1:master@Jacquelin) Sent
+> [ 8.491392] (1:master@Jacquelin) Sending "Task_3" to "Soucy"
+> [ 8.607896] (5:slave@Bousquet) "Task_2" done
+> [ 9.260549] (10:slave@Robert) Received "Task_1"
+> [ 9.260549] (10:slave@Robert) Processing "Task_1"
+> [ 9.296956] (10:slave@Robert) "Task_1" done
+> [ 10.755127] (6:slave@Soucy) Received "Task_3"
+> [ 10.755127] (6:slave@Soucy) Processing "Task_3"
+> [ 10.755127] (1:master@Jacquelin) Sent
+> [ 10.755127] (1:master@Jacquelin) Sending "Task_4" to "Jackson"
+> [ 10.791535] (6:slave@Soucy) "Task_3" done
+> [ 12.090242] (2:forwarder@Jackson) Received "Task_4"
+> [ 12.090242] (2:forwarder@Jackson) Sending "Task_4" to "Kuenning"
+> [ 12.090242] (1:master@Jacquelin) Sent
+> [ 12.090242] (1:master@Jacquelin) Sending "Task_5" to "iRMX"
+> [ 12.177813] (7:slave@Kuenning) Received "Task_4"
+> [ 12.177813] (7:slave@Kuenning) Processing "Task_4"
+> [ 12.236066] (7:slave@Kuenning) "Task_4" done
+> [ 16.862772] (4:slave@iRMX) Received "Task_5"
+> [ 16.862772] (4:slave@iRMX) Processing "Task_5"
+> [ 16.862772] (1:master@Jacquelin) Sent
+> [ 16.862772] (1:master@Jacquelin) Sending "Task_6" to "Casavant"
+> [ 16.935587] (4:slave@iRMX) "Task_5" done
+> [ 18.990593] (3:forwarder@Casavant) Received "Task_6"
+> [ 18.990593] (3:forwarder@Casavant) Sending "Task_6" to "Sirois"
+> [ 18.990593] (1:master@Jacquelin) Sent
+> [ 18.990593] (1:master@Jacquelin) Sending "Task_7" to "Bousquet"
+> [ 19.793293] (11:slave@Sirois) Received "Task_6"
+> [ 19.793293] (11:slave@Sirois) Processing "Task_6"
+> [ 19.836983] (11:slave@Sirois) "Task_6" done
+> [ 20.581634] (5:slave@Bousquet) Received "Task_7"
+> [ 20.581634] (5:slave@Bousquet) Processing "Task_7"
+> [ 20.581634] (1:master@Jacquelin) Sent
+> [ 20.581634] (1:master@Jacquelin) Sending "Task_8" to "Soucy"
+> [ 20.698138] (5:slave@Bousquet) "Task_7" done
+> [ 22.845369] (6:slave@Soucy) Received "Task_8"
+> [ 22.845369] (6:slave@Soucy) Processing "Task_8"
+> [ 22.845369] (1:master@Jacquelin) Sent
+> [ 22.845369] (1:master@Jacquelin) Sending "Task_9" to "Jackson"
+> [ 22.881777] (6:slave@Soucy) "Task_8" done
+> [ 24.180485] (2:forwarder@Jackson) Received "Task_9"
+> [ 24.180485] (2:forwarder@Jackson) Sending "Task_9" to "Browne"
+> [ 24.180485] (1:master@Jacquelin) Sent
+> [ 24.180485] (1:master@Jacquelin) Sending "Task_10" to "iRMX"
+> [ 27.009931] (8:slave@Browne) Received "Task_9"
+> [ 27.009931] (8:slave@Browne) Processing "Task_9"
+> [ 27.046339] (8:slave@Browne) "Task_9" done
+> [ 28.953014] (4:slave@iRMX) Received "Task_10"
+> [ 28.953014] (4:slave@iRMX) Processing "Task_10"
+> [ 28.953014] (1:master@Jacquelin) Sent
+> [ 28.953014] (1:master@Jacquelin) Sending "Task_11" to "Casavant"
+> [ 29.025830] (4:slave@iRMX) "Task_10" done
+> [ 31.080835] (3:forwarder@Casavant) Received "Task_11"
+> [ 31.080835] (3:forwarder@Casavant) Sending "Task_11" to "Monique"
+> [ 31.080835] (1:master@Jacquelin) Sent
+> [ 31.080835] (1:master@Jacquelin) Sending "Task_12" to "Bousquet"
+> [ 32.472434] (12:slave@Monique) Received "Task_11"
+> [ 32.472434] (12:slave@Monique) Processing "Task_11"
+> [ 32.516124] (12:slave@Monique) "Task_11" done
+> [ 32.671876] (5:slave@Bousquet) Received "Task_12"
+> [ 32.671876] (5:slave@Bousquet) Processing "Task_12"
+> [ 32.671876] (1:master@Jacquelin) Sent
+> [ 32.671876] (1:master@Jacquelin) Sending "Task_13" to "Soucy"
+> [ 32.788380] (5:slave@Bousquet) "Task_12" done
+> [ 34.935611] (6:slave@Soucy) Received "Task_13"
+> [ 34.935611] (6:slave@Soucy) Processing "Task_13"
+> [ 34.935611] (1:master@Jacquelin) Sent
+> [ 34.935611] (1:master@Jacquelin) Sending "Task_14" to "Jackson"
+> [ 34.972019] (6:slave@Soucy) "Task_13" done
+> [ 36.270727] (2:forwarder@Jackson) Received "Task_14"
+> [ 36.270727] (2:forwarder@Jackson) Sending "Task_14" to "Stephen"
+> [ 36.270727] (1:master@Jacquelin) Sent
+> [ 36.270727] (1:master@Jacquelin) Sending "Task_15" to "iRMX"
+> [ 40.508273] (9:slave@Stephen) Received "Task_14"
+> [ 40.508273] (9:slave@Stephen) Processing "Task_14"
+> [ 40.581088] (9:slave@Stephen) "Task_14" done
+> [ 41.043257] (4:slave@iRMX) Received "Task_15"
+> [ 41.043257] (4:slave@iRMX) Processing "Task_15"
+> [ 41.043257] (1:master@Jacquelin) Sent
+> [ 41.043257] (1:master@Jacquelin) Sending "Task_16" to "Casavant"
+> [ 41.116072] (4:slave@iRMX) "Task_15" done
+> [ 43.171078] (3:forwarder@Casavant) Received "Task_16"
+> [ 43.171078] (3:forwarder@Casavant) Sending "Task_16" to "Robert"
+> [ 43.171078] (1:master@Jacquelin) Sent
+> [ 43.171078] (1:master@Jacquelin) Sending "Task_17" to "Bousquet"
+> [ 44.762118] (5:slave@Bousquet) Received "Task_17"
+> [ 44.762118] (5:slave@Bousquet) Processing "Task_17"
+> [ 44.762118] (1:master@Jacquelin) Sent
+> [ 44.762118] (1:master@Jacquelin) Sending "Task_18" to "Soucy"
+> [ 44.878622] (5:slave@Bousquet) "Task_17" done
+> [ 45.531275] (10:slave@Robert) Received "Task_16"
+> [ 45.531275] (10:slave@Robert) Processing "Task_16"
+> [ 45.567683] (10:slave@Robert) "Task_16" done
+> [ 47.025854] (6:slave@Soucy) Received "Task_18"
+> [ 47.025854] (6:slave@Soucy) Processing "Task_18"
+> [ 47.025854] (1:master@Jacquelin) Sent
+> [ 47.025854] (1:master@Jacquelin) Sending "Task_19" to "Jackson"
+> [ 47.062262] (6:slave@Soucy) "Task_18" done
+> [ 48.360969] (2:forwarder@Jackson) Received "Task_19"
+> [ 48.360969] (2:forwarder@Jackson) Sending "Task_19" to "Kuenning"
+> [ 48.360969] (1:master@Jacquelin) Sent
+> [ 48.360969] (1:master@Jacquelin) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 48.448540] (7:slave@Kuenning) Received "Task_19"
+> [ 48.448540] (7:slave@Kuenning) Processing "Task_19"
+> [ 48.506793] (7:slave@Kuenning) "Task_19" done
+> [ 50.794024] (4:slave@iRMX) Received "finalize"
+> [ 50.794024] (4:slave@iRMX) I'm done. See you!
+> [ 51.878795] (3:forwarder@Casavant) Received "finalize"
+> [ 51.878795] (3:forwarder@Casavant) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 52.689914] (5:slave@Bousquet) Received "finalize"
+> [ 52.689914] (5:slave@Bousquet) I'm done. See you!
+> [ 53.082033] (10:slave@Robert) Received "finalize"
+> [ 53.082033] (10:slave@Robert) I'm done. See you!
+> [ 53.491253] (11:slave@Sirois) Received "finalize"
+> [ 53.491253] (11:slave@Sirois) I'm done. See you!
+> [ 53.843975] (6:slave@Soucy) Received "finalize"
+> [ 53.843975] (6:slave@Soucy) I'm done. See you!
+> [ 54.200695] (12:slave@Monique) Received "finalize"
+> [ 54.200695] (12:slave@Monique) I'm done. See you!
+> [ 54.200695] (3:forwarder@Casavant) I'm done. See you!
+> [ 54.524622] (2:forwarder@Jackson) Received "finalize"
+> [ 54.524622] (2:forwarder@Jackson) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 54.524622] (1:master@Jacquelin) Goodbye now!
+> [ 54.529294] (7:slave@Kuenning) Received "finalize"
+> [ 54.529294] (7:slave@Kuenning) I'm done. See you!
+> [ 55.971757] (8:slave@Browne) Received "finalize"
+> [ 55.971757] (8:slave@Browne) I'm done. See you!
+> [ 58.132075] (9:slave@Stephen) Received "finalize"
+> [ 58.132075] (9:slave@Stephen) I'm done. See you!
+> [ 58.132075] (2:forwarder@Jackson) I'm done. See you!
+> [ 58.132075] (0:@) Simulation time 58.1321
+
+p Testing a simple master/slave example application handling failures
+
+$ ${bindir:=.}/masterslave_failure$EXEEXT --log=xbt_cfg.thres:critical --log=no_loc small_platform_with_failures.xml masterslave/deployment_masterslave.xml --cfg=path:${srcdir} --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Cannot launch process 'slave' on failed host 'Fafard'
+> [ 0.000000] (1:master@Tremblay) Got 5 slave(s) :
+> [ 0.000000] (1:master@Tremblay) Jupiter
+> [ 0.000000] (1:master@Tremblay) Fafard
+> [ 0.000000] (1:master@Tremblay) Ginette
+> [ 0.000000] (1:master@Tremblay) Bourassa
+> [ 0.000000] (1:master@Tremblay) Tremblay
+> [ 0.000000] (1:master@Tremblay) Got 20 task to process :
+> [ 0.173500] (3:slave@Jupiter) Received "Task"
+> [ 0.173500] (3:slave@Jupiter) Communication time : "0.173500"
+> [ 0.173500] (3:slave@Jupiter) Processing "Task"
+> [ 0.173500] (1:master@Tremblay) Send completed
+> [ 0.828842] (3:slave@Jupiter) "Task" done
+> [ 1.100000] (3:slave@Jupiter) Gloups. The cpu on which I'm running just turned off!. See you!
+> [ 10.173500] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Fafard'. Nevermind. Let's keep going!
+> [ 10.313960] (4:slave@Ginette) Received "Task"
+> [ 10.313960] (4:slave@Ginette) Communication time : "0.140460"
+> [ 10.313960] (4:slave@Ginette) Processing "Task"
+> [ 10.313960] (1:master@Tremblay) Send completed
+> [ 10.529832] (5:slave@Bourassa) Received "Task"
+> [ 10.529832] (5:slave@Bourassa) Communication time : "0.215872"
+> [ 10.529832] (5:slave@Bourassa) Processing "Task"
+> [ 10.529832] (1:master@Tremblay) Send completed
+> [ 10.532280] (2:slave@Tremblay) Received "Task"
+> [ 10.532280] (2:slave@Tremblay) Communication time : "0.002448"
+> [ 10.532280] (2:slave@Tremblay) Processing "Task"
+> [ 10.532280] (1:master@Tremblay) Send completed
+> [ 11.041990] (2:slave@Tremblay) "Task" done
+> [ 11.345057] (4:slave@Ginette) "Task" done
+> [ 11.560930] (5:slave@Bourassa) "Task" done
+> [ 20.532280] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Jupiter'. Nevermind. Let's keep going!
+> [ 30.532280] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Fafard'. Nevermind. Let's keep going!
+> [ 30.532280] (4:slave@Ginette) Mmh. Something went wrong. Nevermind. Let's keep going!
+> [ 30.532280] (1:master@Tremblay) Mmh. Something went wrong with 'Ginette'. Nevermind. Let's keep going!
+> [ 30.748152] (5:slave@Bourassa) Received "Task"
+> [ 30.748152] (5:slave@Bourassa) Communication time : "0.215872"
+> [ 30.748152] (5:slave@Bourassa) Processing "Task"
+> [ 30.748152] (1:master@Tremblay) Send completed
+> [ 30.750600] (2:slave@Tremblay) Received "Task"
+> [ 30.750600] (2:slave@Tremblay) Communication time : "0.002448"
+> [ 30.750600] (2:slave@Tremblay) Processing "Task"
+> [ 30.750600] (1:master@Tremblay) Send completed
+> [ 31.260310] (2:slave@Tremblay) "Task" done
+> [ 31.779250] (5:slave@Bourassa) "Task" done
+> [ 40.750600] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Jupiter'. Nevermind. Let's keep going!
+> [ 50.750600] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Fafard'. Nevermind. Let's keep going!
+> [ 50.891059] (4:slave@Ginette) Received "Task"
+> [ 50.891059] (4:slave@Ginette) Communication time : "0.140460"
+> [ 50.891059] (4:slave@Ginette) Processing "Task"
+> [ 50.891059] (1:master@Tremblay) Send completed
+> [ 51.106932] (5:slave@Bourassa) Received "Task"
+> [ 51.106932] (5:slave@Bourassa) Communication time : "0.215872"
+> [ 51.106932] (5:slave@Bourassa) Processing "Task"
+> [ 51.106932] (1:master@Tremblay) Send completed
+> [ 51.109379] (2:slave@Tremblay) Received "Task"
+> [ 51.109379] (2:slave@Tremblay) Communication time : "0.002448"
+> [ 51.109379] (2:slave@Tremblay) Processing "Task"
+> [ 51.109379] (1:master@Tremblay) Send completed
+> [ 51.619089] (2:slave@Tremblay) "Task" done
+> [ 51.922157] (4:slave@Ginette) "Task" done
+> [ 52.138029] (5:slave@Bourassa) "Task" done
+> [ 61.109379] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Jupiter'. Nevermind. Let's keep going!
+> [ 71.109379] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Fafard'. Nevermind. Let's keep going!
+> [ 71.249839] (4:slave@Ginette) Received "Task"
+> [ 71.249839] (4:slave@Ginette) Communication time : "0.140460"
+> [ 71.249839] (4:slave@Ginette) Processing "Task"
+> [ 71.249839] (1:master@Tremblay) Send completed
+> [ 71.465711] (5:slave@Bourassa) Received "Task"
+> [ 71.465711] (5:slave@Bourassa) Communication time : "0.215872"
+> [ 71.465711] (5:slave@Bourassa) Processing "Task"
+> [ 71.465711] (1:master@Tremblay) Send completed
+> [ 71.468159] (2:slave@Tremblay) Received "Task"
+> [ 71.468159] (2:slave@Tremblay) Communication time : "0.002448"
+> [ 71.468159] (2:slave@Tremblay) Processing "Task"
+> [ 71.468159] (1:master@Tremblay) Send completed
+> [ 71.468159] (1:master@Tremblay) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 71.977869] (2:slave@Tremblay) "Task" done
+> [ 72.280937] (4:slave@Ginette) "Task" done
+> [ 72.468159] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Jupiter'. Nevermind. Let's keep going!
+> [ 72.496809] (5:slave@Bourassa) "Task" done
+> [ 73.468159] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Fafard'. Nevermind. Let's keep going!
+> [ 73.481391] (4:slave@Ginette) Received "finalize"
+> [ 73.481391] (4:slave@Ginette) I'm done. See you!
+> [ 73.501726] (5:slave@Bourassa) Received "finalize"
+> [ 73.501726] (5:slave@Bourassa) I'm done. See you!
+> [ 73.501882] (2:slave@Tremblay) Received "finalize"
+> [ 73.501882] (2:slave@Tremblay) I'm done. See you!
+> [ 73.501882] (1:master@Tremblay) Goodbye now!
+> [ 73.501882] (0:@) Simulation time 73.5019
+
+p Testing the bypassing of the flexml parser
+
+$ $SG_TEST_EXENV ${bindir:=.}/masterslave_bypass --log=no_loc --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
+> [ 0.000000] (0:@) Bypassing the XML parser since surf_parse_open received a NULL pointer. If it is not what you want, go fix your code.
+> [ 0.000000] (2:slave@host B) I'm a slave
+> [ 0.000000] (1:master@host A) Got 1 slave(s) :
+> [ 0.000000] (1:master@host A) host B
+> [ 0.000000] (1:master@host A) Got 20 task to process :
+> [ 0.000000] (1:master@host A) "Task_0"
+> [ 0.000000] (1:master@host A) "Task_1"
+> [ 0.000000] (1:master@host A) "Task_2"
+> [ 0.000000] (1:master@host A) "Task_3"
+> [ 0.000000] (1:master@host A) "Task_4"
+> [ 0.000000] (1:master@host A) "Task_5"
+> [ 0.000000] (1:master@host A) "Task_6"
+> [ 0.000000] (1:master@host A) "Task_7"
+> [ 0.000000] (1:master@host A) "Task_8"
+> [ 0.000000] (1:master@host A) "Task_9"
+> [ 0.000000] (1:master@host A) "Task_10"
+> [ 0.000000] (1:master@host A) "Task_11"
+> [ 0.000000] (1:master@host A) "Task_12"
+> [ 0.000000] (1:master@host A) "Task_13"
+> [ 0.000000] (1:master@host A) "Task_14"
+> [ 0.000000] (1:master@host A) "Task_15"
+> [ 0.000000] (1:master@host A) "Task_16"
+> [ 0.000000] (1:master@host A) "Task_17"
+> [ 0.000000] (1:master@host A) "Task_18"
+> [ 0.000000] (1:master@host A) "Task_19"
+> [ 0.000000] (1:master@host A) Sending "Task_0" to "host B"
+> [ 4.080000] (2:slave@host B) Received "Task_0"
+> [ 4.080000] (2:slave@host B) Processing "Task_0"
+> [ 4.080000] (1:master@host A) Send completed
+> [ 4.080000] (1:master@host A) Sending "Task_1" to "host B"
+> [ 4.130000] (2:slave@host B) "Task_0" done
+> [ 8.210000] (1:master@host A) Send completed
+> [ 8.210000] (1:master@host A) Sending "Task_2" to "host B"
+> [ 8.210000] (2:slave@host B) Received "Task_1"
+> [ 8.210000] (2:slave@host B) Processing "Task_1"
+> [ 8.260000] (2:slave@host B) "Task_1" done
+> [ 12.340000] (1:master@host A) Send completed
+> [ 12.340000] (1:master@host A) Sending "Task_3" to "host B"
+> [ 12.340000] (2:slave@host B) Received "Task_2"
+> [ 12.340000] (2:slave@host B) Processing "Task_2"
+> [ 12.390000] (2:slave@host B) "Task_2" done
+> [ 16.470000] (1:master@host A) Send completed
+> [ 16.470000] (1:master@host A) Sending "Task_4" to "host B"
+> [ 16.470000] (2:slave@host B) Received "Task_3"
+> [ 16.470000] (2:slave@host B) Processing "Task_3"
+> [ 16.520000] (2:slave@host B) "Task_3" done
+> [ 20.600000] (1:master@host A) Send completed
+> [ 20.600000] (1:master@host A) Sending "Task_5" to "host B"
+> [ 20.600000] (2:slave@host B) Received "Task_4"
+> [ 20.600000] (2:slave@host B) Processing "Task_4"
+> [ 20.650000] (2:slave@host B) "Task_4" done
+> [ 24.730000] (1:master@host A) Send completed
+> [ 24.730000] (1:master@host A) Sending "Task_6" to "host B"
+> [ 24.730000] (2:slave@host B) Received "Task_5"
+> [ 24.730000] (2:slave@host B) Processing "Task_5"
+> [ 24.780000] (2:slave@host B) "Task_5" done
+> [ 28.860000] (1:master@host A) Send completed
+> [ 28.860000] (1:master@host A) Sending "Task_7" to "host B"
+> [ 28.860000] (2:slave@host B) Received "Task_6"
+> [ 28.860000] (2:slave@host B) Processing "Task_6"
+> [ 28.910000] (2:slave@host B) "Task_6" done
+> [ 32.990000] (1:master@host A) Send completed
+> [ 32.990000] (1:master@host A) Sending "Task_8" to "host B"
+> [ 32.990000] (2:slave@host B) Received "Task_7"
+> [ 32.990000] (2:slave@host B) Processing "Task_7"
+> [ 33.040000] (2:slave@host B) "Task_7" done
+> [ 37.120000] (1:master@host A) Send completed
+> [ 37.120000] (1:master@host A) Sending "Task_9" to "host B"
+> [ 37.120000] (2:slave@host B) Received "Task_8"
+> [ 37.120000] (2:slave@host B) Processing "Task_8"
+> [ 37.170000] (2:slave@host B) "Task_8" done
+> [ 41.250000] (1:master@host A) Send completed
+> [ 41.250000] (1:master@host A) Sending "Task_10" to "host B"
+> [ 41.250000] (2:slave@host B) Received "Task_9"
+> [ 41.250000] (2:slave@host B) Processing "Task_9"
+> [ 41.300000] (2:slave@host B) "Task_9" done
+> [ 45.380000] (1:master@host A) Send completed
+> [ 45.380000] (1:master@host A) Sending "Task_11" to "host B"
+> [ 45.380000] (2:slave@host B) Received "Task_10"
+> [ 45.380000] (2:slave@host B) Processing "Task_10"
+> [ 45.430000] (2:slave@host B) "Task_10" done
+> [ 49.510000] (1:master@host A) Send completed
+> [ 49.510000] (1:master@host A) Sending "Task_12" to "host B"
+> [ 49.510000] (2:slave@host B) Received "Task_11"
+> [ 49.510000] (2:slave@host B) Processing "Task_11"
+> [ 49.560000] (2:slave@host B) "Task_11" done
+> [ 53.640000] (1:master@host A) Send completed
+> [ 53.640000] (1:master@host A) Sending "Task_13" to "host B"
+> [ 53.640000] (2:slave@host B) Received "Task_12"
+> [ 53.640000] (2:slave@host B) Processing "Task_12"
+> [ 53.690000] (2:slave@host B) "Task_12" done
+> [ 57.770000] (1:master@host A) Send completed
+> [ 57.770000] (1:master@host A) Sending "Task_14" to "host B"
+> [ 57.770000] (2:slave@host B) Received "Task_13"
+> [ 57.770000] (2:slave@host B) Processing "Task_13"
+> [ 57.820000] (2:slave@host B) "Task_13" done
+> [ 61.900000] (1:master@host A) Send completed
+> [ 61.900000] (1:master@host A) Sending "Task_15" to "host B"
+> [ 61.900000] (2:slave@host B) Received "Task_14"
+> [ 61.900000] (2:slave@host B) Processing "Task_14"
+> [ 61.950000] (2:slave@host B) "Task_14" done
+> [ 66.030000] (1:master@host A) Send completed
+> [ 66.030000] (1:master@host A) Sending "Task_16" to "host B"
+> [ 66.030000] (2:slave@host B) Received "Task_15"
+> [ 66.030000] (2:slave@host B) Processing "Task_15"
+> [ 66.080000] (2:slave@host B) "Task_15" done
+> [ 70.160000] (1:master@host A) Send completed
+> [ 70.160000] (1:master@host A) Sending "Task_17" to "host B"
+> [ 70.160000] (2:slave@host B) Received "Task_16"
+> [ 70.160000] (2:slave@host B) Processing "Task_16"
+> [ 70.210000] (2:slave@host B) "Task_16" done
+> [ 74.290000] (1:master@host A) Send completed
+> [ 74.290000] (1:master@host A) Sending "Task_18" to "host B"
+> [ 74.290000] (2:slave@host B) Received "Task_17"
+> [ 74.290000] (2:slave@host B) Processing "Task_17"
+> [ 74.340000] (2:slave@host B) "Task_17" done
+> [ 78.420000] (1:master@host A) Send completed
+> [ 78.420000] (1:master@host A) Sending "Task_19" to "host B"
+> [ 78.420000] (2:slave@host B) Received "Task_18"
+> [ 78.420000] (2:slave@host B) Processing "Task_18"
+> [ 78.470000] (2:slave@host B) "Task_18" done
+> [ 82.550000] (1:master@host A) Send completed
+> [ 82.550000] (1:master@host A) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 82.550000] (2:slave@host B) Received "Task_19"
+> [ 82.550000] (2:slave@host B) Processing "Task_19"
+> [ 82.600000] (2:slave@host B) "Task_19" done
+> [ 84.680000] (1:master@host A) Goodbye now!
+> [ 84.680000] (2:slave@host B) Received "finalize"
+> [ 84.680000] (2:slave@host B) I'm done. See you!
+> [ 84.680000] (0:@) Simulation time 84.68
--- /dev/null
+#! ./tesh
+
+p Testing a simple master/slave example application
+
+$ $SG_TEST_EXENV masterslave/masterslave_forwarder$EXEEXT ${srcdir:=.}/small_platform_with_routers.xml ${srcdir:=.}/masterslave/deployment_masterslave.xml --trace "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (1:master@Tremblay) Got 5 slaves and 20 tasks to process
+> [ 0.000000] (1:master@Tremblay) Sending "Task_0" to "Jupiter"
+> [ 0.173500] (3:slave@Jupiter) Received "Task_0"
+> [ 0.173500] (3:slave@Jupiter) Processing "Task_0"
+> [ 0.173500] (1:master@Tremblay) Sent
+> [ 0.173500] (1:master@Tremblay) Sending "Task_1" to "Fafard"
+> [ 0.391653] (4:slave@Fafard) Received "Task_1"
+> [ 0.391653] (4:slave@Fafard) Processing "Task_1"
+> [ 0.391653] (1:master@Tremblay) Sent
+> [ 0.391653] (1:master@Tremblay) Sending "Task_2" to "Ginette"
+> [ 0.532113] (5:slave@Ginette) Received "Task_2"
+> [ 0.532113] (5:slave@Ginette) Processing "Task_2"
+> [ 0.532113] (1:master@Tremblay) Sent
+> [ 0.532113] (1:master@Tremblay) Sending "Task_3" to "Bourassa"
+> [ 0.747985] (6:slave@Bourassa) Received "Task_3"
+> [ 0.747985] (6:slave@Bourassa) Processing "Task_3"
+> [ 0.747985] (1:master@Tremblay) Sent
+> [ 0.747985] (1:master@Tremblay) Sending "Task_4" to "Tremblay"
+> [ 0.747985] (1:master@Tremblay) Hey ! It's me ! :)
+> [ 0.750324] (2:slave@Tremblay) Received "Task_4"
+> [ 0.750324] (2:slave@Tremblay) Processing "Task_4"
+> [ 0.750324] (1:master@Tremblay) Sent
+> [ 0.750324] (1:master@Tremblay) Sending "Task_5" to "Jupiter"
+> [ 0.828842] (3:slave@Jupiter) "Task_0" done
+> [ 1.002342] (1:master@Tremblay) Sent
+> [ 1.002342] (1:master@Tremblay) Sending "Task_6" to "Fafard"
+> [ 1.002342] (3:slave@Jupiter) Received "Task_5"
+> [ 1.002342] (3:slave@Jupiter) Processing "Task_5"
+> [ 1.046995] (4:slave@Fafard) "Task_1" done
+> [ 1.260034] (2:slave@Tremblay) "Task_4" done
+> [ 1.265149] (1:master@Tremblay) Sent
+> [ 1.265149] (1:master@Tremblay) Sending "Task_7" to "Ginette"
+> [ 1.265149] (4:slave@Fafard) Received "Task_6"
+> [ 1.265149] (4:slave@Fafard) Processing "Task_6"
+> [ 1.563211] (5:slave@Ginette) "Task_2" done
+> [ 1.657685] (3:slave@Jupiter) "Task_5" done
+> [ 1.703670] (1:master@Tremblay) Sent
+> [ 1.703670] (1:master@Tremblay) Sending "Task_8" to "Bourassa"
+> [ 1.703670] (5:slave@Ginette) Received "Task_7"
+> [ 1.703670] (5:slave@Ginette) Processing "Task_7"
+> [ 1.779083] (6:slave@Bourassa) "Task_3" done
+> [ 1.920491] (4:slave@Fafard) "Task_6" done
+> [ 1.994955] (1:master@Tremblay) Sent
+> [ 1.994955] (1:master@Tremblay) Sending "Task_9" to "Tremblay"
+> [ 1.994955] (1:master@Tremblay) Hey ! It's me ! :)
+> [ 1.994955] (6:slave@Bourassa) Received "Task_8"
+> [ 1.994955] (6:slave@Bourassa) Processing "Task_8"
+> [ 1.997294] (2:slave@Tremblay) Received "Task_9"
+> [ 1.997294] (2:slave@Tremblay) Processing "Task_9"
+> [ 1.997294] (1:master@Tremblay) Sent
+> [ 1.997294] (1:master@Tremblay) Sending "Task_10" to "Jupiter"
+> [ 2.170794] (3:slave@Jupiter) Received "Task_10"
+> [ 2.170794] (3:slave@Jupiter) Processing "Task_10"
+> [ 2.170794] (1:master@Tremblay) Sent
+> [ 2.170794] (1:master@Tremblay) Sending "Task_11" to "Fafard"
+> [ 2.388947] (4:slave@Fafard) Received "Task_11"
+> [ 2.388947] (4:slave@Fafard) Processing "Task_11"
+> [ 2.388947] (1:master@Tremblay) Sent
+> [ 2.388947] (1:master@Tremblay) Sending "Task_12" to "Ginette"
+> [ 2.507004] (2:slave@Tremblay) "Task_9" done
+> [ 2.734768] (5:slave@Ginette) "Task_7" done
+> [ 2.826136] (3:slave@Jupiter) "Task_10" done
+> [ 2.875228] (1:master@Tremblay) Sent
+> [ 2.875228] (1:master@Tremblay) Sending "Task_13" to "Bourassa"
+> [ 2.875228] (5:slave@Ginette) Received "Task_12"
+> [ 2.875228] (5:slave@Ginette) Processing "Task_12"
+> [ 3.026053] (6:slave@Bourassa) "Task_8" done
+> [ 3.044289] (4:slave@Fafard) "Task_11" done
+> [ 3.241925] (1:master@Tremblay) Sent
+> [ 3.241925] (1:master@Tremblay) Sending "Task_14" to "Tremblay"
+> [ 3.241925] (1:master@Tremblay) Hey ! It's me ! :)
+> [ 3.241925] (6:slave@Bourassa) Received "Task_13"
+> [ 3.241925] (6:slave@Bourassa) Processing "Task_13"
+> [ 3.244264] (2:slave@Tremblay) Received "Task_14"
+> [ 3.244264] (2:slave@Tremblay) Processing "Task_14"
+> [ 3.244264] (1:master@Tremblay) Sent
+> [ 3.244264] (1:master@Tremblay) Sending "Task_15" to "Jupiter"
+> [ 3.417764] (3:slave@Jupiter) Received "Task_15"
+> [ 3.417764] (3:slave@Jupiter) Processing "Task_15"
+> [ 3.417764] (1:master@Tremblay) Sent
+> [ 3.417764] (1:master@Tremblay) Sending "Task_16" to "Fafard"
+> [ 3.635917] (4:slave@Fafard) Received "Task_16"
+> [ 3.635917] (4:slave@Fafard) Processing "Task_16"
+> [ 3.635917] (1:master@Tremblay) Sent
+> [ 3.635917] (1:master@Tremblay) Sending "Task_17" to "Ginette"
+> [ 3.753974] (2:slave@Tremblay) "Task_14" done
+> [ 3.906326] (5:slave@Ginette) "Task_12" done
+> [ 4.046785] (1:master@Tremblay) Sent
+> [ 4.046785] (1:master@Tremblay) Sending "Task_18" to "Bourassa"
+> [ 4.046785] (5:slave@Ginette) Received "Task_17"
+> [ 4.046785] (5:slave@Ginette) Processing "Task_17"
+> [ 4.073106] (3:slave@Jupiter) "Task_15" done
+> [ 4.273023] (6:slave@Bourassa) "Task_13" done
+> [ 4.291259] (4:slave@Fafard) "Task_16" done
+> [ 4.488896] (1:master@Tremblay) Sent
+> [ 4.488896] (1:master@Tremblay) Sending "Task_19" to "Tremblay"
+> [ 4.488896] (1:master@Tremblay) Hey ! It's me ! :)
+> [ 4.488896] (6:slave@Bourassa) Received "Task_18"
+> [ 4.488896] (6:slave@Bourassa) Processing "Task_18"
+> [ 4.491234] (2:slave@Tremblay) Received "Task_19"
+> [ 4.491234] (2:slave@Tremblay) Processing "Task_19"
+> [ 4.491234] (1:master@Tremblay) Sent
+> [ 4.491234] (1:master@Tremblay) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 4.506434] (3:slave@Jupiter) Received "finalize"
+> [ 4.506434] (3:slave@Jupiter) I'm done. See you!
+> [ 4.526985] (4:slave@Fafard) Received "finalize"
+> [ 4.526985] (4:slave@Fafard) I'm done. See you!
+> [ 5.000944] (2:slave@Tremblay) "Task_19" done
+> [ 5.077883] (5:slave@Ginette) "Task_17" done
+> [ 5.091115] (5:slave@Ginette) Received "finalize"
+> [ 5.091115] (5:slave@Ginette) I'm done. See you!
+> [ 5.519994] (6:slave@Bourassa) "Task_18" done
+> [ 5.540329] (6:slave@Bourassa) Received "finalize"
+> [ 5.540329] (6:slave@Bourassa) I'm done. See you!
+> [ 5.540485] (2:slave@Tremblay) Received "finalize"
+> [ 5.540485] (2:slave@Tremblay) I'm done. See you!
+> [ 5.540485] (1:master@Tremblay) Goodbye now!
+> [ 5.540485] (0:@) Simulation time 5.54049
p Testing a simple master/slave example application handling failures
! output sort
-$ masterslave/masterslave_failure$EXEEXT --log=xbt_cfg.thres:critical --log=no_loc ${srcdir:=.}/small_platform_with_failures.xml ${srcdir:=.}/masterslave/deployment_masterslave.xml --cfg=path:${srcdir} "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ masterslave/masterslave_failure$EXEEXT --log=xbt_cfg.thres:critical --log=no_loc ${srcdir:=.}/small_platform_with_failures.xml ${srcdir:=.}/masterslave/deployment_masterslave.xml --cfg=path:${srcdir} --cfg=network/crosstraffic:0 "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Cannot launch process 'slave' on failed host 'Fafard'
> [ 0.000000] (1:master@Tremblay) Got 5 slave(s) :
> [ 0.000000] (1:master@Tremblay) Jupiter
--- /dev/null
+#! ./tesh
+
+p Testing a simple master/slave example application handling failures
+
+$ masterslave/masterslave_failure$EXEEXT --log=xbt_cfg.thres:critical --log=no_loc ${srcdir:=.}/small_platform_with_failures.xml ${srcdir:=.}/masterslave/deployment_masterslave.xml --cfg=path:${srcdir} "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Cannot launch process 'slave' on failed host 'Fafard'
+> [ 0.000000] (1:master@Tremblay) Got 5 slave(s) :
+> [ 0.000000] (1:master@Tremblay) Jupiter
+> [ 0.000000] (1:master@Tremblay) Fafard
+> [ 0.000000] (1:master@Tremblay) Ginette
+> [ 0.000000] (1:master@Tremblay) Bourassa
+> [ 0.000000] (1:master@Tremblay) Tremblay
+> [ 0.000000] (1:master@Tremblay) Got 20 task to process :
+> [ 0.173500] (3:slave@Jupiter) Received "Task"
+> [ 0.173500] (3:slave@Jupiter) Communication time : "0.173500"
+> [ 0.173500] (3:slave@Jupiter) Processing "Task"
+> [ 0.173500] (1:master@Tremblay) Send completed
+> [ 0.828842] (3:slave@Jupiter) "Task" done
+> [ 1.100000] (3:slave@Jupiter) Gloups. The cpu on which I'm running just turned off!. See you!
+> [ 10.173500] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Fafard'. Nevermind. Let's keep going!
+> [ 10.313960] (4:slave@Ginette) Received "Task"
+> [ 10.313960] (4:slave@Ginette) Communication time : "0.140460"
+> [ 10.313960] (4:slave@Ginette) Processing "Task"
+> [ 10.313960] (1:master@Tremblay) Send completed
+> [ 10.529832] (5:slave@Bourassa) Received "Task"
+> [ 10.529832] (5:slave@Bourassa) Communication time : "0.215872"
+> [ 10.529832] (5:slave@Bourassa) Processing "Task"
+> [ 10.529832] (1:master@Tremblay) Send completed
+> [ 10.532280] (2:slave@Tremblay) Received "Task"
+> [ 10.532280] (2:slave@Tremblay) Communication time : "0.002448"
+> [ 10.532280] (2:slave@Tremblay) Processing "Task"
+> [ 10.532280] (1:master@Tremblay) Send completed
+> [ 11.041990] (2:slave@Tremblay) "Task" done
+> [ 11.345057] (4:slave@Ginette) "Task" done
+> [ 11.560930] (5:slave@Bourassa) "Task" done
+> [ 20.532280] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Jupiter'. Nevermind. Let's keep going!
+> [ 30.532280] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Fafard'. Nevermind. Let's keep going!
+> [ 30.532280] (4:slave@Ginette) Mmh. Something went wrong. Nevermind. Let's keep going!
+> [ 30.532280] (1:master@Tremblay) Mmh. Something went wrong with 'Ginette'. Nevermind. Let's keep going!
+> [ 30.748152] (5:slave@Bourassa) Received "Task"
+> [ 30.748152] (5:slave@Bourassa) Communication time : "0.215872"
+> [ 30.748152] (5:slave@Bourassa) Processing "Task"
+> [ 30.748152] (1:master@Tremblay) Send completed
+> [ 30.750600] (2:slave@Tremblay) Received "Task"
+> [ 30.750600] (2:slave@Tremblay) Communication time : "0.002448"
+> [ 30.750600] (2:slave@Tremblay) Processing "Task"
+> [ 30.750600] (1:master@Tremblay) Send completed
+> [ 31.260310] (2:slave@Tremblay) "Task" done
+> [ 31.779250] (5:slave@Bourassa) "Task" done
+> [ 40.750600] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Jupiter'. Nevermind. Let's keep going!
+> [ 50.750600] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Fafard'. Nevermind. Let's keep going!
+> [ 50.891059] (4:slave@Ginette) Received "Task"
+> [ 50.891059] (4:slave@Ginette) Communication time : "0.140460"
+> [ 50.891059] (4:slave@Ginette) Processing "Task"
+> [ 50.891059] (1:master@Tremblay) Send completed
+> [ 51.106932] (5:slave@Bourassa) Received "Task"
+> [ 51.106932] (5:slave@Bourassa) Communication time : "0.215872"
+> [ 51.106932] (5:slave@Bourassa) Processing "Task"
+> [ 51.106932] (1:master@Tremblay) Send completed
+> [ 51.109379] (2:slave@Tremblay) Received "Task"
+> [ 51.109379] (2:slave@Tremblay) Communication time : "0.002448"
+> [ 51.109379] (2:slave@Tremblay) Processing "Task"
+> [ 51.109379] (1:master@Tremblay) Send completed
+> [ 51.619089] (2:slave@Tremblay) "Task" done
+> [ 51.922157] (4:slave@Ginette) "Task" done
+> [ 52.138029] (5:slave@Bourassa) "Task" done
+> [ 61.109379] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Jupiter'. Nevermind. Let's keep going!
+> [ 71.109379] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Fafard'. Nevermind. Let's keep going!
+> [ 71.249839] (4:slave@Ginette) Received "Task"
+> [ 71.249839] (4:slave@Ginette) Communication time : "0.140460"
+> [ 71.249839] (4:slave@Ginette) Processing "Task"
+> [ 71.249839] (1:master@Tremblay) Send completed
+> [ 71.465711] (5:slave@Bourassa) Received "Task"
+> [ 71.465711] (5:slave@Bourassa) Communication time : "0.215872"
+> [ 71.465711] (5:slave@Bourassa) Processing "Task"
+> [ 71.465711] (1:master@Tremblay) Send completed
+> [ 71.468159] (2:slave@Tremblay) Received "Task"
+> [ 71.468159] (2:slave@Tremblay) Communication time : "0.002448"
+> [ 71.468159] (2:slave@Tremblay) Processing "Task"
+> [ 71.468159] (1:master@Tremblay) Send completed
+> [ 71.468159] (1:master@Tremblay) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 71.977869] (2:slave@Tremblay) "Task" done
+> [ 72.280937] (4:slave@Ginette) "Task" done
+> [ 72.468159] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Jupiter'. Nevermind. Let's keep going!
+> [ 72.496809] (5:slave@Bourassa) "Task" done
+> [ 73.468159] (1:master@Tremblay) Mmh. Got timeouted while speaking to 'Fafard'. Nevermind. Let's keep going!
+> [ 73.481391] (4:slave@Ginette) Received "finalize"
+> [ 73.481391] (4:slave@Ginette) I'm done. See you!
+> [ 73.501726] (5:slave@Bourassa) Received "finalize"
+> [ 73.501726] (5:slave@Bourassa) I'm done. See you!
+> [ 73.501882] (2:slave@Tremblay) Received "finalize"
+> [ 73.501882] (2:slave@Tremblay) I'm done. See you!
+> [ 73.501882] (1:master@Tremblay) Goodbye now!
+> [ 73.501882] (0:@) Simulation time 73.5019
p Testing a master/slave example application with a forwarder module
! output sort
-$ $SG_TEST_EXENV masterslave/masterslave_forwarder$EXEEXT ${srcdir:=.}/msg_platform.xml ${srcdir:=.}/masterslave/deployment_masterslave_forwarder.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV masterslave/masterslave_forwarder$EXEEXT ${srcdir:=.}/msg_platform.xml ${srcdir:=.}/masterslave/deployment_masterslave_forwarder.xml --cfg=network/crosstraffic:0 "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'network/crosstraffic' to '0'
> [ 0.000000] (1:master@Jacquelin) Got 5 slaves and 20 tasks to process
> [ 0.000000] (1:master@Jacquelin) Sending "Task_0" to "iRMX"
> [ 4.772530] (1:master@Jacquelin) Sent
--- /dev/null
+#! ./tesh
+
+p Testing a master/slave example application with a forwarder module
+
+$ $SG_TEST_EXENV masterslave/masterslave_forwarder$EXEEXT ${srcdir:=.}/msg_platform.xml ${srcdir:=.}/masterslave/deployment_masterslave_forwarder.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (1:master@Jacquelin) Got 5 slaves and 20 tasks to process
+> [ 0.000000] (1:master@Jacquelin) Sending "Task_0" to "iRMX"
+> [ 4.772530] (4:slave@iRMX) Received "Task_0"
+> [ 4.772530] (4:slave@iRMX) Processing "Task_0"
+> [ 4.772530] (1:master@Jacquelin) Sent
+> [ 4.772530] (1:master@Jacquelin) Sending "Task_1" to "Casavant"
+> [ 4.845345] (4:slave@iRMX) "Task_0" done
+> [ 6.900351] (3:forwarder@Casavant) Received "Task_1"
+> [ 6.900351] (3:forwarder@Casavant) Sending "Task_1" to "Robert"
+> [ 6.900351] (1:master@Jacquelin) Sent
+> [ 6.900351] (1:master@Jacquelin) Sending "Task_2" to "Bousquet"
+> [ 8.491392] (5:slave@Bousquet) Received "Task_2"
+> [ 8.491392] (5:slave@Bousquet) Processing "Task_2"
+> [ 8.491392] (1:master@Jacquelin) Sent
+> [ 8.491392] (1:master@Jacquelin) Sending "Task_3" to "Soucy"
+> [ 8.607896] (5:slave@Bousquet) "Task_2" done
+> [ 9.260549] (10:slave@Robert) Received "Task_1"
+> [ 9.260549] (10:slave@Robert) Processing "Task_1"
+> [ 9.296956] (10:slave@Robert) "Task_1" done
+> [ 10.755127] (6:slave@Soucy) Received "Task_3"
+> [ 10.755127] (6:slave@Soucy) Processing "Task_3"
+> [ 10.755127] (1:master@Jacquelin) Sent
+> [ 10.755127] (1:master@Jacquelin) Sending "Task_4" to "Jackson"
+> [ 10.791535] (6:slave@Soucy) "Task_3" done
+> [ 12.090242] (2:forwarder@Jackson) Received "Task_4"
+> [ 12.090242] (2:forwarder@Jackson) Sending "Task_4" to "Kuenning"
+> [ 12.090242] (1:master@Jacquelin) Sent
+> [ 12.090242] (1:master@Jacquelin) Sending "Task_5" to "iRMX"
+> [ 12.177813] (7:slave@Kuenning) Received "Task_4"
+> [ 12.177813] (7:slave@Kuenning) Processing "Task_4"
+> [ 12.236066] (7:slave@Kuenning) "Task_4" done
+> [ 16.862772] (4:slave@iRMX) Received "Task_5"
+> [ 16.862772] (4:slave@iRMX) Processing "Task_5"
+> [ 16.862772] (1:master@Jacquelin) Sent
+> [ 16.862772] (1:master@Jacquelin) Sending "Task_6" to "Casavant"
+> [ 16.935587] (4:slave@iRMX) "Task_5" done
+> [ 18.990593] (3:forwarder@Casavant) Received "Task_6"
+> [ 18.990593] (3:forwarder@Casavant) Sending "Task_6" to "Sirois"
+> [ 18.990593] (1:master@Jacquelin) Sent
+> [ 18.990593] (1:master@Jacquelin) Sending "Task_7" to "Bousquet"
+> [ 19.793293] (11:slave@Sirois) Received "Task_6"
+> [ 19.793293] (11:slave@Sirois) Processing "Task_6"
+> [ 19.836983] (11:slave@Sirois) "Task_6" done
+> [ 20.581634] (5:slave@Bousquet) Received "Task_7"
+> [ 20.581634] (5:slave@Bousquet) Processing "Task_7"
+> [ 20.581634] (1:master@Jacquelin) Sent
+> [ 20.581634] (1:master@Jacquelin) Sending "Task_8" to "Soucy"
+> [ 20.698138] (5:slave@Bousquet) "Task_7" done
+> [ 22.845369] (6:slave@Soucy) Received "Task_8"
+> [ 22.845369] (6:slave@Soucy) Processing "Task_8"
+> [ 22.845369] (1:master@Jacquelin) Sent
+> [ 22.845369] (1:master@Jacquelin) Sending "Task_9" to "Jackson"
+> [ 22.881777] (6:slave@Soucy) "Task_8" done
+> [ 24.180485] (2:forwarder@Jackson) Received "Task_9"
+> [ 24.180485] (2:forwarder@Jackson) Sending "Task_9" to "Browne"
+> [ 24.180485] (1:master@Jacquelin) Sent
+> [ 24.180485] (1:master@Jacquelin) Sending "Task_10" to "iRMX"
+> [ 27.009931] (8:slave@Browne) Received "Task_9"
+> [ 27.009931] (8:slave@Browne) Processing "Task_9"
+> [ 27.046339] (8:slave@Browne) "Task_9" done
+> [ 28.953014] (4:slave@iRMX) Received "Task_10"
+> [ 28.953014] (4:slave@iRMX) Processing "Task_10"
+> [ 28.953014] (1:master@Jacquelin) Sent
+> [ 28.953014] (1:master@Jacquelin) Sending "Task_11" to "Casavant"
+> [ 29.025830] (4:slave@iRMX) "Task_10" done
+> [ 31.080835] (3:forwarder@Casavant) Received "Task_11"
+> [ 31.080835] (3:forwarder@Casavant) Sending "Task_11" to "Monique"
+> [ 31.080835] (1:master@Jacquelin) Sent
+> [ 31.080835] (1:master@Jacquelin) Sending "Task_12" to "Bousquet"
+> [ 32.472434] (12:slave@Monique) Received "Task_11"
+> [ 32.472434] (12:slave@Monique) Processing "Task_11"
+> [ 32.516124] (12:slave@Monique) "Task_11" done
+> [ 32.671876] (5:slave@Bousquet) Received "Task_12"
+> [ 32.671876] (5:slave@Bousquet) Processing "Task_12"
+> [ 32.671876] (1:master@Jacquelin) Sent
+> [ 32.671876] (1:master@Jacquelin) Sending "Task_13" to "Soucy"
+> [ 32.788380] (5:slave@Bousquet) "Task_12" done
+> [ 34.935611] (6:slave@Soucy) Received "Task_13"
+> [ 34.935611] (6:slave@Soucy) Processing "Task_13"
+> [ 34.935611] (1:master@Jacquelin) Sent
+> [ 34.935611] (1:master@Jacquelin) Sending "Task_14" to "Jackson"
+> [ 34.972019] (6:slave@Soucy) "Task_13" done
+> [ 36.270727] (2:forwarder@Jackson) Received "Task_14"
+> [ 36.270727] (2:forwarder@Jackson) Sending "Task_14" to "Stephen"
+> [ 36.270727] (1:master@Jacquelin) Sent
+> [ 36.270727] (1:master@Jacquelin) Sending "Task_15" to "iRMX"
+> [ 40.508273] (9:slave@Stephen) Received "Task_14"
+> [ 40.508273] (9:slave@Stephen) Processing "Task_14"
+> [ 40.581088] (9:slave@Stephen) "Task_14" done
+> [ 41.043257] (4:slave@iRMX) Received "Task_15"
+> [ 41.043257] (4:slave@iRMX) Processing "Task_15"
+> [ 41.043257] (1:master@Jacquelin) Sent
+> [ 41.043257] (1:master@Jacquelin) Sending "Task_16" to "Casavant"
+> [ 41.116072] (4:slave@iRMX) "Task_15" done
+> [ 43.171078] (3:forwarder@Casavant) Received "Task_16"
+> [ 43.171078] (3:forwarder@Casavant) Sending "Task_16" to "Robert"
+> [ 43.171078] (1:master@Jacquelin) Sent
+> [ 43.171078] (1:master@Jacquelin) Sending "Task_17" to "Bousquet"
+> [ 44.762118] (5:slave@Bousquet) Received "Task_17"
+> [ 44.762118] (5:slave@Bousquet) Processing "Task_17"
+> [ 44.762118] (1:master@Jacquelin) Sent
+> [ 44.762118] (1:master@Jacquelin) Sending "Task_18" to "Soucy"
+> [ 44.878622] (5:slave@Bousquet) "Task_17" done
+> [ 45.531275] (10:slave@Robert) Received "Task_16"
+> [ 45.531275] (10:slave@Robert) Processing "Task_16"
+> [ 45.567683] (10:slave@Robert) "Task_16" done
+> [ 47.025854] (6:slave@Soucy) Received "Task_18"
+> [ 47.025854] (6:slave@Soucy) Processing "Task_18"
+> [ 47.025854] (1:master@Jacquelin) Sent
+> [ 47.025854] (1:master@Jacquelin) Sending "Task_19" to "Jackson"
+> [ 47.062262] (6:slave@Soucy) "Task_18" done
+> [ 48.360969] (2:forwarder@Jackson) Received "Task_19"
+> [ 48.360969] (2:forwarder@Jackson) Sending "Task_19" to "Kuenning"
+> [ 48.360969] (1:master@Jacquelin) Sent
+> [ 48.360969] (1:master@Jacquelin) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 48.448540] (7:slave@Kuenning) Received "Task_19"
+> [ 48.448540] (7:slave@Kuenning) Processing "Task_19"
+> [ 48.506793] (7:slave@Kuenning) "Task_19" done
+> [ 50.794024] (4:slave@iRMX) Received "finalize"
+> [ 50.794024] (4:slave@iRMX) I'm done. See you!
+> [ 51.878795] (3:forwarder@Casavant) Received "finalize"
+> [ 51.878795] (3:forwarder@Casavant) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 52.689914] (5:slave@Bousquet) Received "finalize"
+> [ 52.689914] (5:slave@Bousquet) I'm done. See you!
+> [ 53.082033] (10:slave@Robert) Received "finalize"
+> [ 53.082033] (10:slave@Robert) I'm done. See you!
+> [ 53.491253] (11:slave@Sirois) Received "finalize"
+> [ 53.491253] (11:slave@Sirois) I'm done. See you!
+> [ 53.843975] (6:slave@Soucy) Received "finalize"
+> [ 53.843975] (6:slave@Soucy) I'm done. See you!
+> [ 54.200695] (12:slave@Monique) Received "finalize"
+> [ 54.200695] (12:slave@Monique) I'm done. See you!
+> [ 54.200695] (3:forwarder@Casavant) I'm done. See you!
+> [ 54.524622] (2:forwarder@Jackson) Received "finalize"
+> [ 54.524622] (2:forwarder@Jackson) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 54.524622] (1:master@Jacquelin) Goodbye now!
+> [ 54.529294] (7:slave@Kuenning) Received "finalize"
+> [ 54.529294] (7:slave@Kuenning) I'm done. See you!
+> [ 55.971757] (8:slave@Browne) Received "finalize"
+> [ 55.971757] (8:slave@Browne) I'm done. See you!
+> [ 58.132075] (9:slave@Stephen) Received "finalize"
+> [ 58.132075] (9:slave@Stephen) I'm done. See you!
+> [ 58.132075] (2:forwarder@Jackson) I'm done. See you!
+> [ 58.132075] (0:@) Simulation time 58.1321
p Testing a simple master/slave example application (mailbox version)
! output sort
-$ $SG_TEST_EXENV masterslave/masterslave_mailbox$EXEEXT ${srcdir:=.}/small_platform_with_routers.xml ${srcdir:=.}/masterslave/deployment_masterslave_mailbox.xml --trace "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV masterslave/masterslave_mailbox$EXEEXT ${srcdir:=.}/small_platform_with_routers.xml ${srcdir:=.}/masterslave/deployment_masterslave_mailbox.xml --cfg=network/crosstraffic:0 --trace "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'network/crosstraffic' to '0'
> [ 0.000000] (1:master@Tremblay) Got 5 slaves and 20 tasks to process
> [ 0.000000] (1:master@Tremblay) Sending "Task_0" (of 20) to mailbox "slave-0"
> [ 0.002339] (1:master@Tremblay) Sending "Task_1" (of 20) to mailbox "slave-1"
--- /dev/null
+#! ./tesh
+
+p Testing a simple master/slave example application (mailbox version)
+
+$ $SG_TEST_EXENV masterslave/masterslave_mailbox$EXEEXT ${srcdir:=.}/small_platform_with_routers.xml ${srcdir:=.}/masterslave/deployment_masterslave_mailbox.xml --trace "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (1:master@Tremblay) Got 5 slaves and 20 tasks to process
+> [ 0.000000] (1:master@Tremblay) Sending "Task_0" (of 20) to mailbox "slave-0"
+> [ 0.002339] (1:master@Tremblay) Sending "Task_1" (of 20) to mailbox "slave-1"
+> [ 0.175839] (1:master@Tremblay) Sending "Task_2" (of 20) to mailbox "slave-2"
+> [ 0.393992] (1:master@Tremblay) Sending "Task_3" (of 20) to mailbox "slave-3"
+> [ 0.534451] (1:master@Tremblay) Sending "Task_4" (of 20) to mailbox "slave-4"
+> [ 0.750324] (1:master@Tremblay) Sending "Task_5" (of 20) to mailbox "slave-0"
+> [ 0.752662] (1:master@Tremblay) Sending "Task_6" (of 20) to mailbox "slave-1"
+> [ 1.004681] (1:master@Tremblay) Sending "Task_7" (of 20) to mailbox "slave-2"
+> [ 1.267487] (1:master@Tremblay) Sending "Task_8" (of 20) to mailbox "slave-3"
+> [ 1.706009] (1:master@Tremblay) Sending "Task_9" (of 20) to mailbox "slave-4"
+> [ 1.997294] (1:master@Tremblay) Sending "Task_10" (of 20) to mailbox "slave-0"
+> [ 1.999632] (1:master@Tremblay) Sending "Task_11" (of 20) to mailbox "slave-1"
+> [ 2.173132] (1:master@Tremblay) Sending "Task_12" (of 20) to mailbox "slave-2"
+> [ 2.391286] (1:master@Tremblay) Sending "Task_13" (of 20) to mailbox "slave-3"
+> [ 2.877566] (1:master@Tremblay) Sending "Task_14" (of 20) to mailbox "slave-4"
+> [ 3.244264] (1:master@Tremblay) Sending "Task_15" (of 20) to mailbox "slave-0"
+> [ 3.246603] (1:master@Tremblay) Sending "Task_16" (of 20) to mailbox "slave-1"
+> [ 3.420103] (1:master@Tremblay) Sending "Task_17" (of 20) to mailbox "slave-2"
+> [ 3.638256] (1:master@Tremblay) Sending "Task_18" (of 20) to mailbox "slave-3"
+> [ 4.049124] (1:master@Tremblay) Sending "Task_19" (of 20) to mailbox "slave-4"
+> [ 4.491234] (1:master@Tremblay) All tasks have been dispatched. Let's tell everybody the computation is over.
+> [ 4.491390] (2:slave@Tremblay) I'm done. See you!
+> [ 4.506590] (3:slave@Jupiter) I'm done. See you!
+> [ 4.527141] (4:slave@Fafard) I'm done. See you!
+> [ 5.093453] (5:slave@Ginette) I'm done. See you!
+> [ 5.542668] (6:slave@Bourassa) I'm done. See you!
+> [ 5.542668] (0:@) Simulation time 5.54267
p Testing a simple master/slave example application
-$ $SG_TEST_EXENV ./masterslave/masterslave_mailbox ${srcdir:=.}/../platforms/multicore_machine.xml ${srcdir:=.}/masterslave/deployment_masterslave_mailbox_multicore.xml --cfg=cpu/model:Cas01_fullupdate
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'cpu/model' to 'Cas01_fullupdate'
+$ $SG_TEST_EXENV ./masterslave/masterslave_mailbox ${srcdir:=.}/../platforms/multicore_machine.xml ${srcdir:=.}/masterslave/deployment_masterslave_mailbox_multicore.xml --cfg=cpu/model:Cas01 --cfg=cpu/optim:Full
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'cpu/model' to 'Cas01'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'cpu/optim' to 'Full'
> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
> [Tremblay:master:(1) 0.000000] [msg_test/INFO] Got 6 slaves and 20 tasks to process
> [Tremblay:master:(1) 0.000000] [msg_test/INFO] Sending "Task_0" (of 20) to mailbox "slave-0"
$ $SG_TEST_EXENV ./masterslave/masterslave_mailbox ${srcdir:=.}/../platforms/vivaldi.xml ${srcdir:=.}/masterslave/deployment_masterslave_vivaldi.xml --cfg=network/latency_factor:1.0 --cfg=network/bandwidth_factor:1.0
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/latency_factor' to '1.0'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/bandwidth_factor' to '1.0'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'coordinates' to 'yes'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/coordinates' to 'yes'
> [100030591:master:(1) 0.000000] [msg_test/INFO] Got 39 slaves and 20 tasks to process
> [100030591:master:(1) 0.000000] [msg_test/INFO] Sending "Task_0" (of 20) to mailbox "slave-0"
> [100030591:master:(1) 0.046299] [msg_test/INFO] Sending "Task_1" (of 20) to mailbox "slave-1"
<process host="host1" function="bob">
<prop id="SomeProp" value="SomeValue"/>
</process>
+
+ <process host="host2" function="carole" />
</platform>
int alice(int argc, char *argv[]);
int bob(int argc, char *argv[]);
+int carole(int argc, char *argv[]);
int forwarder(int argc, char *argv[]);
MSG_error_t test_all(const char *platform_file,
const char *application_file);
-int alice(int argc, char *argv[])
+static void test_host(const char*hostname)
{
- m_host_t host1 = MSG_get_host_by_name("host1");
- xbt_dict_t props = MSG_host_get_properties(host1);
+ m_host_t thehost = MSG_get_host_by_name(hostname);
+ xbt_dict_t props = MSG_host_get_properties(thehost);
xbt_dict_cursor_t cursor = NULL;
char *key, *data;
const char *noexist = "Unknown";
XBT_INFO(" Host property: '%s' -> '%s'", key, data);
XBT_INFO("== Try to get a host property that does not exist");
- value = MSG_host_get_property_value(host1, noexist);
+ value = MSG_host_get_property_value(thehost, noexist);
xbt_assert(!value, "The key exists (it's not supposed to)");
XBT_INFO("== Try to get a host property that does exist");
- value = MSG_host_get_property_value(host1, exist);
+ value = MSG_host_get_property_value(thehost, exist);
xbt_assert(value, "\tProperty %s is undefined (where it should)",
exist);
xbt_assert(!strcmp(value, "180"),
xbt_dict_set(props, exist, xbt_strdup("250"), NULL);
/* Test if we have changed the value */
- value = MSG_host_get_property_value(host1, exist);
+ value = MSG_host_get_property_value(thehost, exist);
xbt_assert(value, "Property %s is undefined (where it should)", exist);
xbt_assert(!strcmp(value, "250"),
"Value of property %s is defined to %s (where it should be 250)",
exist, value);
XBT_INFO(" Property: %s old value: %s", exist, value);
+
+ /* Restore the value for the next test */
+ xbt_dict_set(props, exist, xbt_strdup("180"), NULL);
+}
+int alice(int argc, char *argv[]) { /* Dump what we have on the current host */
+ test_host("host1");
+ return 0;
+}
+int carole(int argc, char *argv[]) {/* Dump what we have on a remote host */
+ MSG_process_sleep(1); // Wait for alice to be done with its experiment
+ test_host("host1");
return 0;
}
{
MSG_function_register("alice", alice);
MSG_function_register("bob", bob);
+ MSG_function_register("carole", carole);
MSG_create_environment(platform_file);
MSG_launch_application(application_file);
> [ 0.000000] (2:bob@host1) == Print the properties of the process
> [ 0.000000] (2:bob@host1) Process property: SomeProp -> SomeValue
> [ 0.000000] (2:bob@host1) == Try to get a process property that does not exist
+> [ 1.000000] (3:carole@host2) == Print the properties of the host
+> [ 1.000000] (3:carole@host2) Host property: 'SG_TEST_Hdd' -> '180'
+> [ 1.000000] (3:carole@host2) Host property: 'SG_TEST_mem' -> '4'
+> [ 1.000000] (3:carole@host2) == Try to get a host property that does not exist
+> [ 1.000000] (3:carole@host2) == Try to get a host property that does exist
+> [ 1.000000] (3:carole@host2) Property: SG_TEST_Hdd old value: 180
+> [ 1.000000] (3:carole@host2) == Trying to modify a host property
+> [ 1.000000] (3:carole@host2) Property: SG_TEST_Hdd old value: 250
#! ./tesh
-p Testing the surf network maxmin fairness model
+p Testing the deprecated CM02 network model
! output sort
-$ $SG_TEST_EXENV sendrecv/sendrecv$EXEEXT ${srcdir:=.}/sendrecv/platform_sendrecv.xml ${srcdir:=.}/sendrecv/deployment_sendrecv.xml --cfg=workstation/model:CLM03 --cfg=cpu/model:Cas01 --cfg=network/model:CM02 "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'CLM03'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'Cas01'
+$ $SG_TEST_EXENV sendrecv/sendrecv$EXEEXT ${srcdir:=.}/sendrecv/platform_sendrecv.xml ${srcdir:=.}/sendrecv/deployment_sendrecv.xml --cfg=cpu/model:Cas01 --cfg=network/model:CM02 "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'Cas01'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
> [ 0.000000] (0:@) test_all
+> [ 0.000000] (0:@) Switching workstation model to compound since you changed the network and/or cpu model(s)
> [ 0.000000] (1:sender@Inmos) sender
> [ 0.000000] (1:sender@Inmos) host = Bellevue
> [ 0.000000] (1:sender@Inmos) task_la->data = 0.000000e+00
> [ 0.000000] (2:receiver@Bellevue) receiver
-> [ 1.040100] (1:sender@Inmos) task_bw->data = 1.040100e+00
-> [ 1.040100] (2:receiver@Bellevue) Task received : latency task
-> [ 1.040100] (2:receiver@Bellevue) Communic. time 1.040100e+00
-> [ 1.040100] (2:receiver@Bellevue) --- la 1.040100 ----
-> [10002.080100] (0:@) Total simulation time: 1.000208e+04
-> [10002.080100] (2:receiver@Bellevue) Task received : bandwidth task
-> [10002.080100] (2:receiver@Bellevue) Communic. time 1.000104e+04
-> [10002.080100] (2:receiver@Bellevue) --- bw 99989.601081 ----
\ No newline at end of file
+> [ 0.100100] (1:sender@Inmos) task_bw->data = 1.001000e-01
+> [ 0.100100] (2:receiver@Bellevue) Task received : latency task
+> [ 0.100100] (2:receiver@Bellevue) Communic. time 1.001000e-01
+> [ 0.100100] (2:receiver@Bellevue) --- la 0.100100 ----
+> [10000.200100] (0:@) Total simulation time: 1.000020e+04
+> [10000.200100] (2:receiver@Bellevue) Task received : bandwidth task
+> [10000.200100] (2:receiver@Bellevue) Communic. time 1.000010e+04
+> [10000.200100] (2:receiver@Bellevue) --- bw 99999.000010 ----
\ No newline at end of file
p Testing the surf network maxmin fairness model
-$ $SG_TEST_EXENV sendrecv/sendrecv$EXEEXT ${srcdir:=.}/sendrecv/platform_sendrecv.xml ${srcdir:=.}/sendrecv/deployment_sendrecv.xml --cfg=workstation/model:CLM03
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'workstation/model' to 'CLM03'
+$ $SG_TEST_EXENV sendrecv/sendrecv$EXEEXT ${srcdir:=.}/sendrecv/platform_sendrecv.xml ${srcdir:=.}/sendrecv/deployment_sendrecv.xml
> [0.000000] [msg_test/INFO] test_all
> [Inmos:sender:(1) 0.000000] [msg_test/INFO] sender
> [Inmos:sender:(1) 0.000000] [msg_test/INFO] host = Bellevue
> [ 1.104000] (1:0@bob1.hamburger.edu) Host "0" received "Token"
> [ 1.104000] (0:@) Simulation time 1.104
-$ $SG_TEST_EXENV ${bindir:=.}/token_ring ${srcdir:=.}/two_peers.xml --cfg=coordinates:yes "--log=root.fmt:[%12.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (0:@) Configuration change: Set 'coordinates' to 'yes'
+$ $SG_TEST_EXENV ${bindir:=.}/token_ring ${srcdir:=.}/two_peers.xml --cfg=network/coordinates:yes "--log=root.fmt:[%12.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (0:@) Configuration change: Set 'network/coordinates' to 'yes'
> [ 0.000000] (0:@) Number of host '2'
> [ 0.000000] (1:0@peer_100030591) Host "0" send 'Token' to Host "1"
-> [ 0.110400] (2:1@peer_100036570) Host "1" received "Token"
-> [ 0.110400] (2:1@peer_100036570) Host "1" send 'Token' to Host "0"
-> [ 0.220800] (1:0@peer_100030591) Host "0" received "Token"
-> [ 0.220800] (0:@) Simulation time 0.2208
\ No newline at end of file
+> [ 5.221778] (2:1@peer_100036570) Host "1" received "Token"
+> [ 5.221778] (2:1@peer_100036570) Host "1" send 'Token' to Host "0"
+> [ 10.443556] (1:0@peer_100030591) Host "0" received "Token"
+> [ 10.443556] (0:@) Simulation time 10.4436
\ No newline at end of file
p Testing trace integration using file.trace and test1.xml, a < max(time), b < max(time)
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test1.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test1.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'compound'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (0:@) Ignoring redefinition of trace trace/file.trace
> [ 10.000000] (1:test_trace@CPU1) Testing the trace integration cpu model: CpuTI
> [ 10.000000] (1:test_trace@CPU1) Task size: 400.000000
p Testing trace integration using file.trace and test2.xml, a < max(time), max(time) < b < 2 max(time)
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test2.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test2.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'compound'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (0:@) Ignoring redefinition of trace trace/file.trace
> [ 10.000000] (1:test_trace@CPU1) Testing the trace integration cpu model: CpuTI
> [ 10.000000] (1:test_trace@CPU1) Task size: 850.000000
p Testing trace integration using file.trace and test3.xml, a < max(time), b > 2 max(time)
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test3.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test3.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'compound'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (0:@) Ignoring redefinition of trace trace/file.trace
> [ 10.000000] (1:test_trace@CPU1) Testing the trace integration cpu model: CpuTI
> [ 10.000000] (1:test_trace@CPU1) Task size: 1980.000000
p Testing trace integration using file.trace and test4.xml, max(time) < a < 2max(time), max(time) < b < 2max(time)
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test4.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test4.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'compound'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (0:@) Ignoring redefinition of trace trace/file.trace
> [ 80.000000] (1:test_trace@CPU1) Testing the trace integration cpu model: CpuTI
> [ 80.000000] (1:test_trace@CPU1) Task size: 400.000000
p Testing trace integration using file.trace and test5.xml, max(time) < a < 2max(time), 2max(time) < b < 3max(time)
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test5.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test5.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'compound'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (0:@) Ignoring redefinition of trace trace/file.trace
> [ 90.000000] (1:test_trace@CPU1) Testing the trace integration cpu model: CpuTI
> [ 90.000000] (1:test_trace@CPU1) Task size: 850.000000
p Testing trace integration using file.trace and test6.xml, max(time) < a < 2max(time), b > 3max(time)
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test6.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test6.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'compound'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (0:@) Ignoring redefinition of trace trace/file.trace
> [ 80.000000] (1:test_trace@CPU1) Testing the trace integration cpu model: CpuTI
> [ 80.000000] (1:test_trace@CPU1) Task size: 1980.000000
p Testing trace integration using file.trace and test7.xml, two process with same priority
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test7.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test7.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'compound'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (0:@) Ignoring redefinition of trace trace/file.trace
> [ 10.000000] (1:test_trace@CPU1) Testing the trace integration cpu model: CpuTI
> [ 10.000000] (1:test_trace@CPU1) Task size: 400.000000
p Testing trace integration using file.trace and test8.xml, two process with different priority
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test8.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test8.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'compound'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (0:@) Ignoring redefinition of trace trace/file.trace
> [ 10.000000] (1:test_trace@CPU1) Testing the trace integration cpu model: CpuTI
> [ 10.000000] (1:test_trace@CPU1) Task size: 400.000000
p Testing trace integration using file.trace and test9.xml, three process with same priority
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test9.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test9.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'compound'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (0:@) Ignoring redefinition of trace trace/file.trace
> [ 0.000000] (1:test_trace@CPU1) Testing the trace integration cpu model: CpuTI
> [ 0.000000] (1:test_trace@CPU1) Task size: 400.000000
p Testing trace integration using file.trace and test10.xml, three process with different priority
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test10.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test10.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'compound'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (0:@) Ignoring redefinition of trace trace/file.trace
> [ 0.000000] (1:test_trace@CPU1) Testing the trace integration cpu model: CpuTI
> [ 0.000000] (1:test_trace@CPU1) Task size: 420.000000
p Testing trace integration using file.trace and test11.xml, three process with different priority. Changed timestep to 0.1.
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test11.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/model:CpuTI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ $SG_TEST_EXENV ${bindir:=.}/trace/test_trace_integration$EXEEXT ./trace/test11.xml --cfg=workstation/model:compound --cfg=network/model:CM02 --cfg=cpu/optim:TI "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:@) Configuration change: Set 'workstation/model' to 'compound'
> [ 0.000000] (0:@) Configuration change: Set 'network/model' to 'CM02'
-> [ 0.000000] (0:@) Configuration change: Set 'cpu/model' to 'CpuTI'
+> [ 0.000000] (0:@) Configuration change: Set 'cpu/optim' to 'TI'
> [ 0.000000] (1:test_trace@CPU1) Testing the trace integration cpu model: CpuTI
> [ 0.000000] (1:test_trace@CPU1) Task size: 420.000000
> [ 0.000000] (1:test_trace@CPU1) Task prio: 2.000000
<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid.dtd">
<platform version="3">
<config id="General">
- <prop id="coordinates" value="yes"></prop>
+ <prop id="network/coordinates" value="yes"></prop>
</config>
<AS id="AS0" routing="Vivaldi">
<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid.dtd">
<platform version="3">
<config id="General">
- <prop id="coordinates" value="yes"></prop>
+ <prop id="network/coordinates" value="yes"></prop>
</config>
<AS id="AS0" routing="Vivaldi">
sub get_gflops {
switch ($_[0]) {
+ #Bordeaux
case "bordeplage" { print "5.2297E9" }
case "bordereau" { print "8.8925E9" }
case "borderline" { print "13.357E9" }
-
+ #Lille
case "chicon" { print "8.9618E9" }
case "chimint" { print "23.531E9" }
case "chinqchint" { print "22.270E9" }
case "chirloute" { print "24.473E9" }
-
+ #Grenoble
case "adonis" { print "23.681E9" }
case "edel" { print "23.492E9" }
case "genepi" { print "21.175E9" }
-
+ #Lyon
case "capricorne" { print "4.7233E9" }
case "sagittaire" { print "5.6693E9" }
-
+ #Nancy
case "graphene" { print "16.673E9" }
case "griffon" { print "20.678E9" }
-
+ #Orsay
case "gdx" { print "4.7153E9" }
case "netgdx" { print "4.7144E9" }
-
+ #Rennes
case "paradent" { print "21.496E9" }
case "paramount" { print "12.910E9" }
case "parapide" { print "30.130E9" }
case "parapluie" { print "27.391E9" }
-
+ #Sophia
case "helios" { print "7.7318E9" }
case "sol" { print "8.9388E9" }
case "suno" { print "23.530E9" }
-
+ #Toulouse
case "pastel" { print "9.5674E9" }
case "violette" { print "5.1143E9" }
-
- case "default" { print "3.542E9" }
- else { print "xxxxxxx" }
+ #Reims
+ case "stremi" { print "TODO" }
+ #Luxembourg
+ case "granduc" { print "TODO" }
+
+ default: { print "TODO" }
}
-}
\ No newline at end of file
+}
sub get_gflops {
switch ($_[0]) {
+ #Bordeaux
case "bordeplage" { print "5.2297E9" }
case "bordereau" { print "8.8925E9" }
case "borderline" { print "13.357E9" }
-
+ #Lille
case "chicon" { print "8.9618E9" }
case "chimint" { print "23.531E9" }
case "chinqchint" { print "22.270E9" }
case "chirloute" { print "24.473E9" }
-
+ #Grenoble
case "adonis" { print "23.681E9" }
case "edel" { print "23.492E9" }
case "genepi" { print "21.175E9" }
-
+ #Lyon
case "capricorne" { print "4.7233E9" }
case "sagittaire" { print "5.6693E9" }
-
+ #Nancy
case "graphene" { print "16.673E9" }
case "griffon" { print "20.678E9" }
-
+ #Orsay
case "gdx" { print "4.7153E9" }
case "netgdx" { print "4.7144E9" }
-
+ #Rennes
case "paradent" { print "21.496E9" }
case "paramount" { print "12.910E9" }
case "parapide" { print "30.130E9" }
case "parapluie" { print "27.391E9" }
-
+ #Sophia
case "helios" { print "7.7318E9" }
case "sol" { print "8.9388E9" }
case "suno" { print "23.530E9" }
-
+ #Toulouse
case "pastel" { print "9.5674E9" }
case "violette" { print "5.1143E9" }
-
- case "default" { print "3.542E9" }
- else { print "xxxxxxx" }
+ #Reims
+ case "stremi" { print "TODO" }
+ #Luxembourg
+ case "granduc" { print "TODO" }
+
+ default: { print "TODO" }
}
}
<platform version="3">
<config id="General">
- <prop id="coordinates" value="yes"></prop>
+ <prop id="network/coordinates" value="yes"></prop>
</config>
<AS id="AS0" routing="Vivaldi">
<platform version="3">
<config id="General">
- <prop id="coordinates" value="yes"></prop>
+ <prop id="network/coordinates" value="yes"></prop>
</config>
<AS id="AS0" routing="Vivaldi">
<platform version="3">
<config id="General">
- <prop id="coordinates" value="yes"></prop>
+ <prop id="network/coordinates" value="yes"></prop>
</config>
<AS id="AS0" routing="Vivaldi">
<platform version="3">
<config id="General">
- <prop id="coordinates" value="yes"></prop>
+ <prop id="network/coordinates" value="yes"></prop>
</config>
<AS id="AS0" routing="Vivaldi">
<host id="100030591" coordinates="25.5 9.4 1.4" power="1500000000.0" />
add_executable(scatter scatter.c)
add_executable(reduce reduce.c)
add_executable(split split.c)
+add_executable(dsend dsend.c)
add_executable(mvmul mvmul.c)
add_executable(smpi_sendrecv sendrecv.c)
add_executable(smpi_traced smpi_traced.c)
target_link_libraries(scatter m simgrid smpi )
target_link_libraries(reduce m simgrid smpi )
target_link_libraries(split m simgrid smpi )
+target_link_libraries(dsend m simgrid smpi )
target_link_libraries(mvmul m simgrid smpi )
target_link_libraries(smpi_sendrecv m simgrid smpi )
target_link_libraries(smpi_traced m simgrid smpi )
> [rank 2] -> Fafard
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '1e-9'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
> [rank 5] -> Tremblay
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '1e-9'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
> [rank 11] -> Jupiter
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '1e-9'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
--- /dev/null
+/* Copyright (c) 2011. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+/* This program simply does a very small exchange to test whether using SIMIX dsend to model the eager mode works */
+
+#include <stdio.h>
+#include <mpi.h>
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(dsend,"the dsend test");
+
+int main(int argc, char *argv[]) {
+ int rank;
+ int data=11;
+
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ if (rank==1) {
+ data=22;
+ MPI_Send(&data,1,MPI_INT,(rank+1)%2,666,MPI_COMM_WORLD);
+// smpi_sleep(1000);
+ } else {
+ MPI_Recv(&data,1,MPI_INT,-1,666,MPI_COMM_WORLD,NULL);
+ if (data !=22) {
+ printf("rank %d: Damn, data does not match (got %d)\n",rank, data);
+ }
+ }
+
+ XBT_INFO("rank %d: data exchanged", rank);
+ MPI_Finalize();
+ return 0;
+}
> [rank 2] -> Fafard
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '1e-9'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
> ** Scalar Int Test Result:
> [0] sum=6 ... validation ok.
> [rank 5] -> Tremblay
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '1e-9'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
> ** Scalar Int Test Result:
> [0] sum=21 ... validation ok.
> [rank 11] -> Jupiter
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '1e-9'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
> ** Scalar Int Test Result:
> [0] sum=78 ... validation ok.
--- /dev/null
+/* A simple example pingpong program to test MPI_Send and MPI_Recv */
+
+/* Copyright (c) 2009, 2010. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <mpi.h>
+
+int main(int argc, char *argv[])
+{
+ int rank;
+ int size;
+ MPI_Status status;
+
+ int n = 0, m = 0, bytes = 0, workusecs = 0;
+ int currusecs;
+
+ char *buf = NULL;
+ int i, j;
+ struct timeval start_time, stop_time;
+ struct timeval start_pause, curr_pause;
+ unsigned long usecs;
+
+ MPI_Init(&argc, &argv); /* Initialize MPI */
+ MPI_Comm_size(MPI_COMM_WORLD, &size); /* Get nr of tasks */
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* Get id of this process */
+
+ if (size != 2) {
+ printf("run this program with exactly 2 processes (-np 2)\n");
+ MPI_Finalize();
+ exit(0);
+ }
+
+ if (0 == rank) {
+ if (argc > 1 && isdigit(argv[1][0])) {
+ n = atoi(argv[1]);
+ }
+ if (argc > 2 && isdigit(argv[2][0])) {
+ m = atoi(argv[2]);
+ }
+ if (argc > 3 && isdigit(argv[3][0])) {
+ bytes = atoi(argv[3]);
+ }
+ if (argc > 4 && isdigit(argv[4][0])) {
+ workusecs = atoi(argv[4]);
+ }
+ buf = malloc(sizeof(char) * bytes);
+ for (i = 0; i < bytes; i++) buf[i] = i % 256;
+ MPI_Send(&n, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
+ MPI_Send(&m, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
+ MPI_Send(&bytes, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
+ MPI_Send(&workusecs, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
+ MPI_Barrier(MPI_COMM_WORLD);
+ gettimeofday(&start_time, NULL);
+ for (i = 0; i < n; i++) {
+ for (j = 0; j < m; j++) {
+ MPI_Send(buf, bytes, MPI_CHAR, 1, 0, MPI_COMM_WORLD);
+ gettimeofday(&start_pause, NULL);
+ currusecs = 0;
+ while (currusecs < workusecs) {
+ gettimeofday(&curr_pause, NULL);
+ currusecs = (curr_pause.tv_sec - start_pause.tv_sec) * 1e6 + curr_pause.tv_usec - start_pause.tv_usec;
+ }
+ }
+ MPI_Recv(buf, bytes, MPI_CHAR, 1, 0, MPI_COMM_WORLD, &status);
+ }
+ gettimeofday(&stop_time, NULL);
+ usecs = (stop_time.tv_sec - start_time.tv_sec) * 1e6 + stop_time.tv_usec - start_time.tv_usec;
+ printf("n: %d m: %d bytes: %d sleep: %d usecs: %u\n", n, m, bytes, workusecs, usecs);
+ } else {
+ MPI_Recv(&n, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+ MPI_Recv(&m, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+ MPI_Recv(&bytes, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+ MPI_Recv(&workusecs, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+ buf = malloc(sizeof(char) * bytes);
+ MPI_Barrier(MPI_COMM_WORLD);
+ for (i = 0; i < n; i++) {
+ for (j = 0; j < m; j++) {
+ MPI_Recv(buf, bytes, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &status);
+ }
+ MPI_Send(buf, bytes, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
+ }
+ }
+ free(buf);
+ MPI_Finalize();
+ return 0;
+}
smx_context_t smx_ctx_base_self(void);
void *smx_ctx_base_get_data(smx_context_t context);
+XBT_INLINE xbt_dynar_t SIMIX_process_get_runnable(void);
+
/* parallelism */
XBT_INLINE int SIMIX_context_is_parallel(void);
XBT_INLINE int SIMIX_context_get_nthreads(void);
XBT_PUBLIC(void*) SIMIX_process_self_get_data(smx_process_t self);
XBT_PUBLIC(smx_context_t) SIMIX_process_get_context(smx_process_t);
XBT_PUBLIC(void) SIMIX_process_set_context(smx_process_t p,smx_context_t c);
+XBT_PUBLIC(int) SIMIX_process_has_pending_comms(smx_process_t process);
/****************************** Communication *********************************/
XBT_PUBLIC(void) SIMIX_comm_set_copy_data_callback(void (*callback) (smx_action_t, size_t));
XBT_PUBLIC(void) SIMIX_comm_copy_pointer_callback(smx_action_t comm, size_t buff_size);
XBT_PUBLIC(void) SIMIX_comm_copy_buffer_callback(smx_action_t comm, size_t buff_size);
+XBT_PUBLIC(void) smpi_comm_copy_data_callback(smx_action_t comm, size_t buff_size);
+
XBT_PUBLIC(smx_action_t) SIMIX_comm_get_send_match(smx_rdv_t rdv, int (*match_fun)(void*, void*), void* data);
XBT_PUBLIC(int) SIMIX_comm_has_send_match(smx_rdv_t rdv, int (*match_fun)(void*, void*), void* data);
XBT_PUBLIC(int) SIMIX_comm_has_recv_match(smx_rdv_t rdv, int (*match_fun)(void*, void*), void* data);
double rate, void *src_buff,
size_t src_buff_size,
int (*match_fun)(void *, void *),
+ void (*clean_fun)(void *),
void *data, int detached);
XBT_PUBLIC(void) SIMIX_req_comm_recv(smx_rdv_t rdv, void *dst_buff,
#include <xbt/log.h>
#include <xbt/asserts.h>
-#define sleep(x) smpi_sleep(x)
-#define gettimeofday(x, y) smpi_gettimeofday(x, y)
-
#endif
#include <xbt/misc.h>
#include <xbt/function_types.h>
+#define sleep(x) smpi_sleep(x)
+#define gettimeofday(x, y) smpi_gettimeofday(x, y)
+
#define MPI_CALL(type,name,args) \
type name args __attribute__((weak)); \
type P##name args
} MPI_Status;
#define MPI_STATUS_IGNORE NULL
+#define MPI_STATUSES_IGNORE NULL
#define MPI_DATATYPE_NULL NULL
extern MPI_Datatype MPI_CHAR;
#include <smpi/smpi.h>
#include <f2c.h>
+XBT_PUBLIC_DATA(__thread int) smpi_current_rank;
+
XBT_PUBLIC(int) smpi_process_argc(void);
XBT_PUBLIC(int) smpi_process_getarg(integer* index, char* dst, ftnlen len);
XBT_PUBLIC(int) smpi_global_size(void);
/* @brief heap datatype */
typedef struct xbt_heap *xbt_heap_t;
-XBT_PUBLIC(xbt_heap_t) xbt_heap_new(int num,
+XBT_PUBLIC(xbt_heap_t) xbt_heap_new(int init_size,
void_f_pvoid_t const free_func);
XBT_PUBLIC(void) xbt_heap_free(xbt_heap_t H);
XBT_PUBLIC(int) xbt_heap_size(xbt_heap_t H);
payload, msg->payl);
}
- comm = SIMIX_req_comm_isend(target_rdv, whole_payload_size, -1, msg, sizeof(void *), NULL, msg, 0);
+ comm = SIMIX_req_comm_isend(target_rdv, whole_payload_size, -1, msg, sizeof(void *), NULL,NULL, msg, 0);
SIMIX_req_comm_wait(comm, -1);
XBT_VERB("Message sent (and received)");
return (fabs(value1 - value2) < MAXMIN_PRECISION);
}
-XBT_PUBLIC(lmm_system_t) lmm_system_new(void);
+XBT_PUBLIC(lmm_system_t) lmm_system_new(int selective_update);
XBT_PUBLIC(void) lmm_system_free(lmm_system_t sys);
void lmm_variable_disable(lmm_system_t sys, lmm_variable_t var);
/** \brief Initializes the CPU model with the model Cas01
* \ingroup SURF_models
*
+ * By default, this model uses the lazy optimization mechanism that
+ * relies on partial invalidation in LMM and a heap for lazy action update.
+ * You can change this behavior by setting the cpu/optim configuration
+ * variable to a different value.
+ *
* This function is called by surf_workstation_model_init_CLM03
* so you shouldn't have to call it by yourself.
*
*/
XBT_PUBLIC(void) surf_cpu_model_init_Cas01(void);
-/** \brief Initializes the CPU model with trace integration
+/** \brief Initializes the CPU model with trace integration [Deprecated]
* \ingroup SURF_models
*
+ * You shouldn't have to call it by yourself.
+ * \see surf_workstation_model_init_CLM03()
*/
XBT_PUBLIC(void) surf_cpu_model_init_ti(void);
-/** \brief Initializes the CPU model with the model Cas01 Improved. This model uses a heap to order the events, decreasing the time complexity to get the minimum next event.
- * \ingroup SURF_models
+/** \brief This function call the share resources function needed
*
- * This function is called by surf_workstation_model_init_CLM03
- * so you shouldn't have to call it by yourself.
+ */
+XBT_PUBLIC(double) generic_share_resources(double now);
+
+/** \brief This function call the update action state function needed
*
- * \see surf_workstation_model_init_CLM03()
*/
-XBT_PUBLIC(void) surf_cpu_model_init_Cas01_im(void);
+XBT_PUBLIC(void) generic_update_actions_state(double now, double delta);
+
+/** \brief The list of all available optimization modes (both for cpu and networks).
+ * \ingroup SURF_models
+ * These optimization modes can be set using --cfg=cpu/optim:... and --cfg=network/optim:...
+ */
+XBT_PUBLIC_DATA(s_surf_model_description_t) surf_optimization_mode_description[];
/** \brief The list of all available cpu model models
* \ingroup SURF_models
/** \brief Same as network model 'LagrangeVelho', only with different correction factors.
* \ingroup SURF_models
- * \param filename XML platform file name
*
* This model is proposed by Pierre-Nicolas Clauss and Martin Quinson and Stéphane Génaud
* based on the model 'LV08' and different correction factors depending on the communication
* size (< 1KiB, < 64KiB, >= 64KiB).
+ * See comments in the code for more information.
*
* \see surf_workstation_model_init_SMPI()
*/
XBT_PUBLIC(void) surf_network_model_init_SMPI(void);
-/** \brief Initializes the platform with the network model 'LagrangeVelho'
+/** \brief Initializes the platform with the network model 'LegrandVelho'
* \ingroup SURF_models
- * \param filename XML platform file name
*
* This model is proposed by Arnaud Legrand and Pedro Velho based on
* the results obtained with the GTNets simulator for onelink and
- * dogbone sharing scenarios.
+ * dogbone sharing scenarios. See comments in the code for more information.
*
* \see surf_workstation_model_init_LegrandVelho()
*/
XBT_PUBLIC(void) surf_network_model_init_LegrandVelho(void);
-
-/** \brief Initializes the platform with the network model 'LV08_im'
- * \ingroup SURF_models
- * \param filename XML platform file name
- *
- * This model is adds the lazy management improvement to Legrand and
- * Velho model. This improvement essentially replaces the list of actions
- * inside the simulation kernel by a heap in order to reduce the complexity
- * at each iteration of the simulation kernel.
- *
- * \see surf_workstation_model_init_LegrandVelho()
- */
-XBT_PUBLIC(void) im_surf_network_model_init_LegrandVelho(void);
-
/** \brief Initializes the platform with the network model 'Constant'
* \ingroup SURF_models
- * \param filename XML platform file name
*
* In this model, the communication time between two network cards is
* constant, hence no need for a routing table. This is particularly
/** \brief Initializes the platform with the network model CM02
* \ingroup SURF_models
- * \param filename XML platform file name
*
* This function is called by surf_workstation_model_init_CLM03
* or by yourself only if you plan using surf_workstation_model_init_compound
+ * See comments in the code for more information.
*
* \see surf_workstation_model_init_CLM03()
*/
/** \brief Initializes the platform with a compound workstation model
* \ingroup SURF_models
- * \param filename XML platform file name
*
* This function should be called after a cpu_model and a
* network_model have been set up.
*/
XBT_PUBLIC(void) surf_workstation_model_init_compound(void);
+/** \brief Initializes the platform with the current best network and cpu models at hand
+ * \ingroup SURF_models
+ *
+ * This platform model separates the workstation model and the network model.
+ * The workstation model will be initialized with the model compound, the network
+ * model with the model LV08 (with cross traffic support) and the CPU model with
+ * the model Cas01.
+ * Such model is subject to modification with warning in the ChangeLog so monitor it!
+ *
+ */
+XBT_PUBLIC(void) surf_workstation_model_init_current_default(void);
+
/** \brief Initializes the platform with the workstation model CLM03
* \ingroup SURF_models
- * \param filename XML platform file name
*
 * This platform model separates the workstation model and the network model.
* The workstation model will be initialized with the model CLM03, the network
* In future releases, some other network models will be implemented and will be
* combined with the workstation model CLM03.
*
- * \see surf_workstation_model_init_KCCFLN05()
*/
XBT_PUBLIC(void) surf_workstation_model_init_CLM03(void);
/** \brief Initializes the platform with the model KCCFLN05
* \ingroup SURF_models
- * \param filename XML platform file name
- *
- * With this model, the workstations and the network are handled
- * together. The network model is roughly the same as in CM02 but
- * interference between computations and communications can be taken
- * into account. This platform model is the default one for MSG and
- * SimDag.
- *
- */
-XBT_PUBLIC(void) surf_workstation_model_init_KCCFLN05(void);
-
-/** \brief Initializes the platform with the model KCCFLN05
- * \ingroup SURF_models
- * \param filename XML platform file name
*
* With this model, only parallel tasks can be used. Resource sharing
* is done by identifying bottlenecks and giving an equal share of
* This function has to be called to initialize the common
* structures. Then you will have to create the environment by
* calling
- * e.g. surf_workstation_model_init_CLM03() or
- * surf_workstation_model_init_KCCFLN05().
+ * e.g. surf_workstation_model_init_CLM03()
*
- * \see surf_workstation_model_init_CLM03(),
- * surf_workstation_model_init_KCCFLN05(), surf_workstation_model_init_compound(), surf_exit()
+ * \see surf_workstation_model_init_CLM03(), surf_workstation_model_init_compound(), surf_exit()
*/
XBT_PUBLIC(void) surf_init(int *argc, char **argv); /* initialize common structures */
/* destroy all data structures of tracing (and free) */
destroyAllContainers();
+ instr_paje_free();
+ TRACE_surf_release();
+ TRACE_smpi_release();
+ xbt_dict_free(&created_categories);
/* close the trace file */
TRACE_paje_end();
xbt_cfgelm_int, &default_tracing, 0, 1,
NULL, NULL);
- /* tracing platform*/
+ /* register platform in the trace */
int default_tracing_platform = 0;
xbt_cfg_register(&_surf_cfg_set, OPT_TRACING_PLATFORM,
- "Enable Tracing Platform.",
+ "Register the platform in the trace as a graph.",
xbt_cfgelm_int, &default_tracing_platform, 0, 1,
NULL, NULL);
NULL, NULL);
- /* platform */
+ /* tracing categorized resource utilization traces */
int default_tracing_categorized = 0;
xbt_cfg_register(&_surf_cfg_set, OPT_TRACING_CATEGORIZED,
- "Tracing of categorized platform (host and link) utilization.",
+ "Tracing categorized resource utilization of hosts and links.",
xbt_cfgelm_int, &default_tracing_categorized, 0, 1,
NULL, NULL);
/* tracing uncategorized resource utilization */
int default_tracing_uncategorized = 0;
xbt_cfg_register(&_surf_cfg_set, OPT_TRACING_UNCATEGORIZED,
- "Tracing of uncategorized resource (host and link) utilization.",
+ "Tracing uncategorized resource utilization of hosts and links.",
xbt_cfgelm_int, &default_tracing_uncategorized, 0, 1,
NULL, NULL);
}
}
+void TRACE_set_network_update_mechanism (void)
+{
+ if (TRACE_is_enabled()){
+ if (TRACE_categorized() || TRACE_uncategorized()){
+ XBT_INFO ("Tracing resource utilization active, network/optim configuration now set to Full.");
+ xbt_cfg_set_string (_surf_cfg_set, "network/optim", "Full");
+ }
+ }
+}
+
#undef OPT_TRACING
#undef OPT_TRACING_PLATFORM
#undef OPT_TRACING_SMPI
rootContainer = root;
}
+void instr_paje_free (void)
+{
+ xbt_dict_free (&allContainers);
+ xbt_dict_free (&trivaNodeTypes);
+ xbt_dict_free (&trivaEdgeTypes);
+}
+
/* Hands out a process-wide unique type identifier.
 * Counts monotonically from zero; the counter persists across calls
 * thanks to static storage duration. Not thread-safe. */
static long long int new_type_id (void)
{
  static long long int next_id = 0;
  long long int id = next_id;
  next_id++;
  return id;
}
+static void destroyValue (void *value)
+{
+ xbt_free(((val_t)value)->name);
+ xbt_free(((val_t)value)->color);
+ xbt_free(((val_t)value)->id);
+ xbt_free(value);
+}
+
static val_t newValue (const char *valuename, const char *color, type_t father)
{
val_t ret = xbt_new0(s_val_t, 1);
void destroyContainer (container_t container)
{
- //remove me from my father
- if (container->father){
- xbt_dict_remove(container->father->children, container->name);
- }
-
XBT_DEBUG("destroy container %s", container->name);
//obligation to dump previous events because they might
//free
xbt_free (container->name);
xbt_free (container->id);
- xbt_free (container->children);
+ xbt_dict_free (&container->children);
xbt_free (container);
container = NULL;
}
}
xbt_free (type->name);
xbt_free (type->id);
- xbt_free (type->children);
- xbt_free (type->values);
+ xbt_free (type->color);
+ xbt_dict_free (&type->children);
+ val_t value;
+ char *value_name;
+ xbt_dict_foreach(type->values, cursor, value_name, value) {
+ destroyValue (value);
+ }
+ xbt_dict_free (&type->values);
xbt_free (type);
type = NULL;
}
{
fclose(tracing_file);
char *filename = TRACE_get_filename();
+ xbt_dynar_free (&buffer);
XBT_DEBUG("Filename %s is closed", filename);
}
static void free_paje_event (paje_event_t event)
{
XBT_DEBUG("%s: event_type=%d, timestamp=%f", __FUNCTION__, event->event_type, event->timestamp);
- if (event->event_type == PAJE_StartLink){
+ switch (event->event_type){
+ case PAJE_StartLink:
xbt_free (((startLink_t)(event->data))->value);
xbt_free (((startLink_t)(event->data))->key);
- }else if (event->event_type == PAJE_EndLink){
+ break;
+ case PAJE_EndLink:
xbt_free (((endLink_t)(event->data))->value);
xbt_free (((endLink_t)(event->data))->key);
+ break;
}
xbt_free (event->data);
xbt_free (event);
void TRACE_help(int detailed);
void TRACE_generate_triva_uncat_conf (void);
void TRACE_generate_triva_cat_conf (void);
+void TRACE_set_network_update_mechanism (void);
/* from resource_utilization.c */
void TRACE_surf_host_set_utilization(const char *resource,
char *getVariableTypeIdByName (const char *name, type_t father);
container_t getRootContainer (void);
void instr_paje_init (container_t root);
+void instr_paje_free (void);
type_t getRootType (void);
type_t getContainerType (const char *name, type_t father);
type_t getEventType (const char *name, const char *color, type_t father);
const char *variable,
const char *resource)
{
- /* check if we have to set it to 0 */
- if (!xbt_dict_get_or_null(platform_variables, resource)) {
- xbt_dynar_t array = xbt_dynar_new(sizeof(char *), xbt_free);
- char *var_cpy = xbt_strdup(variable);
- xbt_dynar_push(array, &var_cpy);
+ /*
+ * To trace resource utilization, we use pajeAddVariable and pajeSubVariable only.
+ * The Paje simulator needs a pajeSetVariable in the first place so it knows
+ * the initial value of all variables for subsequent adds/subs. If we don't do
+ * so, the first pajeAddVariable is added to a non-determined value within
+ * the Paje simulator, causing analysis problems.
+ */
+
+ // create a key considering the resource and variable
+ int n = strlen(variable)+strlen(resource)+1;
+ char *key = (char*)xbt_malloc(n*sizeof(char));
+ snprintf (key, n, "%s%s", resource, variable);
+
+ // check if key exists: if it doesn't, set the variable to zero and mark this in the dict
+ if (!xbt_dict_get_or_null(platform_variables, key)) {
container_t container = getContainerByName (resource);
type_t type = getVariableType (variable, NULL, container->type);
new_pajeSetVariable (now, container, type, 0);
- xbt_dict_set(platform_variables, resource, array, NULL);
- } else {
- xbt_dynar_t array = xbt_dict_get(platform_variables, resource);
- unsigned int i;
- char *cat;
- int flag = 0;
- xbt_dynar_foreach(array, i, cat) {
- if (strcmp(variable, cat) == 0) {
- flag = 1;
- }
- }
- if (flag == 0) {
- char *var_cpy = xbt_strdup(variable);
- xbt_dynar_push(array, &var_cpy);
- if (TRACE_categorized ()){
- container_t container = getContainerByName (resource);
- type_t type = getVariableType (variable, NULL, container->type);
- new_pajeSetVariable (now, container, type, 0);
- }
- }
+ xbt_dict_set(platform_variables, key, "", NULL);
}
- /* end of check */
+ xbt_free(key);
}
-
-
/*
static void __TRACE_A_event(smx_action_t action, double now, double delta,
const char *variable, const char *resource,
void TRACE_surf_resource_utilization_alloc()
{
- platform_variables = xbt_dict_new_homogeneous(xbt_dynar_free_voidp);
+ platform_variables = xbt_dict_new_homogeneous(NULL);
}
void TRACE_surf_resource_utilization_release()
{
+ xbt_dict_free(&platform_variables);
}
#endif /* HAVE_TRACING */
void TRACE_smpi_release(void)
{
+ xbt_dict_free(&keys);
+ xbt_dict_free(&process_category);
if (!TRACE_smpi_is_enabled()) return;
TRACE_surf_release();
-/* Copyright (c) 2004, 2005, 2006, 2007, 2008, 2009, 2010. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2004-2011. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
listening. This value has to be >=0 and < than the maximal
number of channels fixed with MSG_set_channel_number().
* \param host the host that is to be watched.
- * \return #MSG_FATAL if \a task is equal to \c NULL, #MSG_WARNING
- if \a *task is not equal to \c NULL, and #MSG_OK otherwise.
+ * \return a #MSG_error_t indicating whether the operation was successful (#MSG_OK), or why it failed otherwise.
*/
MSG_error_t
MSG_task_get_from_host(m_task_t * task, m_channel_t channel, m_host_t host)
* \param channel the channel on which the agent should be
listening. This value has to be >=0 and < than the maximal
number of channels fixed with MSG_set_channel_number().
- * \return #MSG_FATAL if \a task is equal to \c NULL, #MSG_WARNING
- * if \a *task is not equal to \c NULL, and #MSG_OK otherwise.
+ * \return a #MSG_error_t indicating whether the operation was successful (#MSG_OK), or why it failed otherwise.
*/
MSG_error_t MSG_task_get(m_task_t * task, m_channel_t channel)
{
up. In such a case, #MSG_TRANSFER_FAILURE will be returned, \a task
will not be modified and will still be
equal to \c NULL when returning.
- * \return #MSG_FATAL if \a task is equal to \c NULL, #MSG_WARNING
- if \a *task is not equal to \c NULL, and #MSG_OK otherwise.
+ * \return a #MSG_error_t indicating whether the operation was successful (#MSG_OK), or why it failed otherwise.
*/
MSG_error_t
MSG_task_get_with_timeout(m_task_t * task, m_channel_t channel,
comm->status = MSG_OK;
comm->s_comm =
SIMIX_req_comm_isend(mailbox, t_simdata->message_size,
- t_simdata->rate, task, sizeof(void *), match_fun, match_data, 0);
+ t_simdata->rate, task, sizeof(void *), match_fun, NULL, match_data, 0);
t_simdata->comm = comm->s_comm; /* FIXME: is the field t_simdata->comm still useful? */
return comm;
msg_global->sent_msg++;
/* Send it by calling SIMIX network layer */
- SIMIX_req_comm_isend(mailbox, t_simdata->message_size,
- t_simdata->rate, task, sizeof(void *), NULL, cleanup, 1);
+ smx_action_t comm = SIMIX_req_comm_isend(mailbox, t_simdata->message_size,
+ t_simdata->rate, task, sizeof(void *), NULL,cleanup, NULL, 1);
+ t_simdata->comm = comm;
}
/** \ingroup msg_gos_functions
if (*task)
XBT_CRITICAL
- ("MSG_task_get() was asked to write in a non empty task struct.");
+ ("MSG_task_irecv() was asked to write in a non empty task struct.");
/* Try to receive it by calling SIMIX network layer */
msg_comm_t comm = xbt_new0(s_msg_comm_t, 1);
* \param channel the channel on which the agent should put this
task. This value has to be >=0 and < than the maximal number of
channels fixed with MSG_set_channel_number().
- * \return #MSG_FATAL if \a task is not properly initialized and
- * #MSG_OK otherwise. Returns #MSG_HOST_FAILURE if the host on which
- * this function was called was shut down. Returns
+ * \return #MSG_HOST_FAILURE if the host on which
+ * this function was called was shut down,
* #MSG_TRANSFER_FAILURE if the transfer could not be properly done
- * (network failure, dest failure)
+ * (network failure, dest failure) or #MSG_OK if it succeeded.
*/
MSG_error_t MSG_task_put(m_task_t task, m_host_t dest, m_channel_t channel)
{
* \param timeout the maximum time to wait for a task before giving
up. In such a case, #MSG_TRANSFER_FAILURE will be returned, \a task
will not be modified
- * \return #MSG_FATAL if \a task is not properly initialized and
-#MSG_OK otherwise. Returns #MSG_HOST_FAILURE if the host on which
-this function was called was shut down. Returns
+ * \return #MSG_HOST_FAILURE if the host on which
+this function was called was shut down,
#MSG_TRANSFER_FAILURE if the transfer could not be properly done
-(network failure, dest failure, timeout...)
+(network failure, dest failure, timeout...) or #MSG_OK if the communication succeeded.
*/
MSG_error_t
MSG_task_put_with_timeout(m_task_t task, m_host_t dest,
/* Try to send it by calling SIMIX network layer */
TRY {
-#ifdef HAVE_TRACING
- if (TRACE_is_enabled()) {
smx_action_t comm = SIMIX_req_comm_isend(mailbox, t_simdata->message_size,
t_simdata->rate, task, sizeof(void *),
- NULL, NULL, 0);
- SIMIX_req_set_category(comm, task->category);
- SIMIX_req_comm_wait(comm, timeout);
- } else {
-#endif
- SIMIX_req_comm_send(mailbox, t_simdata->message_size,
- t_simdata->rate, task, sizeof(void*),
- NULL, NULL, timeout);
+ NULL, NULL, NULL, 0);
#ifdef HAVE_TRACING
+ if (TRACE_is_enabled()) {
+ SIMIX_req_set_category(comm, task->category);
}
#endif
+ t_simdata->comm = comm;
+ SIMIX_req_comm_wait(comm, timeout);
}
CATCH(e) {
t_simdata->isused = 0;
}
+
p_simdata->waiting_task = NULL;
#ifdef HAVE_TRACING
if (call_end)
*/
/******************************** Process ************************************/
+
+/**
+ * \brief Cleans the MSG data of a process.
+ * \param smx_proc a SIMIX process
+ */
void MSG_process_cleanup_from_SIMIX(smx_process_t smx_proc)
{
simdata_process_t msg_proc;
if (smx_proc == SIMIX_process_self()) {
/* avoid a SIMIX request if this function is called by the process itself */
msg_proc = SIMIX_process_self_get_data(smx_proc);
+ SIMIX_process_self_set_data(smx_proc, NULL);
}
else {
msg_proc = SIMIX_req_process_get_data(smx_proc);
+ SIMIX_req_process_set_data(smx_proc, NULL);
}
#ifdef HAVE_TRACING
smx_rdv_t SIMIX_rdv_create(const char *name);
void SIMIX_rdv_destroy(smx_rdv_t rdv);
smx_rdv_t SIMIX_rdv_get_by_name(const char *name);
+XBT_INLINE void SIMIX_rdv_remove(smx_rdv_t rdv, smx_action_t comm);
int SIMIX_rdv_comm_count_by_host(smx_rdv_t rdv, smx_host_t host);
smx_action_t SIMIX_rdv_get_head(smx_rdv_t rdv);
XBT_INLINE void SIMIX_comm_start(smx_action_t action);
smx_action_t SIMIX_comm_isend(smx_process_t src_proc, smx_rdv_t rdv,
double task_size, double rate,
void *src_buff, size_t src_buff_size,
- int (*)(void *, void *), void *data,
+ int (*)(void *, void *),
+ void (*clean_fun)(void *), // used to free the action in case of problem after a detached send
+ void *data,
int detached);
void SIMIX_comm_recv(smx_process_t dst_proc, smx_rdv_t rdv,
void *dst_buff, size_t *dst_buff_size,
int refcount; /* Number of processes involved in the cond */
int detached; /* If detached or not */
+ void (*clean_fun)(void*); /* Function to clean the detached src_buf if something goes wrong */
+
/* Surf action data */
surf_action_t surf_comm; /* The Surf communication action encapsulated */
surf_action_t src_timeout; /* Surf's actions to instrument the timeouts */
void *src_buff;
size_t src_buff_size;
int (*match_fun)(void *, void *);
+ void (*clean_fun)(void *);
void *data;
int detached;
smx_action_t result;
/* a fast and simple context switching library */
-/* Copyright (c) 2009, 2010. The SimGrid Team.
+/* Copyright (c) 2009 - 2011. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
static smx_context_t smx_current_context_serial;
static int smx_parallel_contexts = 1;
static int smx_parallel_threshold = 2;
-static e_xbt_parmap_mode_t smx_parallel_synchronization_mode = XBT_PARMAP_FUTEX;
/**
* This function is called by SIMIX_global_init() to initialize the context module.
if (nb_threads > 1) {
#ifndef CONTEXT_THREADS
- THROWF(arg_error, 0, "No thread support for parallel context execution");
+ THROWF(arg_error, 0, "The thread factory cannot be run in parallel");
#endif
}
* \return how threads are synchronized if processes are run in parallel
*/
XBT_INLINE e_xbt_parmap_mode_t SIMIX_context_get_parallel_mode(void) {
- return smx_parallel_synchronization_mode;
+ e_xbt_parmap_mode_t mode = XBT_PARMAP_FUTEX;
+ const char* mode_name = xbt_cfg_get_string(_surf_cfg_set, "contexts/synchro");
+ if (!strcmp(mode_name, "posix")) {
+ mode = XBT_PARMAP_POSIX;
+ }
+ else if (!strcmp(mode_name, "futex")) {
+ mode = XBT_PARMAP_FUTEX;
+ }
+ else if (!strcmp(mode_name, "busy_wait")) {
+ mode = XBT_PARMAP_BUSY_WAIT;
+ }
+ else {
+ XBT_WARN("Command line setting of the parallel synchronization mode should "
+ "be one of \"posix\", \"futex\" or \"busy_wait\"");
+ }
+ return mode;
}
/**
* \param mode how to synchronize threads if processes are run in parallel
*/
XBT_INLINE void SIMIX_context_set_parallel_mode(e_xbt_parmap_mode_t mode) {
- smx_parallel_synchronization_mode = mode;
+ if (mode == XBT_PARMAP_POSIX) {
+ xbt_cfg_set_string(_surf_cfg_set, "contexts/synchro", "posix");
+ }
+ else if (mode == XBT_PARMAP_FUTEX) {
+ xbt_cfg_set_string(_surf_cfg_set, "contexts/synchro", "futex");
+ }
+ else if (mode == XBT_PARMAP_BUSY_WAIT) {
+ xbt_cfg_set_string(_surf_cfg_set, "contexts/synchro", "busy_wait");
+ }
+ else {
+ XBT_WARN("Command line setting of the parallel synchronization mode should "
+ "be one of \"posix\", \"futex\" or \"busy_wait\"");
+ }
}
/**
context->cleanup_func(context->data);
context->iwannadie = 0;
SIMIX_req_process_cleanup(context->data);
+ context->iwannadie = 1;
}
smx_context_t smx_ctx_base_self(void)
void_pfn_smxprocess_t cleanup_func,
void *data);
static void smx_ctx_sysv_free(smx_context_t context);
-static smx_context_t smx_ctx_sysv_self_parallel(void);
static smx_context_t
smx_ctx_sysv_create_context(xbt_main_func_t code, int argc, char **argv,
void_pfn_smxprocess_t cleanup_func, void* data);
static double start_time = 0.0;
static double kill_time = -1.0;
+extern int surf_parse_lineno;
+
static void parse_process_init(void)
{
smx_host_t host = SIMIX_host_get_by_name(A_surfxml_process_host);
*/
void SIMIX_launch_application(const char *file)
{
+ xbt_ex_t e;
+
_XBT_GNUC_UNUSED int parse_status;
xbt_assert(simix_global,
"SIMIX_global_init has to be called before SIMIX_launch_application.");
parse_process_finalize);
surf_parse_open(file);
- parse_status = surf_parse();
- surf_parse_close();
- xbt_assert(!parse_status, "Parse error in %s", file);
+ TRY {
+ parse_status = surf_parse();
+ surf_parse_close();
+ xbt_assert(!parse_status, "Parse error in %s", file);
+ } CATCH(e) {
+ xbt_die("Unrecoverable error at %s:%d: %s", file,surf_parse_lineno,
+ __xbt_running_ctx_fetch()->exception.msg); //FIXME: that pimple is due to the fact that e.msg does not seem to be set on CATCH(e). The pimple should be removed when the bug is gone.
+ }
}
/**
SIMIX_display_process_status();
}
else {
- XBT_INFO("CTRL-C pressed. bailing out without displaying because verbose-exit disabled");
+ XBT_INFO("CTRL-C pressed. bailing out without displaying because verbose-exit is disabled");
}
exit(1);
}
SIMIX_network_exit();
xbt_heap_free(simix_timers);
+ simix_timers = NULL;
/* Free the remaining data structures */
xbt_dynar_free(&simix_global->process_to_run);
xbt_dynar_free(&simix_global->process_that_ran);
SIMIX_process_runall();
xbt_dynar_foreach(simix_global->process_that_ran, iter, process) {
if (process->request.call != REQ_NO_REQ) {
- XBT_DEBUG("Handling request %p", &process->request);
SIMIX_request_pre(&process->request, 0);
}
}
XBT_INFO("%d processes are still running, waiting for something.", nbprocess);
/* List the process and their state */
XBT_INFO
- ("Legend of the following listing: \"<process>(<pid>) on <host>: <status>.\"");
+ ("Legend of the following listing: \"Process <pid> (<name>@<host>): <status>\"");
xbt_swag_foreach(process, simix_global->process_list) {
if (process->waiting_action) {
static void SIMIX_comm_copy_data(smx_action_t comm);
static smx_action_t SIMIX_comm_new(e_smx_comm_type_t type);
static XBT_INLINE void SIMIX_rdv_push(smx_rdv_t rdv, smx_action_t comm);
-static XBT_INLINE void SIMIX_rdv_remove(smx_rdv_t rdv, smx_action_t comm);
static smx_action_t SIMIX_rdv_get_request(smx_rdv_t rdv, e_smx_comm_type_t type,
int (*match_fun)(void *, void *), void *);
static void SIMIX_rdv_free(void *data);
* \param rdv The rendez-vous point
* \param comm The communication request
*/
-static XBT_INLINE void SIMIX_rdv_remove(smx_rdv_t rdv, smx_action_t comm)
+XBT_INLINE void SIMIX_rdv_remove(smx_rdv_t rdv, smx_action_t comm)
{
xbt_fifo_remove(rdv->comm_fifo, comm);
comm->comm.rdv = NULL;
XBT_DEBUG("Destroy action %p (refcount: %d), state: %d",
action, action->comm.refcount, action->state);
- xbt_assert(action->comm.refcount > 0,
- "The refcount of comm %p is already 0 before decreasing it. "
- "That's a bug!", action);
-
+ if (action->comm.refcount <= 0) {
+ xbt_backtrace_display_current();
+ xbt_die("the refcount of comm %p is already 0 before decreasing it. "
+ "That's a bug!", action);
+ }
action->comm.refcount--;
if (action->comm.refcount > 0)
return;
if (action->comm.detached && action->state != SIMIX_DONE) {
/* the communication has failed and was detached:
* we have to free the buffer */
- ((void_f_pvoid_t) action->comm.src_data)(action->comm.src_buff);
+ action->comm.clean_fun(action->comm.src_buff);
+ action->comm.src_buff = NULL;
}
xbt_mallocator_release(simix_global->action_mallocator, action);
smx_action_t SIMIX_comm_isend(smx_process_t src_proc, smx_rdv_t rdv,
double task_size, double rate,
void *src_buff, size_t src_buff_size,
- int (*match_fun)(void *, void *), void *data,
+ int (*match_fun)(void *, void *),
+ void (*clean_fun)(void *), // used to free the action in case of problem after a detached send
+ void *data,
int detached)
{
smx_action_t action;
if (detached) {
action->comm.detached = 1;
action->comm.refcount--;
+ action->comm.clean_fun = clean_fun;
+ } else {
+ action->comm.clean_fun = NULL;
}
/* Setup the communication request */
}
SIMIX_comm_start(action);
- return action;
+ return (detached ? NULL : action);
}
smx_action_t SIMIX_comm_irecv(smx_process_t dst_proc, smx_rdv_t rdv,
void SIMIX_pre_comm_wait(smx_req_t req, smx_action_t action, double timeout, int idx)
{
+
/* the request may be a wait, a send or a recv */
surf_action_t sleep;
}
xbt_dynar_foreach(actions, cursor, action){
- /* Associate this request to the action */
+ /* associate this request to the action */
xbt_fifo_push(action->request_list, req);
+
+ /* see if the action is already finished */
if (action->state != SIMIX_WAITING && action->state != SIMIX_RUNNING){
SIMIX_comm_finish(action);
break;
case SIMIX_LINK_FAILURE:
TRY {
- XBT_DEBUG("Link failure in action %p between '%s' and '%s': posting an exception to the issuer: %s (%p)",
- action,
- action->comm.src_proc ? action->comm.src_proc->smx_host->name : NULL,
- action->comm.dst_proc ? action->comm.dst_proc->smx_host->name : NULL,
- req->issuer->name, req->issuer);
+ XBT_DEBUG("Link failure in action %p between '%s' and '%s': posting an exception to the issuer: %s (%p) detached:%d",
+ action,
+ action->comm.src_proc ? action->comm.src_proc->smx_host->name : NULL,
+ action->comm.dst_proc ? action->comm.dst_proc->smx_host->name : NULL,
+ req->issuer->name, req->issuer, action->comm.detached);
+ if (action->comm.src_proc == req->issuer) {
+ XBT_DEBUG("I'm source");
+ } else if (action->comm.dst_proc == req->issuer) {
+ XBT_DEBUG("I'm dest");
+ } else {
+ XBT_DEBUG("I'm neither source nor dest");
+ }
THROWF(network_error, 0, "Link failure");
}
CATCH(req->issuer->running_ctx->exception) {
surf_workstation_model->action_state_get(action->comm.dst_timeout) == SURF_ACTION_FAILED)
action->state = SIMIX_DST_HOST_FAILURE;
else if (action->comm.surf_comm &&
- surf_workstation_model->action_state_get(action->comm.surf_comm) == SURF_ACTION_FAILED)
+ surf_workstation_model->action_state_get(action->comm.surf_comm) == SURF_ACTION_FAILED) {
+ XBT_DEBUG("Surf says that the link broke");
action->state = SIMIX_LINK_FAILURE;
- else
+ } else
action->state = SIMIX_DONE;
XBT_DEBUG("SIMIX_post_comm: comm %p, state %d, src_proc %p, dst_proc %p, detached: %d",
void SIMIX_comm_copy_buffer_callback(smx_action_t comm, size_t buff_size)
{
+ XBT_DEBUG("Copy the data over");
memcpy(comm->comm.dst_buff, comm->comm.src_buff, buff_size);
}
+void smpi_comm_copy_data_callback(smx_action_t comm, size_t buff_size)
+{
+ XBT_DEBUG("Copy the data over");
+ memcpy(comm->comm.dst_buff, comm->comm.src_buff, buff_size);
+ if (comm->comm.detached) { // if this is a detached send, the source buffer was duplicated by SMPI sender to make the original buffer available to the application ASAP
+ xbt_free(comm->comm.src_buff);
+ comm->comm.src_buff = NULL;
+ }
+}
+
/**
* \brief Copy the communication data from the sender's buffer to the receiver's one
* \param comm The communication
XBT_DEBUG("Copying comm %p data from %s (%p) -> %s (%p) (%zu bytes)",
comm,
- comm->comm.src_proc->smx_host->name, comm->comm.src_buff,
- comm->comm.dst_proc->smx_host->name, comm->comm.dst_buff, buff_size);
+ comm->comm.src_proc ? comm->comm.src_proc->smx_host->name : "a finished process",
+ comm->comm.src_buff,
+ comm->comm.dst_proc ? comm->comm.dst_proc->smx_host->name : "a finished process",
+ comm->comm.dst_buff, buff_size);
/* Copy at most dst_buff_size bytes of the message to receiver's buffer */
if (comm->comm.dst_buff_size)
if (comm->comm.dst_buff_size)
*comm->comm.dst_buff_size = buff_size;
- if (buff_size == 0)
- return;
-
- SIMIX_comm_copy_data_callback(comm, buff_size);
+ if (buff_size > 0)
+ SIMIX_comm_copy_data_callback (comm, buff_size);
/* Set the copied flag so we copy data only once */
/* (this function might be called from both communication ends) */
}
/**
- * \brief Move a process to the list of processes to destroy.
+ * \brief Returns whether a process has pending asynchronous communications.
+ * \return true if there are asynchronous communications in this process
+ */
+int SIMIX_process_has_pending_comms(smx_process_t process) {
+
+ return xbt_fifo_size(process->comms) > 0;
+}
+
+/**
+ * \brief Moves a process to the list of processes to destroy.
*/
void SIMIX_process_cleanup(smx_process_t process)
{
smx_action_t action;
while ((action = xbt_fifo_pop(process->comms))) {
- /* make sure no one will finish the comm after this process is destroyed */
+ /* make sure no one will finish the comm after this process is destroyed,
+ * because src_proc or dst_proc would be an invalid pointer */
SIMIX_comm_cancel(action);
if (action->comm.src_proc == process) {
if (action->comm.detached) {
if (action->comm.refcount == 0) {
+ XBT_DEBUG("Increase the refcount before destroying it since it's detached");
/* I'm not supposed to destroy a detached comm from the sender side,
* unless there is no receiver matching the rdv */
action->comm.refcount++;
SIMIX_comm_destroy(action);
}
- }
- else {
+ else {
+ XBT_DEBUG("Don't destroy it since its refcount is %d", action->comm.refcount);
+ }
+ } else {
SIMIX_comm_destroy(action);
}
}
break;
case SIMIX_ACTION_COMMUNICATE:
+ xbt_fifo_remove(process->comms, process->waiting_action);
SIMIX_comm_destroy(process->waiting_action);
break;
void SIMIX_process_set_context(smx_process_t p,smx_context_t c) {
p->context = c;
}
+
+/**
+ * \brief Returns the list of processes to run.
+ */
+XBT_INLINE xbt_dynar_t SIMIX_process_get_runnable(void)
+{
+ return simix_global->process_to_run;
+}
void SIMIX_request_pre(smx_req_t req, int value)
{
+ XBT_DEBUG("Handling request %p: %s", req, SIMIX_request_name(req->call));
switch (req->call) {
case REQ_COMM_TEST:
req->comm_send.src_buff,
req->comm_send.src_buff_size,
req->comm_send.match_fun,
+ NULL, /* no clean function since it's not detached */
req->comm_send.data,
0);
SIMIX_pre_comm_wait(req, comm, req->comm_send.timeout, 0);
req->comm_isend.src_buff,
req->comm_isend.src_buff_size,
req->comm_isend.match_fun,
+ req->comm_isend.clean_fun,
req->comm_isend.data,
req->comm_isend.detached);
SIMIX_request_answer(req);
if (MC_IS_ENABLED) {
/* the model-checker wants two separate requests */
smx_action_t comm = SIMIX_req_comm_isend(rdv, task_size, rate,
- src_buff, src_buff_size, match_fun, data, 0);
+ src_buff, src_buff_size, match_fun, NULL, data, 0);
SIMIX_req_comm_wait(comm, timeout);
}
else {
smx_action_t SIMIX_req_comm_isend(smx_rdv_t rdv, double task_size, double rate,
void *src_buff, size_t src_buff_size,
- int (*match_fun)(void *, void *), void *data,
+ int (*match_fun)(void *, void *),
+ void (*clean_fun)(void *),
+ void *data,
int detached)
{
/* checking for infinite values */
req->comm_isend.src_buff = src_buff;
req->comm_isend.src_buff_size = src_buff_size;
req->comm_isend.match_fun = match_fun;
+ req->comm_isend.clean_fun = clean_fun;
req->comm_isend.data = data;
req->comm_isend.detached = detached;
void smpi_process_init(int *argc, char ***argv);
void smpi_process_destroy(void);
+void smpi_process_finalize(void);
smpi_process_data_t smpi_process_data(void);
smpi_process_data_t smpi_process_remote_data(int index);
xbt_assert(ref, "Cannot match recv against null reference");
xbt_assert(req, "Cannot match recv against null request");
- return req->comm == ref->comm
- && (ref->src == MPI_ANY_SOURCE || req->src == ref->src)
+ return (ref->src == MPI_ANY_SOURCE || req->src == ref->src)
&& (ref->tag == MPI_ANY_TAG || req->tag == ref->tag);
}
xbt_assert(ref, "Cannot match send against null reference");
xbt_assert(req, "Cannot match send against null request");
- return req->comm == ref->comm
- && (req->src == MPI_ANY_SOURCE || req->src == ref->src)
+ return (req->src == MPI_ANY_SOURCE || req->src == ref->src)
&& (req->tag == MPI_ANY_TAG || req->tag == ref->tag);
}
request = xbt_new(s_smpi_mpi_request_t, 1);
request->buf = buf;
+ // FIXME: this will have to be changed to support non-contiguous datatypes
request->size = smpi_datatype_size(datatype) * count;
request->src = src;
request->dst = dst;
void smpi_mpi_start(MPI_Request request)
{
smx_rdv_t mailbox;
+ int detached = 0;
xbt_assert(!request->action,
"Cannot (re)start a non-finished communication");
if(request->flags & RECV) {
print_request("New recv", request);
mailbox = smpi_process_mailbox();
+ // FIXME: SIMIX does not yet support non-contiguous datatypes
request->action = SIMIX_req_comm_irecv(mailbox, request->buf, &request->size, &match_recv, request);
} else {
print_request("New send", request);
- mailbox = smpi_process_remote_mailbox(request->dst);
- request->action = SIMIX_req_comm_isend(mailbox, request->size, -1.0,
- request->buf, request->size, &match_send, request, 0);
+ mailbox = smpi_process_remote_mailbox(
+ smpi_group_index(smpi_comm_group(request->comm), request->dst));
+ // FIXME: SIMIX does not yet support non-contiguous datatypes
+
+ if (request->size < 64*1024 ) { // eager mode => detached send (FIXME: this limit should be configurable)
+ void *oldbuf = request->buf;
+ detached = 1;
+ request->buf = malloc(request->size);
+ memcpy(request->buf,oldbuf,request->size);
+ XBT_DEBUG("Send request %p is detached; buf %p copied into %p",request,oldbuf,request->buf);
+ } else {
+ XBT_DEBUG("Send request %p is not detached (buf: %p)",request,request->buf);
+ }
+ request->action =
+ SIMIX_req_comm_isend(mailbox, request->size, -1.0,
+ request->buf, request->size,
+ &match_send,
+ (void (*)(void *))&smpi_mpi_request_free, // how to free the userdata if a detached send fails
+ request,
+ // detach if msg size < eager/rdv switch limit
+ detached);
+
#ifdef HAVE_TRACING
SIMIX_req_set_category (request->action, TRACE_internal_smpi_get_category());
#endif
void smpi_mpi_startall(int count, MPI_Request * requests)
{
- int i;
+ int i;
for(i = 0; i < count; i++) {
smpi_mpi_start(requests[i]);
status->MPI_SOURCE = req->src;
status->MPI_TAG = req->tag;
status->MPI_ERROR = MPI_SUCCESS;
+ // FIXME: really this should just contain the count of receive-type blocks,
+ // right?
status->count = req->size;
}
print_request("Finishing", req);
}
}
-int smpi_mpi_test(MPI_Request * request, MPI_Status * status)
-{
- int flag = SIMIX_req_comm_test((*request)->action);
+int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
+int flag;
+ if ((*request)->action == NULL)
+ flag = 1;
+ else
+ flag = SIMIX_req_comm_test((*request)->action);
if(flag) {
- smpi_mpi_wait(request, status);
- }
- return flag;
+ smpi_mpi_wait(request, status);
+ }
+ return flag;
}
int smpi_mpi_testany(int count, MPI_Request requests[], int *index,
}
}
if(size > 0) {
- *index = SIMIX_req_comm_testany(comms);
- *index = map[*index];
- if(*index != MPI_UNDEFINED) {
+ i = SIMIX_req_comm_testany(comms);
+ // FIXME: MPI_UNDEFINED or does SIMIX have a return code?
+ if(i != MPI_UNDEFINED) {
+ *index = map[i];
smpi_mpi_wait(&requests[*index], status);
flag = 1;
}
void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
{
print_request("Waiting", *request);
- SIMIX_req_comm_wait((*request)->action, -1.0);
- finish_wait(request, status);
+ if ((*request)->action != NULL) { // this is not a detached send
+ SIMIX_req_comm_wait((*request)->action, -1.0);
+ finish_wait(request, status);
+ }
+ // FIXME for a detached send, finish_wait is not called:
}
int smpi_mpi_waitany(int count, MPI_Request requests[],
size = 0;
XBT_DEBUG("Wait for one of");
for(i = 0; i < count; i++) {
- if(requests[i] != MPI_REQUEST_NULL) {
+ if((requests[i] != MPI_REQUEST_NULL) && (requests[i]->action != NULL)) {
print_request(" ", requests[i]);
xbt_dynar_push(comms, &requests[i]->action);
map[size] = i;
}
}
if(size > 0) {
- index = SIMIX_req_comm_waitany(comms);
- index = map[index];
- finish_wait(&requests[index], status);
+ i = SIMIX_req_comm_waitany(comms);
+ // FIXME: MPI_UNDEFINED or does SIMIX have a return code?
+ if (i != MPI_UNDEFINED) {
+ index = map[i];
+ finish_wait(&requests[index], status);
+ }
}
xbt_free(map);
xbt_dynar_free(&comms);
int root, MPI_Comm comm)
{
int system_tag = 666;
- int rank, size, src, index, sendsize, recvsize;
+ int rank, size, src, index;
+ MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
rank = smpi_comm_rank(comm);
// Send buffer to root
smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
} else {
- sendsize = smpi_datatype_size(sendtype);
- recvsize = smpi_datatype_size(recvtype);
+ // FIXME: check for errors
+ smpi_datatype_extent(recvtype, &lb, &recvext);
// Local copy from root
- memcpy(&((char *) recvbuf)[root * recvcount * recvsize], sendbuf,
- sendcount * sendsize * sizeof(char));
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ (char *)recvbuf + root * recvcount * recvext, recvcount, recvtype);
// Receive buffers from senders
requests = xbt_new(MPI_Request, size - 1);
index = 0;
for(src = 0; src < size; src++) {
if(src != root) {
- requests[index] = smpi_irecv_init(&((char *) recvbuf)
- [src * recvcount * recvsize],
- recvcount, recvtype, src,
- system_tag, comm);
+ requests[index] = smpi_irecv_init((char *)recvbuf + src * recvcount * recvext,
+ recvcount, recvtype,
+ src, system_tag, comm);
index++;
}
}
MPI_Datatype recvtype, int root, MPI_Comm comm)
{
int system_tag = 666;
- int rank, size, src, index, sendsize;
+ int rank, size, src, index;
+ MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
rank = smpi_comm_rank(comm);
// Send buffer to root
smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
} else {
- sendsize = smpi_datatype_size(sendtype);
+ // FIXME: check for errors
+ smpi_datatype_extent(recvtype, &lb, &recvext);
// Local copy from root
- memcpy(&((char *) recvbuf)[displs[root]], sendbuf,
- sendcount * sendsize * sizeof(char));
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ (char *)recvbuf + displs[root] * recvext,
+ recvcounts[root], recvtype);
// Receive buffers from senders
requests = xbt_new(MPI_Request, size - 1);
index = 0;
for(src = 0; src < size; src++) {
if(src != root) {
requests[index] =
- smpi_irecv_init(&((char *) recvbuf)[displs[src]],
- recvcounts[src], recvtype, src, system_tag,
- comm);
+ smpi_irecv_init((char *)recvbuf + displs[src] * recvext,
+ recvcounts[src], recvtype, src, system_tag, comm);
index++;
}
}
MPI_Comm comm)
{
int system_tag = 666;
- int rank, size, other, index, sendsize, recvsize;
+ int rank, size, other, index;
+ MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
- sendsize = smpi_datatype_size(sendtype);
- recvsize = smpi_datatype_size(recvtype);
+ // FIXME: check for errors
+ smpi_datatype_extent(recvtype, &lb, &recvext);
// Local copy from self
- memcpy(&((char *) recvbuf)[rank * recvcount * recvsize], sendbuf,
- sendcount * sendsize * sizeof(char));
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ (char *)recvbuf + rank * recvcount * recvext, recvcount,
+ recvtype);
// Send/Recv buffers to/from others;
requests = xbt_new(MPI_Request, 2 * (size - 1));
index = 0;
smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,
comm);
index++;
- requests[index] = smpi_irecv_init(&((char *) recvbuf)
- [other * recvcount * recvsize],
- recvcount, recvtype, other,
+ requests[index] = smpi_irecv_init((char *)recvbuf + other * recvcount * recvext,
+ recvcount, recvtype, other,
system_tag, comm);
index++;
}
MPI_Datatype recvtype, MPI_Comm comm)
{
int system_tag = 666;
- int rank, size, other, index, sendsize;
+ int rank, size, other, index;
+ MPI_Aint lb = 0, recvext = 0;
MPI_Request *requests;
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
- sendsize = smpi_datatype_size(sendtype);
+ // FIXME: check for errors
+ smpi_datatype_extent(recvtype, &lb, &recvext);
// Local copy from self
- memcpy(&((char *) recvbuf)[displs[rank]], sendbuf,
- sendcount * sendsize * sizeof(char));
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ (char *)recvbuf + displs[rank] * recvext,
+ recvcounts[rank], recvtype);
// Send buffers to others;
requests = xbt_new(MPI_Request, 2 * (size - 1));
index = 0;
comm);
index++;
requests[index] =
- smpi_irecv_init(&((char *) recvbuf)[displs[other]],
- recvcounts[other], recvtype, other, system_tag,
- comm);
+ smpi_irecv_init((char *)recvbuf + displs[other] * recvext, recvcounts[other],
+ recvtype, other, system_tag, comm);
index++;
}
}
int root, MPI_Comm comm)
{
int system_tag = 666;
- int rank, size, dst, index, sendsize, recvsize;
+ int rank, size, dst, index;
+ MPI_Aint lb = 0, sendext = 0;
MPI_Request *requests;
rank = smpi_comm_rank(comm);
smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
MPI_STATUS_IGNORE);
} else {
- sendsize = smpi_datatype_size(sendtype);
- recvsize = smpi_datatype_size(recvtype);
+ // FIXME: check for errors
+ smpi_datatype_extent(sendtype, &lb, &sendext);
// Local copy from root
- memcpy(recvbuf, &((char *) sendbuf)[root * sendcount * sendsize],
- recvcount * recvsize * sizeof(char));
+ smpi_datatype_copy((char *)sendbuf + root * sendcount * sendext,
+ sendcount, sendtype, recvbuf, recvcount, recvtype);
// Send buffers to receivers
requests = xbt_new(MPI_Request, size - 1);
index = 0;
for(dst = 0; dst < size; dst++) {
if(dst != root) {
- requests[index] = smpi_isend_init(&((char *) sendbuf)
- [dst * sendcount * sendsize],
+ requests[index] = smpi_isend_init((char *)sendbuf + dst * sendcount * sendext,
sendcount, sendtype, dst,
system_tag, comm);
index++;
MPI_Datatype recvtype, int root, MPI_Comm comm)
{
int system_tag = 666;
- int rank, size, dst, index, recvsize;
+ int rank, size, dst, index;
+ MPI_Aint lb = 0, sendext = 0;
MPI_Request *requests;
rank = smpi_comm_rank(comm);
smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
MPI_STATUS_IGNORE);
} else {
- recvsize = smpi_datatype_size(recvtype);
+ // FIXME: check for errors
+ smpi_datatype_extent(sendtype, &lb, &sendext);
// Local copy from root
- memcpy(recvbuf, &((char *) sendbuf)[displs[root]],
- recvcount * recvsize * sizeof(char));
+ smpi_datatype_copy((char *)sendbuf + displs[root] * sendext, sendcounts[root],
+ sendtype, recvbuf, recvcount, recvtype);
// Send buffers to receivers
requests = xbt_new(MPI_Request, size - 1);
index = 0;
for(dst = 0; dst < size; dst++) {
if(dst != root) {
requests[index] =
- smpi_isend_init(&((char *) sendbuf)[displs[dst]],
- sendcounts[dst], sendtype, dst, system_tag,
- comm);
+ smpi_isend_init((char *)sendbuf + displs[dst] * sendext, sendcounts[dst],
+ sendtype, dst, system_tag, comm);
index++;
}
}
MPI_Comm comm)
{
int system_tag = 666;
- int rank, size, src, index, datasize;
+ int rank, size, src, index;
+ MPI_Aint lb = 0, dataext = 0;
MPI_Request *requests;
void **tmpbufs;
// Send buffer to root
smpi_mpi_send(sendbuf, count, datatype, root, system_tag, comm);
} else {
- datasize = smpi_datatype_size(datatype);
+ // FIXME: check for errors
+ smpi_datatype_extent(datatype, &lb, &dataext);
// Local copy from root
- memcpy(recvbuf, sendbuf, count * datasize * sizeof(char));
+ smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);
// Receive buffers from senders
//TODO: make a MPI_barrier here ?
requests = xbt_new(MPI_Request, size - 1);
index = 0;
for(src = 0; src < size; src++) {
if(src != root) {
- tmpbufs[index] = xbt_malloc(count * datasize);
+ // FIXME: possibly overkill when we have contiguous/noncontiguous data
+ // mapping...
+ tmpbufs[index] = xbt_malloc(count * dataext);
requests[index] =
smpi_irecv_init(tmpbufs[index], count, datatype, src,
system_tag, comm);
{
smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
smpi_mpi_bcast(recvbuf, count, datatype, 0, comm);
-
-/*
-FIXME: buggy implementation
-
- int system_tag = 666;
- int rank, size, other, index, datasize;
- MPI_Request* requests;
- void** tmpbufs;
-
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
- datasize = smpi_datatype_size(datatype);
- // Local copy from self
- memcpy(recvbuf, sendbuf, count * datasize * sizeof(char));
- // Send/Recv buffers to/from others;
- //TODO: make a MPI_barrier here ?
- requests = xbt_new(MPI_Request, 2 * (size - 1));
- tmpbufs = xbt_new(void*, size - 1);
- index = 0;
- for(other = 0; other < size; other++) {
- if(other != rank) {
- tmpbufs[index / 2] = xbt_malloc(count * datasize);
- requests[index] = smpi_mpi_isend(sendbuf, count, datatype, other, system_tag, comm);
- requests[index + 1] = smpi_mpi_irecv(tmpbufs[index / 2], count, datatype, other, system_tag, comm);
- index += 2;
- }
- }
- // Wait for completion of all comms.
- for(other = 0; other < 2 * (size - 1); other++) {
- index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
- if(index == MPI_UNDEFINED) {
- break;
- }
- if((index & 1) == 1) {
- // Request is odd: it's a irecv
- smpi_op_apply(op, tmpbufs[index / 2], recvbuf, &count, &datatype);
- }
- }
- for(index = 0; index < size - 1; index++) {
- xbt_free(tmpbufs[index]);
- }
- xbt_free(tmpbufs);
- xbt_free(requests);
-*/
}
void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
int system_tag = 666;
- int rank, size, other, index, datasize;
- int total;
+ int rank, size, other, index;
+ MPI_Aint lb = 0, dataext = 0;
MPI_Request *requests;
void **tmpbufs;
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
- datasize = smpi_datatype_size(datatype);
+
+ // FIXME: check for errors
+ smpi_datatype_extent(datatype, &lb, &dataext);
+
// Local copy from self
- memcpy(recvbuf, sendbuf, count * datasize * sizeof(char));
+ smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);
+
// Send/Recv buffers to/from others;
- total = rank + (size - (rank + 1));
- requests = xbt_new(MPI_Request, total);
+ requests = xbt_new(MPI_Request, size - 1);
tmpbufs = xbt_new(void *, rank);
index = 0;
for(other = 0; other < rank; other++) {
- tmpbufs[index] = xbt_malloc(count * datasize);
+ // FIXME: possibly overkill when we have contiguous/noncontiguous data
+ // mapping...
+ tmpbufs[index] = xbt_malloc(count * dataext);
requests[index] =
smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,
comm);
}
// Wait for completion of all comms.
smpi_mpi_startall(size - 1, requests);
- for(other = 0; other < total; other++) {
+ for(other = 0; other < size - 1; other++) {
index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
if(index == MPI_UNDEFINED) {
break;
smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
}
}
- for(index = 0; index < size - 1; index++) {
+ for(index = 0; index < rank; index++) {
xbt_free(tmpbufs[index]);
}
xbt_free(tmpbufs);
#include "xbt/ex.h"
#include "surf/surf.h"
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdio.h>
+
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_bench, smpi,
"Logging specific to SMPI (benchmarking)");
-xbt_dict_t allocs = NULL; /* Allocated on first use */
-xbt_dict_t samples = NULL; /* Allocated on first use */
-xbt_dict_t calls = NULL; /* Allocated on first use */
+/* Shared allocations are handled through shared memory segments.
+ * Associated data and metadata are used as follows:
+ *
+ * mmap #1
+ * `allocs' dict ---- -.
+ * ---------- shared_data_t shared_metadata_t / | | |
+ * .->| <name> | ---> -------------------- <--. ----------------- | | | |
+ * | ---------- | fd of <name> | | | size of mmap | --| | | |
+ * | | count (2) | |-- | data | \ | | |
+ * `----------------- | <name> | | ----------------- ---- |
+ * -------------------- | ^ |
+ * | | |
+ * | | `allocs_metadata' dict |
+ * | | ---------------------- |
+ * | `-- | <addr of mmap #1> |<-'
+ * | .-- | <addr of mmap #2> |<-.
+ * | | ---------------------- |
+ * | | |
+ * | | |
+ * | | |
+ * | | mmap #2 |
+ * | v ---- -'
+ * | shared_metadata_t / | |
+ * | ----------------- | | |
+ * | | size of mmap | --| | |
+ * `-- | data | | | |
+ * ----------------- | | |
+ * \ | |
+ * ----
+ */
+
+#define PTR_STRLEN (2 + 2 * sizeof(void*) + 1)
+
+xbt_dict_t allocs = NULL; /* Allocated on first use */
+xbt_dict_t allocs_metadata = NULL; /* Allocated on first use */
+xbt_dict_t samples = NULL; /* Allocated on first use */
+xbt_dict_t calls = NULL; /* Allocated on first use */
+__thread int smpi_current_rank = 0; /* Updated after each MPI call */
typedef struct {
+ int fd;
int count;
- char data[];
+ char* loc;
} shared_data_t;
+typedef struct {
+ size_t size;
+ shared_data_t* data;
+} shared_metadata_t;
+
+static size_t shm_size(int fd) {
+ struct stat st;
+
+ if(fstat(fd, &st) < 0) {
+ xbt_die("Could not stat fd %d: %s", fd, strerror(errno));
+ }
+ return (size_t)st.st_size;
+}
+
+static void* shm_map(int fd, size_t size, shared_data_t* data) {
+ void* mem;
+ char loc[PTR_STRLEN];
+ shared_metadata_t* meta;
+
+ if(size > shm_size(fd)) {
+ if(ftruncate(fd, (off_t)size) < 0) {
+ xbt_die("Could not truncate fd %d to %zu: %s", fd, size, strerror(errno));
+ }
+ }
+ mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if(mem == MAP_FAILED) {
+ xbt_die("Could not map fd %d: %s", fd, strerror(errno));
+ }
+ if(!allocs_metadata) {
+ allocs_metadata = xbt_dict_new();
+ }
+ snprintf(loc, PTR_STRLEN, "%p", mem);
+ meta = xbt_new(shared_metadata_t, 1);
+ meta->size = size;
+ meta->data = data;
+ xbt_dict_set(allocs_metadata, loc, meta, &free);
+ XBT_DEBUG("MMAP %zu to %p", size, mem);
+ return mem;
+}
+
typedef struct {
int count;
double sum;
static void smpi_execute(double duration)
{
+ /* FIXME: a global variable would be less expensive to consult than a call to xbt_cfg_get_double() right on the critical path */
if (duration >= xbt_cfg_get_double(_surf_cfg_set, "smpi/cpu_threshold")) {
XBT_DEBUG("Sleep for %f to handle real computation time", duration);
smpi_execute_flops(duration *
xbt_cfg_get_double(_surf_cfg_set,
"smpi/running_power"));
+ } else {
+ XBT_DEBUG("Real computation took %f while threshold is set to %f; ignore it",
+ duration, xbt_cfg_get_double(_surf_cfg_set, "smpi/cpu_threshold"));
}
}
void smpi_bench_begin(void)
{
xbt_os_timer_start(smpi_process_timer());
+ smpi_current_rank = smpi_process_index();
}
void smpi_bench_end(void)
unsigned int smpi_sleep(unsigned int secs)
{
+ smpi_bench_end();
smpi_execute((double) secs);
+ smpi_bench_begin();
return secs;
}
int smpi_gettimeofday(struct timeval *tv, struct timezone *tz)
{
- double now = SIMIX_get_clock();
-
+ double now;
+ smpi_bench_end();
+ now = SIMIX_get_clock();
if (tv) {
- tv->tv_sec = (time_t) now;
- tv->tv_usec = (suseconds_t) (now * 1e6);
+ tv->tv_sec = (time_t)now;
+ tv->tv_usec = (suseconds_t)((now - tv->tv_sec) * 1e6);
}
+ smpi_bench_begin();
return 0;
}
void *smpi_shared_malloc(size_t size, const char *file, int line)
{
- char *loc = bprintf("%s:%d:%zu", file, line, size);
+ char *loc = bprintf("%zu_%s_%d", (size_t)getpid(), file, line);
+ size_t len = strlen(loc);
+ size_t i;
+ int fd;
+ void* mem;
shared_data_t *data;
+ for(i = 0; i < len; i++) {
+ /* Make the 'loc' ID be a flat filename */
+ if(loc[i] == '/') {
+ loc[i] = '_';
+ }
+ }
if (!allocs) {
allocs = xbt_dict_new_homogeneous(free);
}
data = xbt_dict_get_or_null(allocs, loc);
- if (!data) {
- data = (shared_data_t *) xbt_malloc0(sizeof(int) + size);
+ if(!data) {
+ fd = shm_open(loc, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if(fd < 0) {
+ switch(errno) {
+ case EEXIST:
+ xbt_die("Please cleanup /dev/shm/%s", loc);
+ default:
+ xbt_die("An unhandled error occured while opening %s: %s", loc, strerror(errno));
+ }
+ }
+ data = xbt_new(shared_data_t, 1);
+ data->fd = fd;
data->count = 1;
+ data->loc = loc;
+ mem = shm_map(fd, size, data);
+ if(shm_unlink(loc) < 0) {
+ XBT_WARN("Could not early unlink %s: %s", loc, strerror(errno));
+ }
xbt_dict_set(allocs, loc, data, NULL);
+ XBT_DEBUG("Mapping %s at %p through %d", loc, mem, fd);
} else {
+ mem = shm_map(data->fd, size, data);
data->count++;
}
- free(loc);
- return data->data;
+ XBT_DEBUG("Malloc %zu in %p (metadata at %p)", size, mem, data);
+ return mem;
}
void smpi_shared_free(void *ptr)
{
- shared_data_t *data = (shared_data_t *) ((int *) ptr - 1);
- char *loc;
+ char loc[PTR_STRLEN];
+ shared_metadata_t* meta;
+ shared_data_t* data;
if (!allocs) {
XBT_WARN("Cannot free: nothing was allocated");
return;
}
- loc = xbt_dict_get_key(allocs, data);
- if (!loc) {
+ if(!allocs_metadata) {
+ XBT_WARN("Cannot free: no metadata was allocated");
+ }
+ snprintf(loc, PTR_STRLEN, "%p", ptr);
+ meta = (shared_metadata_t*)xbt_dict_get_or_null(allocs_metadata, loc);
+ if (!meta) {
XBT_WARN("Cannot free: %p was not shared-allocated by SMPI", ptr);
return;
}
+ data = meta->data;
+ if(!data) {
+ XBT_WARN("Cannot free: something is broken in the metadata link");
+ return;
+ }
+ if(munmap(ptr, meta->size) < 0) {
+ XBT_WARN("Unmapping of fd %d failed: %s", data->fd, strerror(errno));
+ }
data->count--;
if (data->count <= 0) {
- xbt_dict_remove(allocs, loc);
+ close(data->fd);
+ xbt_dict_remove(allocs, data->loc);
+ free(data->loc);
}
}
/**
* Build the tree depending on a process rank (index) and the group size (extent)
- * @param index the rank of the calling process
- * @param extent the total number of processes
+ * @param root the rank of the tree root
+ * @param rank the rank of the calling process
+ * @param size the total number of processes
**/
-static void build_tree(int index, int extent, proc_tree_t * tree)
+static void build_tree(int root, int rank, int size, proc_tree_t * tree)
{
- int places = (*tree)->PROCTREE_A * index;
- int i, ch, pr;
+ int index = (rank - root + size) % size;
+ int firstChildIdx = index * (*tree)->PROCTREE_A + 1;
+ int i;
- (*tree)->me = index;
- (*tree)->root = 0;
- for (i = 1; i <= (*tree)->PROCTREE_A; i++) {
- ++places;
- ch = (*tree)->PROCTREE_A * index + i + (*tree)->root;
- ch %= extent;
- if (places < extent) {
- (*tree)->child[i - 1] = ch;
- (*tree)->numChildren++;
- }
+ (*tree)->me = rank;
+ (*tree)->root = root;
+
+ for (i = 0; i < (*tree)->PROCTREE_A && firstChildIdx + i < size; i++) {
+ (*tree)->child[i] = (firstChildIdx + i + root) % size;
+ (*tree)->numChildren++;
}
- if (index == (*tree)->root) {
+ if (rank == root) {
(*tree)->isRoot = 1;
} else {
(*tree)->isRoot = 0;
- pr = (index - 1) / (*tree)->PROCTREE_A;
- (*tree)->parent = pr;
+ (*tree)->parent = (((index - 1) / (*tree)->PROCTREE_A) + root) % size;
}
}
* bcast
**/
static void tree_bcast(void *buf, int count, MPI_Datatype datatype,
- int root, MPI_Comm comm, proc_tree_t tree)
+ MPI_Comm comm, proc_tree_t tree)
{
int system_tag = 999; // used negative int but smpi_create_request() declares this illegal (to be checked)
int rank, i;
* anti-bcast
**/
static void tree_antibcast(void *buf, int count, MPI_Datatype datatype,
- int root, MPI_Comm comm, proc_tree_t tree)
+ MPI_Comm comm, proc_tree_t tree)
{
int system_tag = 999; // used negative int but smpi_create_request() declares this illegal (to be checked)
int rank, i;
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
- build_tree(rank, size, &tree);
- tree_bcast(buf, count, datatype, root, comm, tree);
+ build_tree(root, rank, size, &tree);
+ tree_bcast(buf, count, datatype, comm, tree);
free_tree(tree);
}
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
- build_tree(rank, size, &tree);
- tree_antibcast(&dummy, 1, MPI_CHAR, 0, comm, tree);
- tree_bcast(&dummy, 1, MPI_CHAR, 0, comm, tree);
+ build_tree(0, rank, size, &tree);
+ tree_antibcast(&dummy, 1, MPI_CHAR, comm, tree);
+ tree_bcast(&dummy, 1, MPI_CHAR, comm, tree);
free_tree(tree);
}
* Alltoall Bruck
*
* Openmpi calls this routine when the message size sent to each rank < 2000 bytes and size < 12
+ * FIXME: uh, check smpi_pmpi again, but this routine is called for > 12, not
+ * less...
**/
int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount,
MPI_Datatype sendtype, void *recvbuf,
int system_tag = 777;
int i, rank, size, err, count;
MPI_Aint lb;
- MPI_Aint sendextent = 0;
- MPI_Aint recvextent = 0;
+ MPI_Aint sendext = 0;
+ MPI_Aint recvext = 0;
MPI_Request *requests;
// FIXME: check implementation
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);
- err = smpi_datatype_extent(sendtype, &lb, &sendextent);
- err = smpi_datatype_extent(recvtype, &lb, &recvextent);
+ err = smpi_datatype_extent(sendtype, &lb, &sendext);
+ err = smpi_datatype_extent(recvtype, &lb, &recvext);
/* Local copy from self */
err =
- smpi_datatype_copy(&((char *) sendbuf)[rank * sendextent], sendcount,
- sendtype, &((char *) recvbuf)[rank * recvextent],
+ smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
+ sendcount, sendtype,
+ (char *)recvbuf + rank * recvcount * recvext,
recvcount, recvtype);
if (err == MPI_SUCCESS && size > 1) {
/* Initiate all send/recv to/from others. */
continue;
}
requests[count] =
- smpi_irecv_init(&((char *) recvbuf)[i * recvextent], recvcount,
+ smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
recvtype, i, system_tag, comm);
count++;
}
continue;
}
requests[count] =
- smpi_isend_init(&((char *) sendbuf)[i * sendextent], sendcount,
+ smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
sendtype, i, system_tag, comm);
count++;
}
{
int system_tag = 888;
int i, rank, size, err, count;
- MPI_Aint lb;
- MPI_Aint sendinc = 0;
- MPI_Aint recvinc = 0;
+ MPI_Aint lb = 0, sendext = 0, recvext = 0;
MPI_Request *requests;
/* Initialize. */
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
XBT_DEBUG("<%d> algorithm alltoall_basic_linear() called.", rank);
- err = smpi_datatype_extent(sendtype, &lb, &sendinc);
- err = smpi_datatype_extent(recvtype, &lb, &recvinc);
- sendinc *= sendcount;
- recvinc *= recvcount;
+ err = smpi_datatype_extent(sendtype, &lb, &sendext);
+ err = smpi_datatype_extent(recvtype, &lb, &recvext);
/* simple optimization */
- err =
- smpi_datatype_copy(&((char *) sendbuf)[rank * sendinc], sendcount,
- sendtype, &((char *) recvbuf)[rank * recvinc],
- recvcount, recvtype);
+ err = smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
+ sendcount, sendtype,
+ (char *)recvbuf + rank * recvcount * recvext,
+ recvcount, recvtype);
if (err == MPI_SUCCESS && size > 1) {
/* Initiate all send/recv to/from others. */
requests = xbt_new(MPI_Request, 2 * (size - 1));
count = 0;
for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
requests[count] =
- smpi_irecv_init(&((char *) recvbuf)[i * recvinc], recvcount,
+ smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
recvtype, i, system_tag, comm);
count++;
}
* when messages actually arrive in the order in which they were posted.
* TODO: check the previous assertion
*/
- for (i = (rank + size - 1) % size; i != rank;
- i = (i + size - 1) % size) {
+ for (i = (rank + size - 1) % size; i != rank; i = (i + size - 1) % size) {
requests[count] =
- smpi_isend_init(&((char *) sendbuf)[i * sendinc], sendcount,
+ smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
sendtype, i, system_tag, comm);
count++;
}
{
int system_tag = 889;
int i, rank, size, err, count;
- MPI_Aint lb;
- MPI_Aint sendextent = 0;
- MPI_Aint recvextent = 0;
+ MPI_Aint lb = 0, sendext = 0, recvext = 0;
MPI_Request *requests;
/* Initialize. */
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank);
- err = smpi_datatype_extent(sendtype, &lb, &sendextent);
- err = smpi_datatype_extent(recvtype, &lb, &recvextent);
+ err = smpi_datatype_extent(sendtype, &lb, &sendext);
+ err = smpi_datatype_extent(recvtype, &lb, &recvext);
/* Local copy from self */
err =
- smpi_datatype_copy(&((char *) sendbuf)[senddisps[rank] * sendextent],
+ smpi_datatype_copy((char *)sendbuf + senddisps[rank] * sendext,
sendcounts[rank], sendtype,
- &((char *) recvbuf)[recvdisps[rank] * recvextent],
+ (char *)recvbuf + recvdisps[rank] * recvext,
recvcounts[rank], recvtype);
if (err == MPI_SUCCESS && size > 1) {
/* Initiate all send/recv to/from others. */
continue;
}
requests[count] =
- smpi_irecv_init(&((char *) recvbuf)[recvdisps[i] * recvextent],
+ smpi_irecv_init((char *)recvbuf + recvdisps[i] * recvext,
recvcounts[i], recvtype, i, system_tag, comm);
count++;
}
continue;
}
requests[count] =
- smpi_isend_init(&((char *) sendbuf)[senddisps[i] * sendextent],
+ smpi_isend_init((char *)sendbuf + senddisps[i] * sendext,
sendcounts[i], sendtype, i, system_tag, comm);
count++;
}
static MPI_Comm get_comm(int comm) {
if(comm == -2) {
return MPI_COMM_SELF;
- } else if(comm >= 0) {
+ } else if(comm_lookup && comm >= 0 && comm < (int)xbt_dynar_length(comm_lookup)) {
return *(MPI_Comm*)xbt_dynar_get_ptr(comm_lookup, comm);
}
return MPI_COMM_NULL;
void mpi_finalize__(int* ierr) {
*ierr = MPI_Finalize();
xbt_dynar_free(&op_lookup);
+ op_lookup = NULL;
xbt_dynar_free(&datatype_lookup);
+ datatype_lookup = NULL;
xbt_dict_free(&request_lookup);
+ request_lookup = NULL;
xbt_dynar_free(&comm_lookup);
+ comm_lookup = NULL;
}
void mpi_abort__(int* comm, int* errorcode, int* ierr) {
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <stdint.h>
+#include <stdio.h>
#include <stdlib.h>
#include "private.h"
XBT_DEBUG("<%d> Process left the game", index);
}
+/**
+ * @brief Prepares the current process for termination.
+ */
+void smpi_process_finalize(void)
+{
+ // wait for all pending asynchronous comms to finish
+ while (SIMIX_process_has_pending_comms(SIMIX_process_self())) {
+ SIMIX_req_process_sleep(1);
+ }
+}
+
int smpi_process_argc(void) {
smpi_process_data_t data = smpi_process_data();
char* value = getenv("SMPI_GLOBAL_SIZE");
if(!value) {
+ fprintf(stderr, "Please set env var SMPI_GLOBAL_SIZE to expected number of processes.\n");
abort();
}
return atoi(value);
MPI_Group group;
char name[MAILBOX_NAME_MAXLEN];
- SIMIX_comm_set_copy_data_callback
- (&SIMIX_comm_copy_buffer_callback);
+ SIMIX_comm_set_copy_data_callback(&smpi_comm_copy_data_callback);
process_count = SIMIX_process_count();
process_data = xbt_new(smpi_process_data_t, process_count);
for (i = 0; i < process_count; i++) {
NULL);
if(getenv("SMPI_PRETEND_CC") != NULL) {
+ /* Hack to ensure that smpicc can pretend to be a simple compiler. Particularly handy to pass it to the configuration tools */
return 0;
}
// parse the platform file: get the host list
SIMIX_create_environment(xargv[1]);
- SIMIX_function_register("smpi_simulated_main", smpi_simulated_main);
+ SIMIX_function_register_default(smpi_simulated_main);
SIMIX_launch_application(xargv[2]);
smpi_global_init();
SIMIX_run();
if (xbt_cfg_get_int(_surf_cfg_set, "smpi/display_timing"))
- XBT_INFO("simulation time %g", SIMIX_get_clock());
+ XBT_INFO("Simulation time: %g seconds.", SIMIX_get_clock());
smpi_global_destroy();
int PMPI_Finalize(void)
{
+ smpi_process_finalize();
smpi_bench_end();
#ifdef HAVE_TRACING
TRACE_smpi_finalize(smpi_process_index());
if(/^} (.*?);/) {
$_ = "}* __attribute__((weak)) $1 = NULL;\n";
} elsif(/^#define\s*(\S*)\s*\(?([^.]*)(\..*?)?\)?$/) {
- $_ = "#define $1 $2\[smpi_process_index()\]";
+ $_ = "#define $1 $2\[smpi_current_rank\]";
if(defined $3) {
$_ .= $3;
}
NUMPROCS="${DEFAULT_NUMPROCS}"
POWER="${DEFAULT_POWER}"
-SIMOPTS="--cfg=maxmin/precision:1e-9 --cfg=network/model:SMPI --cfg=TCP_gamma:4194304"
+SIMOPTS="--cfg=maxmin/precision:1e-9 --cfg=network/model:SMPI --cfg=network/TCP_gamma:4194304"
#usage to print the way this script should be called
function usage () {
else
host="${hostnames[$j]}"
fi
- echo " <process host=\"${host}\" function=\"smpi_simulated_main\">" >> ${APPLICATIONTMP}
+ echo " <process host=\"${host}\" function=\"rank$i\"> <!-- function name used only for logging -->" >> ${APPLICATIONTMP}
echo " <argument value=\"$i\"/> <!-- rank -->" >> ${APPLICATIONTMP}
for ARG in $*; do
echo " <argument value=\"${ARG}\"/>" >> ${APPLICATIONTMP}
+++ /dev/null
-/* Copyright (c) 2004-2011. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "surf_private.h"
-#include "surf/surf_resource.h"
-
-
-typedef s_surf_action_lmm_t s_surf_action_cpu_Cas01_t,
- *surf_action_cpu_Cas01_t;
-
-typedef struct cpu_Cas01 {
- s_surf_resource_t generic_resource;
- double power_peak;
- double power_scale;
- tmgr_trace_event_t power_event;
- int core;
- e_surf_resource_state_t state_current;
- tmgr_trace_event_t state_event;
- lmm_constraint_t constraint;
-} s_cpu_Cas01_t, *cpu_Cas01_t;
-
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_cpu, surf,
- "Logging specific to the SURF CPU module");
-
-
-
-surf_model_t surf_cpu_model = NULL;
-lmm_system_t cpu_maxmin_system = NULL;
-
-
-static xbt_swag_t cpu_running_action_set_that_does_not_need_being_checked =
- NULL;
-
-static void* cpu_create_resource(const char *name, double power_peak,
- double power_scale,
- tmgr_trace_t power_trace,
- int core,
- e_surf_resource_state_t state_initial,
- tmgr_trace_t state_trace,
- xbt_dict_t cpu_properties)
-{
-
- cpu_Cas01_t cpu = NULL;
- xbt_assert(!surf_cpu_resource_by_name(name),
- "Host '%s' declared several times in the platform file",
- name);
- cpu = (cpu_Cas01_t) surf_resource_new(sizeof(s_cpu_Cas01_t),
- surf_cpu_model, name,cpu_properties);
- cpu->power_peak = power_peak;
- xbt_assert(cpu->power_peak > 0, "Power has to be >0");
- cpu->power_scale = power_scale;
- cpu->core = core;
- xbt_assert(core>0,"Invalid number of cores %d",core);
- if (power_trace)
- cpu->power_event =
- tmgr_history_add_trace(history, power_trace, 0.0, 0, cpu);
-
- cpu->state_current = state_initial;
- if (state_trace)
- cpu->state_event =
- tmgr_history_add_trace(history, state_trace, 0.0, 0, cpu);
-
- cpu->constraint =
- lmm_constraint_new(cpu_maxmin_system, cpu,
- cpu->core * cpu->power_scale * cpu->power_peak);
-
- xbt_lib_set(host_lib, name, SURF_CPU_LEVEL, cpu);
-
- return cpu;
-}
-
-
-static void parse_cpu_init(sg_platf_host_cbarg_t host)
-{
- if(strcmp(host->coord,"")) xbt_die("Coordinates not implemented yet!");
-
- cpu_create_resource(host->id,
- host->power_peak,
- host->power_scale,
- host->power_trace,
- host->core_amount,
- host->initial_state,
- host->state_trace,
- host->properties);
-}
-
-static void add_traces_cpu(void)
-{
- xbt_dict_cursor_t cursor = NULL;
- char *trace_name, *elm;
-
- static int called = 0;
-
- if (called)
- return;
- called = 1;
-
-
- /* connect all traces relative to hosts */
- xbt_dict_foreach(trace_connect_list_host_avail, cursor, trace_name, elm) {
- tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
- cpu_Cas01_t host = surf_cpu_resource_by_name(elm);
-
- xbt_assert(host, "Host %s undefined", elm);
- xbt_assert(trace, "Trace %s undefined", trace_name);
-
- host->state_event =
- tmgr_history_add_trace(history, trace, 0.0, 0, host);
- }
-
- xbt_dict_foreach(trace_connect_list_power, cursor, trace_name, elm) {
- tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
- cpu_Cas01_t host = surf_cpu_resource_by_name(elm);
-
- xbt_assert(host, "Host %s undefined", elm);
- xbt_assert(trace, "Trace %s undefined", trace_name);
-
- host->power_event =
- tmgr_history_add_trace(history, trace, 0.0, 0, host);
- }
-}
-
-static void cpu_define_callbacks(void)
-{
- sg_platf_host_add_cb(parse_cpu_init);
- sg_platf_postparse_add_cb(add_traces_cpu);
-}
-
-static int cpu_resource_used(void *resource_id)
-{
- return lmm_constraint_used(cpu_maxmin_system,
- ((cpu_Cas01_t) resource_id)->constraint);
-}
-
-static int cpu_action_unref(surf_action_t action)
-{
- action->refcount--;
- if (!action->refcount) {
- xbt_swag_remove(action, action->state_set);
- if (((surf_action_cpu_Cas01_t) action)->variable)
- lmm_variable_free(cpu_maxmin_system,
- ((surf_action_cpu_Cas01_t) action)->variable);
-#ifdef HAVE_TRACING
- xbt_free(action->category);
-#endif
- surf_action_free(&action);
- return 1;
- }
- return 0;
-}
-
-static void cpu_action_cancel(surf_action_t action)
-{
- surf_action_state_set(action, SURF_ACTION_FAILED);
- return;
-}
-
-static void cpu_action_state_set(surf_action_t action,
- e_surf_action_state_t state)
-{
-/* if((state==SURF_ACTION_DONE) || (state==SURF_ACTION_FAILED)) */
-/* if(((surf_action_cpu_Cas01_t)action)->variable) { */
-/* lmm_variable_disable(cpu_maxmin_system, ((surf_action_cpu_Cas01_t)action)->variable); */
-/* ((surf_action_cpu_Cas01_t)action)->variable = NULL; */
-/* } */
-
- surf_action_state_set(action, state);
- return;
-}
-
-static double cpu_share_resources(double now)
-{
- s_surf_action_cpu_Cas01_t action;
- return generic_maxmin_share_resources(surf_cpu_model->
- states.running_action_set,
- xbt_swag_offset(action, variable),
- cpu_maxmin_system, lmm_solve);
-}
-
-static void cpu_update_actions_state(double now, double delta)
-{
- surf_action_cpu_Cas01_t action = NULL;
- surf_action_cpu_Cas01_t next_action = NULL;
- xbt_swag_t running_actions = surf_cpu_model->states.running_action_set;
-
- xbt_swag_foreach_safe(action, next_action, running_actions) {
-#ifdef HAVE_TRACING
- if (TRACE_is_enabled()) {
- cpu_Cas01_t x =
- lmm_constraint_id(lmm_get_cnst_from_var
- (cpu_maxmin_system, action->variable, 0));
-
- TRACE_surf_host_set_utilization(x->generic_resource.name,
- action->generic_action.data,
- (surf_action_t) action,
- lmm_variable_getvalue
- (action->variable), now - delta,
- delta);
- TRACE_last_timestamp_to_dump = now-delta;
- }
-#endif
- double_update(&(action->generic_action.remains),
- lmm_variable_getvalue(action->variable) * delta);
- if (action->generic_action.max_duration != NO_MAX_DURATION)
- double_update(&(action->generic_action.max_duration), delta);
- if ((action->generic_action.remains <= 0) &&
- (lmm_get_variable_weight(action->variable) > 0)) {
- action->generic_action.finish = surf_get_clock();
- cpu_action_state_set((surf_action_t) action, SURF_ACTION_DONE);
- } else if ((action->generic_action.max_duration != NO_MAX_DURATION) &&
- (action->generic_action.max_duration <= 0)) {
- action->generic_action.finish = surf_get_clock();
- cpu_action_state_set((surf_action_t) action, SURF_ACTION_DONE);
- }
- }
-
- return;
-}
-
-static void cpu_update_resource_state(void *id,
- tmgr_trace_event_t event_type,
- double value, double date)
-{
- cpu_Cas01_t cpu = id;
- lmm_variable_t var = NULL;
- lmm_element_t elem = NULL;
-
- if (event_type == cpu->power_event) {
- cpu->power_scale = value;
- lmm_update_constraint_bound(cpu_maxmin_system, cpu->constraint,
- cpu->core * cpu->power_scale * cpu->power_peak);
-#ifdef HAVE_TRACING
- TRACE_surf_host_set_power(date, cpu->generic_resource.name,
- cpu->core * cpu->power_scale * cpu->power_peak);
-#endif
- while ((var = lmm_get_var_from_cnst
- (cpu_maxmin_system, cpu->constraint, &elem))) {
- surf_action_cpu_Cas01_t action = lmm_variable_id(var);
- lmm_update_variable_bound(cpu_maxmin_system, action->variable,
- cpu->power_scale * cpu->power_peak);
- }
- if (tmgr_trace_event_free(event_type))
- cpu->power_event = NULL;
- } else if (event_type == cpu->state_event) {
- if (value > 0)
- cpu->state_current = SURF_RESOURCE_ON;
- else {
- lmm_constraint_t cnst = cpu->constraint;
- lmm_variable_t var = NULL;
- lmm_element_t elem = NULL;
-
- cpu->state_current = SURF_RESOURCE_OFF;
-
- while ((var = lmm_get_var_from_cnst(cpu_maxmin_system, cnst, &elem))) {
- surf_action_t action = lmm_variable_id(var);
-
- if (surf_action_state_get(action) == SURF_ACTION_RUNNING ||
- surf_action_state_get(action) == SURF_ACTION_READY ||
- surf_action_state_get(action) ==
- SURF_ACTION_NOT_IN_THE_SYSTEM) {
- action->finish = date;
- cpu_action_state_set(action, SURF_ACTION_FAILED);
- }
- }
- }
- if (tmgr_trace_event_free(event_type))
- cpu->state_event = NULL;
- } else {
- XBT_CRITICAL("Unknown event ! \n");
- xbt_abort();
- }
-
- return;
-}
-
-static surf_action_t cpu_execute(void *cpu, double size)
-{
- surf_action_cpu_Cas01_t action = NULL;
- cpu_Cas01_t CPU = cpu;
-
- XBT_IN("(%s,%g)", surf_resource_name(CPU), size);
- action =
- surf_action_new(sizeof(s_surf_action_cpu_Cas01_t), size,
- surf_cpu_model,
- CPU->state_current != SURF_RESOURCE_ON);
-
- action->suspended = 0; /* Should be useless because of the
- calloc but it seems to help valgrind... */
-
- action->variable = lmm_variable_new(cpu_maxmin_system, action,
- action->generic_action.priority,
- CPU->power_scale * CPU->power_peak, 1);
- lmm_expand(cpu_maxmin_system, CPU->constraint, action->variable, 1.0);
- XBT_OUT();
- return (surf_action_t) action;
-}
-
-static surf_action_t cpu_action_sleep(void *cpu, double duration)
-{
- surf_action_cpu_Cas01_t action = NULL;
-
- if (duration > 0)
- duration = MAX(duration, MAXMIN_PRECISION);
-
- XBT_IN("(%s,%g)", surf_resource_name(cpu), duration);
- action = (surf_action_cpu_Cas01_t) cpu_execute(cpu, 1.0);
- action->generic_action.max_duration = duration;
- action->suspended = 2;
- if (duration == NO_MAX_DURATION) {
- /* Move to the *end* of the corresponding action set. This convention
- is used to speed up update_resource_state */
- xbt_swag_remove(action, ((surf_action_t) action)->state_set);
- ((surf_action_t) action)->state_set =
- cpu_running_action_set_that_does_not_need_being_checked;
- xbt_swag_insert(action, ((surf_action_t) action)->state_set);
- }
-
- lmm_update_variable_weight(cpu_maxmin_system, action->variable, 0.0);
- XBT_OUT();
- return (surf_action_t) action;
-}
-
-static void cpu_action_suspend(surf_action_t action)
-{
- XBT_IN("(%p)", action);
- if (((surf_action_cpu_Cas01_t) action)->suspended != 2) {
- lmm_update_variable_weight(cpu_maxmin_system,
- ((surf_action_cpu_Cas01_t)
- action)->variable, 0.0);
- ((surf_action_cpu_Cas01_t) action)->suspended = 1;
- }
- XBT_OUT();
-}
-
-static void cpu_action_resume(surf_action_t action)
-{
- XBT_IN("(%p)", action);
- if (((surf_action_cpu_Cas01_t) action)->suspended != 2) {
- lmm_update_variable_weight(cpu_maxmin_system,
- ((surf_action_cpu_Cas01_t)
- action)->variable, action->priority);
- ((surf_action_cpu_Cas01_t) action)->suspended = 0;
- }
- XBT_OUT();
-}
-
-static int cpu_action_is_suspended(surf_action_t action)
-{
- return (((surf_action_cpu_Cas01_t) action)->suspended == 1);
-}
-
-static void cpu_action_set_max_duration(surf_action_t action,
- double duration)
-{
- XBT_IN("(%p,%g)", action, duration);
- action->max_duration = duration;
- XBT_OUT();
-}
-
-static void cpu_action_set_priority(surf_action_t action, double priority)
-{
- XBT_IN("(%p,%g)", action, priority);
- action->priority = priority;
- lmm_update_variable_weight(cpu_maxmin_system,
- ((surf_action_cpu_Cas01_t) action)->variable,
- priority);
-
- XBT_OUT();
-}
-
-#ifdef HAVE_TRACING
-static void cpu_action_set_category(surf_action_t action, const char *category)
-{
- XBT_IN("(%p,%s)", action, category);
- action->category = xbt_strdup (category);
- XBT_OUT();
-}
-#endif
-
-static double cpu_action_get_remains(surf_action_t action)
-{
- XBT_IN("(%p)", action);
- return action->remains;
- XBT_OUT();
-}
-
-static e_surf_resource_state_t cpu_get_state(void *cpu)
-{
- return ((cpu_Cas01_t) cpu)->state_current;
-}
-
-static double cpu_get_speed(void *cpu, double load)
-{
- return load * (((cpu_Cas01_t) cpu)->power_peak);
-}
-
-static double cpu_get_available_speed(void *cpu)
-{
- /* number between 0 and 1 */
- return ((cpu_Cas01_t) cpu)->power_scale;
-}
-
-
-static void cpu_finalize(void)
-{
- lmm_system_free(cpu_maxmin_system);
- cpu_maxmin_system = NULL;
-
- surf_model_exit(surf_cpu_model);
- surf_cpu_model = NULL;
-
- xbt_swag_free(cpu_running_action_set_that_does_not_need_being_checked);
- cpu_running_action_set_that_does_not_need_being_checked = NULL;
-}
-
-static void surf_cpu_model_init_internal(void)
-{
- s_surf_action_t action;
-
- surf_cpu_model = surf_model_init();
-
- cpu_running_action_set_that_does_not_need_being_checked =
- xbt_swag_new(xbt_swag_offset(action, state_hookup));
-
- surf_cpu_model->name = "CPU";
-
- surf_cpu_model->action_unref = cpu_action_unref;
- surf_cpu_model->action_cancel = cpu_action_cancel;
- surf_cpu_model->action_state_set = cpu_action_state_set;
-
- surf_cpu_model->model_private->resource_used = cpu_resource_used;
- surf_cpu_model->model_private->share_resources = cpu_share_resources;
- surf_cpu_model->model_private->update_actions_state =
- cpu_update_actions_state;
- surf_cpu_model->model_private->update_resource_state =
- cpu_update_resource_state;
- surf_cpu_model->model_private->finalize = cpu_finalize;
-
- surf_cpu_model->suspend = cpu_action_suspend;
- surf_cpu_model->resume = cpu_action_resume;
- surf_cpu_model->is_suspended = cpu_action_is_suspended;
- surf_cpu_model->set_max_duration = cpu_action_set_max_duration;
- surf_cpu_model->set_priority = cpu_action_set_priority;
-#ifdef HAVE_TRACING
- surf_cpu_model->set_category = cpu_action_set_category;
-#endif
- surf_cpu_model->get_remains = cpu_action_get_remains;
-
- surf_cpu_model->extension.cpu.execute = cpu_execute;
- surf_cpu_model->extension.cpu.sleep = cpu_action_sleep;
-
- surf_cpu_model->extension.cpu.get_state = cpu_get_state;
- surf_cpu_model->extension.cpu.get_speed = cpu_get_speed;
- surf_cpu_model->extension.cpu.get_available_speed =
- cpu_get_available_speed;
- surf_cpu_model->extension.cpu.create_resource = cpu_create_resource;
- surf_cpu_model->extension.cpu.add_traces = add_traces_cpu;
-
- if (!cpu_maxmin_system)
- cpu_maxmin_system = lmm_system_new();
-}
-
-/*********************************************************************/
-/* Basic sharing model for CPU: that is where all this started... ;) */
-/*********************************************************************/
-/* @InProceedings{casanova01simgrid, */
-/* author = "H. Casanova", */
-/* booktitle = "Proceedings of the IEEE Symposium on Cluster Computing */
-/* and the Grid (CCGrid'01)", */
-/* publisher = "IEEE Computer Society", */
-/* title = "Simgrid: {A} Toolkit for the Simulation of Application */
-/* Scheduling", */
-/* year = "2001", */
-/* month = may, */
-/* note = "Available at */
-/* \url{http://grail.sdsc.edu/papers/simgrid_ccgrid01.ps.gz}." */
-/* } */
-void surf_cpu_model_init_Cas01()
-{
- if (surf_cpu_model)
- return;
- surf_cpu_model_init_internal();
- cpu_define_callbacks();
- xbt_dynar_push(model_list, &surf_cpu_model);
-}
#include "surf_private.h"
#include "surf/surf_resource.h"
+surf_model_t surf_cpu_model = NULL;
+lmm_system_t cpu_maxmin_system = NULL;
+e_UM_t cpu_update_mechanism = UM_UNDEFINED;
+static int cpu_selective_update = 0;
+
+static xbt_swag_t cpu_modified_cpu = NULL;
+static xbt_heap_t cpu_action_heap = NULL;
#undef GENERIC_LMM_ACTION
#undef GENERIC_ACTION
#undef ACTION_GET_CPU
#define GENERIC_LMM_ACTION(action) action->generic_lmm_action
#define GENERIC_ACTION(action) GENERIC_LMM_ACTION(action).generic_action
-#define ACTION_GET_CPU(action) ((surf_action_cpu_Cas01_im_t) action)->cpu
+#define ACTION_GET_CPU(action) ((surf_action_cpu_Cas01_t) action)->cpu
-typedef struct surf_action_cpu_cas01_im {
+typedef struct surf_action_cpu_cas01 {
s_surf_action_lmm_t generic_lmm_action;
s_xbt_swag_hookup_t cpu_list_hookup;
int index_heap;
void *cpu;
-} s_surf_action_cpu_Cas01_im_t, *surf_action_cpu_Cas01_im_t;
+} s_surf_action_cpu_Cas01_t, *surf_action_cpu_Cas01_t;
-typedef struct cpu_Cas01_im {
+typedef struct cpu_Cas01 {
s_surf_resource_t generic_resource;
s_xbt_swag_hookup_t modified_cpu_hookup;
double power_peak;
lmm_constraint_t constraint;
xbt_swag_t action_set;
double last_update;
-} s_cpu_Cas01_im_t, *cpu_Cas01_im_t;
+} s_cpu_Cas01_t, *cpu_Cas01_t;
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_cpu_im, surf,
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_cpu, surf,
"Logging specific to the SURF CPU IMPROVED module");
-lmm_system_t cpu_im_maxmin_system = NULL;
-static xbt_swag_t cpu_im_modified_cpu = NULL;
-static xbt_heap_t cpu_im_action_heap = NULL;
-extern int sg_maxmin_selective_update;
-
static xbt_swag_t
- cpu_im_running_action_set_that_does_not_need_being_checked = NULL;
+ cpu_running_action_set_that_does_not_need_being_checked = NULL;
-static void* cpu_im_create_resource(const char *name, double power_peak,
+static void* cpu_create_resource(const char *name, double power_peak,
double power_scale,
tmgr_trace_t power_trace,
int core,
tmgr_trace_t state_trace,
xbt_dict_t cpu_properties)
{
- cpu_Cas01_im_t cpu = NULL;
- s_surf_action_cpu_Cas01_im_t action;
+ cpu_Cas01_t cpu = NULL;
+ s_surf_action_cpu_Cas01_t action;
xbt_assert(!surf_cpu_resource_by_name(name),
"Host '%s' declared several times in the platform file",
name);
- cpu = (cpu_Cas01_im_t) surf_resource_new(sizeof(s_cpu_Cas01_im_t),
+ cpu = (cpu_Cas01_t) surf_resource_new(sizeof(s_cpu_Cas01_t),
surf_cpu_model, name,cpu_properties);
cpu->power_peak = power_peak;
xbt_assert(cpu->power_peak > 0, "Power has to be >0");
cpu->power_scale = power_scale;
+ cpu->core = core;
+ xbt_assert(core>0,"Invalid number of cores %d",core);
+
if (power_trace)
cpu->power_event =
tmgr_history_add_trace(history, power_trace, 0.0, 0, cpu);
- cpu->core = core;
- xbt_assert(core>0,"Invalid number of cores %d",core);
cpu->state_current = state_initial;
if (state_trace)
tmgr_history_add_trace(history, state_trace, 0.0, 0, cpu);
cpu->constraint =
- lmm_constraint_new(cpu_im_maxmin_system, cpu,
+ lmm_constraint_new(cpu_maxmin_system, cpu,
cpu->core * cpu->power_scale * cpu->power_peak);
xbt_lib_set(host_lib, name, SURF_CPU_LEVEL, cpu);
- cpu->action_set = xbt_swag_new(xbt_swag_offset(action, cpu_list_hookup));
+ if(cpu_update_mechanism == UM_LAZY)
+ cpu->action_set = xbt_swag_new(xbt_swag_offset(action, cpu_list_hookup));
return cpu;
}
-static void parse_cpu_im_init(sg_platf_host_cbarg_t host)
+static void parse_cpu_init(sg_platf_host_cbarg_t host)
{
- cpu_im_create_resource(host->id,
+ cpu_create_resource(host->id,
host->power_peak,
host->power_scale,
host->power_trace,
host->properties);
}
-static void cpu_im_add_traces_cpu(void)
+static void cpu_add_traces_cpu(void)
{
xbt_dict_cursor_t cursor = NULL;
char *trace_name, *elm;
/* connect all traces relative to hosts */
xbt_dict_foreach(trace_connect_list_host_avail, cursor, trace_name, elm) {
tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
- cpu_Cas01_im_t host = surf_cpu_resource_by_name(elm);
+ cpu_Cas01_t host = surf_cpu_resource_by_name(elm);
xbt_assert(host, "Host %s undefined", elm);
xbt_assert(trace, "Trace %s undefined", trace_name);
xbt_dict_foreach(trace_connect_list_power, cursor, trace_name, elm) {
tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
- cpu_Cas01_im_t host = surf_cpu_resource_by_name(elm);
+ cpu_Cas01_t host = surf_cpu_resource_by_name(elm);
xbt_assert(host, "Host %s undefined", elm);
xbt_assert(trace, "Trace %s undefined", trace_name);
}
}
-static void cpu_im_define_callbacks()
+static void cpu_define_callbacks()
{
- sg_platf_host_add_cb(parse_cpu_im_init);
- sg_platf_postparse_add_cb(cpu_im_add_traces_cpu);
+ sg_platf_host_add_cb(parse_cpu_init);
+ sg_platf_postparse_add_cb(cpu_add_traces_cpu);
}
-static int cpu_im_resource_used(void *resource)
+static int cpu_resource_used(void *resource)
{
- return lmm_constraint_used(cpu_im_maxmin_system,
- ((cpu_Cas01_im_t) resource)->constraint);
+ return lmm_constraint_used(cpu_maxmin_system,
+ ((cpu_Cas01_t) resource)->constraint);
}
-static int cpu_im_action_unref(surf_action_t action)
+static int cpu_action_unref(surf_action_t action)
{
action->refcount--;
if (!action->refcount) {
xbt_swag_remove(action, action->state_set);
if (((surf_action_lmm_t) action)->variable)
- lmm_variable_free(cpu_im_maxmin_system,
+ lmm_variable_free(cpu_maxmin_system,
((surf_action_lmm_t) action)->variable);
+ if(cpu_update_mechanism == UM_LAZY){
/* remove from heap */
- xbt_heap_remove(cpu_im_action_heap,
- ((surf_action_cpu_Cas01_im_t) action)->index_heap);
+ xbt_heap_remove(cpu_action_heap,
+ ((surf_action_cpu_Cas01_t) action)->index_heap);
xbt_swag_remove(action,
- ((cpu_Cas01_im_t) ACTION_GET_CPU(action))->action_set);
- xbt_swag_insert(ACTION_GET_CPU(action), cpu_im_modified_cpu);
+ ((cpu_Cas01_t) ACTION_GET_CPU(action))->action_set);
+ xbt_swag_insert(ACTION_GET_CPU(action), cpu_modified_cpu);
+ }
#ifdef HAVE_TRACING
xbt_free(action->category);
#endif
return 0;
}
-static void cpu_im_action_cancel(surf_action_t action)
+static void cpu_action_cancel(surf_action_t action)
{
surf_action_state_set(action, SURF_ACTION_FAILED);
- xbt_heap_remove(cpu_im_action_heap,
- ((surf_action_cpu_Cas01_im_t) action)->index_heap);
+ if(cpu_update_mechanism == UM_LAZY){
+ xbt_heap_remove(cpu_action_heap,
+ ((surf_action_cpu_Cas01_t) action)->index_heap);
xbt_swag_remove(action,
- ((cpu_Cas01_im_t) ACTION_GET_CPU(action))->action_set);
+ ((cpu_Cas01_t) ACTION_GET_CPU(action))->action_set);
+ }
return;
}
-static void cpu_im_cpu_action_state_set(surf_action_t action,
+static void cpu_cpu_action_state_set(surf_action_t action,
e_surf_action_state_t state)
{
/* if((state==SURF_ACTION_DONE) || (state==SURF_ACTION_FAILED)) */
/* if(((surf_action_lmm_t)action)->variable) { */
-/* lmm_variable_disable(cpu_im_maxmin_system, ((surf_action_lmm_t)action)->variable); */
+/* lmm_variable_disable(cpu_maxmin_system, ((surf_action_lmm_t)action)->variable); */
/* ((surf_action_lmm_t)action)->variable = NULL; */
/* } */
return;
}
-static void cpu_im_update_remains(cpu_Cas01_im_t cpu, double now)
+static void cpu_update_remains(cpu_Cas01_t cpu, double now)
{
- surf_action_cpu_Cas01_im_t action;
+ surf_action_cpu_Cas01_t action;
if (cpu->last_update >= now)
return;
cpu->last_update = now;
}
-static double cpu_im_share_resources(double now)
+static double cpu_share_resources_lazy(double now)
{
- surf_action_cpu_Cas01_im_t action;
+ surf_action_cpu_Cas01_t action;
double min;
double value;
- cpu_Cas01_im_t cpu, cpu_next;
+ cpu_Cas01_t cpu, cpu_next;
- xbt_swag_foreach(cpu, cpu_im_modified_cpu)
- cpu_im_update_remains(cpu, now);
+ xbt_swag_foreach(cpu, cpu_modified_cpu)
+ cpu_update_remains(cpu, now);
- lmm_solve(cpu_im_maxmin_system);
+ lmm_solve(cpu_maxmin_system);
- xbt_swag_foreach_safe(cpu, cpu_next, cpu_im_modified_cpu) {
+ xbt_swag_foreach_safe(cpu, cpu_next, cpu_modified_cpu) {
xbt_swag_foreach(action, cpu->action_set) {
if (GENERIC_ACTION(action).state_set !=
surf_cpu_model->states.running_action_set)
GENERIC_ACTION(action).max_duration);
if (action->index_heap >= 0) {
- surf_action_cpu_Cas01_im_t heap_act =
- xbt_heap_remove(cpu_im_action_heap, action->index_heap);
+ surf_action_cpu_Cas01_t heap_act =
+ xbt_heap_remove(cpu_action_heap, action->index_heap);
if (heap_act != action)
DIE_IMPOSSIBLE;
}
if (min != -1) {
- xbt_heap_push(cpu_im_action_heap, action, min);
+ xbt_heap_push(cpu_action_heap, action, min);
XBT_DEBUG("Insert at heap action(%p) min %lf", action, min);
}
}
- xbt_swag_remove(cpu, cpu_im_modified_cpu);
+ xbt_swag_remove(cpu, cpu_modified_cpu);
}
- return xbt_heap_size(cpu_im_action_heap) >
- 0 ? xbt_heap_maxkey(cpu_im_action_heap) - now : -1;
+ return xbt_heap_size(cpu_action_heap) >
+ 0 ? xbt_heap_maxkey(cpu_action_heap) - now : -1;
}
-static void cpu_im_update_actions_state(double now, double delta)
-{
- surf_action_cpu_Cas01_im_t action;
+static double cpu_share_resources_full(double now) {
+ surf_action_cpu_Cas01_t action;
+ return generic_maxmin_share_resources(surf_cpu_model->states.running_action_set,
+ xbt_swag_offset(*action, generic_lmm_action.variable),
+ cpu_maxmin_system, lmm_solve);
+}
- while ((xbt_heap_size(cpu_im_action_heap) > 0)
- && (double_equals(xbt_heap_maxkey(cpu_im_action_heap), now))) {
- action = xbt_heap_pop(cpu_im_action_heap);
+static void cpu_update_actions_state_lazy(double now, double delta)
+{
+ surf_action_cpu_Cas01_t action;
+ while ((xbt_heap_size(cpu_action_heap) > 0)
+ && (double_equals(xbt_heap_maxkey(cpu_action_heap), now))) {
+ action = xbt_heap_pop(cpu_action_heap);
XBT_DEBUG("Action %p: finish", action);
GENERIC_ACTION(action).finish = surf_get_clock();
/* set the remains to 0 due to precision problems when updating the remaining amount */
#ifdef HAVE_TRACING
if (TRACE_is_enabled()) {
- cpu_Cas01_im_t cpu = ((cpu_Cas01_im_t)(action->cpu));
+ cpu_Cas01_t cpu = ((cpu_Cas01_t)(action->cpu));
TRACE_surf_host_set_utilization(cpu->generic_resource.name,
- action->generic_lmm_action.generic_action.data,
+ GENERIC_LMM_ACTION(action).generic_action.data,
(surf_action_t) action,
lmm_variable_getvalue (GENERIC_LMM_ACTION(action).variable),
cpu->last_update,
}
#endif
GENERIC_ACTION(action).remains = 0;
- cpu_im_cpu_action_state_set((surf_action_t) action, SURF_ACTION_DONE);
- cpu_im_update_remains(action->cpu, surf_get_clock());
+ cpu_cpu_action_state_set((surf_action_t) action, SURF_ACTION_DONE);
+ cpu_update_remains(action->cpu, surf_get_clock());
}
#ifdef HAVE_TRACING
if (TRACE_is_enabled()) {
//defining the last timestamp that we can safely dump to trace file
//without losing the event ascending order (considering all CPU's)
void **data;
- cpu_Cas01_im_t cpu;
+ cpu_Cas01_t cpu;
xbt_lib_cursor_t cursor;
char *key;
double smaller = -1;
return;
}
-static void cpu_im_update_resource_state(void *id,
+static void cpu_update_actions_state_full(double now, double delta)
+{
+ surf_action_cpu_Cas01_t action = NULL;
+ surf_action_cpu_Cas01_t next_action = NULL;
+ xbt_swag_t running_actions = surf_cpu_model->states.running_action_set;
+ xbt_swag_foreach_safe(action, next_action, running_actions) {
+#ifdef HAVE_TRACING
+ if (TRACE_is_enabled()) {
+ cpu_Cas01_t x =
+ lmm_constraint_id(lmm_get_cnst_from_var
+ (cpu_maxmin_system, GENERIC_LMM_ACTION(action).variable, 0));
+
+ TRACE_surf_host_set_utilization(x->generic_resource.name,
+ GENERIC_ACTION(action).data,
+ (surf_action_t) action,
+ lmm_variable_getvalue
+ (GENERIC_LMM_ACTION(action).variable), now - delta,
+ delta);
+ TRACE_last_timestamp_to_dump = now-delta;
+ }
+#endif
+ double_update(&(GENERIC_ACTION(action).remains),
+ lmm_variable_getvalue(GENERIC_LMM_ACTION(action).variable) * delta);
+ if (GENERIC_LMM_ACTION(action).generic_action.max_duration != NO_MAX_DURATION)
+ double_update(&(GENERIC_ACTION(action).max_duration), delta);
+ if ((GENERIC_ACTION(action).remains <= 0) &&
+ (lmm_get_variable_weight(GENERIC_LMM_ACTION(action).variable) > 0)) {
+ GENERIC_ACTION(action).finish = surf_get_clock();
+ cpu_cpu_action_state_set((surf_action_t) action, SURF_ACTION_DONE);
+ } else if ((GENERIC_ACTION(action).max_duration != NO_MAX_DURATION) &&
+ (GENERIC_ACTION(action).max_duration <= 0)) {
+ GENERIC_ACTION(action).finish = surf_get_clock();
+ cpu_cpu_action_state_set((surf_action_t) action, SURF_ACTION_DONE);
+ }
+ }
+
+ return;
+}
+
+static void cpu_update_resource_state(void *id,
tmgr_trace_event_t event_type,
double value, double date)
{
- cpu_Cas01_im_t cpu = id;
+ cpu_Cas01_t cpu = id;
lmm_variable_t var = NULL;
lmm_element_t elem = NULL;
if (event_type == cpu->power_event) {
cpu->power_scale = value;
- lmm_update_constraint_bound(cpu_im_maxmin_system, cpu->constraint,
+ lmm_update_constraint_bound(cpu_maxmin_system, cpu->constraint,
cpu->core * cpu->power_scale * cpu->power_peak);
#ifdef HAVE_TRACING
TRACE_surf_host_set_power(date, cpu->generic_resource.name,
cpu->core * cpu->power_scale * cpu->power_peak);
#endif
while ((var = lmm_get_var_from_cnst
- (cpu_im_maxmin_system, cpu->constraint, &elem))) {
- surf_action_cpu_Cas01_im_t action = lmm_variable_id(var);
- lmm_update_variable_bound(cpu_im_maxmin_system, action->generic_lmm_action.variable,
+ (cpu_maxmin_system, cpu->constraint, &elem))) {
+ surf_action_cpu_Cas01_t action = lmm_variable_id(var);
+ lmm_update_variable_bound(cpu_maxmin_system, GENERIC_LMM_ACTION(action).variable,
cpu->power_scale * cpu->power_peak);
}
- xbt_swag_insert(cpu, cpu_im_modified_cpu);
+ if(cpu_update_mechanism == UM_LAZY)
+ xbt_swag_insert(cpu, cpu_modified_cpu);
if (tmgr_trace_event_free(event_type))
cpu->power_event = NULL;
} else if (event_type == cpu->state_event) {
cpu->state_current = SURF_RESOURCE_OFF;
while ((var =
- lmm_get_var_from_cnst(cpu_im_maxmin_system, cnst, &elem))) {
+ lmm_get_var_from_cnst(cpu_maxmin_system, cnst, &elem))) {
surf_action_t action = lmm_variable_id(var);
if (surf_action_state_get(action) == SURF_ACTION_RUNNING ||
surf_action_state_get(action) ==
SURF_ACTION_NOT_IN_THE_SYSTEM) {
action->finish = date;
- cpu_im_cpu_action_state_set(action, SURF_ACTION_FAILED);
+ cpu_cpu_action_state_set(action, SURF_ACTION_FAILED);
}
}
}
return;
}
-static surf_action_t cpu_im_execute(void *cpu, double size)
+static surf_action_t cpu_execute(void *cpu, double size)
{
- surf_action_cpu_Cas01_im_t action = NULL;
- cpu_Cas01_im_t CPU = cpu;
+ surf_action_cpu_Cas01_t action = NULL;
+ cpu_Cas01_t CPU = cpu;
XBT_IN("(%s,%g)", surf_resource_name(CPU), size);
action =
- surf_action_new(sizeof(s_surf_action_cpu_Cas01_im_t), size,
+ surf_action_new(sizeof(s_surf_action_cpu_Cas01_t), size,
surf_cpu_model,
CPU->state_current != SURF_RESOURCE_ON);
calloc but it seems to help valgrind... */
GENERIC_LMM_ACTION(action).variable =
- lmm_variable_new(cpu_im_maxmin_system, action,
+ lmm_variable_new(cpu_maxmin_system, action,
GENERIC_ACTION(action).priority, CPU->power_scale * CPU->power_peak, 1);
- action->index_heap = -1;
- action->cpu = CPU;
- xbt_swag_insert(CPU, cpu_im_modified_cpu);
- xbt_swag_insert(action, CPU->action_set);
- lmm_expand(cpu_im_maxmin_system, CPU->constraint,
+ if(cpu_update_mechanism == UM_LAZY){
+ action->index_heap = -1;
+ action->cpu = CPU;
+ xbt_swag_insert(CPU, cpu_modified_cpu);
+ xbt_swag_insert(action, CPU->action_set);
+ }
+ lmm_expand(cpu_maxmin_system, CPU->constraint,
GENERIC_LMM_ACTION(action).variable, 1.0);
XBT_OUT();
return (surf_action_t) action;
}
-static surf_action_t cpu_im_action_sleep(void *cpu, double duration)
+static surf_action_t cpu_action_sleep(void *cpu, double duration)
{
- surf_action_cpu_Cas01_im_t action = NULL;
+ surf_action_cpu_Cas01_t action = NULL;
if (duration > 0)
duration = MAX(duration, MAXMIN_PRECISION);
XBT_IN("(%s,%g)", surf_resource_name(cpu), duration);
- action = (surf_action_cpu_Cas01_im_t) cpu_im_execute(cpu, 1.0);
+ action = (surf_action_cpu_Cas01_t) cpu_execute(cpu, 1.0);
GENERIC_ACTION(action).max_duration = duration;
GENERIC_LMM_ACTION(action).suspended = 2;
if (duration == NO_MAX_DURATION) {
is used to speed up update_resource_state */
xbt_swag_remove(action, ((surf_action_t) action)->state_set);
((surf_action_t) action)->state_set =
- cpu_im_running_action_set_that_does_not_need_being_checked;
+ cpu_running_action_set_that_does_not_need_being_checked;
xbt_swag_insert(action, ((surf_action_t) action)->state_set);
}
- lmm_update_variable_weight(cpu_im_maxmin_system,
+ lmm_update_variable_weight(cpu_maxmin_system,
GENERIC_LMM_ACTION(action).variable, 0.0);
- xbt_swag_insert(cpu, cpu_im_modified_cpu);
+ if(cpu_update_mechanism == UM_LAZY)
+ xbt_swag_insert(cpu, cpu_modified_cpu);
XBT_OUT();
return (surf_action_t) action;
}
-static void cpu_im_action_suspend(surf_action_t action)
+static void cpu_action_suspend(surf_action_t action)
{
XBT_IN("(%p)", action);
if (((surf_action_lmm_t) action)->suspended != 2) {
- lmm_update_variable_weight(cpu_im_maxmin_system,
+ lmm_update_variable_weight(cpu_maxmin_system,
((surf_action_lmm_t) action)->variable,
0.0);
((surf_action_lmm_t) action)->suspended = 1;
- xbt_heap_remove(cpu_im_action_heap,
- ((surf_action_cpu_Cas01_im_t) action)->index_heap);
- xbt_swag_insert(ACTION_GET_CPU(action), cpu_im_modified_cpu);
+ if(cpu_update_mechanism == UM_LAZY){
+ xbt_heap_remove(cpu_action_heap,
+ ((surf_action_cpu_Cas01_t) action)->index_heap);
+ xbt_swag_insert(ACTION_GET_CPU(action), cpu_modified_cpu);
+ }
}
XBT_OUT();
}
-static void cpu_im_action_resume(surf_action_t action)
+static void cpu_action_resume(surf_action_t action)
{
XBT_IN("(%p)", action);
if (((surf_action_lmm_t) action)->suspended != 2) {
- lmm_update_variable_weight(cpu_im_maxmin_system,
+ lmm_update_variable_weight(cpu_maxmin_system,
((surf_action_lmm_t) action)->variable,
action->priority);
((surf_action_lmm_t) action)->suspended = 0;
- xbt_swag_insert(ACTION_GET_CPU(action), cpu_im_modified_cpu);
+ if(cpu_update_mechanism == UM_LAZY)
+ xbt_swag_insert(ACTION_GET_CPU(action), cpu_modified_cpu);
}
XBT_OUT();
}
-static int cpu_im_action_is_suspended(surf_action_t action)
+static int cpu_action_is_suspended(surf_action_t action)
{
return (((surf_action_lmm_t) action)->suspended == 1);
}
-static void cpu_im_action_set_max_duration(surf_action_t action,
+static void cpu_action_set_max_duration(surf_action_t action,
double duration)
{
XBT_IN("(%p,%g)", action, duration);
action->max_duration = duration;
/* insert cpu in modified_cpu set to notice the max duration change */
- xbt_swag_insert(ACTION_GET_CPU(action), cpu_im_modified_cpu);
+ if(cpu_update_mechanism == UM_LAZY)
+ xbt_swag_insert(ACTION_GET_CPU(action), cpu_modified_cpu);
XBT_OUT();
}
-static void cpu_im_action_set_priority(surf_action_t action,
+static void cpu_action_set_priority(surf_action_t action,
double priority)
{
XBT_IN("(%p,%g)", action, priority);
action->priority = priority;
- lmm_update_variable_weight(cpu_im_maxmin_system,
+ lmm_update_variable_weight(cpu_maxmin_system,
((surf_action_lmm_t) action)->variable,
priority);
- xbt_swag_insert(ACTION_GET_CPU(action), cpu_im_modified_cpu);
+ if(cpu_update_mechanism == UM_LAZY)
+ xbt_swag_insert(ACTION_GET_CPU(action), cpu_modified_cpu);
XBT_OUT();
}
#ifdef HAVE_TRACING
-static void cpu_im_action_set_category(surf_action_t action,
+static void cpu_action_set_category(surf_action_t action,
const char *category)
{
XBT_IN("(%p,%s)", action, category);
}
#endif
-static double cpu_im_action_get_remains(surf_action_t action)
+static double cpu_action_get_remains(surf_action_t action)
{
XBT_IN("(%p)", action);
/* update remains before return it */
- cpu_im_update_remains(ACTION_GET_CPU(action), surf_get_clock());
- return action->remains;
+ if(cpu_update_mechanism == UM_LAZY)
+ cpu_update_remains(ACTION_GET_CPU(action), surf_get_clock());
XBT_OUT();
+ return action->remains;
}
-static e_surf_resource_state_t cpu_im_get_state(void *cpu)
+static e_surf_resource_state_t cpu_get_state(void *cpu)
{
- return ((cpu_Cas01_im_t) cpu)->state_current;
+ return ((cpu_Cas01_t) cpu)->state_current;
}
-static double cpu_im_get_speed(void *cpu, double load)
+static double cpu_get_speed(void *cpu, double load)
{
- return load * (((cpu_Cas01_im_t) cpu)->power_peak);
+ return load * (((cpu_Cas01_t) cpu)->power_peak);
}
-static double cpu_im_get_available_speed(void *cpu)
+static double cpu_get_available_speed(void *cpu)
{
/* number between 0 and 1 */
- return ((cpu_Cas01_im_t) cpu)->power_scale;
+ return ((cpu_Cas01_t) cpu)->power_scale;
}
-static void cpu_im_action_update_index_heap(void *action, int i)
+static void cpu_action_update_index_heap(void *action, int i)
{
- ((surf_action_cpu_Cas01_im_t) action)->index_heap = i;
+ ((surf_action_cpu_Cas01_t) action)->index_heap = i;
}
-static void cpu_im_finalize(void)
+static void cpu_finalize(void)
{
void **cpu;
xbt_lib_cursor_t cursor;
char *key;
xbt_lib_foreach(host_lib, cursor, key, cpu){
- if(cpu[SURF_CPU_LEVEL])
- {
- cpu_Cas01_im_t CPU = cpu[SURF_CPU_LEVEL];
- xbt_swag_free(CPU->action_set);
- }
+ if(cpu[SURF_CPU_LEVEL])
+ {
+ cpu_Cas01_t CPU = cpu[SURF_CPU_LEVEL];
+ xbt_swag_free(CPU->action_set);
+ }
}
- lmm_system_free(cpu_im_maxmin_system);
- cpu_im_maxmin_system = NULL;
+ lmm_system_free(cpu_maxmin_system);
+ cpu_maxmin_system = NULL;
surf_model_exit(surf_cpu_model);
surf_cpu_model = NULL;
xbt_swag_free
- (cpu_im_running_action_set_that_does_not_need_being_checked);
- cpu_im_running_action_set_that_does_not_need_being_checked = NULL;
- xbt_heap_free(cpu_im_action_heap);
- xbt_swag_free(cpu_im_modified_cpu);
+ (cpu_running_action_set_that_does_not_need_being_checked);
+ cpu_running_action_set_that_does_not_need_being_checked = NULL;
+ if(cpu_action_heap) xbt_heap_free(cpu_action_heap);
+ if(cpu_modified_cpu) xbt_swag_free(cpu_modified_cpu);
}
-static void surf_cpu_im_model_init_internal(void)
+static void surf_cpu_model_init_internal()
{
s_surf_action_t action;
- s_cpu_Cas01_im_t cpu;
+ s_cpu_Cas01_t cpu;
surf_cpu_model = surf_model_init();
- cpu_im_running_action_set_that_does_not_need_being_checked =
+ cpu_running_action_set_that_does_not_need_being_checked =
xbt_swag_new(xbt_swag_offset(action, state_hookup));
- surf_cpu_model->name = "CPU_IM";
+ surf_cpu_model->name = "cpu";
+
+ surf_cpu_model->action_unref = cpu_action_unref;
+ surf_cpu_model->action_cancel = cpu_action_cancel;
+ surf_cpu_model->action_state_set = cpu_cpu_action_state_set;
+
+ surf_cpu_model->model_private->resource_used = cpu_resource_used;
- surf_cpu_model->action_unref = cpu_im_action_unref;
- surf_cpu_model->action_cancel = cpu_im_action_cancel;
- surf_cpu_model->action_state_set = cpu_im_cpu_action_state_set;
+ if(cpu_update_mechanism == UM_LAZY) {
+ surf_cpu_model->model_private->share_resources = cpu_share_resources_lazy;
+ surf_cpu_model->model_private->update_actions_state = cpu_update_actions_state_lazy;
+ } else if (cpu_update_mechanism == UM_FULL) {
+ surf_cpu_model->model_private->share_resources = cpu_share_resources_full;
+ surf_cpu_model->model_private->update_actions_state = cpu_update_actions_state_full;
+ } else
+ xbt_die("Invalid update mechanism!");
- surf_cpu_model->model_private->resource_used = cpu_im_resource_used;
- surf_cpu_model->model_private->share_resources = cpu_im_share_resources;
- surf_cpu_model->model_private->update_actions_state =
- cpu_im_update_actions_state;
surf_cpu_model->model_private->update_resource_state =
- cpu_im_update_resource_state;
- surf_cpu_model->model_private->finalize = cpu_im_finalize;
-
- surf_cpu_model->suspend = cpu_im_action_suspend;
- surf_cpu_model->resume = cpu_im_action_resume;
- surf_cpu_model->is_suspended = cpu_im_action_is_suspended;
- surf_cpu_model->set_max_duration = cpu_im_action_set_max_duration;
- surf_cpu_model->set_priority = cpu_im_action_set_priority;
+ cpu_update_resource_state;
+ surf_cpu_model->model_private->finalize = cpu_finalize;
+
+ surf_cpu_model->suspend = cpu_action_suspend;
+ surf_cpu_model->resume = cpu_action_resume;
+ surf_cpu_model->is_suspended = cpu_action_is_suspended;
+ surf_cpu_model->set_max_duration = cpu_action_set_max_duration;
+ surf_cpu_model->set_priority = cpu_action_set_priority;
#ifdef HAVE_TRACING
- surf_cpu_model->set_category = cpu_im_action_set_category;
+ surf_cpu_model->set_category = cpu_action_set_category;
#endif
- surf_cpu_model->get_remains = cpu_im_action_get_remains;
+ surf_cpu_model->get_remains = cpu_action_get_remains;
- surf_cpu_model->extension.cpu.execute = cpu_im_execute;
- surf_cpu_model->extension.cpu.sleep = cpu_im_action_sleep;
+ surf_cpu_model->extension.cpu.execute = cpu_execute;
+ surf_cpu_model->extension.cpu.sleep = cpu_action_sleep;
- surf_cpu_model->extension.cpu.get_state = cpu_im_get_state;
- surf_cpu_model->extension.cpu.get_speed = cpu_im_get_speed;
+ surf_cpu_model->extension.cpu.get_state = cpu_get_state;
+ surf_cpu_model->extension.cpu.get_speed = cpu_get_speed;
surf_cpu_model->extension.cpu.get_available_speed =
- cpu_im_get_available_speed;
- surf_cpu_model->extension.cpu.create_resource = cpu_im_create_resource;
- surf_cpu_model->extension.cpu.add_traces = cpu_im_add_traces_cpu;
+ cpu_get_available_speed;
+ surf_cpu_model->extension.cpu.create_resource = cpu_create_resource;
+ surf_cpu_model->extension.cpu.add_traces = cpu_add_traces_cpu;
- if (!cpu_im_maxmin_system) {
- sg_maxmin_selective_update = 1;
- cpu_im_maxmin_system = lmm_system_new();
+ if (!cpu_maxmin_system) {
+ cpu_maxmin_system = lmm_system_new(cpu_selective_update);
+ }
+ if(cpu_update_mechanism == UM_LAZY){
+ cpu_action_heap = xbt_heap_new(8, NULL);
+ xbt_heap_set_update_callback(cpu_action_heap,
+ cpu_action_update_index_heap);
+ cpu_modified_cpu =
+ xbt_swag_new(xbt_swag_offset(cpu, modified_cpu_hookup));
}
- cpu_im_action_heap = xbt_heap_new(8, NULL);
- xbt_heap_set_update_callback(cpu_im_action_heap,
- cpu_im_action_update_index_heap);
- cpu_im_modified_cpu =
- xbt_swag_new(xbt_swag_offset(cpu, modified_cpu_hookup));
}
/*********************************************************************/
/* note = "Available at */
/* \url{http://grail.sdsc.edu/papers/simgrid_ccgrid01.ps.gz}." */
/* } */
-void surf_cpu_model_init_Cas01_im()
+
+void surf_cpu_model_init_Cas01()
{
+ char *optim = xbt_cfg_get_string(_surf_cfg_set, "cpu/optim");
+ int select = xbt_cfg_get_int(_surf_cfg_set, "cpu/maxmin_selective_update");
+
+ if(!strcmp(optim,"Full")) {
+ cpu_update_mechanism = UM_FULL;
+ cpu_selective_update = select;
+ } else if (!strcmp(optim,"Lazy")) {
+ cpu_update_mechanism = UM_LAZY;
+ cpu_selective_update = 1;
+ xbt_assert((select==1) || (xbt_cfg_is_default_value(_surf_cfg_set,"cpu/maxmin_selective_update")),
+ "Disabling selective update while using the lazy update mechanism is dumb!");
+ } else if (!strcmp(optim,"TI")) {
+ surf_cpu_model_init_ti();
+ return;
+ } else {
+ xbt_die("Unsupported optimization (%s) for this model",optim);
+ }
+
if (surf_cpu_model)
return;
- surf_cpu_im_model_init_internal();
- cpu_im_define_callbacks();
+ surf_cpu_model_init_internal();
+ cpu_define_callbacks();
xbt_dynar_push(model_list, &surf_cpu_model);
}
cpu_ti_update_remaining_amount((cpu_ti_t)
((surf_action_cpu_ti_t) action)->cpu,
surf_get_clock());
- return action->remains;
XBT_OUT();
+ return action->remains;
}
static e_surf_resource_state_t cpu_ti_get_state(void *cpu)
cpu_ti_modified_cpu =
xbt_swag_new(xbt_swag_offset(cpu, modified_cpu_hookup));
- surf_cpu_model->name = "CPU_TI";
+ surf_cpu_model->name = "cpu_ti";
surf_cpu_model->action_unref = cpu_ti_action_unref;
surf_cpu_model->action_cancel = cpu_ti_action_cancel;
static void lmm_update_modified_set(lmm_system_t sys,
lmm_constraint_t cnst);
static void lmm_remove_all_modified_set(lmm_system_t sys);
-int sg_maxmin_selective_update = 1;
static int Global_debug_id = 1;
static int Global_const_debug_id = 1;
extern xbt_swag_t keep_track;
-lmm_system_t lmm_system_new(void)
+lmm_system_t lmm_system_new(int selective_update)
{
lmm_system_t l = NULL;
s_lmm_variable_t var;
l = xbt_new0(s_lmm_system_t, 1);
l->modified = 0;
- l->selective_update_active = sg_maxmin_selective_update;
+ l->selective_update_active = selective_update;
+ l->visited_counter = 1;
XBT_DEBUG("Setting selective_update_active flag to %d\n",
l->selective_update_active);
XBT_INLINE void lmm_variable_disable(lmm_system_t sys, lmm_variable_t var)
{
int i;
+ int n;
+
lmm_element_t elem = NULL;
XBT_IN("(sys=%p, var=%p)", sys, var);
sys->modified = 1;
+ n = 0;
for (i = 0; i < var->cnsts_number; i++) {
elem = &var->cnsts[i];
xbt_swag_remove(elem, &(elem->constraint->element_set));
xbt_swag_remove(elem, &(elem->constraint->active_element_set));
if (!xbt_swag_size(&(elem->constraint->element_set)))
make_constraint_inactive(sys, elem->constraint);
- else
- lmm_update_modified_set(sys, elem->constraint);
+ else {
+ if (n < i)
+ var->cnsts[n].constraint = elem->constraint;
+ n++;
+ }
+ }
+ if (n) {
+ var->cnsts_number = n;
+ lmm_update_modified_set(sys, var->cnsts[0].constraint);
}
+
var->cnsts_number = 0;
XBT_OUT();
}
var->weight = weight;
var->bound = bound;
var->value = 0.0;
-
+ var->visited = sys->visited_counter - 1;
var->mu = 0.0;
var->new_mu = 0.0;
var->func_f = func_f_def;
make_constraint_active(sys, cnst);
lmm_update_modified_set(sys, cnst);
+ if (var->cnsts_number > 1)
+ lmm_update_modified_set(sys, var->cnsts[0].constraint);
}
void lmm_expand_add(lmm_system_t sys, lmm_constraint_t cnst,
void lmm_update_variable_bound(lmm_system_t sys, lmm_variable_t var,
double bound)
{
- int i;
-
sys->modified = 1;
var->bound = bound;
- for (i = 0; i < var->cnsts_number; i++)
- lmm_update_modified_set(sys, var->cnsts[i].constraint);
-
+ if (var->cnsts_number)
+ lmm_update_modified_set(sys, var->cnsts[0].constraint);
}
else
xbt_swag_insert_at_tail(elem, &(elem->constraint->element_set));
- lmm_update_modified_set(sys, elem->constraint);
+ if (i == 0)
+ lmm_update_modified_set(sys, elem->constraint);
}
if (!weight)
var->value = 0.0;
* constraints that have changed. Each constraint change is propagated
* to the list of constraints for each variable.
*/
-static void lmm_update_modified_set(lmm_system_t sys,
- lmm_constraint_t cnst)
+static void lmm_update_modified_set_rec(lmm_system_t sys,
+ lmm_constraint_t cnst)
{
- lmm_element_t elem = NULL;
- lmm_variable_t var = NULL;
- xbt_swag_t elem_list = NULL;
- int i;
-
- /* return if selective update isn't active */
- if (!sys->selective_update_active)
- return;
-
- //XBT_DEBUG("Updating modified constraint set with constraint %d", cnst->id_int);
-
- if (xbt_swag_belongs(cnst, &(sys->modified_constraint_set)))
- return;
-
- //XBT_DEBUG("Inserting into modified constraint set %d", cnst->id_int);
-
- /* add to modified set */
- xbt_swag_insert(cnst, &(sys->modified_constraint_set));
+ lmm_element_t elem;
- elem_list = &(cnst->element_set);
- xbt_swag_foreach(elem, elem_list) {
- var = elem->variable;
- for (i = 0; i < var->cnsts_number; i++)
- if (cnst != var->cnsts[i].constraint) {
- //XBT_DEBUG("Updating modified %d calling for %d", cnst->id_int, var->cnsts[i].constraint->id_int);
- lmm_update_modified_set(sys, var->cnsts[i].constraint);
+ xbt_swag_foreach(elem, &cnst->element_set) {
+ lmm_variable_t var = elem->variable;
+ s_lmm_element_t *cnsts = var->cnsts;
+ int i;
+ for (i = 0; var->visited != sys->visited_counter
+ && i < var->cnsts_number ; i++) {
+ if (cnsts[i].constraint != cnst
+ && !xbt_swag_belongs(cnsts[i].constraint,
+ &sys->modified_constraint_set)) {
+ xbt_swag_insert(cnsts[i].constraint, &sys->modified_constraint_set);
+ lmm_update_modified_set_rec(sys, cnsts[i].constraint);
}
+ }
+ var->visited = sys->visited_counter;
+ }
+}
+
+static void lmm_update_modified_set(lmm_system_t sys,
+ lmm_constraint_t cnst)
+{
+ /* nothing to do if selective update isn't active */
+ if (sys->selective_update_active
+ && !xbt_swag_belongs(cnst, &sys->modified_constraint_set)) {
+ xbt_swag_insert(cnst, &sys->modified_constraint_set);
+ lmm_update_modified_set_rec(sys, cnst);
}
}
*/
static void lmm_remove_all_modified_set(lmm_system_t sys)
{
+ if (++sys->visited_counter == 1) {
+ /* the counter wrapped around, reset each variable->visited */
+ lmm_variable_t var;
+ xbt_swag_foreach(var, &sys->variable_set)
+ var->visited = 0;
+ }
xbt_swag_reset(&sys->modified_constraint_set);
}
double value;
void *id;
int id_int;
+ unsigned visited; /* used by lmm_update_modified_set */
/* \begin{For Lagrange only} */
double mu;
double new_mu;
typedef struct lmm_system {
int modified;
int selective_update_active; /* flag to update partially the system only selecting changed portions */
-
+ unsigned visited_counter; /* used by lmm_update_modified_set */
s_xbt_swag_t variable_set; /* a list of lmm_variable_t */
s_xbt_swag_t constraint_set; /* a list of lmm_constraint_t */
#define remove_constraint(sys,cnst) do {xbt_swag_remove(cnst,&(sys->constraint_set));\
xbt_swag_remove(cnst,&(sys->saturated_constraint_set));} while(0)
#define make_constraint_active(sys,cnst) xbt_swag_insert(cnst,&(sys->active_constraint_set))
-#define make_constraint_inactive(sys,cnst) xbt_swag_remove(cnst,&(sys->active_constraint_set))
+#define make_constraint_inactive(sys,cnst) \
+ do { xbt_swag_remove(cnst, &sys->active_constraint_set); \
+ xbt_swag_remove(cnst, &sys->modified_constraint_set); } while (0)
static void lmm_var_free(lmm_system_t sys, lmm_variable_t var);
static XBT_INLINE void lmm_cnst_free(lmm_system_t sys,
-/* Copyright (c) 2004-2011. The SimGrid Team.
+
+/*
+ * Network with improved management of tasks, IM (Improved Management).
+ * Uses a heap to store actions so that the share_resources is faster.
+ * This model automatically sets the selective update flag to 1 and is
+ * highly dependent on the maxmin lmm module.
+ */
+
+/* Copyright (c) 2009, 2010, 2011. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "xbt/log.h"
#include "xbt/str.h"
+#include "surf_private.h"
+#include "xbt/dict.h"
+#include "maxmin_private.h"
#include "surf/surfxml_parse_values.h"
#include "surf/surf_resource.h"
#include "surf/surf_resource_lmm.h"
+#undef GENERIC_ACTION
+#define GENERIC_ACTION(action) action->generic_action
+
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_network, surf,
"Logging specific to the SURF network module");
double sg_weight_S_parameter = 0.0; /* default value; can be set by model or from command line */
double sg_tcp_gamma = 0.0;
-int sg_network_fullduplex = 0;
+int sg_network_crosstraffic = 0;
xbt_dict_t gap_lookup = NULL;
-static double net_get_link_bandwidth(const void *link);
+e_UM_t network_update_mechanism = UM_UNDEFINED;
+static int net_selective_update = 0;
-static void gap_append(double size, const link_CM02_t link, surf_action_network_CM02_t action) {
- const char* src = link->lmm_resource.generic_resource.name;
- xbt_fifo_t fifo;
- surf_action_network_CM02_t last_action;
- double bw;
+static int net_action_is_suspended(surf_action_t action);
+static void update_action_remaining(double now);
- if(sg_sender_gap > 0.0) {
- if(!gap_lookup) {
- gap_lookup = xbt_dict_new_homogeneous(NULL);
- }
- fifo = (xbt_fifo_t)xbt_dict_get_or_null(gap_lookup, src);
- action->sender.gap = 0.0;
- if(fifo && xbt_fifo_size(fifo) > 0) {
- /* Compute gap from last send */
- last_action = (surf_action_network_CM02_t)xbt_fifo_get_item_content(xbt_fifo_get_last_item(fifo));
- bw = net_get_link_bandwidth(link);
- action->sender.gap = last_action->sender.gap + max(sg_sender_gap, last_action->sender.size / bw);
- action->latency += action->sender.gap;
- }
- /* Append action as last send */
- action->sender.link_name = link->lmm_resource.generic_resource.name;
- fifo = (xbt_fifo_t)xbt_dict_get_or_null(gap_lookup, action->sender.link_name);
- if(!fifo) {
- fifo = xbt_fifo_new();
- xbt_dict_set(gap_lookup, action->sender.link_name, fifo, NULL);
- }
- action->sender.fifo_item = xbt_fifo_push(fifo, action);
- action->sender.size = size;
- }
-}
+static xbt_swag_t net_modified_set = NULL;
+static xbt_heap_t net_action_heap = NULL;
+xbt_swag_t keep_track = NULL;
-static void gap_unknown(surf_action_network_CM02_t action) {
- action->sender.gap = 0.0;
- action->sender.link_name = NULL;
- action->sender.fifo_item = NULL;
- action->sender.size = 0.0;
+/* added to manage the communication action's heap */
+static void net_action_update_index_heap(void *action, int i)
+{
+ ((surf_action_network_CM02_t) action)->index_heap = i;
}
-static void gap_remove(surf_action_network_CM02_t action) {
- xbt_fifo_t fifo;
- size_t size;
+/* insert action on heap using a given key and a hat (heap_action_type)
+ * a hat can be of three types for communications:
+ *
+ * NORMAL = this is a normal heap entry stating the date to finish transmitting
+ * LATENCY = this is a heap entry to warn us when the latency is paid
+ * MAX_DURATION = this is a heap entry to warn us when the max_duration limit is reached
+ */
+static void heap_insert(surf_action_network_CM02_t action, double key, enum heap_action_type hat){
+ action->hat = hat;
+ xbt_heap_push(net_action_heap, action, key);
+}
- if(sg_sender_gap > 0.0 && action->sender.link_name && action->sender.fifo_item) {
- fifo = (xbt_fifo_t)xbt_dict_get_or_null(gap_lookup, action->sender.link_name);
- xbt_fifo_remove_item(fifo, action->sender.fifo_item);
- size = xbt_fifo_size(fifo);
- if(size == 0) {
- xbt_fifo_free(fifo);
- xbt_dict_remove(gap_lookup, action->sender.link_name);
- size = xbt_dict_length(gap_lookup);
- if(size == 0) {
- xbt_dict_free(&gap_lookup);
- }
- }
- }
+static void heap_remove(surf_action_network_CM02_t action){
+ action->hat = NONE;
+ if(((surf_action_network_CM02_t) action)->index_heap >= 0){
+ xbt_heap_remove(net_action_heap,action->index_heap);
+ }
}
/******************************************************************************/
/**********************/
/* SMPI callbacks */
/**********************/
-static double smpi_latency_factor(double size)
+static double smpi_bandwidth_factor(double size)
{
- /* 1 B <= size <= 1 KiB */
- if (size <= 1024.0) {
- return 1.0056;
- }
-
- /* 2 KiB <= size <= 32 KiB */
- if (size <= 32768.0) {
- return 1.8805;
- }
- /* 64 KiB <= size <= 4 MiB */
- return 22.7111;
+ if (size >= 65472) return 0.940694;
+ if (size >= 15424) return 0.697866;
+ if (size >= 9376) return 0.58729;
+ if (size >= 5776) return 1.08739;
+ if (size >= 3484) return 0.77493;
+ if (size >= 1426) return 0.608902;
+ if (size >= 732) return 0.341987;
+ if (size >= 257) return 0.338112;
+ if (size >= 0) return 0.812084;
+ return 1.0;
}
-static double smpi_bandwidth_factor(double size)
+static double smpi_latency_factor(double size)
{
- /* 1 B <= size <= 1 KiB */
- if (size <= 1024.0) {
- return 0.2758;
- }
- /* 2 KiB <= size <= 32 KiB */
- if (size <= 32768.0) {
- return 0.5477;
- }
-
- /* 64 KiB <= size <= 4 MiB */
- return 0.9359;
+ if (size >= 65472) return 11.6436;
+ if (size >= 15424) return 3.48845;
+ if (size >= 9376) return 2.59299;
+ if (size >= 5776) return 2.18796;
+ if (size >= 3484) return 1.88101;
+ if (size >= 1426) return 1.61075;
+ if (size >= 732) return 1.9503;
+ if (size >= 257) return 1.95341;
+ if (size >= 0) return 2.01467;
+ return 1.0;
}
+/**--------- <copy/paste C code snippet in surf/network.c> -----------*/
static double smpi_bandwidth_constraint(double rate, double bound,
double size)
return rate < 0 ? bound : min(bound, rate * smpi_bandwidth_factor(size));
}
-
static double (*latency_factor_callback) (double) =
&constant_latency_factor;
static double (*bandwidth_factor_callback) (double) =
static double (*bandwidth_constraint_callback) (double, double, double) =
&constant_bandwidth_constraint;
+static void (*gap_append) (double, const link_CM02_t, surf_action_network_CM02_t) = NULL;
+static void (*gap_remove) (surf_action_network_CM02_t) = NULL;
static void* net_create_resource(const char *name,
double bw_initial,
static void net_parse_link_init(sg_platf_link_cbarg_t link)
{
- XBT_DEBUG("link_CM02");
-
- if (link->policy == SURF_LINK_FULLDUPLEX) {
+ if(link->policy == SURF_LINK_FULLDUPLEX){
char *link_id;
link_id = bprintf("%s_UP", link->id);
- net_create_resource(link_id,
- link->bandwidth,
- link->bandwidth_trace,
- link->latency,
- link->latency_trace,
- link->state,
- link->state_trace,
- link->policy,
- link->properties);
- xbt_free(link_id);
+ net_create_resource(link_id,
+ link->bandwidth,
+ link->bandwidth_trace,
+ link->latency,
+ link->latency_trace,
+ link->state,
+ link->state_trace,
+ link->policy,
+ link->properties);
+ xbt_free(link_id);
link_id = bprintf("%s_DOWN", link->id);
net_create_resource(link_id,
link->bandwidth,
link->state,
link->state_trace,
link->policy,
- NULL); /* FIXME: We need to deep copy the properties or
- * we won't be able to free it */
+ link->properties);
xbt_free(link_id);
- } else {
+ }
+ else{
net_create_resource(link->id,
link->bandwidth,
link->bandwidth_trace,
xbt_dict_foreach(trace_connect_list_link_avail, cursor, trace_name, elm) {
tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
link_CM02_t link =
- xbt_lib_get_or_null(link_lib, elm, SURF_LINK_LEVEL);
+ xbt_lib_get_or_null(link_lib, elm, SURF_LINK_LEVEL);
xbt_assert(link, "Cannot connect trace %s to link %s: link undefined",
trace_name, elm);
action->refcount--;
if (!action->refcount) {
xbt_swag_remove(action, action->state_set);
- if (((surf_action_network_CM02_t) action)->variable)
+ if (((surf_action_network_CM02_t) action)->variable){
lmm_variable_free(network_maxmin_system,
((surf_action_network_CM02_t) action)->variable);
+ }
+ if(network_update_mechanism == UM_LAZY){// remove action from the heap
+ heap_remove((surf_action_network_CM02_t) action);
+ xbt_swag_remove(action, net_modified_set);
+ }
#ifdef HAVE_TRACING
xbt_free(((surf_action_network_CM02_t) action)->src_name);
xbt_free(((surf_action_network_CM02_t) action)->dst_name);
return 0;
}
+
+
static void net_action_cancel(surf_action_t action)
{
+ XBT_DEBUG("cancel action %p",action);
surf_network_model->action_state_set(action, SURF_ACTION_FAILED);
+ if(network_update_mechanism == UM_LAZY){// remove action from the heap
+ xbt_swag_remove(action, net_modified_set);
+ heap_remove((surf_action_network_CM02_t) action);
+ }
}
void net_action_recycle(surf_action_t action)
}
#ifdef HAVE_LATENCY_BOUND_TRACKING
-int net_get_link_latency_limited(surf_action_t action)
+static int net_get_link_latency_limited(surf_action_t action)
{
return action->latency_limited;
}
double net_action_get_remains(surf_action_t action)
{
+ if(network_update_mechanism == UM_LAZY)/* update remains before return it */
+ update_action_remaining(surf_get_clock());
return action->remains;
}
-static double net_share_resources(double now)
+static void update_action_remaining(double now){
+ surf_action_network_CM02_t action = NULL;
+ double delta = 0.0;
+
+ xbt_swag_foreach(action, net_modified_set) {
+
+ if(action->suspended != 0){
+ continue;
+ }
+
+ delta = now - action->last_update;
+
+ double_update(&(action->generic_action.remains),
+ lmm_variable_getvalue(action->variable) * delta);
+
+ if (action->generic_action.max_duration != NO_MAX_DURATION)
+ double_update(&(action->generic_action.max_duration), delta);
+
+ if ((action->generic_action.remains <= 0) &&
+ (lmm_get_variable_weight(action->variable) > 0)) {
+ action->generic_action.finish = surf_get_clock();
+ surf_network_model->action_state_set((surf_action_t) action,
+ SURF_ACTION_DONE);
+ heap_remove(action);
+ } else if ((action->generic_action.max_duration != NO_MAX_DURATION)
+ && (action->generic_action.max_duration <= 0)) {
+ action->generic_action.finish = surf_get_clock();
+ surf_network_model->action_state_set((surf_action_t) action,
+ SURF_ACTION_DONE);
+ heap_remove(action);
+ }
+
+ action->last_update = now;
+ }
+}
+
+static double net_share_resources_full(double now)
{
s_surf_action_network_CM02_t s_action;
surf_action_network_CM02_t action = NULL;
}
#endif
if (action->latency > 0) {
- if (min < 0)
- min = action->latency;
- else if (action->latency < min)
- min = action->latency;
+ min = (min<0)?action->latency:min(min,action->latency);
}
}
return min;
}
-static void net_update_actions_state(double now, double delta)
+static double net_share_resources_lazy(double now)
+{
+ surf_action_network_CM02_t action = NULL;
+ double min=-1;
+ double value;
+
+ XBT_DEBUG("Before share resources, the size of modified actions set is %d", xbt_swag_size(net_modified_set));
+ update_action_remaining(now);
+
+ keep_track = net_modified_set;
+ lmm_solve(network_maxmin_system);
+ keep_track = NULL;
+
+ XBT_DEBUG("After share resources, The size of modified actions set is %d", xbt_swag_size(net_modified_set));
+
+ xbt_swag_foreach(action, net_modified_set) {
+ int max_dur_flag = 0;
+
+ if (GENERIC_ACTION(action).state_set != surf_network_model->states.running_action_set){
+ continue;
+ }
+
+ /* bogus priority, skip it */
+ if (GENERIC_ACTION(action).priority <= 0){
+ continue;
+ }
+
+ min = -1;
+ value = lmm_variable_getvalue(action->variable);
+ if (value > 0) {
+ if (GENERIC_ACTION(action).remains > 0) {
+ value = GENERIC_ACTION(action).remains / value;
+ min = now + value;
+ } else {
+ value = 0.0;
+ min = now;
+ }
+ }
+
+ if ((GENERIC_ACTION(action).max_duration != NO_MAX_DURATION)
+ && (min == -1
+ || GENERIC_ACTION(action).start +
+ GENERIC_ACTION(action).max_duration < min)){
+ min = GENERIC_ACTION(action).start +
+ GENERIC_ACTION(action).max_duration;
+ max_dur_flag = 1;
+ }
+
+ XBT_DEBUG("Action(%p) Start %lf Finish %lf Max_duration %lf", action,
+ GENERIC_ACTION(action).start, now + value,
+ GENERIC_ACTION(action).max_duration);
+
+ if (action->index_heap >= 0) {
+ heap_remove((surf_action_network_CM02_t) action);
+ }
+
+ if (min != -1) {
+ heap_insert((surf_action_network_CM02_t) action, min, max_dur_flag?MAX_DURATION:NORMAL);
+ XBT_DEBUG("Insert at heap action(%p) min %lf now %lf", action, min, now);
+ }
+ }
+
+  // at this point, min must already hold the minimum value for this resource model
+ if(xbt_heap_size(net_action_heap) > 0 ){
+ min = xbt_heap_maxkey(net_action_heap) - now ;
+ }else{
+ min = -1;
+ }
+
+ XBT_DEBUG("The minimum with the HEAP %lf", min);
+
+ return min;
+}
+
+static void net_update_actions_state_full(double now, double delta)
{
double deltap = 0.0;
surf_action_network_CM02_t action = NULL;
}
#endif
if(!lmm_get_number_of_cnst_from_var(network_maxmin_system, action->variable)) {
- /* There is actually no link used, hence an infinite bandwidth.
- * This happens often when using models like vivaldi.
- * In such case, just make sure that the action completes immediately.
- */
- double_update(&(action->generic_action.remains),
- action->generic_action.remains);
+ /* There is actually no link used, hence an infinite bandwidth.
+ * This happens often when using models like vivaldi.
+ * In such case, just make sure that the action completes immediately.
+ */
+ double_update(&(action->generic_action.remains),
+ action->generic_action.remains);
}
double_update(&(action->generic_action.remains),
lmm_variable_getvalue(action->variable) * deltap);
action->generic_action.finish = surf_get_clock();
surf_network_model->action_state_set((surf_action_t) action,
SURF_ACTION_DONE);
- gap_remove(action);
+
+ if(gap_remove) gap_remove(action);
} else if ((action->generic_action.max_duration != NO_MAX_DURATION)
&& (action->generic_action.max_duration <= 0)) {
action->generic_action.finish = surf_get_clock();
surf_network_model->action_state_set((surf_action_t) action,
SURF_ACTION_DONE);
- gap_remove(action);
+ if(gap_remove) gap_remove(action);
}
}
return;
}
+static void net_update_actions_state_lazy(double now, double delta)
+{
+ surf_action_network_CM02_t action = NULL;
+
+ while ((xbt_heap_size(net_action_heap) > 0)
+ && (double_equals(xbt_heap_maxkey(net_action_heap), now))) {
+ action = xbt_heap_pop(net_action_heap);
+ XBT_DEBUG("Action %p: finish", action);
+ GENERIC_ACTION(action).finish = surf_get_clock();
+
+ // if I am wearing a latency hat
+ if( action->hat == LATENCY){
+ lmm_update_variable_weight(network_maxmin_system, action->variable,
+ action->weight);
+ heap_remove(action);
+ action->last_update = surf_get_clock();
+
+ // if I am wearing a max_duration or normal hat
+ }else if( action->hat == MAX_DURATION || action->hat == NORMAL ){
+ // no need to communicate anymore
+ // assume that flows that reached max_duration have remaining of 0
+ GENERIC_ACTION(action).remains = 0;
+ action->generic_action.finish = surf_get_clock();
+ surf_network_model->action_state_set((surf_action_t) action,
+ SURF_ACTION_DONE);
+ heap_remove(action);
+ }
+ }
+ return;
+}
+
static void net_update_resource_state(void *id,
tmgr_trace_event_t event_type,
double value, double date)
(nw_link->lmm_resource.power.peak *
nw_link->lmm_resource.power.scale));
#ifdef HAVE_TRACING
- TRACE_surf_link_set_bandwidth(date, nw_link->lmm_resource.generic_resource.name,
+ TRACE_surf_link_set_bandwidth(date, (char *)(((nw_link->lmm_resource).generic_resource).name),
sg_bandwidth_factor *
(nw_link->lmm_resource.power.peak *
nw_link->lmm_resource.power.scale));
xbt_abort();
}
+ XBT_DEBUG("There were a resource state event, need to update actions related to the constraint (%p)", nw_link->lmm_resource.constraint);
return;
}
surf_action_network_CM02_t action = NULL;
double bandwidth_bound;
double latency=0.0;
- /* LARGE PLATFORMS HACK:
- Add a link_CM02_t *link and a int link_nb to network_card_CM02_t. It will represent local links for this node
- Use the cluster_id for ->id */
-
xbt_dynar_t back_route = NULL;
int constraints_per_variable = 0;
- xbt_dynar_t route = xbt_dynar_new(sizeof(void*),NULL);
- // I need to have the forward and backward routes at the same time, so allocate "route". That way, the routing wont clean it up
- routing_get_route_and_latency(src_name, dst_name, &route, &latency);
-
- if (sg_network_fullduplex == 1) {
- // FIXME: fill route directly
- routing_get_route_and_latency(dst_name, src_name, &back_route,NULL);
- }
- /* LARGE PLATFORMS HACK:
- total_route_size = route_size + src->link_nb + dst->nb */
+ xbt_dynar_t route=xbt_dynar_new(global_routing->size_of_link,NULL);
XBT_IN("(%s,%s,%g,%g)", src_name, dst_name, size, rate);
- /* LARGE PLATFORMS HACK:
- assert on total_route_size */
+
+ routing_get_route_and_latency(src_name, dst_name, &route, &latency);
xbt_assert(!xbt_dynar_is_empty(route) || latency,
"You're trying to send data from %s to %s but there is no connection at all between these two hosts.",
src_name, dst_name);
break;
}
}
+ if (sg_network_crosstraffic == 1) {
+ routing_get_route_and_latency(dst_name, src_name, &back_route,NULL);
+ xbt_dynar_foreach(back_route, i, link) {
+ if (link->lmm_resource.state_current == SURF_RESOURCE_OFF) {
+ failed = 1;
+ break;
+ }
+ }
+ }
+
action =
surf_action_new(sizeof(s_surf_action_network_CM02_t), size,
surf_network_model, failed);
xbt_swag_insert(action, action->generic_action.state_set);
action->rate = rate;
+ if(network_update_mechanism == UM_LAZY){
+ action->index_heap = -1;
+ action->last_update = surf_get_clock();
+ }
bandwidth_bound = -1.0;
-
+ if(sg_weight_S_parameter>0) {
+ xbt_dynar_foreach(route, i, link) {
+ action->weight +=
+ sg_weight_S_parameter /
+ (link->lmm_resource.power.peak * link->lmm_resource.power.scale);
+ }
+ }
xbt_dynar_foreach(route, i, link) {
- action->weight +=
- sg_weight_S_parameter /
+ double bb = bandwidth_factor_callback(size) *
(link->lmm_resource.power.peak * link->lmm_resource.power.scale);
- if (bandwidth_bound < 0.0)
- bandwidth_bound =
- bandwidth_factor_callback(size) *
- (link->lmm_resource.power.peak * link->lmm_resource.power.scale);
- else
- bandwidth_bound =
- min(bandwidth_bound,
- bandwidth_factor_callback(size) *
- (link->lmm_resource.power.peak *
- link->lmm_resource.power.scale));
+ bandwidth_bound = (bandwidth_bound < 0.0)?bb:min(bandwidth_bound,bb);
}
- /* LARGE PLATFORMS HACK:
- Add src->link and dst->link latencies */
+
action->lat_current = action->latency;
action->latency *= latency_factor_callback(size);
action->rate =
bandwidth_constraint_callback(action->rate, bandwidth_bound,
size);
+ if(gap_append) {
+ xbt_assert(!xbt_dynar_is_empty(route),"Using a model with a gap (e.g., SMPI) with a platform without links (e.g. vivaldi)!!!");
- if(!xbt_dynar_is_empty(route)) {
link = *(link_CM02_t*)xbt_dynar_get_ptr(route, 0);
gap_append(size, link, action);
XBT_DEBUG("Comm %p: %s -> %s gap=%f (lat=%f)",
action, src_name, dst_name, action->sender.gap, action->latency);
- } else {
- gap_unknown(action);
}
+ constraints_per_variable = xbt_dynar_length(route);
+ if (back_route != NULL)
+ constraints_per_variable += xbt_dynar_length(back_route);
- /* LARGE PLATFORMS HACK:
- lmm_variable_new(..., total_route_size) */
- if (back_route != NULL) {
- constraints_per_variable =
- xbt_dynar_length(route) + xbt_dynar_length(back_route);
- } else {
- constraints_per_variable = xbt_dynar_length(route);
- }
-
- if (action->latency > 0)
- action->variable =
+ if (action->latency > 0){
+ action->variable =
lmm_variable_new(network_maxmin_system, action, 0.0, -1.0,
constraints_per_variable);
- else
+ if(network_update_mechanism == UM_LAZY){
+ // add to the heap the event when the latency is paid
+ XBT_DEBUG("Added action (%p) one latency event at date %f", action, action->latency + action->last_update);
+ heap_insert(action, action->latency + action->last_update, xbt_dynar_is_empty(route)?NORMAL:LATENCY);
+ }
+ } else
action->variable =
lmm_variable_new(network_maxmin_system, action, 1.0, -1.0,
constraints_per_variable);
if (action->rate < 0) {
- if (action->lat_current > 0)
- lmm_update_variable_bound(network_maxmin_system, action->variable,
- sg_tcp_gamma / (2.0 *
- action->lat_current));
- else
- lmm_update_variable_bound(network_maxmin_system, action->variable,
- -1.0);
+ lmm_update_variable_bound(network_maxmin_system, action->variable,
+ (action->lat_current > 0)?
+ sg_tcp_gamma / (2.0 * action->lat_current) :-1.0);
} else {
- if (action->lat_current > 0)
- lmm_update_variable_bound(network_maxmin_system, action->variable,
- min(action->rate,
- sg_tcp_gamma / (2.0 *
- action->lat_current)));
- else
- lmm_update_variable_bound(network_maxmin_system, action->variable,
- action->rate);
+ lmm_update_variable_bound(network_maxmin_system, action->variable,
+ (action->lat_current > 0)?
+ min(action->rate, sg_tcp_gamma / (2.0 * action->lat_current))
+ :action->rate);
}
xbt_dynar_foreach(route, i, link) {
action->variable, 1.0);
}
- if (sg_network_fullduplex == 1) {
- XBT_DEBUG("Fullduplex active adding backward flow using 5%c", '%');
+ if (sg_network_crosstraffic == 1) {
+ XBT_DEBUG("Fullduplex active adding backward flow using 5%%");
xbt_dynar_foreach(back_route, i, link) {
lmm_expand(network_maxmin_system, link->lmm_resource.constraint,
action->variable, .05);
}
}
- /* LARGE PLATFORMS HACK:
- expand also with src->link and dst->link */
+
#ifdef HAVE_TRACING
if (TRACE_is_enabled()) {
action->src_name = xbt_strdup(src_name);
static xbt_dynar_t net_get_route(const char *src, const char *dst)
{
xbt_dynar_t route=NULL;
- routing_get_route_and_latency(src, dst,&route, NULL);
+ routing_get_route_and_latency(src, dst,&route,NULL);
return route;
}
lmm_update_variable_weight(network_maxmin_system,
((surf_action_network_CM02_t)
action)->variable, 0.0);
+
+ if(network_update_mechanism == UM_LAZY)// remove action from the heap
+ heap_remove((surf_action_network_CM02_t) action);
}
static void net_action_resume(surf_action_t action)
((surf_action_network_CM02_t)
action)->weight);
((surf_action_network_CM02_t) action)->suspended = 0;
+ if(network_update_mechanism == UM_LAZY)// remove action from the heap
+ heap_remove((surf_action_network_CM02_t) action);
}
}
void net_action_set_max_duration(surf_action_t action, double duration)
{
action->max_duration = duration;
+ if(network_update_mechanism == UM_LAZY)// remove action from the heap
+ heap_remove((surf_action_network_CM02_t) action);
}
#ifdef HAVE_TRACING
lmm_system_free(network_maxmin_system);
network_maxmin_system = NULL;
+
+ if(network_update_mechanism == UM_LAZY){
+ xbt_heap_free(net_action_heap);
+ xbt_swag_free(net_modified_set);
+ }
+}
+
+static void smpi_gap_append(double size, const link_CM02_t link, surf_action_network_CM02_t action) {
+ const char* src = link->lmm_resource.generic_resource.name;
+ xbt_fifo_t fifo;
+ surf_action_network_CM02_t last_action;
+ double bw;
+
+ if(sg_sender_gap > 0.0) {
+ if(!gap_lookup) {
+ gap_lookup = xbt_dict_new();
+ }
+ fifo = (xbt_fifo_t)xbt_dict_get_or_null(gap_lookup, src);
+ action->sender.gap = 0.0;
+ if(fifo && xbt_fifo_size(fifo) > 0) {
+ /* Compute gap from last send */
+ last_action = (surf_action_network_CM02_t)xbt_fifo_get_item_content(xbt_fifo_get_last_item(fifo));
+ bw = net_get_link_bandwidth(link);
+ action->sender.gap = last_action->sender.gap + max(sg_sender_gap, last_action->sender.size / bw);
+ action->latency += action->sender.gap;
+ }
+ /* Append action as last send */
+ action->sender.link_name = link->lmm_resource.generic_resource.name;
+ fifo = (xbt_fifo_t)xbt_dict_get_or_null(gap_lookup, action->sender.link_name);
+ if(!fifo) {
+ fifo = xbt_fifo_new();
+ xbt_dict_set(gap_lookup, action->sender.link_name, fifo, NULL);
+ }
+ action->sender.fifo_item = xbt_fifo_push(fifo, action);
+ action->sender.size = size;
+ }
+}
+
+static void smpi_gap_remove(surf_action_network_CM02_t action) {
+ xbt_fifo_t fifo;
+ size_t size;
+
+ if(sg_sender_gap > 0.0 && action->sender.link_name && action->sender.fifo_item) {
+ fifo = (xbt_fifo_t)xbt_dict_get_or_null(gap_lookup, action->sender.link_name);
+ xbt_fifo_remove_item(fifo, action->sender.fifo_item);
+ size = xbt_fifo_size(fifo);
+ if(size == 0) {
+ xbt_fifo_free(fifo);
+ xbt_dict_remove(gap_lookup, action->sender.link_name);
+ size = xbt_dict_length(gap_lookup);
+ if(size == 0) {
+ xbt_dict_free(&gap_lookup);
+ }
+ }
+ }
}
static void surf_network_model_init_internal(void)
{
+ s_surf_action_network_CM02_t comm;
surf_network_model = surf_model_init();
surf_network_model->name = "network";
#ifdef HAVE_LATENCY_BOUND_TRACKING
surf_network_model->get_latency_limited = net_get_link_latency_limited;
#endif
+#ifdef HAVE_TRACING
+ surf_network_model->set_category = net_action_set_category;
+#endif
surf_network_model->model_private->resource_used = net_resource_used;
- surf_network_model->model_private->share_resources = net_share_resources;
- surf_network_model->model_private->update_actions_state =
- net_update_actions_state;
+ if(network_update_mechanism == UM_LAZY) {
+ surf_network_model->model_private->share_resources = net_share_resources_lazy;
+ surf_network_model->model_private->update_actions_state = net_update_actions_state_lazy;
+ } else if(network_update_mechanism == UM_FULL) {
+ surf_network_model->model_private->share_resources = net_share_resources_full;
+ surf_network_model->model_private->update_actions_state = net_update_actions_state_full;
+ }
+
surf_network_model->model_private->update_resource_state =
- net_update_resource_state;
+ net_update_resource_state;
surf_network_model->model_private->finalize = net_finalize;
surf_network_model->suspend = net_action_suspend;
surf_network_model->resume = net_action_resume;
surf_network_model->is_suspended = net_action_is_suspended;
- surf_network_model->set_max_duration = net_action_set_max_duration;
-#ifdef HAVE_TRACING
- surf_network_model->set_category = net_action_set_category;
-#endif
+ surf_cpu_model->set_max_duration = net_action_set_max_duration;
surf_network_model->extension.network.communicate = net_communicate;
surf_network_model->extension.network.get_route = net_get_route;
surf_network_model->extension.network.get_link_bandwidth =
- net_get_link_bandwidth;
+ net_get_link_bandwidth;
surf_network_model->extension.network.get_link_latency =
- net_get_link_latency;
+ net_get_link_latency;
surf_network_model->extension.network.link_shared = net_link_shared;
surf_network_model->extension.network.add_traces = net_add_traces;
surf_network_model->extension.network.create_resource =
- net_create_resource;
-
- if (!network_maxmin_system)
- network_maxmin_system = lmm_system_new();
-
- routing_model_create(sizeof(link_CM02_t),
- net_create_resource("__loopback__",
- 498000000, NULL,
- 0.000015, NULL,
- SURF_RESOURCE_ON, NULL,
- SURF_LINK_FATPIPE, NULL));
+ net_create_resource;
+
+ if (!network_maxmin_system)
+ network_maxmin_system = lmm_system_new(net_selective_update);
+
+ routing_model_create(sizeof(link_CM02_t),
+ net_create_resource("__loopback__",
+ 498000000, NULL, 0.000015, NULL,
+ SURF_RESOURCE_ON, NULL,
+ SURF_LINK_FATPIPE, NULL));
+
+ if(network_update_mechanism == UM_LAZY){
+ net_action_heap = xbt_heap_new(8,NULL);
+ xbt_heap_set_update_callback(net_action_heap, net_action_update_index_heap);
+ net_modified_set =
+ xbt_swag_new(xbt_swag_offset(comm, action_list_hookup));
+ }
}
+static void set_update_mechanism(void) {
+#ifdef HAVE_TRACING
+ TRACE_set_network_update_mechanism ();
+#endif
+ char *optim = xbt_cfg_get_string(_surf_cfg_set, "network/optim");
+ int select = xbt_cfg_get_int(_surf_cfg_set, "network/maxmin_selective_update");
+
+ if(!strcmp(optim,"Full")) {
+ network_update_mechanism = UM_FULL;
+ net_selective_update = select;
+ } else if (!strcmp(optim,"Lazy")) {
+ network_update_mechanism = UM_LAZY;
+ net_selective_update = 1;
+ xbt_assert((select==1) || (xbt_cfg_is_default_value(_surf_cfg_set,"network/maxmin_selective_update")),
+ "Disabling selective update while using the lazy update mechanism is dumb!");
+ } else {
+ xbt_die("Unsupported optimization (%s) for this model",optim);
+ }
+}
/************************************************************************/
/* New model based on LV08 and experimental results of MPI ping-pongs */
/************************************************************************/
+/* @Inproceedings{smpi_ipdps, */
+/* author={Pierre-Nicolas Clauss and Mark Stillwell and Stéphane Genaud and Frédéric Suter and Henri Casanova and Martin Quinson}, */
+/* title={Single Node On-Line Simulation of {MPI} Applications with SMPI}, */
+/* booktitle={25th IEEE International Parallel and Distributed Processing Symposium (IPDPS'11)}, */
+/* address={Anchorage (Alaska) USA}, */
+/* month=may, */
+/* year={2011} */
+/* } */
void surf_network_model_init_SMPI(void)
{
if (surf_network_model)
return;
+ set_update_mechanism();
+
surf_network_model_init_internal();
latency_factor_callback = &smpi_latency_factor;
bandwidth_factor_callback = &smpi_bandwidth_factor;
bandwidth_constraint_callback = &smpi_bandwidth_constraint;
+ gap_append = &smpi_gap_append;
+ gap_remove = &smpi_gap_remove;
net_define_callbacks();
xbt_dynar_push(model_list, &surf_network_model);
network_solve = lmm_solve;
xbt_cfg_setdefault_double(_surf_cfg_set, "network/sender_gap", 10e-6);
xbt_cfg_setdefault_double(_surf_cfg_set, "network/weight_S", 8775);
-
}
/************************************************************************/
-/* New model based on optimizations discussed during this thesis */
+/* New model based on optimizations discussed during Pedro Velho's thesis */
/************************************************************************/
+/* @techreport{VELHO:2011:HAL-00646896:1, */
+/* url = {http://hal.inria.fr/hal-00646896/en/}, */
+/* title = {{Flow-level network models: have we reached the limits?}}, */
+/* author = {Velho, Pedro and Schnorr, Lucas and Casanova, Henri and Legrand, Arnaud}, */
+/* type = {Rapport de recherche}, */
+/* institution = {INRIA}, */
+/* number = {RR-7821}, */
+/* year = {2011}, */
+/* month = Nov, */
+/* pdf = {http://hal.inria.fr/hal-00646896/PDF/rr-validity.pdf}, */
+/* } */
void surf_network_model_init_LegrandVelho(void)
{
-
if (surf_network_model)
return;
+
+ set_update_mechanism();
+
surf_network_model_init_internal();
net_define_callbacks();
xbt_dynar_push(model_list, &surf_network_model);
network_solve = lmm_solve;
- xbt_cfg_setdefault_double(_surf_cfg_set, "network/latency_factor", 10.4);
- xbt_cfg_setdefault_double(_surf_cfg_set, "network/bandwidth_factor",
- 0.92);
- xbt_cfg_setdefault_double(_surf_cfg_set, "network/weight_S", 8775);
-
+ xbt_cfg_setdefault_double(_surf_cfg_set, "network/latency_factor", 10.4); // 13.01 when calibration is done without phase effects
+ xbt_cfg_setdefault_double(_surf_cfg_set, "network/bandwidth_factor",0.92);// 0.97 when calibration is done without phase effects
+ xbt_cfg_setdefault_double(_surf_cfg_set, "network/weight_S", 8775); // 20537 when calibration is done without phase effects
}
/***************************************************************************/
if (surf_network_model)
return;
+
+ set_update_mechanism();
surf_network_model_init_internal();
net_define_callbacks();
xbt_dynar_push(model_list, &surf_network_model);
network_solve = lmm_solve;
+
+ xbt_cfg_setdefault_double(_surf_cfg_set, "network/latency_factor", 1.0);
+ xbt_cfg_setdefault_double(_surf_cfg_set, "network/bandwidth_factor", 1.0);
+ xbt_cfg_setdefault_double(_surf_cfg_set, "network/weight_S", 0.0);
}
+/***************************************************************************/
+/* The models from Steven H. Low */
+/***************************************************************************/
+/* @article{Low03, */
+/* author={Steven H. Low}, */
+/* title={A Duality Model of {TCP} and Queue Management Algorithms}, */
+/* year={2003}, */
+/* journal={{IEEE/ACM} Transactions on Networking}, */
+/* volume={11}, number={4}, */
+/* } */
void surf_network_model_init_Reno(void)
{
if (surf_network_model)
return;
+
+ set_update_mechanism();
surf_network_model_init_internal();
net_define_callbacks();
{
if (surf_network_model)
return;
+
+ set_update_mechanism();
surf_network_model_init_internal();
net_define_callbacks();
{
if (surf_network_model)
return;
+
+ set_update_mechanism();
surf_network_model_init_internal();
net_define_callbacks();
static double netcste_get_link_bandwidth(const void *link)
{
DIE_IMPOSSIBLE;
+ return -1.0;
}
static double netcste_get_link_latency(const void *link)
{
DIE_IMPOSSIBLE;
+ return -1.0;
}
static int link_shared(const void *link)
{
DIE_IMPOSSIBLE;
+ return -1;
}
static void netcste_action_suspend(surf_action_t action)
netcste_define_callbacks();
xbt_dynar_push(model_list, &surf_network_model);
- xbt_cfg_set_string(_surf_cfg_set, "routing", "none");
routing_model_create(sizeof(double), NULL);
}
+++ /dev/null
-
-/*
- * Network with improved management of tasks, IM (Improved Management).
- * Uses a heap to store actions so that the share_resources is faster.
- * This model automatically sets the selective update flag to 1 and is
- * highly dependent on the maxmin lmm module.
- */
-
-/* Copyright (c) 2009, 2010, 2011. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "xbt/log.h"
-#include "xbt/str.h"
-#include "surf_private.h"
-#include "xbt/dict.h"
-#include "maxmin_private.h"
-#include "surf/surf_resource_lmm.h"
-
-
-#undef GENERIC_ACTION
-#define GENERIC_ACTION(action) action->generic_action
-
-
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_network_im, surf,
- "Logging specific to the SURF network module");
-
-
-enum heap_action_type{
- LATENCY = 100,
- MAX_DURATION,
- NORMAL,
- NOTSET
-};
-
-typedef struct surf_action_network_CM02_im {
- s_surf_action_t generic_action;
- s_xbt_swag_hookup_t action_list_hookup;
- double latency;
- double lat_current;
- double weight;
- lmm_variable_t variable;
- double rate;
-#ifdef HAVE_LATENCY_BOUND_TRACKING
- int latency_limited;
-#endif
- int suspended;
-#ifdef HAVE_TRACING
- char *src_name;
- char *dst_name;
-#endif
- int index_heap;
- enum heap_action_type hat;
- double last_update;
-} s_surf_action_network_CM02_im_t, *surf_action_network_CM02_im_t;
-
-
-typedef struct network_link_CM02_im {
- s_surf_resource_lmm_t lmm_resource; /* must remain first to be added to a trace */
-
- /* Using this object with the public part of
- model does not make sense */
- double lat_current;
- tmgr_trace_event_t lat_event;
-} s_link_CM02_im_t, *link_CM02_im_t;
-
-
-
-
-
-
-
-
-extern surf_model_t surf_network_model;
-static lmm_system_t network_im_maxmin_system = NULL;
-static void (*network_im_solve) (lmm_system_t) = NULL;
-
-extern double sg_latency_factor;
-extern double sg_bandwidth_factor;
-extern double sg_weight_S_parameter;
-
-extern double sg_tcp_gamma;
-extern int sg_network_fullduplex;
-
-
-static void im_net_action_recycle(surf_action_t action);
-static int im_net_get_link_latency_limited(surf_action_t action);
-static int im_net_action_is_suspended(surf_action_t action);
-static double im_net_action_get_remains(surf_action_t action);
-static void im_net_action_set_max_duration(surf_action_t action, double duration);
-static void im_net_update_actions_state(double now, double delta);
-static void update_action_remaining(double now);
-
-static xbt_swag_t im_net_modified_set = NULL;
-static xbt_heap_t im_net_action_heap = NULL;
-xbt_swag_t keep_track = NULL;
-extern int sg_maxmin_selective_update;
-
-/* added to manage the communication action's heap */
-static void im_net_action_update_index_heap(void *action, int i)
-{
- ((surf_action_network_CM02_im_t) action)->index_heap = i;
-}
-
-/* insert action on heap using a given key and a hat (heap_action_type)
- * a hat can be of three types for communications:
- *
- * NORMAL = this is a normal heap entry stating the date to finish transmitting
- * LATENCY = this is a heap entry to warn us when the latency is payed
- * MAX_DURATION =this is a heap entry to warn us when the max_duration limit is reached
- */
-static void heap_insert(surf_action_network_CM02_im_t action, double key, enum heap_action_type hat){
- action->hat = hat;
- xbt_heap_push(im_net_action_heap, action, key);
-}
-
-static void heap_remove(surf_action_network_CM02_im_t action){
- action->hat = NONE;
- if(((surf_action_network_CM02_im_t) action)->index_heap >= 0){
- xbt_heap_remove(im_net_action_heap,action->index_heap);
- }
-}
-
-/******************************************************************************/
-/* Factors callbacks */
-/******************************************************************************/
-static double im_constant_latency_factor(double size)
-{
- return sg_latency_factor;
-}
-
-static double im_constant_bandwidth_factor(double size)
-{
- return sg_bandwidth_factor;
-}
-
-static double im_constant_bandwidth_constraint(double rate, double bound,
- double size)
-{
- return rate;
-}
-
-
-static double (*im_latency_factor_callback) (double) =
- &im_constant_latency_factor;
-static double (*im_bandwidth_factor_callback) (double) =
- &im_constant_bandwidth_factor;
-static double (*im_bandwidth_constraint_callback) (double, double, double) =
- &im_constant_bandwidth_constraint;
-
-
-static void* im_net_create_resource(const char *name,
- double bw_initial,
- tmgr_trace_t bw_trace,
- double lat_initial,
- tmgr_trace_t lat_trace,
- e_surf_resource_state_t
- state_initial,
- tmgr_trace_t state_trace,
- e_surf_link_sharing_policy_t
- policy, xbt_dict_t properties)
-{
- link_CM02_im_t nw_link = (link_CM02_im_t)
- surf_resource_lmm_new(sizeof(s_link_CM02_im_t),
- surf_network_model, name, properties,
- network_im_maxmin_system,
- sg_bandwidth_factor * bw_initial,
- history,
- state_initial, state_trace,
- bw_initial, bw_trace);
-
- xbt_assert(!xbt_lib_get_or_null(link_lib, name, SURF_LINK_LEVEL),
- "Link '%s' declared several times in the platform file.",
- name);
-
- nw_link->lat_current = lat_initial;
- if (lat_trace)
- nw_link->lat_event =
- tmgr_history_add_trace(history, lat_trace, 0.0, 0, nw_link);
-
- if (policy == SURF_LINK_FATPIPE)
- lmm_constraint_shared(nw_link->lmm_resource.constraint);
-
- xbt_lib_set(link_lib, name, SURF_LINK_LEVEL, nw_link);
-
- return nw_link;
-}
-
-static void im_net_parse_link_init(sg_platf_link_cbarg_t link)
-{
- if(link->policy == SURF_LINK_FULLDUPLEX)
- {
- char *name = bprintf("%s_UP",link->id);
- im_net_create_resource(name, link->bandwidth, link->bandwidth_trace,
- link->latency, link->latency_trace, link->state, link->state_trace,
- link->policy, link->properties);
- xbt_free(name);
- name = bprintf("%s_DOWN",link->id);
- im_net_create_resource(name, link->bandwidth, link->bandwidth_trace,
- link->latency, link->latency_trace, link->state, link->state_trace,
- link->policy, NULL); // FIXME: We need to deep copy the properties or we won't be able to free it
- xbt_free(name);
- }
- else
- {
- im_net_create_resource(link->id, link->bandwidth, link->bandwidth_trace,
- link->latency, link->latency_trace, link->state, link->state_trace,
- link->policy, link->properties);
- }
-}
-
-static void im_net_add_traces(void)
-{
- xbt_dict_cursor_t cursor = NULL;
- char *trace_name, *elm;
-
- static int called = 0;
- if (called)
- return;
- called = 1;
-
- /* connect all traces relative to network */
- xbt_dict_foreach(trace_connect_list_link_avail, cursor, trace_name, elm) {
- tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
- link_CM02_im_t link =
- xbt_lib_get_or_null(link_lib, elm, SURF_LINK_LEVEL);
-
- xbt_assert(link, "Cannot connect trace %s to link %s: link undefined",
- trace_name, elm);
- xbt_assert(trace,
- "Cannot connect trace %s to link %s: trace undefined",
- trace_name, elm);
-
- link->lmm_resource.state_event =
- tmgr_history_add_trace(history, trace, 0.0, 0, link);
- }
-
- xbt_dict_foreach(trace_connect_list_bandwidth, cursor, trace_name, elm) {
- tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
- link_CM02_im_t link =
- xbt_lib_get_or_null(link_lib, elm, SURF_LINK_LEVEL);
-
- xbt_assert(link, "Cannot connect trace %s to link %s: link undefined",
- trace_name, elm);
- xbt_assert(trace,
- "Cannot connect trace %s to link %s: trace undefined",
- trace_name, elm);
-
- link->lmm_resource.power.event =
- tmgr_history_add_trace(history, trace, 0.0, 0, link);
- }
-
- xbt_dict_foreach(trace_connect_list_latency, cursor, trace_name, elm) {
- tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
- link_CM02_im_t link =
- xbt_lib_get_or_null(link_lib, elm, SURF_LINK_LEVEL);
-
- xbt_assert(link, "Cannot connect trace %s to link %s: link undefined",
- trace_name, elm);
- xbt_assert(trace,
- "Cannot connect trace %s to link %s: trace undefined",
- trace_name, elm);
-
- link->lat_event = tmgr_history_add_trace(history, trace, 0.0, 0, link);
- }
-}
-
-static void im_net_define_callbacks(void)
-{
- /* Figuring out the network links */
- sg_platf_link_add_cb(im_net_parse_link_init);
- sg_platf_postparse_add_cb(im_net_add_traces);
-}
-
-static int im_net_resource_used(void *resource_id)
-{
- return lmm_constraint_used(network_im_maxmin_system,
- ((surf_resource_lmm_t)
- resource_id)->constraint);
-}
-
-static int im_net_action_unref(surf_action_t action)
-{
- action->refcount--;
- if (!action->refcount) {
- xbt_swag_remove(action, action->state_set);
- if (((surf_action_network_CM02_im_t) action)->variable){
- lmm_variable_free(network_im_maxmin_system,
- ((surf_action_network_CM02_im_t) action)->variable);
- }
- // remove action from the heap
- heap_remove((surf_action_network_CM02_im_t) action);
-
- xbt_swag_remove(action, im_net_modified_set);
-#ifdef HAVE_TRACING
- xbt_free(((surf_action_network_CM02_im_t) action)->src_name);
- xbt_free(((surf_action_network_CM02_im_t) action)->dst_name);
- xbt_free(action->category);
-#endif
- surf_action_free(&action);
- return 1;
- }
- return 0;
-}
-
-
-
-static void im_net_action_cancel(surf_action_t action)
-{
- surf_network_model->action_state_set(action, SURF_ACTION_FAILED);
-
- xbt_swag_remove(action, im_net_modified_set);
- // remove action from the heap
- heap_remove((surf_action_network_CM02_im_t) action);
-}
-
-static void im_net_action_recycle(surf_action_t action)
-{
- return;
-}
-
-#ifdef HAVE_LATENCY_BOUND_TRACKING
-static int im_net_get_link_latency_limited(surf_action_t action)
-{
- return action->latency_limited;
-}
-#endif
-
-static double im_net_action_get_remains(surf_action_t action)
-{
- /* update remains before return it */
- update_action_remaining(surf_get_clock());
- return action->remains;
-}
-
-static void update_action_remaining(double now){
- surf_action_network_CM02_im_t action = NULL;
- double delta = 0.0;
-
- xbt_swag_foreach(action, im_net_modified_set) {
-
- if(action->suspended != 0){
- continue;
- }
-
- delta = now - action->last_update;
-
- double_update(&(action->generic_action.remains),
- lmm_variable_getvalue(action->variable) * delta);
-
- if (action->generic_action.max_duration != NO_MAX_DURATION)
- double_update(&(action->generic_action.max_duration), delta);
-
- if ((action->generic_action.remains <= 0) &&
- (lmm_get_variable_weight(action->variable) > 0)) {
- action->generic_action.finish = surf_get_clock();
- surf_network_model->action_state_set((surf_action_t) action,
- SURF_ACTION_DONE);
- heap_remove(action);
- } else if ((action->generic_action.max_duration != NO_MAX_DURATION)
- && (action->generic_action.max_duration <= 0)) {
- action->generic_action.finish = surf_get_clock();
- surf_network_model->action_state_set((surf_action_t) action,
- SURF_ACTION_DONE);
- heap_remove(action);
- }
-
- action->last_update = now;
- }
-}
-
-static double im_net_share_resources(double now)
-{
- surf_action_network_CM02_im_t action = NULL;
- double min=-1;
- double value;
-
- XBT_DEBUG("Before share resources, the size of modified actions set is %d", xbt_swag_size(im_net_modified_set));
- update_action_remaining(now);
-
- keep_track = im_net_modified_set;
- lmm_solve(network_im_maxmin_system);
- keep_track = NULL;
-
- XBT_DEBUG("After share resources, The size of modified actions set is %d", xbt_swag_size(im_net_modified_set));
-
- xbt_swag_foreach(action, im_net_modified_set) {
- if (GENERIC_ACTION(action).state_set != surf_network_model->states.running_action_set){
- continue;
- }
-
- /* bogus priority, skip it */
- if (GENERIC_ACTION(action).priority <= 0){
- continue;
- }
-
- min = -1;
- value = lmm_variable_getvalue(action->variable);
- if (value > 0) {
- if (GENERIC_ACTION(action).remains > 0) {
- value = GENERIC_ACTION(action).remains / value;
- min = now + value;
- } else {
- value = 0.0;
- min = now;
- }
- }
-
- if ((GENERIC_ACTION(action).max_duration != NO_MAX_DURATION)
- && (min == -1
- || GENERIC_ACTION(action).start +
- GENERIC_ACTION(action).max_duration < min)){
- min = GENERIC_ACTION(action).start +
- GENERIC_ACTION(action).max_duration;
- }
-
- XBT_DEBUG("Action(%p) Start %lf Finish %lf Max_duration %lf", action,
- GENERIC_ACTION(action).start, now + value,
- GENERIC_ACTION(action).max_duration);
-
-
-
- if (action->index_heap >= 0) {
- heap_remove((surf_action_network_CM02_im_t) action);
- }
-
- if (min != -1) {
- heap_insert((surf_action_network_CM02_im_t) action, min, NORMAL);
- XBT_DEBUG("Insert at heap action(%p) min %lf now %lf", action, min, now);
- }
- }
-
- //hereafter must have already the min value for this resource model
- if(xbt_heap_size(im_net_action_heap) > 0 ){
- min = xbt_heap_maxkey(im_net_action_heap) - now ;
- }else{
- min = -1;
- }
-
- XBT_DEBUG("The minimum with the HEAP %lf", min);
-
-
- return min;
-}
-
-static void im_net_update_actions_state(double now, double delta)
-{
- surf_action_network_CM02_im_t action = NULL;
-
- while ((xbt_heap_size(im_net_action_heap) > 0)
- && (double_equals(xbt_heap_maxkey(im_net_action_heap), now))) {
- action = xbt_heap_pop(im_net_action_heap);
- XBT_DEBUG("Action %p: finish", action);
- GENERIC_ACTION(action).finish = surf_get_clock();
-
- // if I am wearing a latency heat
- if( action->hat == LATENCY){
- lmm_update_variable_weight(network_im_maxmin_system, action->variable,
- action->weight);
- heap_remove(action);
- action->last_update = surf_get_clock();
-
- XBT_DEBUG("Action (%p) is not limited by latency anymore", action);
-#ifdef HAVE_LATENCY_BOUND_TRACKING
- GENERIC_ACTION(action).latency_limited = 0;
-#endif
-
- // if I am wearing a max_duration or normal hat
- }else if( action->hat == MAX_DURATION || action->hat == NORMAL ){
- // no need to communicate anymore
- // assume that flows that reached max_duration have remaining of 0
- GENERIC_ACTION(action).remains = 0;
- action->generic_action.finish = surf_get_clock();
- surf_network_model->action_state_set((surf_action_t) action,
- SURF_ACTION_DONE);
- heap_remove(action);
- }
- }
- return;
-}
-
-static void im_net_update_resource_state(void *id,
- tmgr_trace_event_t event_type,
- double value, double date)
-{
- link_CM02_im_t nw_link = id;
- /* printf("[" "%lg" "] Asking to update network card \"%s\" with value " */
- /* "%lg" " for event %p\n", surf_get_clock(), nw_link->name, */
- /* value, event_type); */
-
- if (event_type == nw_link->lmm_resource.power.event) {
- double delta =
- sg_weight_S_parameter / value - sg_weight_S_parameter /
- (nw_link->lmm_resource.power.peak *
- nw_link->lmm_resource.power.scale);
- lmm_variable_t var = NULL;
- lmm_element_t elem = NULL;
- surf_action_network_CM02_im_t action = NULL;
-
- nw_link->lmm_resource.power.peak = value;
- lmm_update_constraint_bound(network_im_maxmin_system,
- nw_link->lmm_resource.constraint,
- sg_bandwidth_factor *
- (nw_link->lmm_resource.power.peak *
- nw_link->lmm_resource.power.scale));
-#ifdef HAVE_TRACING
- TRACE_surf_link_set_bandwidth(date, (char *)(((nw_link->lmm_resource).generic_resource).name),
- sg_bandwidth_factor *
- (nw_link->lmm_resource.power.peak *
- nw_link->lmm_resource.power.scale));
-#endif
- if (sg_weight_S_parameter > 0) {
- while ((var = lmm_get_var_from_cnst
- (network_im_maxmin_system, nw_link->lmm_resource.constraint,
- &elem))) {
- action = lmm_variable_id(var);
- action->weight += delta;
- if (!(action->suspended))
- lmm_update_variable_weight(network_im_maxmin_system,
- action->variable, action->weight);
- }
- }
- if (tmgr_trace_event_free(event_type))
- nw_link->lmm_resource.power.event = NULL;
- } else if (event_type == nw_link->lat_event) {
- double delta = value - nw_link->lat_current;
- lmm_variable_t var = NULL;
- lmm_element_t elem = NULL;
- surf_action_network_CM02_im_t action = NULL;
-
- nw_link->lat_current = value;
- while ((var = lmm_get_var_from_cnst
- (network_im_maxmin_system, nw_link->lmm_resource.constraint,
- &elem))) {
- action = lmm_variable_id(var);
- action->lat_current += delta;
- action->weight += delta;
- if (action->rate < 0)
- lmm_update_variable_bound(network_im_maxmin_system, action->variable,
- sg_tcp_gamma / (2.0 *
- action->lat_current));
- else {
- lmm_update_variable_bound(network_im_maxmin_system, action->variable,
- min(action->rate,
- sg_tcp_gamma / (2.0 *
- action->lat_current)));
-
- if (action->rate < sg_tcp_gamma / (2.0 * action->lat_current)) {
- XBT_INFO("Flow is limited BYBANDWIDTH");
- } else {
- XBT_INFO("Flow is limited BYLATENCY, latency of flow is %f",
- action->lat_current);
- }
- }
- if (!(action->suspended))
- lmm_update_variable_weight(network_im_maxmin_system, action->variable,
- action->weight);
-
- }
- if (tmgr_trace_event_free(event_type))
- nw_link->lat_event = NULL;
- } else if (event_type == nw_link->lmm_resource.state_event) {
- if (value > 0)
- nw_link->lmm_resource.state_current = SURF_RESOURCE_ON;
- else {
- lmm_constraint_t cnst = nw_link->lmm_resource.constraint;
- lmm_variable_t var = NULL;
- lmm_element_t elem = NULL;
-
- nw_link->lmm_resource.state_current = SURF_RESOURCE_OFF;
- while ((var = lmm_get_var_from_cnst
- (network_im_maxmin_system, cnst, &elem))) {
- surf_action_t action = lmm_variable_id(var);
-
- if (surf_action_state_get(action) == SURF_ACTION_RUNNING ||
- surf_action_state_get(action) == SURF_ACTION_READY) {
- action->finish = date;
- surf_network_model->action_state_set(action, SURF_ACTION_FAILED);
- }
- }
- }
- if (tmgr_trace_event_free(event_type))
- nw_link->lmm_resource.state_event = NULL;
- } else {
- XBT_CRITICAL("Unknown event ! \n");
- xbt_abort();
- }
-
- XBT_DEBUG("There were a resource state event, need to update actions related to the constraint (%p)", nw_link->lmm_resource.constraint);
- return;
-}
-
-
-static surf_action_t im_net_communicate(const char *src_name,
- const char *dst_name, double size,
- double rate)
-{
- unsigned int i;
- link_CM02_im_t link;
- int failed = 0;
- surf_action_network_CM02_im_t action = NULL;
- double bandwidth_bound;
- /* LARGE PLATFORMS HACK:
- Add a link_CM02_im_t *link and a int link_nb to network_card_CM02_im_t. It will represent local links for this node
- Use the cluster_id for ->id */
-
- xbt_dynar_t back_route = NULL;
- int constraints_per_variable = 0;
- // I need to have the forward and backward routes at the same time, so allocate "route". That way, the routing wont clean it up
- xbt_dynar_t route=xbt_dynar_new(global_routing->size_of_link,NULL);
- routing_get_route_and_latency(src_name, dst_name,&route,NULL);
-
-
- if (sg_network_fullduplex == 1) {
- routing_get_route_and_latency(dst_name, src_name, &back_route, NULL);
- }
-
- /* LARGE PLATFORMS HACK:
- total_route_size = route_size + src->link_nb + dst->nb */
-
- XBT_IN("(%s,%s,%g,%g)", src_name, dst_name, size, rate);
- /* LARGE PLATFORMS HACK:
- assert on total_route_size */
- xbt_assert(xbt_dynar_length(route),
- "You're trying to send data from %s to %s but there is no connection between these two hosts.",
- src_name, dst_name);
-
- xbt_dynar_foreach(route, i, link) {
- if (link->lmm_resource.state_current == SURF_RESOURCE_OFF) {
- failed = 1;
- break;
- }
- }
- action =
- surf_action_new(sizeof(s_surf_action_network_CM02_im_t), size,
- surf_network_model, failed);
-
-
-#ifdef HAVE_LATENCY_BOUND_TRACKING
- (action->generic_action).latency_limited = 0;
-#endif
-
- xbt_swag_insert(action, action->generic_action.state_set);
- action->rate = rate;
- action->index_heap = -1;
- action->latency = 0.0;
- action->weight = 0.0;
- action->last_update = surf_get_clock();
-
- bandwidth_bound = -1.0;
- xbt_dynar_foreach(route, i, link) {
- action->latency += link->lat_current;
- action->weight +=
- link->lat_current +
- sg_weight_S_parameter /
- (link->lmm_resource.power.peak * link->lmm_resource.power.scale);
- if (bandwidth_bound < 0.0)
- bandwidth_bound =
- im_bandwidth_factor_callback(size) *
- (link->lmm_resource.power.peak * link->lmm_resource.power.scale);
- else
- bandwidth_bound =
- min(bandwidth_bound,
- im_bandwidth_factor_callback(size) *
- (link->lmm_resource.power.peak *
- link->lmm_resource.power.scale));
- }
- /* LARGE PLATFORMS HACK:
- Add src->link and dst->link latencies */
- action->lat_current = action->latency;
- action->latency *= im_latency_factor_callback(size);
- action->rate =
- im_bandwidth_constraint_callback(action->rate, bandwidth_bound,
- size);
-
- /* LARGE PLATFORMS HACK:
- lmm_variable_new(..., total_route_size) */
- if (back_route != NULL) {
- constraints_per_variable =
- xbt_dynar_length(route) + xbt_dynar_length(back_route);
- } else {
- constraints_per_variable = xbt_dynar_length(route);
- }
-
- if (action->latency > 0){
- action->variable =
- lmm_variable_new(network_im_maxmin_system, action, 0.0, -1.0,
- constraints_per_variable);
- // add to the heap the event when the latency is payed
- XBT_DEBUG("Added action (%p) one latency event at date %f", action, action->latency + action->last_update);
- heap_insert(action, action->latency + action->last_update, LATENCY);
-#ifdef HAVE_LATENCY_BOUND_TRACKING
- (action->generic_action).latency_limited = 1;
-#endif
- }
- else
- action->variable =
- lmm_variable_new(network_im_maxmin_system, action, 1.0, -1.0,
- constraints_per_variable);
-
- if (action->rate < 0) {
- if (action->lat_current > 0)
- lmm_update_variable_bound(network_im_maxmin_system, action->variable,
- sg_tcp_gamma / (2.0 *
- action->lat_current));
- else
- lmm_update_variable_bound(network_im_maxmin_system, action->variable,
- -1.0);
- } else {
- if (action->lat_current > 0)
- lmm_update_variable_bound(network_im_maxmin_system, action->variable,
- min(action->rate,
- sg_tcp_gamma / (2.0 *
- action->lat_current)));
- else
- lmm_update_variable_bound(network_im_maxmin_system, action->variable,
- action->rate);
- }
-
- xbt_dynar_foreach(route, i, link) {
- lmm_expand(network_im_maxmin_system, link->lmm_resource.constraint,
- action->variable, 1.0);
- }
-
- if (sg_network_fullduplex == 1) {
- XBT_DEBUG("Fullduplex active adding backward flow using 5%c", '%');
- xbt_dynar_foreach(back_route, i, link) {
- lmm_expand(network_im_maxmin_system, link->lmm_resource.constraint,
- action->variable, .05);
- }
- }
-
- /* LARGE PLATFORMS HACK:
- expand also with src->link and dst->link */
-#ifdef HAVE_TRACING
- if (TRACE_is_enabled()) {
- action->src_name = xbt_strdup(src_name);
- action->dst_name = xbt_strdup(dst_name);
- } else {
- action->src_name = action->dst_name = NULL;
- }
-#endif
-
- xbt_dynar_free(&route);
- XBT_OUT();
-
- return (surf_action_t) action;
-}
-
-static xbt_dynar_t im_net_get_route(const char *src, const char *dst)
-{
- xbt_dynar_t route=NULL;
- routing_get_route_and_latency(src, dst,&route,NULL);
- return route;
-}
-
-static double im_net_get_link_bandwidth(const void *link)
-{
- surf_resource_lmm_t lmm = (surf_resource_lmm_t) link;
- return lmm->power.peak * lmm->power.scale;
-}
-
-static double im_net_get_link_latency(const void *link)
-{
- return ((link_CM02_im_t) link)->lat_current;
-}
-
-static int im_net_link_shared(const void *link)
-{
- return
- lmm_constraint_is_shared(((surf_resource_lmm_t) link)->constraint);
-}
-
-static void im_net_action_suspend(surf_action_t action)
-{
- ((surf_action_network_CM02_im_t) action)->suspended = 1;
- lmm_update_variable_weight(network_im_maxmin_system,
- ((surf_action_network_CM02_im_t)
- action)->variable, 0.0);
-
- // remove action from the heap
- heap_remove((surf_action_network_CM02_im_t) action);
-}
-
-static void im_net_action_resume(surf_action_t action)
-{
- if (((surf_action_network_CM02_im_t) action)->suspended) {
- lmm_update_variable_weight(network_im_maxmin_system,
- ((surf_action_network_CM02_im_t)
- action)->variable,
- ((surf_action_network_CM02_im_t)
- action)->weight);
- ((surf_action_network_CM02_im_t) action)->suspended = 0;
- // remove action from the heap
- heap_remove((surf_action_network_CM02_im_t) action);
- }
-}
-
-static int im_net_action_is_suspended(surf_action_t action)
-{
- return ((surf_action_network_CM02_im_t) action)->suspended;
-}
-
-static void im_net_action_set_max_duration(surf_action_t action, double duration)
-{
- action->max_duration = duration;
- // remove action from the heap
- heap_remove((surf_action_network_CM02_im_t) action);
-}
-
-
-static void im_net_finalize(void)
-{
- surf_model_exit(surf_network_model);
- surf_network_model = NULL;
-
- lmm_system_free(network_im_maxmin_system);
- network_im_maxmin_system = NULL;
-
- xbt_heap_free(im_net_action_heap);
- xbt_swag_free(im_net_modified_set);
-
-}
-
-static void im_surf_network_model_init_internal(void)
-{
- s_surf_action_network_CM02_im_t comm;
-
- surf_network_model = surf_model_init();
-
- surf_network_model->name = "network";
- surf_network_model->action_unref = im_net_action_unref;
- surf_network_model->action_cancel = im_net_action_cancel;
- surf_network_model->action_recycle = im_net_action_recycle;
- surf_network_model->get_remains = im_net_action_get_remains;
-#ifdef HAVE_LATENCY_BOUND_TRACKING
- surf_network_model->get_latency_limited = im_net_get_link_latency_limited;
-#endif
-
- surf_network_model->model_private->resource_used = im_net_resource_used;
- surf_network_model->model_private->share_resources = im_net_share_resources;
- surf_network_model->model_private->update_actions_state =
- im_net_update_actions_state;
- surf_network_model->model_private->update_resource_state =
- im_net_update_resource_state;
- surf_network_model->model_private->finalize = im_net_finalize;
-
- surf_network_model->suspend = im_net_action_suspend;
- surf_network_model->resume = im_net_action_resume;
- surf_network_model->is_suspended = im_net_action_is_suspended;
- surf_cpu_model->set_max_duration = im_net_action_set_max_duration;
-
- surf_network_model->extension.network.communicate = im_net_communicate;
- surf_network_model->extension.network.get_route = im_net_get_route;
- surf_network_model->extension.network.get_link_bandwidth =
- im_net_get_link_bandwidth;
- surf_network_model->extension.network.get_link_latency =
- im_net_get_link_latency;
- surf_network_model->extension.network.link_shared = im_net_link_shared;
- surf_network_model->extension.network.add_traces = im_net_add_traces;
- surf_network_model->extension.network.create_resource =
- im_net_create_resource;
-
-
- if (!network_im_maxmin_system){
- sg_maxmin_selective_update = 1;
- network_im_maxmin_system = lmm_system_new();
- }
- im_net_action_heap = xbt_heap_new(8,NULL);
-
- xbt_heap_set_update_callback(im_net_action_heap, im_net_action_update_index_heap);
-
- routing_model_create(sizeof(link_CM02_im_t),
- im_net_create_resource("__loopback__",
- 498000000, NULL, 0.000015, NULL,
- SURF_RESOURCE_ON, NULL,
- SURF_LINK_FATPIPE, NULL));
- im_net_modified_set =
- xbt_swag_new(xbt_swag_offset(comm, action_list_hookup));
-}
-
-
-
-/************************************************************************/
-/* New model based on optimizations discussed during this thesis */
-/************************************************************************/
-void im_surf_network_model_init_LegrandVelho(void)
-{
-
- if (surf_network_model)
- return;
- im_surf_network_model_init_internal();
- im_net_define_callbacks();
- xbt_dynar_push(model_list, &surf_network_model);
- network_im_solve = lmm_solve;
-
- xbt_cfg_setdefault_double(_surf_cfg_set, "network/latency_factor", 10.4);
- xbt_cfg_setdefault_double(_surf_cfg_set, "network/bandwidth_factor",
- 0.92);
- xbt_cfg_setdefault_double(_surf_cfg_set, "network/weight_S", 8775);
-}
-
-
-
tmgr_trace_event_t lat_event;
} s_link_CM02_t, *link_CM02_t;
+enum heap_action_type{
+ LATENCY = 100,
+ MAX_DURATION,
+ NORMAL,
+ NOTSET
+};
typedef struct surf_action_network_CM02 {
s_surf_action_t generic_action;
+ s_xbt_swag_hookup_t action_list_hookup;
double latency;
double lat_current;
double weight;
int latency_limited;
#endif
int suspended;
-
#ifdef HAVE_TRACING
char *src_name;
char *dst_name;
#endif
-
+ int index_heap;
+ enum heap_action_type hat;
+ double last_update;
} s_surf_action_network_CM02_t, *surf_action_network_CM02_t;
#endif /* _SURF_NETWORK_PRIVATE_H */
/* Don't forget to update the option description in smx_config when you change this */
s_surf_model_description_t surf_network_model_description[] = {
+ {"LV08",
+ "Realistic network analytic model (slow-start modeled by multiplying latency by 10.4, bandwidth by .92; bottleneck sharing uses a payload of S=8775 for evaluating RTT). ",
+ surf_network_model_init_LegrandVelho},
{"Constant",
- "Simplistic network model where all communication take a constant time (one second)",
+ "Simplistic network model where all communication take a constant time (one second). This model provides the lowest realism, but is (marginally) faster.",
surf_network_model_init_Constant},
- {"CM02",
- "Realistic network model with lmm_solve and no correction factors",
- surf_network_model_init_CM02},
- {"LV08",
- "Realistic network model with lmm_solve, adequate correction factors (latency*=10.4, bandwidth*=.92, S=8775) and partial invalidation optimization",
- im_surf_network_model_init_LegrandVelho},
- {"LV08_fullupdate",
- "Realistic network model wit lmm_solve, adequate correction factors (latency*=10.4, bandwidth*=.92, S=8775) but no further optimization. Should produce the same results as LV08, only slower.",
- surf_network_model_init_LegrandVelho},
{"SMPI",
- "Realistic network model with lmm_solve and correction factors on three intervals (< 1KiB, < 64 KiB, >= 64 KiB)",
+ "Realistic network model specifically tailored for HPC settings (accurate modeling of slow start with correction factors on three intervals: < 1KiB, < 64 KiB, >= 64 KiB)",
surf_network_model_init_SMPI},
+ {"CM02",
+ "Legacy network analytic model (Very similar to LV08, but without corrective factors. The timings of small messages are thus poorly modeled).",
+ surf_network_model_init_CM02},
#ifdef HAVE_GTNETS
{"GTNets",
- "Network Pseudo-model using the GTNets simulator instead of an analytic model",
+ "Network pseudo-model using the GTNets simulator instead of an analytic model",
surf_network_model_init_GTNETS},
#endif
#ifdef HAVE_NS3
{"NS3",
- "Use NS3 tcp model",
+ "Network pseudo-model using the NS3 tcp model instead of an analytic model",
surf_network_model_init_NS3},
#endif
{"Reno",
- "Model using lagrange_solve instead of lmm_solve (experts only)",
+ "Model from Steven H. Low using lagrange_solve instead of lmm_solve (experts only; check the code for more info).",
surf_network_model_init_Reno},
{"Reno2",
- "Model using lagrange_solve instead of lmm_solve (experts only)",
+ "Model from Steven H. Low using lagrange_solve instead of lmm_solve (experts only; check the code for more info).",
surf_network_model_init_Reno2},
{"Vegas",
- "Model using lagrange_solve instead of lmm_solve (experts only)",
+ "Model from Steven H. Low using lagrange_solve instead of lmm_solve (experts only; check the code for more info).",
surf_network_model_init_Vegas},
{NULL, NULL, NULL} /* this array must be NULL terminated */
};
s_surf_model_description_t surf_cpu_model_description[] = {
- {"Cas01_fullupdate", "CPU classical model time=size/power",
- surf_cpu_model_init_Cas01},
{"Cas01",
- "Variation of Cas01_fullupdate with partial invalidation optimization of lmm system. Should produce the same values, only faster",
- surf_cpu_model_init_Cas01_im},
- {"CpuTI",
- "Variation of Cas01 with also trace integration. Should produce the same values, only faster if you use availability traces",
- surf_cpu_model_init_ti},
+ "Simplistic CPU model (time=size/power).",
+ surf_cpu_model_init_Cas01},
{NULL, NULL, NULL} /* this array must be NULL terminated */
};
s_surf_model_description_t surf_workstation_model_description[] = {
- {"CLM03",
- "Default workstation model, using LV08 and CM02 as network and CPU",
- surf_workstation_model_init_CLM03},
+ {"default",
+ "Default workstation model. Currently, CPU:Cas01 and network:LV08 (with cross traffic enabled)",
+ surf_workstation_model_init_current_default},
{"compound",
- "Workstation model allowing you to use other network and CPU models",
+ "Workstation model that is automatically chosen if you change the network and CPU models",
surf_workstation_model_init_compound},
- {"ptask_L07", "Workstation model with better parallel task modeling",
+ {"ptask_L07", "Workstation model somehow similar to Cas01+CM02 but allowing parallel tasks",
surf_workstation_model_init_ptask_L07},
{NULL, NULL, NULL} /* this array must be NULL terminated */
};
+s_surf_model_description_t surf_optimization_mode_description[] = {
+ {"Lazy",
+ "Lazy action management (partial invalidation in lmm + heap in action remaining).",
+ NULL},
+ {"TI",
+ "Trace integration. Highly optimized mode when using availability traces (only available for the Cas01 CPU model for now).",
+ NULL},
+ {"Full",
+ "Full update of remaining and variables. Slow but may be useful when debugging.",
+ NULL},
+ {NULL, NULL, NULL} /* this array must be NULL terminated */
+};
+
/** Displays the long description of all registered models, and quit */
void model_help(const char *category, s_surf_model_description_t * table)
{
}
}
- XBT_DEBUG("Min for resources (except NS3) : %f", min);
+ XBT_DEBUG("Min for resources (remember that NS3 dont update that value) : %f", min);
XBT_DEBUG("Looking for next trace event");
min = model_next_action_end;
}
- if (next_event_date == -1.0) break;
+ if (next_event_date == -1.0) {
+ XBT_DEBUG("no next TRACE event. Stop searching for it");
+ break;
+ }
if ((min != -1.0) && (next_event_date > NOW + min)) break;
}
} while (1);
- /* FIXME: Moved this test to here to avoid stoping simulation if there are actions running on cpus and all cpus are with availability = 0.
+ /* FIXME: Moved this test to here to avoid stopping simulation if there are actions running on cpus and all cpus are with availability = 0.
* This may cause an infinite loop if one cpu has a trace with periodicity = 0 and the other a trace with periodicity > 0.
* The options are: all traces with same periodicity(0 or >0) or we need to change the way how the events are managed */
- if (min < 0.0)
+ if (min == -1.0) {
+ XBT_DEBUG("No next event at all. Bail out now.");
return -1.0;
+ }
XBT_DEBUG("Duration set to %f", min);
exit(0);
} else if (!strncmp(argv[i], "--help-models", strlen("--help-models") + 1)) {
model_help("workstation", surf_workstation_model_description);
+ printf("\n");
model_help("CPU", surf_cpu_model_description);
+ printf("\n");
model_help("network", surf_network_model_description);
+ printf("\nLong description of all optimization levels accepted by the models of this simulator:\n");
+ for (i = 0; surf_optimization_mode_description[i].name; i++)
+ printf(" %s: %s\n", surf_optimization_mode_description[i].name, surf_optimization_mode_description[i].description);
+ printf("Both network and CPU models have 'Lazy' as default optimization level\n");
exit(0);
#ifdef HAVE_TRACING
} else if (!strncmp(argv[i], "--help-tracing", strlen("--help-tracing") + 1)) {
find_model_description(surf_cpu_model_description, val);
}
+/* callback of the cpu/model variable */
+static void _surf_cfg_cb__optimization_mode(const char *name, int pos)
+{
+ char *val;
+
+ xbt_assert(_surf_init_status < 2,
+ "Cannot change the model after the initialization");
+
+ val = xbt_cfg_get_string(_surf_cfg_set, name);
+
+ if (!strcmp(val, "help")) {
+ model_help("optimization", surf_optimization_mode_description);
+ exit(0);
+ }
+
+ /* New Module missing */
+ find_model_description(surf_optimization_mode_description, val);
+}
+
/* callback of the workstation_model variable */
static void _surf_cfg_cb__network_model(const char *name, int pos)
{
sg_weight_S_parameter = xbt_cfg_get_double(_surf_cfg_set, name);
}
-static void _surf_cfg_cb__surf_maxmin_selective_update(const char *name,
- int pos)
-{
- sg_maxmin_selective_update = xbt_cfg_get_int(_surf_cfg_set, name);
-}
-
/* callback of the inclusion path */
static void _surf_cfg_cb__surf_path(const char *name, int pos)
{
}
} else if (!strcmp(val, "no")) {
if (COORD_HOST_LEVEL)
- XBT_WARN("Setting of whether to use coordinate cannot be disabled once set.");
+ xbt_die("Setting of whether to use coordinate cannot be disabled once set.");
} else {
- XBT_WARN("Command line setting of whether to use coordinates must be either \"yes\" or \"no\"");
+ xbt_die("Command line setting of whether to use coordinates must be either \"yes\" or \"no\"");
}
}
-static void _surf_cfg_cb__surf_network_fullduplex(const char *name,
+static void _surf_cfg_cb__surf_network_crosstraffic(const char *name,
int pos)
{
- sg_network_fullduplex = xbt_cfg_get_int(_surf_cfg_set, name);
+ sg_network_crosstraffic = xbt_cfg_get_int(_surf_cfg_set, name);
}
#ifdef HAVE_GTNETS
sprintf(p,
".\n (use 'help' as a value to see the long description of each model)");
default_value = xbt_strdup("Cas01");
- xbt_cfg_register(&_surf_cfg_set,
- "cpu/model", description, xbt_cfgelm_string,
+ xbt_cfg_register(&_surf_cfg_set, "cpu/model", description, xbt_cfgelm_string,
&default_value, 1, 1, &_surf_cfg_cb__cpu_model, NULL);
+ sprintf(description,
+ "The optimization modes to use for the CPU. Possible values: ");
+ p = description;
+ while (*(++p) != '\0');
+ for (i = 0; surf_optimization_mode_description[i].name; i++)
+ p += sprintf(p, "%s%s", (i == 0 ? "" : ", "),
+ surf_optimization_mode_description[i].name);
+ sprintf(p,
+ ".\n (use 'help' as a value to see the long description of each optimization mode)");
+ default_value = xbt_strdup("Lazy");
+ xbt_cfg_register(&_surf_cfg_set, "cpu/optim", description, xbt_cfgelm_string,
+ &default_value, 1, 1, &_surf_cfg_cb__optimization_mode, NULL);
+
sprintf(description,
"The model to use for the network. Possible values: ");
p = description;
sprintf(p,
".\n (use 'help' as a value to see the long description of each model)");
default_value = xbt_strdup("LV08");
- xbt_cfg_register(&_surf_cfg_set,
- "network/model", description, xbt_cfgelm_string,
+ xbt_cfg_register(&_surf_cfg_set, "network/model", description, xbt_cfgelm_string,
&default_value, 1, 1, &_surf_cfg_cb__network_model,
NULL);
+ sprintf(description,
+ "The optimization modes to use for the network. Possible values: ");
+ p = description;
+ while (*(++p) != '\0');
+ for (i = 0; surf_optimization_mode_description[i].name; i++)
+ p += sprintf(p, "%s%s", (i == 0 ? "" : ", "),
+ surf_optimization_mode_description[i].name);
+ sprintf(p,
+ ".\n (use 'help' as a value to see the long description of each optimization mode)");
+ default_value = xbt_strdup("Lazy");
+ xbt_cfg_register(&_surf_cfg_set, "network/optim", description, xbt_cfgelm_string,
+ &default_value, 1, 1, &_surf_cfg_cb__optimization_mode, NULL);
+
sprintf(description,
"The model to use for the workstation. Possible values: ");
p = description;
surf_workstation_model_description[i].name);
sprintf(p,
".\n (use 'help' as a value to see the long description of each model)");
- default_value = xbt_strdup("CLM03");
- xbt_cfg_register(&_surf_cfg_set,
- "workstation/model", description, xbt_cfgelm_string,
+ default_value = xbt_strdup("default");
+ xbt_cfg_register(&_surf_cfg_set, "workstation/model", description, xbt_cfgelm_string,
&default_value, 1, 1,
&_surf_cfg_cb__workstation_model, NULL);
xbt_free(description);
- default_value = xbt_strdup("Full");
- xbt_cfg_register(&_surf_cfg_set, "routing",
- "Model to use to store the routing information",
- xbt_cfgelm_string, &default_value, 1, 1, NULL, NULL);
-
- xbt_cfg_register(&_surf_cfg_set, "TCP_gamma",
+ xbt_cfg_register(&_surf_cfg_set, "network/TCP_gamma",
"Size of the biggest TCP window (cat /proc/sys/net/ipv4/tcp_[rw]mem for recv/send window; Use the last given value, which is the max window size)",
xbt_cfgelm_double, NULL, 1, 1,
_surf_cfg_cb__tcp_gamma, NULL);
- xbt_cfg_setdefault_double(_surf_cfg_set, "TCP_gamma", 20000.0);
+ xbt_cfg_setdefault_double(_surf_cfg_set, "network/TCP_gamma", 20000.0);
xbt_cfg_register(&_surf_cfg_set, "maxmin/precision",
- "Minimum retained action value when updating simulation",
+ "Numerical precision used when updating simulation models (epsilon in double comparisons)",
xbt_cfgelm_double, NULL, 1, 1, _surf_cfg_cb__maxmin_precision, NULL);
xbt_cfg_setdefault_double(_surf_cfg_set, "maxmin/precision", 0.00001); // FIXME use setdefault everywhere here!
_surf_cfg_cb__surf_path, NULL);
default_value_int = 0;
- xbt_cfg_register(&_surf_cfg_set, "maxmin_selective_update",
- "Update the constraint set propagating recursively to others constraints",
+ xbt_cfg_register(&_surf_cfg_set, "cpu/maxmin_selective_update",
+ "Update the constraint set propagating recursively to others constraints (1 by default when optim is set to lazy)",
+ xbt_cfgelm_int, &default_value_int, 0, 1,
+ NULL, NULL);
+ default_value_int = 0;
+ xbt_cfg_register(&_surf_cfg_set, "network/maxmin_selective_update",
+ "Update the constraint set propagating recursively to others constraints (1 by default when optim is set to lazy)",
xbt_cfgelm_int, &default_value_int, 0, 1,
- _surf_cfg_cb__surf_maxmin_selective_update, NULL);
+ NULL, NULL);
/* do model-check */
default_value_int = 0;
xbt_cfg_set_int(_surf_cfg_set, "model-check", default_value_int); */
/* do verbose-exit */
- default_value_int = 0;
+ default_value_int = 1;
xbt_cfg_register(&_surf_cfg_set, "verbose-exit",
"Activate the \"do nothing\" mode in Ctrl-C",
xbt_cfgelm_int, &default_value_int, 0, 1,
/* stack size of contexts in Ko */
default_value_int = 128;
xbt_cfg_register(&_surf_cfg_set, "contexts/stack_size",
- "Stack size of contexts in Ko (ucontext or raw only)",
+ "Stack size of contexts in Kib (ucontext or raw only)",
xbt_cfgelm_int, &default_value_int, 1, 1,
_surf_cfg_cb_context_stack_size, NULL);
/* number of parallel threads for user processes */
default_value_int = 1;
xbt_cfg_register(&_surf_cfg_set, "contexts/nthreads",
- "Number of parallel threads for user contexts (EXPERIMENTAL)",
+ "Number of parallel threads used to execute user contexts",
xbt_cfgelm_int, &default_value_int, 1, 1,
_surf_cfg_cb_contexts_nthreads, NULL);
/* minimal number of user contexts to be run in parallel */
- default_value_int = 1;
+ default_value_int = 2;
xbt_cfg_register(&_surf_cfg_set, "contexts/parallel_threshold",
"Minimal number of user contexts to be run in parallel (raw contexts only)",
xbt_cfgelm_int, &default_value_int, 1, 1,
_surf_cfg_cb_contexts_parallel_threshold, NULL);
/* minimal number of user contexts to be run in parallel */
+#ifdef HAVE_FUTEX_H
default_value = xbt_strdup("futex");
- xbt_cfg_register(&_surf_cfg_set, "contexts/parallel_mode",
- "Synchronization mode to use when running contexts in parallel",
+#else //No futex on mac and posix is unimplememted yet
+ default_value = xbt_strdup("busy_wait");
+#endif
+ xbt_cfg_register(&_surf_cfg_set, "contexts/synchro",
+ "Synchronization mode to use when running contexts in parallel (either futex, posix or busy_wait)",
xbt_cfgelm_string, &default_value, 1, 1,
_surf_cfg_cb_contexts_parallel_mode, NULL);
default_value = xbt_strdup("no");
- xbt_cfg_register(&_surf_cfg_set, "coordinates",
- "\"yes\" or \"no\" (FIXME: document)",
+ xbt_cfg_register(&_surf_cfg_set, "network/coordinates",
+ "\"yes\" or \"no\", specifying whether we use a coordinate-based routing (as Vivaldi)",
xbt_cfgelm_string, &default_value, 1, 1,
_surf_cfg_cb__surf_network_coordinates, NULL);
- xbt_cfg_setdefault_string(_surf_cfg_set, "coordinates", default_value);
+ xbt_cfg_setdefault_string(_surf_cfg_set, "network/coordinates", default_value);
default_value_int = 0;
- xbt_cfg_register(&_surf_cfg_set, "fullduplex",
+ xbt_cfg_register(&_surf_cfg_set, "network/crosstraffic",
"Activate the interferences between uploads and downloads for fluid max-min models (LV08, CM02)",
xbt_cfgelm_int, &default_value_int, 0, 1,
- _surf_cfg_cb__surf_network_fullduplex, NULL);
- xbt_cfg_setdefault_int(_surf_cfg_set, "fullduplex", default_value_int);
+ _surf_cfg_cb__surf_network_crosstraffic, NULL);
+ xbt_cfg_setdefault_int(_surf_cfg_set, "network/crosstraffic", default_value_int);
#ifdef HAVE_GTNETS
- xbt_cfg_register(&_surf_cfg_set, "gtnets_jitter",
+ xbt_cfg_register(&_surf_cfg_set, "gtnets/jitter",
"Double value to oscillate the link latency, uniformly in random interval [-latency*gtnets_jitter,latency*gtnets_jitter)",
xbt_cfgelm_double, NULL, 1, 1,
_surf_cfg_cb__gtnets_jitter, NULL);
- xbt_cfg_setdefault_double(_surf_cfg_set, "gtnets_jitter", 0.0);
+ xbt_cfg_setdefault_double(_surf_cfg_set, "gtnets/jitter", 0.0);
default_value_int = 10;
- xbt_cfg_register(&_surf_cfg_set, "gtnets_jitter_seed",
+ xbt_cfg_register(&_surf_cfg_set, "gtnets/jitter_seed",
"Use a positive seed to reproduce jitted results, value must be in [1,1e8], default is 10",
xbt_cfgelm_int, &default_value_int, 0, 1,
_surf_cfg_cb__gtnets_jitter_seed, NULL);
static int int_die_impossible_paction(surf_action_t action)
{
DIE_IMPOSSIBLE;
+ return -1;
}
/** @brief initialize common datastructures to all models */
#define NO_MAX_DURATION -1.0
+typedef enum {
+ UM_FULL,
+ UM_LAZY,
+ UM_UNDEFINED,
+} e_UM_t;
+
/* user-visible parameters */
extern double sg_tcp_gamma;
extern double sg_sender_gap;
extern double sg_bandwidth_factor;
extern double sg_weight_S_parameter;
extern int sg_maxmin_selective_update;
-extern int sg_network_fullduplex;
+extern int sg_network_crosstraffic;
#ifdef HAVE_GTNETS
extern double sg_gtnets_jitter;
extern int sg_gtnets_jitter_seed;
#include "surf_routing_private.h"
/* Global vars */
-extern routing_global_t global_routing;
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_route_cluster, surf, "Routing part of surf");
xbt_dynar_t generic_get_onelink_routes(AS_t rc) { // FIXME: kill that stub
xbt_die("\"generic_get_onelink_routes\" not implemented yet");
+ return NULL;
}
route_t generic_get_bypassroute(AS_t rc, const char *src, const char *dst)
/* Global vars */
extern routing_global_t global_routing;
-extern xbt_dynar_t parsed_link_list;
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_route_rulebased, surf, "Routing part of surf");
}
void STag_surfxml_host(void){
- s_sg_platf_host_cbarg_t host;
- memset(&host,0,sizeof(host));
-
xbt_assert(current_property_set == NULL, "Someone forgot to reset the property set to NULL in its closing tag (or XML malformed)");
- host.properties = current_property_set = xbt_dict_new_homogeneous(xbt_free_f);
-
- host.id = A_surfxml_host_id;
- host.power_peak = get_cpu_power(A_surfxml_host_power);
- host.power_scale = surf_parse_get_double( A_surfxml_host_availability);
- host.core_amount = surf_parse_get_int(A_surfxml_host_core);
- host.power_trace = tmgr_trace_new(A_surfxml_host_availability_file);
- host.state_trace = tmgr_trace_new(A_surfxml_host_state_file);
- xbt_assert((A_surfxml_host_state == A_surfxml_host_state_ON) ||
- (A_surfxml_host_state == A_surfxml_host_state_OFF), "Invalid state");
- if (A_surfxml_host_state == A_surfxml_host_state_ON)
- host.initial_state = SURF_RESOURCE_ON;
- if (A_surfxml_host_state == A_surfxml_host_state_OFF)
- host.initial_state = SURF_RESOURCE_OFF;
- host.coord = A_surfxml_host_coordinates;
-
- sg_platf_new_host(&host);
}
+
void ETag_surfxml_host(void) {
+ s_sg_platf_host_cbarg_t host;
+ memset(&host,0,sizeof(host));
+
+ host.properties = current_property_set;
+
+ host.id = A_surfxml_host_id;
+ host.power_peak = get_cpu_power(A_surfxml_host_power);
+ host.power_scale = surf_parse_get_double( A_surfxml_host_availability);
+ host.core_amount = surf_parse_get_int(A_surfxml_host_core);
+ host.power_trace = tmgr_trace_new(A_surfxml_host_availability_file);
+ host.state_trace = tmgr_trace_new(A_surfxml_host_state_file);
+ xbt_assert((A_surfxml_host_state == A_surfxml_host_state_ON) ||
+ (A_surfxml_host_state == A_surfxml_host_state_OFF), "Invalid state");
+ if (A_surfxml_host_state == A_surfxml_host_state_ON)
+ host.initial_state = SURF_RESOURCE_ON;
+ if (A_surfxml_host_state == A_surfxml_host_state_OFF)
+ host.initial_state = SURF_RESOURCE_OFF;
+ host.coord = A_surfxml_host_coordinates;
+
+ sg_platf_new_host(&host);
current_property_set = NULL;
}
/* nothing to do here */
}
void STag_surfxml_link(void){
- s_sg_platf_link_cbarg_t link;
- memset(&link,0,sizeof(link));
-
xbt_assert(current_property_set == NULL, "Someone forgot to reset the property set to NULL in its closing tag (or XML malformed)");
- link.properties = current_property_set = xbt_dict_new_homogeneous(xbt_free_f);
-
- link.id = A_surfxml_link_id;
- link.bandwidth = surf_parse_get_double(A_surfxml_link_bandwidth);
- link.bandwidth_trace = tmgr_trace_new(A_surfxml_link_bandwidth_file);
- link.latency = surf_parse_get_double(A_surfxml_link_latency);
- link.latency_trace = tmgr_trace_new(A_surfxml_link_latency_file);
-
- switch (A_surfxml_link_state) {
- case A_surfxml_link_state_ON:
- link.state = SURF_RESOURCE_ON;
- break;
- case A_surfxml_link_state_OFF:
- link.state = SURF_RESOURCE_OFF;
- break;
- default:
- surf_parse_error(bprintf("invalid state for link %s",link.id));
- }
- link.state_trace = tmgr_trace_new(A_surfxml_link_state_file);
-
- switch (A_surfxml_link_sharing_policy) {
- case A_surfxml_link_sharing_policy_SHARED:
- link.policy = SURF_LINK_SHARED;
- break;
- case A_surfxml_link_sharing_policy_FATPIPE:
- link.policy = SURF_LINK_FATPIPE;
- break;
- case A_surfxml_link_sharing_policy_FULLDUPLEX:
- link.policy = SURF_LINK_FULLDUPLEX;
- break;
- default:
- surf_parse_error(bprintf("Invalid sharing policy in link %s",link.id));
- }
-
- sg_platf_new_link(&link);
}
void ETag_surfxml_link(void){
+ s_sg_platf_link_cbarg_t link;
+ memset(&link,0,sizeof(link));
+
+ link.properties = current_property_set;
+
+ link.id = A_surfxml_link_id;
+ link.bandwidth = surf_parse_get_double(A_surfxml_link_bandwidth);
+ link.bandwidth_trace = tmgr_trace_new(A_surfxml_link_bandwidth_file);
+ link.latency = surf_parse_get_double(A_surfxml_link_latency);
+ link.latency_trace = tmgr_trace_new(A_surfxml_link_latency_file);
+
+ switch (A_surfxml_link_state) {
+ case A_surfxml_link_state_ON:
+ link.state = SURF_RESOURCE_ON;
+ break;
+ case A_surfxml_link_state_OFF:
+ link.state = SURF_RESOURCE_OFF;
+ break;
+ default:
+ surf_parse_error(bprintf("invalid state for link %s",link.id));
+ break;
+ }
+ link.state_trace = tmgr_trace_new(A_surfxml_link_state_file);
+
+ switch (A_surfxml_link_sharing_policy) {
+ case A_surfxml_link_sharing_policy_SHARED:
+ link.policy = SURF_LINK_SHARED;
+ break;
+ case A_surfxml_link_sharing_policy_FATPIPE:
+ link.policy = SURF_LINK_FATPIPE;
+ break;
+ case A_surfxml_link_sharing_policy_FULLDUPLEX:
+ link.policy = SURF_LINK_FULLDUPLEX;
+ break;
+ default:
+ surf_parse_error(bprintf("Invalid sharing policy in link %s",link.id));
+ break;
+ }
+
+ sg_platf_new_link(&link);
+
current_property_set = NULL;
}
void STag_surfxml_config(void){
XBT_DEBUG("START configuration name = %s",A_surfxml_config_id);
xbt_assert(current_property_set == NULL, "Someone forgot to reset the property set to NULL in its closing tag (or XML malformed)");
- current_property_set = xbt_dict_new_homogeneous(xbt_free_f);
-
}
void ETag_surfxml_config(void){
xbt_dict_cursor_t cursor = NULL;
static int ws_resource_used(void *resource_id)
{
THROW_IMPOSSIBLE; /* This model does not implement parallel tasks */
+ return -1;
}
static void ws_parallel_action_cancel(surf_action_t action)
static int ws_parallel_action_free(surf_action_t action)
{
THROW_UNIMPLEMENTED; /* This model does not implement parallel tasks */
+ return -1;
}
static int ws_action_unref(surf_action_t action)
if (action->model_type == surf_cpu_model)
return surf_cpu_model->is_suspended(action);
DIE_IMPOSSIBLE;
+ return -1;
}
static void ws_action_set_max_duration(surf_action_t action,
if (action->model_type == surf_cpu_model)
return surf_cpu_model->get_remains(action);
DIE_IMPOSSIBLE;
+ return -1.0;
}
static surf_action_t ws_communicate(void *workstation_src,
double amount, double rate)
{
THROW_UNIMPLEMENTED; /* This model does not implement parallel tasks */
+ return NULL;
}
static xbt_dynar_t ws_get_route(void *src, void *dst)
{
return surf_network_model->extension.
- network.get_route(surf_resource_name(src), surf_resource_name(src));
+ network.get_route(surf_resource_name(src), surf_resource_name(dst));
}
static double ws_get_link_bandwidth(const void *link)
}
-/********************************************************************/
-/* The model used in MSG and presented at CCGrid03 */
-/********************************************************************/
-/* @InProceedings{Casanova.CLM_03, */
-/* author = {Henri Casanova and Arnaud Legrand and Loris Marchal}, */
-/* title = {Scheduling Distributed Applications: the SimGrid Simulation Framework}, */
-/* booktitle = {Proceedings of the third IEEE International Symposium on Cluster Computing and the Grid (CCGrid'03)}, */
-/* publisher = {"IEEE Computer Society Press"}, */
-/* month = {may}, */
-/* year = {2003} */
-/* } */
-void surf_workstation_model_init_CLM03(void)
+void surf_workstation_model_init_current_default(void)
{
surf_workstation_model_init_internal();
- surf_cpu_model_init_Cas01_im();
- im_surf_network_model_init_LegrandVelho();
- // FIXME: prefer the proper interface instead of bypassing the cfg module that way
- //xbt_cfg_set_parse(_surf_cfg_set, "network/model:LV08");
- //xbt_cfg_set_parse(_surf_cfg_set, "cpu/model:Cas01");
+ xbt_cfg_setdefault_int(_surf_cfg_set, "network/crosstraffic", 1);
+ surf_cpu_model_init_Cas01();
+ surf_network_model_init_LegrandVelho();
+
xbt_dynar_push(model_list, &surf_workstation_model);
sg_platf_postparse_add_cb(create_workstations);
}
static double ptask_action_get_remains(surf_action_t action)
{
XBT_IN("(%p)", action);
- return action->remains;
XBT_OUT();
+ return action->remains;
}
/**************************************/
ptask_add_traces;
if (!ptask_maxmin_system)
- ptask_maxmin_system = lmm_system_new();
+ ptask_maxmin_system = lmm_system_new(1);
routing_model_create(sizeof(link_L07_t),
ptask_link_create_resource("__loopback__",
typedef struct xbt_heap {
int size;
int count;
- xbt_heap_item_t items;
+ s_xbt_heap_item_t* items; /* array of structs */
void_f_pvoid_t free;
void (*update_callback) (void *, int);
} s_xbt_heap_t;
parmap->wait_f = xbt_parmap_posix_wait;
break;
+
case XBT_PARMAP_FUTEX:
+#ifdef HAVE_FUTEX_H
parmap->start_f = xbt_parmap_futex_start;
parmap->end_f = xbt_parmap_futex_end;
parmap->signal_f = xbt_parmap_futex_signal;
parmap->wait_f = xbt_parmap_futex_wait;
break;
+#else
+ xbt_die("Futex is not available on this OS (maybe you are on a Mac).");
+#endif
case XBT_PARMAP_BUSY_WAIT:
parmap->start_f = xbt_parmap_busy_start;
xbt_dynar_t data = xbt_dynar_new(sizeof(void *), NULL);
/* Create the parallel map */
+#ifdef HAVE_FUTEX_H
parmap = xbt_parmap_new(10, XBT_PARMAP_FUTEX);
-
+#else
+ parmap = xbt_parmap_new(10, XBT_PARMAP_BUSY_WAIT);
+#endif
for (j = 0; j < 100; j++) {
xbt_dynar_push_as(data, void *, (void *)j);
}
add_executable(flatifier flatifier.c)
add_executable(evaluate_parse_time Evaluate_parse_time.c)
add_executable(evaluate_get_route_time Evaluate_get_route_time.c)
+add_executable(basic_tracing basic_tracing.c)
### Add definitions for compile
if(NOT WIN32)
target_link_libraries(basic_parsing_test simgrid m)
target_link_libraries(is_router_test simgrid m)
target_link_libraries(flatifier simgrid m)
+target_link_libraries(basic_tracing simgrid m)
else(NOT WIN32)
target_link_libraries(evaluate_parse_time simgrid)
target_link_libraries(evaluate_get_route_time simgrid)
target_link_libraries(basic_parsing_test simgrid)
target_link_libraries(is_router_test simgrid)
target_link_libraries(flatifier simgrid)
+target_link_libraries(basic_tracing simgrid)
endif(NOT WIN32)
\ No newline at end of file
> Workstation number: 5, link number: 7
$ ${bindir:=.}/basic_parsing_test ./properties.xml
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'cpu/optim' to 'TI'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '0.000010'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'cpu/model' to 'CpuTI'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'workstation/model' to 'compound'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'Vegas'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'path' to '~/'
> Workstation number: 1, link number: 1
-$ ${bindir:=.}/basic_parsing_test ./properties.xml --cfg=cpu/model:CpuTI
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'cpu/model' to 'CpuTI'
+$ ${bindir:=.}/basic_parsing_test ./properties.xml --cfg=cpu/optim:TI
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'cpu/optim' to 'TI'
+> [0.000000] [surf_parse/INFO] The custom configuration 'cpu/optim' is already defined by user!
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '0.000010'
-> [0.000000] [surf_parse/INFO] The custom configuration 'cpu/model' is already defined by user!
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'workstation/model' to 'compound'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'Vegas'
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'path' to '~/'
--- /dev/null
+/* Copyright (c) 2008, 2009, 2010. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "msg/msg.h"
+#include "surf/surf_private.h"
+
+int host(int argc, char *argv[]);
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(basic_tracing,"Messages specific for this example");
+
+int host(int argc, char *argv[])
+{
+ XBT_INFO("Sleep for 1s");
+ MSG_process_sleep(1);
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int res;
+ MSG_global_init(&argc, argv);
+ MSG_create_environment(argv[1]);
+ MSG_function_register("host", host);
+ MSG_process_create( "host", host, NULL, MSG_get_host_table()[0] );
+
+ res = MSG_main();
+ XBT_INFO("Simulation time %g", MSG_get_clock());
+ MSG_clean();
+ if (res == MSG_OK)
+ return 0;
+ else
+ return 1;
+}
<config id="General">
<prop id="maxmin/precision" value="0.000010"></prop>
- <prop id="cpu/model" value="CpuTI"></prop>
+ <prop id="cpu/optim" value="TI"></prop>
<prop id="workstation/model" value="compound"></prop>
<prop id="network/model" value="Vegas"></prop>
<prop id="path" value="~/"></prop>
lmm_set_default_protocol_function(func_reno_f, func_reno_fpi,
func_reno_fpi);
- Sys = lmm_system_new();
+ Sys = lmm_system_new(1);
L1 = lmm_constraint_new(Sys, (void *) "L1", a);
L2 = lmm_constraint_new(Sys, (void *) "L2", b);
L3 = lmm_constraint_new(Sys, (void *) "L3", a);
lmm_set_default_protocol_function(func_reno_f, func_reno_fp,
func_reno_fpi);
- Sys = lmm_system_new();
+ Sys = lmm_system_new(1);
CPU1 = lmm_constraint_new(Sys, (void *) "CPU1", 200.0);
CPU2 = lmm_constraint_new(Sys, (void *) "CPU2", 100.0);
lmm_set_default_protocol_function(func_reno_f, func_reno_fp,
func_reno_fpi);
- Sys = lmm_system_new();
+ Sys = lmm_system_new(1);
int *used = xbt_new0(int, nb_cnst);
int i, j, k;
- Sys = lmm_system_new();
+ Sys = lmm_system_new(1);
for (i = 0; i < nb_cnst; i++) {
cnst[i] = lmm_constraint_new(Sys, NULL, float_random(10.0));
double now = -1.0;
int running;
- xbt_cfg_set_parse(_surf_cfg_set, "workstation/model:CLM03");
+ xbt_cfg_set_parse(_surf_cfg_set, "network/model:CM02");
+ xbt_cfg_set_parse(_surf_cfg_set, "cpu/model:Cas01");
parse_platform_file(platform);
/*********************** WORKSTATION ***********************************/
my @extra_files = qw(html/index.html html/pages.html html/modules.html html/annotated.html html/functions.html
html/functions_vars.html index.php
html/GRAS_tut.html html/tracing.html html/installSimgrid.html html/bindings.html
- html/options.html html/use.html html/pls.html);
+ html/options.html html/use.html html/pls.html html/FAQ.html);
# GRAS tutorial
map {push @extra_files, "html/GRAS_tut_$_.html"} qw (intro
$tmp_buff .= ' <li><a href="http://gforge.inria.fr/projects/simgrid"><span>Forge</span></a></li>'."\n";
$tmp_buff .= ' <li><a href="http://simgrid.gforge.inria.fr/"><span>Website</span></a></li>'."\n";
$tmp_buff .= ' <li><a href="pages.html"><span>Documentation index</span></a></li>'."\n";
+ $tmp_buff .= ' <li><a href="FAQ.html"><span>FAQ</span></a></li>'."\n";
$tmp_buff .= $_;
$tabs = 0;
# Rework the navbar
# Fix the current "button" of buggy Doxygen tabs
- if($file =~ /^html\/pages.*/)
+ if($file =~ /^html\/pages.*/
+ || $file =~ /^html\/FAQ.*/)
{
my $filename = $file;
$filename =~ s/html\///g;