+
#ifndef MIN
/* Classic two-argument minimum. Beware: both arguments are evaluated twice,
 * so do not pass expressions with side effects (e.g. MIN(i++, j)). */
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
#ifndef MAX
/* Classic two-argument maximum; same double-evaluation caveat as MIN. */
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
+
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(xbt_mallocator, xbt, "Mallocators");
+
+/** Implementation note on the mallocators:
+ *
 * Mallocators and the memory juggling introduced by model-checking do not mix well: the mallocator would
 * hand out standard memory while we are using raw memory (so these blocks are killed on restore), and vice
 * versa (so these blocks would leak across restores).
+ *
+ * In addition, model-checking is activated when the command-line arguments are parsed, at the beginning of main, while
+ * most of the mallocators are created during the constructor functions launched from xbt_preinit, before the beginning
+ * of the main function.
+ *
 * We want the code to be as fast as possible when the mallocators are active, while a small slow-down is
 * acceptable when they are inactive. So the mallocators start inactive. While inactive, they check at each
 * use whether they should switch to the fast active mode or stay inactive. Finally, external code is given
 * a way to switch them all to the active mode at once (through xbt_mallocator_initialization_is_done).
+ *
 * This design avoids storing all mallocators somewhere for later conversion, which would be hard to achieve
 * given that all our data structures use some mallocators internally...
+ */
+
/* Value != 0 once the framework configuration is done. Value > 1 if the
 * mallocators must additionally be protected from concurrent accesses.
 * NOTE(review): read without synchronization in lock_acquire()/lock_release();
 * assumed to be written once before concurrent use starts — TODO confirm at
 * the call sites of xbt_mallocator_initialization_is_done. */
static int initialization_done = 0;
+
+static inline void lock_reset(xbt_mallocator_t m)
+{
+ atomic_flag_clear(&m->lock);
+}
+
+static inline void lock_acquire(xbt_mallocator_t m)
+{
+ if (initialization_done > 1) {
+ while (atomic_flag_test_and_set(&m->lock))
+ /* nop */;
+ }
+}
+
+static inline void lock_release(xbt_mallocator_t m)
+{
+ if (initialization_done > 1)
+ atomic_flag_clear(&m->lock);
+}