### MC ###
IF(HAVE_MC)
- ADD_TESH(page_store --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/mc --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/mc page_store.tesh)
ADD_TESH(tesh-mc-dwarf --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/mc/dwarf --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/mc/dwarf dwarf.tesh)
ADD_TESH(tesh-mc-dwarf-expression --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/mc/dwarf_expression --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/mc/dwarf_expression dwarf_expression.tesh)
ADD_TESH_FACTORIES(mc-bugged2 "ucontext;raw" --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged2.tesh)
IF(CONTEXT_UCONTEXT AND PROCESSOR_x86_64) # liveness model-checking works only on 64bits (for now ...)
ADD_TESH(mc-bugged1-liveness-ucontext --cfg contexts/factory:ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness.tesh)
+ ADD_TESH(mc-bugged1-liveness-ucontext-sparse --cfg contexts/factory:ucontext --cfg model-check/sparse-checkpoint:yes --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_sparse.tesh)
ADD_TESH(mc-bugged1-liveness-visited-ucontext --cfg contexts/factory:ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_visited.tesh)
+ ADD_TESH(mc-bugged1-liveness-visited-ucontext-sparse --cfg contexts/factory:ucontext --cfg model-check/sparse-checkpoint:yes --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_visited_sparse.tesh)
ENDIF()
ENDIF()
set(optCFLAGS "-O0 ")
# But you can still optimize this:
foreach(s
- src/xbt/mmalloc/mm.c
- src/xbt/snprintf.c src/xbt/log.c
- # For some reason, this fails to work when optimizing dynar.c:
+ # src/xbt/mmalloc/mm.c
+ # src/xbt/snprintf.c src/xbt/log.c
# src/xbt/dynar.c
- src/xbt/set.c src/xbt/setset.c src/xbt/backtrace_linux.c
+ # src/xbt/set.c src/xbt/setset.c
+ # src/xbt/backtrace_linux.c
src/mc/mc_dwarf_expression.c src/mc/mc_dwarf.c src/mc/mc_member.c
src/mc/mc_snapshot.c src/mc/mc_page_store.cpp src/mc/mc_page_snapshot.cpp
src/mc/mc_compare.cpp src/mc/mc_diff.c
${CMAKE_CURRENT_BINARY_DIR}/src/simgrid_units_main.c
)
+if(HAVE_MC)
+ set(TEST_CFILES ${TEST_CFILES}
+ src/mc/mc_page_store.cpp
+ src/mc/mc_snapshot.c
+ )
+ set(TEST_UNITS ${TEST_UNITS}
+ ${CMAKE_CURRENT_BINARY_DIR}/src/mc_page_store_unit.cpp
+ ${CMAKE_CURRENT_BINARY_DIR}/src/mc_snapshot_unit.c
+ )
+endif()
+
ADD_CUSTOM_COMMAND(
OUTPUT ${TEST_UNITS}
--- /dev/null
+#! ./tesh
+
+! expect signal SIGABRT
+! timeout 20
+$ ${bindir:=.}/bugged1_liveness ${srcdir:=.}/../msg_platform.xml ${srcdir:=.}/deploy_bugged1_liveness.xml --cfg=model-check:1 "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" --cfg=contexts/factory:ucontext --cfg=contexts/stack_size:256
+> [ 0.000000] (0:@) Configuration change: Set 'model-check' to '1'
+> [ 0.000000] (0:@) Configuration change: Set 'model-check/sparse-checkpoint' to 'yes'
+> [ 0.000000] (0:@) Check the liveness property promela_bugged1_liveness
+> [ 0.000000] (0:@) Get debug information ...
+> [ 0.000000] (0:@) Get debug information done !
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (3:client@Fafard) Ask the request
+> [ 0.000000] (2:client@Boivin) Propositions changed : r=1, cs=0
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (3:client@Fafard) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (3:client@Fafard) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (0:@) Pair 21 already reached (equal to pair 9) !
+> [ 0.000000] (0:@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
+> [ 0.000000] (0:@) | ACCEPTANCE CYCLE |
+> [ 0.000000] (0:@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
+> [ 0.000000] (0:@) Counter-example that violates formula :
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iRecv(dst=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] iSend(src=(3)Fafard (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iSend(src=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(3)Fafard (client)] iRecv(dst=(3)Fafard (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (3)Fafard (client)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (3)Fafard (client)])
+> [ 0.000000] (0:@) [(3)Fafard (client)] iSend(src=(3)Fafard (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(3)Fafard (client)] iSend(src=(3)Fafard (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) Expanded pairs = 21
+> [ 0.000000] (0:@) Visited pairs = 21
+> [ 0.000000] (0:@) Executed transitions = 20
+> [ 0.000000] (0:@) Counter-example depth : 20
--- /dev/null
+#! ./tesh
+
+! expect signal SIGABRT
+! timeout 90
+$ ${bindir:=.}/bugged1_liveness ${srcdir:=.}/../msg_platform.xml ${srcdir:=.}/deploy_bugged1_liveness_visited.xml --cfg=model-check:1 "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" --cfg=contexts/factory:ucontext --cfg=model-check/visited:100 --cfg=contexts/stack_size:256
+> [ 0.000000] (0:@) Configuration change: Set 'model-check' to '1'
+> [ 0.000000] (0:@) Configuration change: Set 'model-check/visited' to '100'
+> [ 0.000000] (0:@) Configuration change: Set 'model-check/sparse-checkpoint' to 'yes'
+> [ 0.000000] (0:@) Check the liveness property promela_bugged1_liveness
+> [ 0.000000] (0:@) Get debug information ...
+> [ 0.000000] (0:@) Get debug information done !
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (3:client@Fafard) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (3:client@Fafard) Propositions changed : r=1, cs=0
+> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (0:@) Pair 57 already reached (equal to pair 45) !
+> [ 0.000000] (0:@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
+> [ 0.000000] (0:@) | ACCEPTANCE CYCLE |
+> [ 0.000000] (0:@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
+> [ 0.000000] (0:@) Counter-example that violates formula :
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iSend(src=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iRecv(dst=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iRecv(dst=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] iSend(src=(3)Fafard (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iSend(src=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iSend(src=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iRecv(dst=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iSend(src=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iRecv(dst=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) Expanded pairs = 57
+> [ 0.000000] (0:@) Visited pairs = 208
+> [ 0.000000] (0:@) Executed transitions = 207
+> [ 0.000000] (0:@) Counter-example depth : 50
local_variable_free((local_variable_t) * (void **) v);
}
-static void MC_region_destroy(mc_mem_region_t reg)
+void MC_region_destroy(mc_mem_region_t reg)
{
//munmap(reg->data, reg->size);
xbt_free(reg->data);
/******************************* Snapshot regions ********************************/
/*********************************************************************************/
-static mc_mem_region_t MC_region_new(int type, void *start_addr, size_t size, mc_mem_region_t ref_reg)
+static mc_mem_region_t mc_region_new_dense(int type, void *start_addr, size_t size, mc_mem_region_t ref_reg)
{
- if (_sg_mc_sparse_checkpoint) {
- return mc_region_new_sparse(type, start_addr, size, ref_reg);
- }
-
mc_mem_region_t new_reg = xbt_new(s_mc_mem_region_t, 1);
new_reg->start_addr = start_addr;
new_reg->data = NULL;
}
+static mc_mem_region_t MC_region_new(int type, void *start_addr, size_t size, mc_mem_region_t ref_reg)
+{
+ if (_sg_mc_sparse_checkpoint) {
+ return mc_region_new_sparse(type, start_addr, size, ref_reg);
+ } else {
+ return mc_region_new_dense(type, start_addr, size, ref_reg);
+ }
+}
+
/** @brief Restore a region from a snapshot
*
* If we are using per page snapshots, it is possible to use the reference
{
// We need this snapshot in order to know which
// pages needs to be stored in the next snapshot:
- if (_sg_mc_sparse_checkpoint && snapshot == mc_model_checker->parent_snapshot)
+ if (snapshot == mc_model_checker->parent_snapshot)
return true;
return false;
}
MC_snapshot_ignore_restore(snapshot);
- mc_model_checker->parent_snapshot = snapshot;
+ if (_sg_mc_sparse_checkpoint && _sg_mc_soft_dirty) {
+ mc_model_checker->parent_snapshot = snapshot;
+ }
return snapshot;
}
}
MC_snapshot_ignore_restore(snapshot);
- mc_model_checker->parent_snapshot = snapshot;
+ if (_sg_mc_sparse_checkpoint && _sg_mc_soft_dirty) {
+ mc_model_checker->parent_snapshot = snapshot;
+ }
}
mc_snapshot_t SIMIX_pre_mc_snapshot(smx_simcall_t simcall)
case DW_TAG_enumeration_type:
case DW_TAG_union_type:
{
- void* data1 =
- mc_snapshot_read_region(real_area1, region1, alloca(type->byte_size), type->byte_size);
- void* data2 =
- mc_snapshot_read_region(real_area2, region2, alloca(type->byte_size), type->byte_size);
- return (memcmp(data1, data2, type->byte_size) != 0);
- break;
+ return mc_snapshot_region_memcmp(
+ real_area1, region1, real_area2, region2,
+ type->byte_size) != 0;
}
case DW_TAG_typedef:
case DW_TAG_volatile_type:
case DW_TAG_reference_type:
case DW_TAG_rvalue_reference_type:
{
- void* temp;
- void* addr_pointed1 = *(void**) mc_snapshot_read_region(real_area1, region1, &temp, sizeof(void**));
- void* addr_pointed2 = *(void**) mc_snapshot_read_region(real_area2, region2, &temp, sizeof(void**));
+ void* addr_pointed1 = mc_snapshot_read_pointer_region(real_area1, region1);
+ void* addr_pointed2 = mc_snapshot_read_pointer_region(real_area2, region2);
if (type->subtype && type->subtype->type == DW_TAG_subroutine_type) {
return (addr_pointed1 != addr_pointed2);
return compare_heap_area(addr_pointed1, addr_pointed2, snapshot1,
snapshot2, NULL, type->subtype, pointer_level);
}
+
// The pointers are both in the current object R/W segment:
- else if (addr_pointed1 > region1->start_addr
- && (char *) addr_pointed1 <= (char *) region1->start_addr + region1->size) {
- if (!
- (addr_pointed2 > region2->start_addr
- && (char *) addr_pointed2 <= (char *) region2->start_addr + region2->size))
+ else if (mc_region_contain(region1, addr_pointed1)) {
+ if (!mc_region_contain(region2, addr_pointed2))
return 1;
if (type->dw_type_id == NULL)
return (addr_pointed1 != addr_pointed2);
}
}
+  // TODO: we do not handle well the case where the pointed-to address
+  // belongs to a different (non-heap) region than the current one.
+
else {
return (addr_pointed1 != addr_pointed2);
}
/* type = heap->heapinfo[block].type; */
/* switch(type){ */
- /* case -1 : /\* Free block *\/ */
+ /* case MMALLOC_TYPE_HEAPINFO : */
+ /* case MMALLOC_TYPE_FREE : /\* Free block *\/ */
/* fprintf(stderr, "Asked to display the backtrace of a block that is free. I'm puzzled\n"); */
/* xbt_abort(); */
/* break; */
void *addr_block1, *addr_block2, *addr_frag1, *addr_frag2;
int nb_diff1 = 0, nb_diff2 = 0;
- xbt_dynar_t previous =
- xbt_dynar_new(sizeof(heap_area_pair_t), heap_area_pair_free_voidp);
-
int equal, res_compare = 0;
/* Check busy blocks */
while (i1 <= state->heaplimit) {
- // TODO, lookup in the correct region in order to speed it up:
malloc_info* heapinfo1 = mc_snapshot_read_region(&heapinfos1[i1], heap_region1, &heapinfo_temp1, sizeof(malloc_info));
malloc_info* heapinfo2 = mc_snapshot_read_region(&heapinfos2[i1], heap_region2, &heapinfo_temp2, sizeof(malloc_info));
- if (heapinfo1->type == -1) { /* Free block */
- i1++;
+ if (heapinfo1->type == MMALLOC_TYPE_FREE || heapinfo1->type == MMALLOC_TYPE_HEAPINFO) { /* Free block */
+ i1 += heapinfo1->free_block.size;
continue;
}
+ if (heapinfo1->type < 0) {
+    fprintf(stderr, "Unknown mmalloc block type.\n");
+ abort();
+ }
+
addr_block1 =
((void *) (((ADDR2UINT(i1)) - 1) * BLOCKSIZE +
(char *) ((xbt_mheap_t) state->s_heap)->heapbase));
- if (heapinfo1->type == 0) { /* Large block */
+ if (heapinfo1->type == MMALLOC_TYPE_UNFRAGMENTED) { /* Large block */
if (is_stack(addr_block1)) {
for (k = 0; k < heapinfo1->busy_block.size; k++)
i1 += heapinfo1->busy_block.size;
}
- xbt_dynar_reset(previous);
-
}
}
malloc_info* heapinfo2b = mc_snapshot_read_region(&heapinfos2[i2], heap_region2, &heapinfo_temp2b, sizeof(malloc_info));
- if (heapinfo2b->type != 0) {
+ if (heapinfo2b->type != MMALLOC_TYPE_UNFRAGMENTED) {
i2++;
continue;
}
i1 += heapinfo1->busy_block.size;
}
- xbt_dynar_reset(previous);
-
i2++;
}
(char *) ((xbt_mheap_t) state->s_heap)->heapbase));
addr_frag2 =
(void *) ((char *) addr_block2 +
- (j1 << ((xbt_mheap_t) state->s_heap)->heapinfo[i1].
- type));
+ (j1 << heapinfo2->type));
res_compare =
compare_heap_area(addr_frag1, addr_frag2, snapshot1, snapshot2,
if (res_compare != 1)
equal = 1;
- xbt_dynar_reset(previous);
-
}
}
while (i2 <= state->heaplimit && !equal) {
malloc_info* heapinfo2b = mc_snapshot_read_region(&heapinfos2[i2], heap_region2, &heapinfo_temp2b, sizeof(malloc_info));
- if (heapinfo2b->type <= 0) {
- i2++;
+
+ if (heapinfo2b->type == MMALLOC_TYPE_FREE || heapinfo2b->type == MMALLOC_TYPE_HEAPINFO) {
+ i2 += heapinfo2b->free_block.size;
continue;
}
+ if (heapinfo2b->type < 0) {
+    fprintf(stderr, "Unknown mmalloc block type.\n");
+ abort();
+ }
+
for (j2 = 0; j2 < (size_t) (BLOCKSIZE >> heapinfo2b->type);
j2++) {
(char *) ((xbt_mheap_t) state->s_heap)->heapbase));
addr_frag2 =
(void *) ((char *) addr_block2 +
- (j2 << ((xbt_mheap_t) state->s_heap)->heapinfo[i2].
- type));
+ (j2 << heapinfo2b->type));
res_compare =
compare_heap_area(addr_frag1, addr_frag2, snapshot2, snapshot2,
if (res_compare != 1) {
equal = 1;
- xbt_dynar_reset(previous);
break;
}
- xbt_dynar_reset(previous);
-
}
i2++;
for(i = 1; i <= state->heaplimit; i++) {
malloc_info* heapinfo1 = mc_snapshot_read_region(&heapinfos1[i], heap_region1, &heapinfo_temp1, sizeof(malloc_info));
- if (heapinfo1->type == 0) {
+ if (heapinfo1->type == MMALLOC_TYPE_UNFRAGMENTED) {
if (i1 == state->heaplimit) {
if (heapinfo1->busy_block.busy_size > 0) {
if (state->equals_to1_(i, 0).valid == 0) {
for (i=1; i <= state->heaplimit; i++) {
malloc_info* heapinfo2 = mc_snapshot_read_region(&heapinfos2[i], heap_region2, &heapinfo_temp2, sizeof(malloc_info));
- if (heapinfo2->type == 0) {
+ if (heapinfo2->type == MMALLOC_TYPE_UNFRAGMENTED) {
if (i1 == state->heaplimit) {
if (heapinfo2->busy_block.busy_size > 0) {
if (state->equals_to2_(i, 0).valid == 0) {
if (i1 == state->heaplimit)
XBT_DEBUG("Number of blocks/fragments not found in heap2 : %d", nb_diff2);
- xbt_dynar_free(&previous);
return ((nb_diff1 > 0) || (nb_diff2 > 0));
}
}
}
- if (mc_snapshot_region_memcp(((char *) real_area1) + i, heap_region1, ((char *) real_area2) + i, heap_region2, 1) != 0) {
+ if (mc_snapshot_region_memcmp(((char *) real_area1) + i, heap_region1, ((char *) real_area2) + i, heap_region2, 1) != 0) {
pointer_align = (i / sizeof(void *)) * sizeof(void *);
addr_pointed1 = mc_snapshot_read_pointer((char *) real_area1 + pointer_align, snapshot1);
if (real_area1 == real_area2)
return -1;
else
- return (mc_snapshot_region_memcp(real_area1, heap_region1, real_area2, heap_region2, area_size) != 0);
+ return (mc_snapshot_region_memcmp(real_area1, heap_region1, real_area2, heap_region2, area_size) != 0);
} else {
if (area_size != -1 && type->byte_size != area_size)
return -1;
else {
- return (mc_snapshot_region_memcp(real_area1, heap_region1, real_area2, heap_region2, type->byte_size) != 0);
+ return (mc_snapshot_region_memcmp(real_area1, heap_region1, real_area2, heap_region2, type->byte_size) != 0);
}
}
break;
if (area_size != -1 && type->byte_size != area_size)
return -1;
else
- return (mc_snapshot_region_memcp(real_area1, heap_region1, real_area2, heap_region2, type->byte_size) != 0);
+ return (mc_snapshot_region_memcmp(real_area1, heap_region1, real_area2, heap_region2, type->byte_size) != 0);
break;
case DW_TAG_typedef:
case DW_TAG_const_type:
int check_ignore = 0;
void *real_addr_block1, *real_addr_block2, *real_addr_frag1, *real_addr_frag2;
-
int type_size = -1;
int offset1 = 0, offset2 = 0;
int new_size1 = -1, new_size2 = -1;
malloc_info heapinfo_temp1, heapinfo_temp2;
+ void* real_area1_to_compare = area1;
+ void* real_area2_to_compare = area2;
+
if (previous == NULL) {
previous =
xbt_dynar_new(sizeof(heap_area_pair_t), heap_area_pair_free_voidp);
malloc_info* heapinfo1 = mc_snapshot_read_region(&heapinfos1[block1], heap_region1, &heapinfo_temp1, sizeof(malloc_info));
malloc_info* heapinfo2 = mc_snapshot_read_region(&heapinfos2[block2], heap_region2, &heapinfo_temp2, sizeof(malloc_info));
- if ((heapinfo1->type == -1) && (heapinfo2->type == -1)) { /* Free block */
+ if ((heapinfo1->type == MMALLOC_TYPE_FREE || heapinfo1->type==MMALLOC_TYPE_HEAPINFO)
+ && (heapinfo2->type == MMALLOC_TYPE_FREE || heapinfo2->type ==MMALLOC_TYPE_HEAPINFO)) {
+ /* Free block */
if (match_pairs) {
match_equals(state, previous);
xbt_dynar_free(&previous);
}
return 0;
- } else if ((heapinfo1->type == 0) && (heapinfo2->type == 0)) { /* Complete block */
+ } else if (heapinfo1->type == MMALLOC_TYPE_UNFRAGMENTED
+ && heapinfo2->type == MMALLOC_TYPE_UNFRAGMENTED) {
+ /* Complete block */
// TODO, lookup variable type from block type as done for fragmented blocks
+ offset1 = (char *) area1 - (char *) real_addr_block1;
+ offset2 = (char *) area2 - (char *) real_addr_block2;
+
if (state->equals_to1_(block1, 0).valid
&& state->equals_to2_(block2, 0).valid) {
if (equal_blocks(state, block1, block2)) {
if (type_size != -1) {
if (type_size != heapinfo1->busy_block.busy_size
&& type_size != heapinfo2->busy_block.busy_size
- && type->name != NULL && !strcmp(type->name, "s_smx_context")) {
+ && (type->name == NULL || !strcmp(type->name, "struct s_smx_context"))) {
if (match_pairs) {
match_equals(state, previous);
xbt_dynar_free(&previous);
// Process address of the fragment:
real_addr_frag1 =
(void *) ((char *) real_addr_block1 +
- (frag1 << ((xbt_mheap_t) state->s_heap)->heapinfo[block1].
- type));
+ (frag1 << heapinfo1->type));
real_addr_frag2 =
(void *) ((char *) real_addr_block2 +
- (frag2 << ((xbt_mheap_t) state->s_heap)->heapinfo[block2].
- type));
+ (frag2 << heapinfo2->type));
// Check the size of the fragments against the size of the type:
if (type_size != -1) {
}
return -1;
}
+ // ?
if (type_size != heapinfo1->busy_frag.frag_size[frag1]
|| type_size != heapinfo2->busy_frag.frag_size[frag2]) {
if (match_pairs) {
return -1;
}
}
+
// Check if the blocks are already matched together:
if (state->equals_to1_(block1, frag1).valid
&& state->equals_to2_(block2, frag2).valid) {
- if (equal_fragments(state, block1, frag1, block2, frag2)) {
+ if (offset1==offset2 && equal_fragments(state, block1, frag1, block2, frag2)) {
if (match_pairs) {
match_equals(state, previous);
xbt_dynar_free(&previous);
return 1;
}
}
+
// Size of the fragment:
size = heapinfo1->busy_frag.frag_size[frag1];
// Remember (basic) type inference.
- // The current data structure only allows us to do this for the whole block.
+ // The current data structure only allows us to do this for the whole fragment.
if (type != NULL && area1 == real_addr_frag1) {
state->types1_(block1, frag1) = type;
}
/* Start comparison */
if (type) {
res_compare =
- compare_heap_area_with_type(state, area1, area2, snapshot1, snapshot2,
+ compare_heap_area_with_type(state, real_area1_to_compare, real_area2_to_compare, snapshot1, snapshot2,
previous, type, size, check_ignore,
pointer_level);
} else {
res_compare =
- compare_heap_area_without_type(state, area1, area2, snapshot1, snapshot2,
+ compare_heap_area_without_type(state, real_area1_to_compare, real_area2_to_compare, snapshot1, snapshot2,
previous, size, check_ignore);
}
if (res_compare == 1) {
|| (block > state->heapsize1) || (block < 1))
return -1;
- if (heapinfo[block].type == -1) { /* Free block */
+ if (heapinfo[block].type == MMALLOC_TYPE_FREE || heapinfo[block].type == MMALLOC_TYPE_HEAPINFO) { /* Free block */
return -1;
- } else if (heapinfo[block].type == 0) { /* Complete block */
+ } else if (heapinfo[block].type == MMALLOC_TYPE_UNFRAGMENTED) { /* Complete block */
return (int) heapinfo[block].busy_block.busy_size;
} else {
frag =
} else {
- if (state->heapinfo1[i].type == -1) { /* Free block */
+ if (state->heapinfo1[i].type == MMALLOC_TYPE_FREE
+        || state->heapinfo1[i].type == MMALLOC_TYPE_HEAPINFO) { /* Free block */
i++;
continue;
}
- if (state->heapinfo1[i].type == 0) { /* Large block */
+ if (state->heapinfo1[i].type == MMALLOC_TYPE_UNFRAGMENTED) { /* Large block */
if (state->heapinfo1[i].busy_block.size !=
state->heapinfo2[i].busy_block.size) {
int _sg_do_model_check = 0;
int _sg_mc_checkpoint = 0;
int _sg_mc_sparse_checkpoint = 0;
-int _sg_mc_soft_dirty = 1;
+int _sg_mc_soft_dirty = 0;
char *_sg_mc_property_file = NULL;
int _sg_mc_timeout = 0;
int _sg_mc_hash = 0;
pagenos[i] = reference_pages[i];
mc_model_checker->pages->ref_page(reference_pages[i]);
} else {
- // Otherwise, we need to store the page the hard hard
+ // Otherwise, we need to store the page the hard way
// (by reading its content):
void* page = (char*) data + (i << xbt_pagebits);
pagenos[i] = mc_model_checker->pages->store_page(page);
* If possible, the restoration will be incremental
* (the modified pages will not be touched).
*
- * @param data The start of the region (must be at the beginning of a page)
- * @param pag_count Number of pages of the region
+ * @param start_addr Start of the region (must be at the beginning of a page)
+ * @param page_count Number of pages of the region
+ * @param pagenos Array of the snapshot page numbers of the region
 * @param pagemap Linux kernel pagemap values for this region (or NULL)
* @param reference_pages Snapshot page numbers of the previous soft_dirty_reset (or NULL)
*/
-void mc_restore_page_snapshot_region(mc_mem_region_t region, size_t page_count, uint64_t* pagemap, mc_mem_region_t reference_region)
+void mc_restore_page_snapshot_region(void* start_addr, size_t page_count, size_t* pagenos, uint64_t* pagemap, size_t* reference_pagenos)
{
for (size_t i=0; i!=page_count; ++i) {
bool softclean = pagemap && !(pagemap[i] & SOFT_DIRTY);
- if (softclean && reference_region && reference_region->page_numbers[i] == region->page_numbers[i]) {
+ if (softclean && reference_pagenos && pagenos[i] == reference_pagenos[i]) {
// The page is softclean and is the same as the reference one:
// the page is already in the target state.
continue;
}
// Otherwise, copy the page:
- void* target_page = mc_page_from_number(region->start_addr, i);
- const void* source_page = mc_model_checker->pages->get_page(region->page_numbers[i]);
+ void* target_page = mc_page_from_number(start_addr, i);
+ const void* source_page = mc_model_checker->pages->get_page(pagenos[i]);
memcpy(target_page, source_page, xbt_pagesize);
}
}
}
// Incremental per-page snapshot restoration:
- mc_restore_page_snapshot_region(reg, page_count, pagemap, ref_reg);
+ mc_restore_page_snapshot_region(reg->start_addr, page_count, reg->page_numbers,
+ pagemap, ref_reg ? ref_reg->page_numbers : NULL);
// This is funny, the restoration can restore the state of the current heap,
// if this happen free(pagemap) would free from the wrong heap:
return new s_mc_pages_store_t(500);
}
+void mc_pages_store_delete(mc_pages_store_t store)
+{
+ delete store;
+}
+
+}
+
+#ifdef SIMGRID_TEST
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#include <memory>
+
+#include "mc/mc_page_store.h"
+
+static int value = 0;
+
+static void new_content(void* data, size_t size)
+{
+ memset(data, ++value, size);
}
+
+static void* getpage()
+{
+ return mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+}
+
+extern "C" {
+
+XBT_TEST_SUITE("mc_page_store", "Page store");
+
+XBT_TEST_UNIT("base", test_mc_page_store, "Test adding/removing pages in the store")
+{
+ xbt_test_add("Init");
+ size_t pagesize = (size_t) getpagesize();
+ std::auto_ptr<s_mc_pages_store_t> store = std::auto_ptr<s_mc_pages_store_t>(new s_mc_pages_store(500));
+ void* data = getpage();
+ xbt_test_assert(store->size()==0, "Bad size");
+
+ xbt_test_add("Store the page once");
+ new_content(data, pagesize);
+ size_t pageno1 = store->store_page(data);
+ xbt_test_assert(store->get_ref(pageno1)==1, "Bad refcount");
+ const void* copy = store->get_page(pageno1);
+ xbt_test_assert(memcmp(data, copy, pagesize)==0, "Page data should be the same");
+ xbt_test_assert(store->size()==1, "Bad size");
+
+ xbt_test_add("Store the same page again");
+ size_t pageno2 = store->store_page(data);
+ xbt_test_assert(pageno1==pageno2, "Page should be the same");
+ xbt_test_assert(store->get_ref(pageno1)==2, "Bad refcount");
+ xbt_test_assert(store->size()==1, "Bad size");
+
+ xbt_test_add("Store a new page");
+ new_content(data, pagesize);
+ size_t pageno3 = store->store_page(data);
+ xbt_test_assert(pageno1 != pageno3, "New page should be different");
+ xbt_test_assert(store->size()==2, "Bad size");
+
+ xbt_test_add("Unref pages");
+ store->unref_page(pageno1);
+ xbt_test_assert(store->get_ref(pageno1)==1, "Bad refcount");
+ xbt_test_assert(store->size()==2, "Bad size");
+ store->unref_page(pageno2);
+ xbt_test_assert(store->size()==1, "Bad size");
+
+ xbt_test_add("Reallocate page");
+ new_content(data, pagesize);
+ size_t pageno4 = store->store_page(data);
+ xbt_test_assert(pageno1 == pageno4, "Page was not reused");
+ xbt_test_assert(store->get_ref(pageno4)==1, "Bad refcount");
+ xbt_test_assert(store->size()==2, "Bad size");
+}
+
+}
+
+#endif /* SIMGRID_TEST */
typedef struct s_mc_pages_store s_mc_pages_store_t, * mc_pages_store_t;
mc_pages_store_t mc_pages_store_new();
+void mc_pages_store_delete(mc_pages_store_t store);
/**
*/
size_t* mc_take_page_snapshot_region(void* data, size_t page_count, uint64_t* pagemap, size_t* reference_pages);
void mc_free_page_snapshot_region(size_t* pagenos, size_t page_count);
-void mc_restore_page_snapshot_region(mc_mem_region_t region, size_t page_count, uint64_t* pagemap, mc_mem_region_t reference_region);
+void mc_restore_page_snapshot_region(void* start_addr, size_t page_count, size_t* pagenos, uint64_t* pagemap, size_t* reference_pagenos);
mc_mem_region_t mc_region_new_sparse(int type, void *start_addr, size_t size, mc_mem_region_t ref_reg);
+void MC_region_destroy(mc_mem_region_t reg);
void mc_region_restore_sparse(mc_mem_region_t reg, mc_mem_region_t ref_reg);
void mc_softdirty_reset();
void* mc_snapshot_read_fragmented(void* addr, mc_mem_region_t region, void* target, size_t size);
void* mc_snapshot_read(void* addr, mc_snapshot_t snapshot, void* target, size_t size);
-int mc_snapshot_region_memcp(
+int mc_snapshot_region_memcmp(
void* addr1, mc_mem_region_t region1,
void* addr2, mc_mem_region_t region2, size_t size);
-int mc_snapshot_memcp(
+int mc_snapshot_memcmp(
void* addr1, mc_snapshot_t snapshot1,
void* addr2, mc_snapshot_t snapshot2, size_t size);
static inline __attribute__((always_inline))
void* mc_snapshot_read_region(void* addr, mc_mem_region_t region, void* target, size_t size)
{
- uintptr_t offset = (uintptr_t) addr - (uintptr_t) region->start_addr;
+ if (region==NULL)
+ return addr;
- xbt_assert(addr >= region->start_addr && (char*) addr+size < (char*)region->start_addr+region->size,
+ uintptr_t offset = (char*) addr - (char*) region->start_addr;
+
+ xbt_assert(mc_region_contain(region, addr),
"Trying to read out of the region boundary.");
// Linear memory region:
if (region->data) {
- return (void*) ((uintptr_t) region->data + offset);
+ return (char*) region->data + offset;
}
// Fragmented memory region:
else if (region->page_numbers) {
+ // Last byte of the region:
void* end = (char*) addr + size - 1;
if( mc_same_page(addr, end) ) {
// The memory is contained in a single page:
}
}
+static inline __attribute__ ((always_inline))
+void* mc_snapshot_read_pointer_region(void* addr, mc_mem_region_t region)
+{
+ void* res;
+ return *(void**) mc_snapshot_read_region(addr, region, &res, sizeof(void*));
+}
SG_END_DECL()
#include "mc_mmu.h"
#include "mc_page_store.h"
+/** @brief Find the snapshoted region from a pointer
+ *
+ * @param addr Pointer
+ * @param snapshot Snapshot
+ * @return Snapshot region in the snapshot this pointer belongs to
+ * (or NULL if it does not belong to any snapshot region)
+ * */
mc_mem_region_t mc_get_snapshot_region(void* addr, mc_snapshot_t snapshot)
{
for (size_t i = 0; i != NB_REGIONS; ++i) {
*/
void* mc_snapshot_read_fragmented(void* addr, mc_mem_region_t region, void* target, size_t size)
{
+ // Last byte of the memory area:
void* end = (char*) addr + size - 1;
+
+ // Page of the last byte of the memory area:
size_t page_end = mc_page_number(NULL, end);
+
void* dest = target;
+ if (dest==NULL) {
+ xbt_die("Missing destination buffer for fragmented memory access");
+ }
+
// Read each page:
while (mc_page_number(NULL, addr) != page_end) {
void* snapshot_addr = mc_translate_address_region((uintptr_t) addr, region);
* @param snapshot Snapshot (or NULL is no snapshot)
* @param target Buffer to store the value
* @param size Size of the data to read in bytes
- * @return Pointer where the data is located (target buffer of original location)
+ * @return Pointer where the data is located (target buffer or original location)
*/
void* mc_snapshot_read(void* addr, mc_snapshot_t snapshot, void* target, size_t size)
{
* @param snapshot2 Region of the address in the second snapshot
* @return same as memcmp
* */
-int mc_snapshot_region_memcp(
+int mc_snapshot_region_memcmp(
void* addr1, mc_mem_region_t region1,
void* addr2, mc_mem_region_t region2, size_t size)
{
// Using alloca() for large allocations may trigger stack overflow:
// use malloc if the buffer is too big.
-
bool stack_alloc = size < 64;
- void* buffer = stack_alloc ? alloca(2*size) : malloc(2*size);
- void* buffer1 = mc_snapshot_read_region(addr1, region1, buffer, size);
- void* buffer2 = mc_snapshot_read_region(addr2, region2, (char*) buffer + size, size);
+ void* buffer1a = (region1==NULL || region1->data) ? NULL : stack_alloc ? alloca(size) : malloc(size);
+ void* buffer2a = (region2==NULL || region2->data) ? NULL : stack_alloc ? alloca(size) : malloc(size);
+ void* buffer1 = mc_snapshot_read_region(addr1, region1, buffer1a, size);
+ void* buffer2 = mc_snapshot_read_region(addr2, region2, buffer2a, size);
int res;
if (buffer1 == buffer2) {
- res = 0;
+ res = 0;
} else {
res = memcmp(buffer1, buffer2, size);
}
if (!stack_alloc) {
- free(buffer);
+ free(buffer1a);
+ free(buffer2a);
}
return res;
}
* @param snapshot2 Second snapshot
* @return same as memcmp
* */
-int mc_snapshot_memcp(
+int mc_snapshot_memcmp(
void* addr1, mc_snapshot_t snapshot1,
void* addr2, mc_snapshot_t snapshot2, size_t size)
{
mc_mem_region_t region1 = mc_get_snapshot_region(addr1, snapshot1);
mc_mem_region_t region2 = mc_get_snapshot_region(addr2, snapshot2);
- return mc_snapshot_region_memcp(addr1, region1, addr2, region2, size);
+ return mc_snapshot_region_memcmp(addr1, region1, addr2, region2, size);
+}
+
+#ifdef SIMGRID_TEST
+
+#include <string.h>
+#include <stdlib.h>
+
+#include <sys/mman.h>
+
+#include "mc/mc_private.h"
+
+XBT_TEST_SUITE("mc_snapshot", "Snapshots");
+
+static inline void init_memory(void* mem, size_t size)
+{
+ char* dest = (char*) mem;
+ for (int i=0; i!=size; ++i) {
+ dest[i] = rand() & 255;
+ }
+}
+
+static void test_snapshot(bool sparse_checkpoint);
+
+XBT_TEST_UNIT("page_snapshots", test_per_snapshots, "Test per-page snapshots")
+{
+ test_snapshot(1);
+}
+
+
+XBT_TEST_UNIT("flat_snapshot", test_flat_snapshots, "Test flat snapshots")
+{
+ test_snapshot(0);
}
+
+
+static void test_snapshot(bool sparse_checkpoint) {
+
+ xbt_test_add("Initialisation");
+ _sg_mc_soft_dirty = 0;
+ _sg_mc_sparse_checkpoint = sparse_checkpoint;
+ xbt_assert(xbt_pagesize == getpagesize());
+ xbt_assert(1 << xbt_pagebits == xbt_pagesize);
+ mc_model_checker = xbt_new0(s_mc_model_checker_t, 1);
+ mc_model_checker->pages = mc_pages_store_new();
+
+ for(int n=1; n!=256; ++n) {
+
+ // Store region page(s):
+ size_t byte_size = n * xbt_pagesize;
+ void* source = mmap(NULL, byte_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ xbt_assert(source!=MAP_FAILED, "Could not allocate source memory");
+
+ // Init memory and take snapshots:
+ init_memory(source, byte_size);
+ mc_mem_region_t region0 = mc_region_new_sparse(0, source, byte_size, NULL);
+ for(int i=0; i<n; i+=2) {
+ init_memory((char*) source + i*xbt_pagesize, xbt_pagesize);
+ }
+ mc_mem_region_t region = mc_region_new_sparse(0, source, byte_size, NULL);
+
+ void* destination = mmap(NULL, byte_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ xbt_assert(destination!=MAP_FAILED, "Could not allocate destination memory");
+
+ xbt_test_add("Reading whole region data for %i page(s)", n);
+ void* read = mc_snapshot_read_region(source, region, destination, byte_size);
+ xbt_test_assert(!memcmp(source, read, byte_size), "Mismatch in mc_snapshot_read_region()");
+
+ xbt_test_add("Reading parts of region data for %i page(s)", n);
+ for(int j=0; j!=100; ++j) {
+ size_t offset = rand() % byte_size;
+ size_t size = rand() % (byte_size - offset);
+ void* read = mc_snapshot_read_region((char*) source+offset, region, destination, size);
+ xbt_test_assert(!memcmp((char*) source+offset, read, size),
+ "Mismatch in mc_snapshot_read_region()");
+ }
+
+ xbt_test_add("Compare whole region data for %i page(s)", n);
+ xbt_test_assert(!mc_snapshot_region_memcmp(source, NULL, source, region, byte_size),
+ "Mismatch in mc_snapshot_region_memcmp() for the whole region");
+ xbt_test_assert(mc_snapshot_region_memcmp(source, region0, source, region, byte_size),
+ "Unexpected match in mc_snapshot_region_memcmp() with previous snapshot");
+
+ xbt_test_add("Compare parts of region data for %i page(s) with current value", n);
+ for(int j=0; j!=100; ++j) {
+ size_t offset = rand() % byte_size;
+ size_t size = rand() % (byte_size - offset);
+ xbt_test_assert(!mc_snapshot_region_memcmp((char*) source+offset, NULL, (char*) source+offset, region, size),
+ "Mismatch in mc_snapshot_region_memcmp()");
+ }
+
+ xbt_test_add("Compare parts of region data for %i page(s) with itself", n);
+ for(int j=0; j!=100; ++j) {
+ size_t offset = rand() % byte_size;
+ size_t size = rand() % (byte_size - offset);
+ xbt_test_assert(!mc_snapshot_region_memcmp((char*) source+offset, region, (char*) source+offset, region, size),
+ "Mismatch in mc_snapshot_region_memcmp()");
+ }
+
+ if (n==1) {
+ xbt_test_add("Read pointer for %i page(s)", n);
+ memcpy(source, &mc_model_checker, sizeof(void*));
+ mc_mem_region_t region2 = mc_region_new_sparse(0, source, byte_size, NULL);
+ xbt_test_assert(mc_snapshot_read_pointer_region(source, region2) == mc_model_checker,
+ "Mismatch in mc_snapshot_read_pointer_region()");
+ MC_region_destroy(region2);
+ }
+
+ MC_region_destroy(region);
+ MC_region_destroy(region0);
+ munmap(destination, byte_size);
+ munmap(source, byte_size);
+ }
+
+ mc_pages_store_delete(mc_model_checker->pages);
+ xbt_free(mc_model_checker);
+ mc_model_checker = NULL;
+}
+
+#endif /* SIMGRID_TEST */
+
xbt_cfg_register(&_sg_cfg_set, "model-check/soft-dirty",
"Use sparse per-page snapshots.",
xbt_cfgelm_boolean, 1, 1, _mc_cfg_cb_soft_dirty, NULL);
- xbt_cfg_setdefault_boolean(_sg_cfg_set, "model-check/soft-dirty", "yes");
+ xbt_cfg_setdefault_boolean(_sg_cfg_set, "model-check/soft-dirty", "no");
/* do liveness model-checking */
xbt_cfg_register(&_sg_cfg_set, "model-check/property",
type = mdp->heapinfo[block].type;
switch (type) {
- case -1: /* Already free */
+ case MMALLOC_TYPE_HEAPINFO:
UNLOCK(mdp);
- THROWF(system_error, 0, "Asked to free a fragment in a block that is already free. I'm puzzled\n");
+ THROWF(system_error, 0, "Asked to free a fragment in a heapinfo block. I'm confused.\n");
+ break;
+
+ case MMALLOC_TYPE_FREE: /* Already free */
+ UNLOCK(mdp);
+ THROWF(system_error, 0, "Asked to free a fragment in a block that is already free. I'm puzzled.\n");
break;
- case 0:
+ case MMALLOC_TYPE_UNFRAGMENTED:
/* Get as many statistics as early as we can. */
mdp -> heapstats.chunks_used--;
mdp -> heapstats.bytes_used -=
mdp->heapinfo[i].free_block.size += mdp->heapinfo[block].busy_block.size;
/* Mark all my ex-blocks as free */
for (it=0; it<mdp->heapinfo[block].busy_block.size; it++) {
- if (mdp->heapinfo[block+it].type <0) {
+ if (mdp->heapinfo[block+it].type < 0) {
fprintf(stderr,"Internal Error: Asked to free a block already marked as free (block=%lu it=%d type=%lu). Please report this bug.\n",
(unsigned long)block,it,(unsigned long)mdp->heapinfo[block].type);
abort();
}
- mdp->heapinfo[block+it].type = -1;
+ mdp->heapinfo[block+it].type = MMALLOC_TYPE_FREE;
}
block = i;
(unsigned long)block,it,(unsigned long)mdp->heapinfo[block].free_block.size,(unsigned long)mdp->heapinfo[block].type);
abort();
}
- mdp->heapinfo[block+it].type = -1;
+ mdp->heapinfo[block+it].type = MMALLOC_TYPE_FREE;
}
}
break;
default:
+ if (type < 0) {
+ fprintf(stderr, "Unknown mmalloc block type.\n");
+ abort();
+ }
+
/* Do some of the statistics. */
mdp -> heapstats.chunks_used--;
mdp -> heapstats.bytes_used -= 1 << type;
xbt_swag_remove(&mdp->heapinfo[block],&mdp->fraghead[type]);
/* pretend that this block is used and free it so that it gets properly coalesced with adjacent free blocks */
- mdp->heapinfo[block].type = 0;
+ mdp->heapinfo[block].type = MMALLOC_TYPE_UNFRAGMENTED;
mdp->heapinfo[block].busy_block.size = 1;
mdp->heapinfo[block].busy_block.busy_size = 0;
-/* Initialization for access to a mmap'd malloc managed region. */
+/* Initialization for access to a mmap'd malloc managed region. */
/* Copyright (c) 2012-2014. The SimGrid Team.
* All rights reserved. */
int bytes = 0;
while(i<=((struct mdesc *)heap)->heaplimit){
- if(((struct mdesc *)heap)->heapinfo[i].type == 0){
+ if(((struct mdesc *)heap)->heapinfo[i].type == MMALLOC_TYPE_UNFRAGMENTED){
if(((struct mdesc *)heap)->heapinfo[i].busy_block.busy_size > 0)
bytes += ((struct mdesc *)heap)->heapinfo[i].busy_block.busy_size;
- }else if(((struct mdesc *)heap)->heapinfo[i].type > 0){
+ } else if(((struct mdesc *)heap)->heapinfo[i].type > 0){
for(j=0; j < (size_t) (BLOCKSIZE >> ((struct mdesc *)heap)->heapinfo[i].type); j++){
if(((struct mdesc *)heap)->heapinfo[i].busy_frag.frag_size[j] > 0)
bytes += ((struct mdesc *)heap)->heapinfo[i].busy_frag.frag_size[j];
ssize_t mmalloc_get_busy_size(xbt_mheap_t heap, void *ptr){
ssize_t block = ((char*)ptr - (char*)(heap->heapbase)) / BLOCKSIZE + 1;
- if(heap->heapinfo[block].type == -1)
+ if(heap->heapinfo[block].type < 0)
return -1;
- else if(heap->heapinfo[block].type == 0)
+ else if(heap->heapinfo[block].type == MMALLOC_TYPE_UNFRAGMENTED)
return heap->heapinfo[block].busy_block.busy_size;
else{
ssize_t frag = ((uintptr_t) (ADDR2UINT (ptr) % (BLOCKSIZE))) >> heap->heapinfo[block].type;
}
}
+
+void mmcheck(xbt_mheap_t heap) {return;
+ if (!heap->heapinfo)
+ return;
+ malloc_info* heapinfo = NULL;
+ for (size_t i=1; i < heap->heaplimit; i += mmalloc_get_increment(heapinfo)) {
+ heapinfo = heap->heapinfo + i;
+ switch (heapinfo->type) {
+ case MMALLOC_TYPE_HEAPINFO:
+ case MMALLOC_TYPE_FREE:
+ if (heapinfo->free_block.size==0) {
+ xbt_die("Block size == 0");
+ }
+ break;
+ case MMALLOC_TYPE_UNFRAGMENTED:
+ if (heapinfo->busy_block.size==0) {
+ xbt_die("Block size == 0");
+ }
+ if (heapinfo->busy_block.busy_size==0 && heapinfo->busy_block.size!=0) {
+ xbt_die("Empty busy block");
+ }
+ break;
+ default:
+ if (heapinfo->type<0) {
+ xbt_die("Unknown mmalloc block type.");
+ }
+ }
+ }
+}
return (result);
}
+/** Initialise heapinfo about the heapinfo pages :)
+ *
+ */
+static void initialize_heapinfo_heapinfo(xbt_mheap_t mdp)
+{
+ // Update heapinfo about the heapinfo pages (!):
+ xbt_assert((uintptr_t) mdp->heapinfo % BLOCKSIZE == 0);
+ int block = BLOCK(mdp->heapinfo);
+ size_t nblocks = mdp->heapsize * sizeof(malloc_info) / BLOCKSIZE;
+ // Mark them as free:
+ for (size_t j=0; j!=nblocks; ++j) {
+ mdp->heapinfo[block+j].type = MMALLOC_TYPE_FREE;
+ mdp->heapinfo[block+j].free_block.size = 0;
+ mdp->heapinfo[block+j].free_block.next = 0;
+ mdp->heapinfo[block+j].free_block.prev = 0;
+ }
+ mdp->heapinfo[block].free_block.size = nblocks;
+}
+
/* Finish the initialization of the mheap. If we want to inline it
* properly, we need to make the align function publicly visible, too */
static void initialize(xbt_mheap_t mdp)
int i;
malloc_info mi; /* to compute the offset of the swag hook */
+ // Update mdp meta-data:
mdp->heapsize = HEAP / BLOCKSIZE;
mdp->heapinfo = (malloc_info *)
align(mdp, mdp->heapsize * sizeof(malloc_info));
+ mdp->heapbase = (void *) mdp->heapinfo;
+ mdp->flags |= MMALLOC_INITIALIZED;
+ // Update root heapinfo:
memset((void *) mdp->heapinfo, 0, mdp->heapsize * sizeof(malloc_info));
- mdp->heapinfo[0].type=-1;
+ mdp->heapinfo[0].type = MMALLOC_TYPE_FREE;
mdp->heapinfo[0].free_block.size = 0;
mdp->heapinfo[0].free_block.next = mdp->heapinfo[0].free_block.prev = 0;
mdp->heapindex = 0;
- mdp->heapbase = (void *) mdp->heapinfo;
- mdp->flags |= MMALLOC_INITIALIZED;
+
+ initialize_heapinfo_heapinfo(mdp);
for (i=0;i<BLOCKLOG;i++) {
xbt_swag_init(&(mdp->fraghead[i]),
/* Copy old info into new location */
oldinfo = mdp->heapinfo;
newinfo = (malloc_info *) align(mdp, newsize * sizeof(malloc_info));
- memset(newinfo, 0, newsize * sizeof(malloc_info));
memcpy(newinfo, oldinfo, mdp->heapsize * sizeof(malloc_info));
+ /* Initialise the new blockinfo : */
+ memset((char*) newinfo + mdp->heapsize * sizeof(malloc_info), 0,
+ (newsize - mdp->heapsize)* sizeof(malloc_info));
+
/* Update the swag of busy blocks containing free fragments by applying the offset to all swag_hooks. Yeah. My hand is right in the fan and I still type */
size_t offset=((char*)newinfo)-((char*)oldinfo);
/* mark the space previously occupied by the block info as free by first marking it
* as occupied in the regular way, and then freing it */
for (it=0; it<BLOCKIFY(mdp->heapsize * sizeof(malloc_info)); it++){
- newinfo[BLOCK(oldinfo)+it].type = 0;
+ newinfo[BLOCK(oldinfo)+it].type = MMALLOC_TYPE_UNFRAGMENTED;
newinfo[BLOCK(oldinfo)+it].busy_block.ignore = 0;
}
newinfo[BLOCK(oldinfo)].busy_block.busy_size = size;
mfree(mdp, (void *) oldinfo);
mdp->heapsize = newsize;
+
+ initialize_heapinfo_heapinfo(mdp);
}
mdp->heaplimit = BLOCK((char *) result + size);
block = BLOCK(result);
for (it=0;it<blocks;it++){
- mdp->heapinfo[block+it].type = 0;
+ mdp->heapinfo[block+it].type = MMALLOC_TYPE_UNFRAGMENTED;
mdp->heapinfo[block+it].busy_block.busy_size = 0;
mdp->heapinfo[block+it].busy_block.ignore = 0;
+ mdp->heapinfo[block+it].busy_block.size = 0;
}
mdp->heapinfo[block].busy_block.size = blocks;
mdp->heapinfo[block].busy_block.busy_size = requested_size;
}
for (it=0;it<blocks;it++){
- mdp->heapinfo[block+it].type = 0;
+ mdp->heapinfo[block+it].type = MMALLOC_TYPE_UNFRAGMENTED;
mdp->heapinfo[block+it].busy_block.busy_size = 0;
mdp->heapinfo[block+it].busy_block.ignore = 0;
+ mdp->heapinfo[block+it].busy_block.size = 0;
}
mdp->heapinfo[block].busy_block.size = blocks;
- mdp->heapinfo[block].busy_block.busy_size = requested_size;
+ mdp->heapinfo[block].busy_block.busy_size = requested_size;
//mdp->heapinfo[block].busy_block.bt_size = xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
//mdp->heapinfo[block].busy_block.bt_size = xbt_libunwind_backtrace(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
? -1 \
: (MDP) -> fd)
-/* Get core for the memory region specified by MDP, using SIZE as the
- amount to either add to or subtract from the existing region. Works
- like sbrk(), but using mmap().
-
- It never returns NULL. Instead, it dies verbosely on errors. */
-
+/** @brief Add memory to this heap
+ *
+ * Get core for the memory region specified by MDP, using SIZE as the
+ * amount to either add to or subtract from the existing region. Works
+ * like sbrk(), but using mmap().
+ *
+ * It never returns NULL. Instead, it dies verbosely on errors.
+ *
+ * @param mdp The heap
+ * @param size Bytes to allocate for this heap (or <0 to free memory from this heap)
+ */
void *mmorecore(struct mdesc *mdp, ssize_t size)
{
ssize_t test = 0;
void *mapto; /* Address we actually mapped to */
char buf = 0; /* Single byte to write to extend mapped file */
-// fprintf(stderr,"increase %p by %u\n",mdp,size);
-
if (size == 0) {
/* Just return the current "break" value. */
result = mdp->breakval;
}
/* Let's call mmap. Note that it is possible that mdp->top
- is 0. In this case mmap will choose the address for us */
+ is 0. In this case mmap will choose the address for us.
+ This call might very well overwrite an already existing memory mapping
+ (leading to weird bugs).
+ */
mapto = mmap(mdp->top, mapbytes, PROT_READ | PROT_WRITE,
MAP_PRIVATE_OR_SHARED(mdp) | MAP_IS_ANONYMOUS(mdp) |
MAP_FIXED, MAP_ANON_OR_FD(mdp), foffset);
- if (mapto == (void *) -1/* That's MAP_FAILED */) {
+ if (mapto == MAP_FAILED) {
char buff[1024];
fprintf(stderr,"Internal error: mmap returned MAP_FAILED! error: %s\n",strerror(errno));
sprintf(buff,"cat /proc/%d/maps",getpid());
result = (void *) mdp->breakval;
mdp->breakval = (char *) mdp->breakval + size;
} else {
+ /* Memory is already mapped, we only need to increase the breakval: */
result = (void *) mdp->breakval;
mdp->breakval = (char *) mdp->breakval + size;
}
int fragment2;
}s_heap_area_pair_t, *heap_area_pair_t;
+#define MMALLOC_TYPE_HEAPINFO (-2)
+#define MMALLOC_TYPE_FREE (-1)
+#define MMALLOC_TYPE_UNFRAGMENTED 0
+/* >0 values are fragmented blocks */
+
/* Data structure giving per-block information.
*
* There is one such structure in the mdp->heapinfo array per block used in that heap,
};
} malloc_info;
-/* Internal structure that defines the format of the malloc-descriptor.
- This gets written to the base address of the region that mmalloc is
- managing, and thus also becomes the file header for the mapped file,
- if such a file exists. */
-
+/** @brief Descriptor of a mmalloc area
+ *
+ * Internal structure that defines the format of the malloc-descriptor.
+ * This gets written to the base address of the region that mmalloc is
+ * managing, and thus also becomes the file header for the mapped file,
+ * if such a file exists.
+ * */
struct mdesc {
- /* Semaphore locking the access to the heap */
+ /** @brief Semaphore locking the access to the heap */
sem_t sem;
- /* Number of processes that attached the heap */
+ /** @brief Number of processes that attached the heap */
unsigned int refcount;
- /* Chained lists of mdescs */
+ /** @brief Chained lists of mdescs */
struct mdesc *next_mdesc;
- /* The "magic number" for an mmalloc file. */
+ /** @brief The "magic number" for an mmalloc file. */
char magic[MMALLOC_MAGIC_SIZE];
- /* The size in bytes of this structure, used as a sanity check when reusing
- a previously created mapped file. */
+ /** @brief The size in bytes of this structure
+ *
+ * Used as a sanity check when reusing a previously created mapped file.
+ * */
unsigned int headersize;
- /* The version number of the mmalloc package that created this file. */
+ /** @brief Version number of the mmalloc package that created this file. */
unsigned char version;
unsigned int options;
- /* Some flag bits to keep track of various internal things. */
+ /** @brief Some flag bits to keep track of various internal things. */
unsigned int flags;
- /* Number of info entries. */
+ /** @brief Number of info entries. */
size_t heapsize;
- /* Pointer to first block of the heap (base of the first block). */
+ /** @brief Pointer to first block of the heap (base of the first block). */
void *heapbase;
- /* Current search index for the heap table. */
- /* Search index in the info table. */
+ /** @brief Current search index for the heap table.
+ *
+ * Search index in the info table.
+ */
size_t heapindex;
- /* Limit of valid info table indices. */
+ /** @brief Limit of valid info table indices. */
size_t heaplimit;
- /* Block information table. */
- /* Table indexed by block number giving per-block information. */
+ /** @brief Block information table.
+ *
+ * Table indexed by block number giving per-block information.
+ */
malloc_info *heapinfo;
- /* List of all blocks containing free fragments of this size.
+ /* @brief List of all blocks containing free fragments of a given size.
+ *
* The array indice is the log2 of requested size.
* Actually only the sizes 8->11 seem to be used, but who cares? */
s_xbt_swag_t fraghead[BLOCKLOG];
- /* The base address of the memory region for this malloc heap. This
- is the location where the bookkeeping data for mmap and for malloc
- begins. */
-
+ /* @brief Base address of the memory region for this malloc heap
+ *
+ * This is the location where the bookkeeping data for mmap and
+ * for malloc begins.
+ */
void *base;
- /* The current location in the memory region for this malloc heap which
- represents the end of memory in use. */
-
+ /** @brief End of memory in use
+ *
+ * Some memory might be already mapped by the OS but not used
+ * by the heap.
+ * */
void *breakval;
- /* The end of the current memory region for this malloc heap. This is
- the first location past the end of mapped memory.
- Compared to breakval, this value is rounded to the next memory page.
- */
-
+ /** @brief End of the current memory region for this malloc heap.
+ *
+ * This is the first location past the end of mapped memory.
+ *
+ * Compared to breakval, this value is rounded to the next memory page.
+ */
void *top;
- /* Open file descriptor for the file to which this malloc heap is mapped.
- This will always be a valid file descriptor, since /dev/zero is used
- by default if no open file is supplied by the client. Also note that
- it may change each time the region is mapped and unmapped. */
-
+ /** @brief Open file descriptor for the file to which this malloc heap is mapped
+ *
+ * If this value is negative, MAP_ANONYMOUS memory is used.
+ *
+ * Also note that it may change each time the region is mapped and unmapped. */
int fd;
- /* Instrumentation. */
-
+ /* @brief Instrumentation */
struct mstats heapstats;
};
XBT_PUBLIC( void *)__mmalloc_remap_core(xbt_mheap_t mdp);
-/* Get core for the memory region specified by MDP, using SIZE as the
- amount to either add to or subtract from the existing region. Works
- like sbrk(), but using mmap(). */
XBT_PUBLIC( void *)mmorecore(struct mdesc *mdp, ssize_t size);
/* Thread-safety (if the sem is already created)
}
+static inline int mmalloc_get_increment(malloc_info* heapinfo) {
+ if (heapinfo->type < 0) {
+ return heapinfo->free_block.size;
+ } else if (heapinfo->type == 0) {
+ return heapinfo->busy_block.size;
+ } else {
+ return 1;
+ }
+}
+
+void mmcheck(xbt_mheap_t heap);
+
#endif /* __MMPRIVATE_H */
type = mdp->heapinfo[block].type;
switch (type) {
- case -1:
+ case MMALLOC_TYPE_HEAPINFO:
+ fprintf(stderr, "Asked realloc a fragment coming from a heapinfo block. I'm confused.\n");
+ abort();
+ break;
+
+ case MMALLOC_TYPE_FREE:
fprintf(stderr, "Asked realloc a fragment coming from a *free* block. I'm puzzled.\n");
abort();
break;
- case 0:
+ case MMALLOC_TYPE_UNFRAGMENTED:
/* Maybe reallocate a large block to a small fragment. */
if (size <= BLOCKSIZE / 2) { // Full block -> Fragment; no need to optimize for time
/* The new size is smaller; return excess memory to the free list. */
//printf("(%s) return excess memory...",xbt_thread_self_name());
for (it= block+blocks; it< mdp->heapinfo[block].busy_block.size ; it++){
- mdp->heapinfo[it].type = 0; // FIXME that should be useless, type should already be 0 here
+ mdp->heapinfo[it].type = MMALLOC_TYPE_UNFRAGMENTED; // FIXME that should be useless, type should already be 0 here
mdp->heapinfo[it].busy_block.ignore = 0;
+ mdp->heapinfo[it].busy_block.size = 0;
+ mdp->heapinfo[it].busy_block.busy_size = 0;
}
mdp->heapinfo[block + blocks].busy_block.size
default: /* Fragment -> ??; type=logarithm to base two of the fragment size. */
+ if (type < 0) {
+ fprintf(stderr, "Unknown mmalloc block type.\n");
+ abort();
+ }
+
if (size > (size_t) (1 << (type - 1)) && size <= (size_t) (1 << type)) {
/* The new size is the same kind of fragment. */
//printf("(%s) new size is same kind of fragment...",xbt_thread_self_name());
if(HAVE_MC)
set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
-
- add_executable(page_store page_store.cpp)
- target_link_libraries(page_store simgrid)
endif()
set(tesh_files
${tesh_files}
- ${CMAKE_CURRENT_SOURCE_DIR}/page_store.tesh
PARENT_SCOPE
)
set(testsuite_src
${testsuite_src}
- ${CMAKE_CURRENT_SOURCE_DIR}/page_store.cpp
PARENT_SCOPE
)
+++ /dev/null
-#include <string.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <sys/mman.h>
-
-#include "mc/mc_page_store.h"
-
-static int value = 0;
-
-static void new_content(void* data, size_t size)
-{
- memset(data, ++value, size);
-}
-
-static void* getpage()
-{
- return mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-}
-
-int main(int argc, char** argv)
-{
- // Init
- size_t pagesize = (size_t) getpagesize();
- mc_pages_store_t store = new s_mc_pages_store(500);
- void* data = getpage();
-
- // Init:
- xbt_assert(store->size()==0, "Bad size");
-
- // Store the page once:
- new_content(data, pagesize);
- size_t pageno1 = store->store_page(data);
- xbt_assert(store->get_ref(pageno1)==1, "Bad refcount");
- const void* copy = store->get_page(pageno1);
- xbt_assert(memcmp(data, copy, pagesize)==0, "Page data should be the same");
- xbt_assert(store->size()==1, "Bad size");
-
- // Store the same page again:
- size_t pageno2 = store->store_page(data);
- xbt_assert(pageno1==pageno2, "Page should be the same");
- xbt_assert(store->get_ref(pageno1)==2, "Bad refcount");
- xbt_assert(store->size()==1, "Bad size");
-
- // Store a new page:
- new_content(data, pagesize);
- size_t pageno3 = store->store_page(data);
- xbt_assert(pageno1 != pageno3, "New page should be different");
- xbt_assert(store->size()==2, "Bad size");
-
- // Unref pages:
- store->unref_page(pageno1);
- xbt_assert(store->get_ref(pageno1)==1, "Bad refcount");
- xbt_assert(store->size()==2, "Bad size");
- store->unref_page(pageno2);
- xbt_assert(store->size()==1, "Bad size");
-
- // Reallocate page:
- new_content(data, pagesize);
- size_t pageno4 = store->store_page(data);
- xbt_assert(pageno1 == pageno4, "Page was not reused");
- xbt_assert(store->get_ref(pageno4)==1, "Bad refcount");
- xbt_assert(store->size()==2, "Bad size");
-
- return 0;
-}
+++ /dev/null
-#! ./tesh
-
-$ $SG_TEST_EXENV ${bindir:=.}/page_store
$outfile = $infile;
$outfile =~ s/\.c$/_unit.c/;
+ $outfile =~ s/\.cpp$/_unit.cpp/;
$outfile =~ s|.*/([^/]*)$|$1| if $outfile =~ m|/|;
$outfile = "$outdir$outfile";