1 /* Memory allocator `malloc'. */
3 /* Copyright (c) 2010-2015. The SimGrid Team.
4 * All rights reserved. */
6 /* This program is free software; you can redistribute it and/or modify it
7 * under the terms of the license (GNU LGPL) which comes with this package. */
9 /* Copyright 1990, 1991, 1992 Free Software Foundation
11 Written May 1989 by Mike Haertel.
12 Heavily modified Mar 1992 by Fred Fish for mmap'd version. */
14 #include <string.h> /* Prototypes for memcpy, memmove, memset, etc */
16 #include "mmprivate.h"
18 /* Prototypes for local functions */
20 static void initialize(xbt_mheap_t mdp);
21 static void *register_morecore(xbt_mheap_t mdp, size_t size);
22 static void *align(xbt_mheap_t mdp, size_t size);
24 /* Allocation aligned on block boundary.
26  * It never returns NULL, but dies verbosely on error.
28 static void *align(struct mdesc *mdp, size_t size)
31   unsigned long int adj;
   /* Ask the low-level layer for `size' raw bytes; per the contract above,
    * mmorecore never returns NULL here (it dies verbosely on error). */
33   result = mmorecore(mdp, size);
35   /* if this reservation does not fill up the last block of our resa,
36    * complete the reservation by also asking for the full latest block.
38    * Also, the returned block is aligned to the end of block (the historical
39    * reason for this is unclear -- http://abstrusegoose.com/432 --
40    * but not doing so seems to lead to issues).
   /* Number of bytes by which `result' overshoots the previous BLOCKSIZE
    * boundary; 0 means it is already block-aligned. */
42   adj = RESIDUAL(result, BLOCKSIZE);
   /* NOTE(review): the guard (presumably `if (adj != 0)') and the extra
    * mmorecore call completing the partial block are not visible in this
    * extract -- confirm against the full file before relying on this path. */
44     adj = BLOCKSIZE - adj;
46     result = (char *) result + adj;
51 /** Initialise heapinfo about the heapinfo pages :)
    *
    * The heapinfo table itself lives inside the heap, so the blocks it
    * occupies need their own entries. They are recorded as one free run
    * covering the whole table.
54 static void initialize_heapinfo_heapinfo(xbt_mheap_t mdp)
56   // Update heapinfo about the heapinfo pages (!):
   /* The table must start on a block boundary for BLOCK() to be meaningful. */
57   xbt_assert((uintptr_t) mdp->heapinfo % BLOCKSIZE == 0);
58   int block = BLOCK(mdp->heapinfo);
   /* NOTE(review): this assumes heapsize*sizeof(malloc_info) is an exact
    * multiple of BLOCKSIZE (any tail is dropped by the integer division). */
59   size_t nblocks = mdp->heapsize * sizeof(malloc_info) / BLOCKSIZE;
   /* Mark every block of the table as free with cleared free-list links... */
61   for (size_t j=0; j!=nblocks; ++j) {
62     mdp->heapinfo[block+j].type = MMALLOC_TYPE_FREE;
63     mdp->heapinfo[block+j].free_block.size = 0;
64     mdp->heapinfo[block+j].free_block.next = 0;
65     mdp->heapinfo[block+j].free_block.prev = 0;
   /* ...and record the run length on its first block only. */
67   mdp->heapinfo[block].free_block.size = nblocks;
70 /* Finish the initialization of the mheap. If we want to inline it
71  * properly, we need to make the align function publicly visible, too
 *
 * Called lazily from mmalloc_no_memset the first time the heap is used:
 * sizes the heapinfo table, carves it out of the heap via align(), marks
 * the heap initialized, and sets up the per-size fragment swags. */
72 static void initialize(xbt_mheap_t mdp)
75   malloc_info mi;     /* to compute the offset of the swag hook */
77   // Update mdp meta-data:
78   mdp->heapsize = HEAP / BLOCKSIZE;
   /* One malloc_info per block; align() never returns NULL (dies on error). */
79   mdp->heapinfo = (malloc_info *)
80     align(mdp, mdp->heapsize * sizeof(malloc_info));
81   mdp->heapbase = (void *) mdp->heapinfo;
82   mdp->flags |= MMALLOC_INITIALIZED;
84   // Update root heapinfo:
85   memset((void *) mdp->heapinfo, 0, mdp->heapsize * sizeof(malloc_info));
   /* Entry 0 is the anchor of the circular free list: size 0, linked to itself. */
86   mdp->heapinfo[0].type = MMALLOC_TYPE_FREE;
87   mdp->heapinfo[0].free_block.size = 0;
88   mdp->heapinfo[0].free_block.next = mdp->heapinfo[0].free_block.prev = 0;
   /* Give the heapinfo table's own blocks proper entries (see above). */
91   initialize_heapinfo_heapinfo(mdp);
   /* One swag per fragment size class (powers of two up to BLOCKSIZE/2). */
93   for (i=0;i<BLOCKLOG;i++) {
94     xbt_swag_init(&(mdp->fraghead[i]),
95                   xbt_swag_offset(mi, freehook));
/* Shift a swag hook pointer `a' by `offset' bytes, leaving NULL untouched.
 * Used after the heapinfo table is relocated (realloc'd elsewhere) to patch
 * every intra-table pointer so the swag links stay valid. Multiple-evaluation
 * of `a' is fine here: all call sites pass plain lvalues. */
99 #define update_hook(a,offset) do { if (a) { a = ((char*)a +(offset));} }while(0)
101 /* Get neatly aligned memory from the low level layers, and register it
102  * into the heap info table as necessary.
  *
  * Grows the heapinfo table itself (multiplicatively) when the new core
  * pushes past the blocks the table can describe; in that case the table is
  * relocated, all swag hooks are rebased, and the old table's blocks are
  * handed back to the allocator via mfree(). */
103 static void *register_morecore(struct mdesc *mdp, size_t size)
107   malloc_info *newinfo, *oldinfo;
110   result = align(mdp, size);   // Never returns NULL
112   /* Check if we need to grow the info table (in a multiplicative manner) */
113   if ((size_t) BLOCK((char *) result + size) > mdp->heapsize) {
116     newsize = mdp->heapsize;
    /* NOTE(review): the loop body (presumably `newsize *= 2;') is not visible
     * in this extract -- confirm against the full file. */
117     while ((size_t) BLOCK((char *) result + size) > newsize)
120     /* Copy old info into new location */
121     oldinfo = mdp->heapinfo;
122     newinfo = (malloc_info *) align(mdp, newsize * sizeof(malloc_info));
123     memcpy(newinfo, oldinfo, mdp->heapsize * sizeof(malloc_info));
125     /* Initialise the new blockinfo : */
126     memset((char*) newinfo + mdp->heapsize * sizeof(malloc_info), 0,
127            (newsize - mdp->heapsize)* sizeof(malloc_info));
129     /* Update the swag of busy blocks containing free fragments by applying the offset to all swag_hooks. */
130     size_t offset=((char*)newinfo)-((char*)oldinfo);
    /* Rebase every free-fragment hook stored inside the (moved) table. */
132     for (i=1/*first element of heapinfo describes the mdesc area*/;
135       update_hook(newinfo[i].freehook.next,offset);
136       update_hook(newinfo[i].freehook.prev,offset);
138     // also update the starting points of the swag
139     for (i=0;i<BLOCKLOG;i++) {
140       update_hook(mdp->fraghead[i].head,offset);
141       update_hook(mdp->fraghead[i].tail,offset);
143     mdp->heapinfo = newinfo;
145     /* mark the space previously occupied by the block info as free by first marking it
146      * as occupied in the regular way, and then freeing it */
147     for (it=0; it<BLOCKIFY(mdp->heapsize * sizeof(malloc_info)); it++){
148       newinfo[BLOCK(oldinfo)+it].type = MMALLOC_TYPE_UNFRAGMENTED;
149       newinfo[BLOCK(oldinfo)+it].busy_block.ignore = 0;
152     newinfo[BLOCK(oldinfo)].busy_block.size = BLOCKIFY(mdp->heapsize * sizeof(malloc_info));
153     newinfo[BLOCK(oldinfo)].busy_block.busy_size = size;
154     mfree(mdp, (void *) oldinfo);
155     mdp->heapsize = newsize;
    /* The relocated table needs its own heapinfo entries refreshed too. */
157     initialize_heapinfo_heapinfo(mdp);
  /* The heap now extends at least to the end of the new core. */
160   mdp->heaplimit = BLOCK((char *) result + size);
165 /* Allocate memory from the heap.
  *
  * Thin wrapper: delegates to mmalloc_no_memset and, when the heap was
  * created with XBT_MHEAP_OPTION_MEMSET, zeroes the returned area.
  * NOTE(review): the body of the if (presumably a memset) and the
  * `return res;' are not visible in this extract. */
166 void *mmalloc(xbt_mheap_t mdp, size_t size) {
167   void *res= mmalloc_no_memset(mdp,size);
168   if (mdp->options & XBT_MHEAP_OPTION_MEMSET) {
173 /* Splitting mmalloc this way is mandated by a trick in mrealloc, that gives
174 back the memory of big blocks to the system before reallocating them: we don't
175 want to lose the beginning of the area when this happens
 *
 * Core allocation routine. Two policies depending on the request size:
 *  - size <= BLOCKSIZE/2: fragment allocation -- carve a power-of-two
 *    fragment out of a block shared with same-sized fragments;
 *  - larger: whole-block allocation -- walk the circular free list, splitting
 *    or removing an entry, growing the heap via register_morecore if needed.
 * Never returns NULL (the underlying core functions die on error). */
176 void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
179   size_t block, blocks, lastblocks, start;
184   size_t requested_size = size; // The amount of memory requested by user, for real
186   /* Work even if the user was stupid enough to ask a ridiculously small block (even 0-length),
187    * ie return a valid block that can be realloced and freed.
188    * glibc malloc does not use this trick but return a constant pointer, but we need to enlist the free fragments later on.
190   if (size < SMALLEST_POSSIBLE_MALLOC)
191     size = SMALLEST_POSSIBLE_MALLOC;
193   //  printf("(%s) Mallocing %d bytes on %p (default: %p)...",xbt_thread_self_name(),size,mdp,__mmalloc_default_mdp);fflush(stdout);
  /* Lazy one-time heap setup.
   * NOTE(review): the call (presumably `initialize(mdp);') is not visible
   * in this extract. */
195   if (!(mdp->flags & MMALLOC_INITIALIZED))
198   mmalloc_paranoia(mdp);
200   /* Determine the allocation policy based on the request size. */
201   if (size <= BLOCKSIZE / 2) {
202     /* Small allocation to receive a fragment of a block.
203        Determine the logarithm to base two of the fragment size. */
    /* NOTE(review): `log' starts from some base value not visible here and is
     * incremented in this loop; size is rounded up to the next power of two. */
206     while ((size /= 2) != 0) {
210     /* Look in the fragment lists for a free fragment of the desired size. */
211     if (xbt_swag_size(&mdp->fraghead[log])>0) {
212       /* There are free fragments of this size; Get one of them and prepare to return it.
213          Update the block's nfree and if no other free fragment, get out of the swag. */
215       /* search a fragment that I could return as a result */
216       malloc_info *candidate_info = xbt_swag_getFirst(&mdp->fraghead[log]);
      /* Pointer arithmetic on the table yields the block number. */
217       size_t candidate_block = (candidate_info - &(mdp->heapinfo[0]));
218       size_t candidate_frag;
      /* frag_size == -1 marks a free fragment inside this busy block. */
219       for (candidate_frag=0;candidate_frag<(size_t) (BLOCKSIZE >> log);candidate_frag++)
220         if (candidate_info->busy_frag.frag_size[candidate_frag] == -1)
222       xbt_assert(candidate_frag < (size_t) (BLOCKSIZE >> log),
223                  "Block %zu was registered as containing free fragments of type %zu, but I can't find any",candidate_block,log);
225       result = (void*) (((char*)ADDRESS(candidate_block)) + (candidate_frag << log));
227       /* Remove this fragment from the list of free guys */
228       candidate_info->busy_frag.nfree--;
229       if (candidate_info->busy_frag.nfree == 0) {
230         xbt_swag_remove(candidate_info,&mdp->fraghead[log]);
233       /* Update our metadata about this fragment */
234       candidate_info->busy_frag.frag_size[candidate_frag] = requested_size;
235       candidate_info->busy_frag.ignore[candidate_frag] = 0;
236       //xbt_backtrace_no_malloc(candidate_info->busy_frag.bt[candidate_frag],XBT_BACKTRACE_SIZE);
237       //xbt_libunwind_backtrace(candidate_info->busy_frag.bt[candidate_frag],XBT_BACKTRACE_SIZE);
239       /* Update the statistics. */
240       mdp -> heapstats.chunks_used++;
241       mdp -> heapstats.bytes_used += 1 << log;
242       mdp -> heapstats.chunks_free--;
243       mdp -> heapstats.bytes_free -= 1 << log;
246       /* No free fragments of the desired size, so get a new block
247          and break it into fragments, returning the first. */
      /* Recursive call: BLOCKSIZE > BLOCKSIZE/2, so this takes the
       * large-allocation path below and cannot recurse again. */
249       result = mmalloc(mdp, BLOCKSIZE); // does not return NULL
250       block = BLOCK(result);
      /* type == log marks the block as fragmented into 2^log-byte pieces. */
252       mdp->heapinfo[block].type = log;
253       /* Link all fragments but the first as free, and add the block to the swag of blocks containing free frags  */
254       for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i) {
255         mdp->heapinfo[block].busy_frag.frag_size[i] = -1;
256         mdp->heapinfo[block].busy_frag.ignore[i] = 0;
258       mdp->heapinfo[block].busy_frag.nfree = i - 1;
259       mdp->heapinfo[block].freehook.prev = NULL;
260       mdp->heapinfo[block].freehook.next = NULL;
262       xbt_swag_insert(&mdp->heapinfo[block], &(mdp->fraghead[log]));
264       /* mark the fragment returned as busy */
265       mdp->heapinfo[block].busy_frag.frag_size[0] = requested_size;
266       mdp->heapinfo[block].busy_frag.ignore[0] = 0;
267       //xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_frag.bt[0],XBT_BACKTRACE_SIZE);
268       //xbt_libunwind_backtrace(mdp->heapinfo[block].busy_frag.bt[0],XBT_BACKTRACE_SIZE);
      /* The recursive mmalloc above already counted the whole block as used;
       * correct the stats so only the first fragment counts as busy. */
271       mdp -> heapstats.chunks_free += (BLOCKSIZE >> log) - 1;
272       mdp -> heapstats.bytes_free += BLOCKSIZE - (1 << log);
273       mdp -> heapstats.bytes_used -= BLOCKSIZE - (1 << log);
276     /* Large allocation to receive one or more blocks.
277        Search the free list in a circle starting at the last place visited.
278        If we loop completely around without finding a large enough
279        space we will have to get more memory from the system.  */
280     blocks = BLOCKIFY(size);
281     start = block = MALLOC_SEARCH_START;
282     while (mdp->heapinfo[block].free_block.size < blocks) {
      /* Free blocks must have a negative type tag; a non-negative one means
       * the free list is corrupted. Report by hand: higher-level error
       * machinery may itself call malloc and recurse. */
283       if (mdp->heapinfo[block].type >=0) { // Don't trust xbt_die and friends in malloc-level library, you fool!
284         fprintf(stderr,"Internal error: found a free block not marked as such (block=%lu type=%lu). Please report this bug.\n",(unsigned long)block,(unsigned long)mdp->heapinfo[block].type);
288       block = mdp->heapinfo[block].free_block.next;
      /* Wrapped around the circular list: nothing big enough. */
289       if (block == start) {
290         /* Need to get more from the system.  Check to see if
291            the new core will be contiguous with the final free
292            block; if so we don't need to get as much.  */
293         block = mdp->heapinfo[0].free_block.prev;
294         lastblocks = mdp->heapinfo[block].free_block.size;
        /* mmorecore(mdp, 0) returns the current break without allocating. */
295         if (mdp->heaplimit != 0 &&
296             block + lastblocks == mdp->heaplimit &&
297             mmorecore(mdp, 0) == ADDRESS(block + lastblocks) &&
298             (register_morecore(mdp, (blocks - lastblocks) * BLOCKSIZE)) != NULL) {
299           /* Which block we are extending (the `final free
300              block' referred to above) might have changed, if
301              it got combined with a freed info table.  */
302           block = mdp->heapinfo[0].free_block.prev;
304           mdp->heapinfo[block].free_block.size += (blocks - lastblocks);
        /* Non-contiguous case: get entirely fresh core and return it directly,
         * bypassing the free list. */
307         result = register_morecore(mdp, blocks * BLOCKSIZE);
309         block = BLOCK(result);
310         for (it=0;it<blocks;it++){
311           mdp->heapinfo[block+it].type = MMALLOC_TYPE_UNFRAGMENTED;
312           mdp->heapinfo[block+it].busy_block.busy_size = 0;
313           mdp->heapinfo[block+it].busy_block.ignore = 0;
314           mdp->heapinfo[block+it].busy_block.size = 0;
        /* Run length on the first block; busy_size keeps the user's request. */
316         mdp->heapinfo[block].busy_block.size = blocks;
317         mdp->heapinfo[block].busy_block.busy_size = requested_size;
318         //mdp->heapinfo[block].busy_block.bt_size=xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
319         //mdp->heapinfo[block].busy_block.bt_size = xbt_libunwind_backtrace(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
320         mdp -> heapstats.chunks_used++;
321         mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
325     /* Need large block(s), but found some in the existing heap */
328     /* At this point we have found a suitable free list entry.
329        Figure out how to remove what we need from the list. */
330     result = ADDRESS(block);
331     if (mdp->heapinfo[block].free_block.size > blocks) {
332       /* The block we found has a bit left over,
333          so relink the tail end back into the free list. */
334       mdp->heapinfo[block + blocks].free_block.size
335         = mdp->heapinfo[block].free_block.size - blocks;
336       mdp->heapinfo[block + blocks].free_block.next
337         = mdp->heapinfo[block].free_block.next;
338       mdp->heapinfo[block + blocks].free_block.prev
339         = mdp->heapinfo[block].free_block.prev;
      /* Repoint neighbours at the shortened remainder and remember it as the
       * next search start (mdp->heapindex). */
340       mdp->heapinfo[mdp->heapinfo[block].free_block.prev].free_block.next
341         = mdp->heapinfo[mdp->heapinfo[block].free_block.next].free_block.prev
342         = mdp->heapindex = block + blocks;
344       /* The block exactly matches our requirements,
345          so just remove it from the list. */
346       mdp->heapinfo[mdp->heapinfo[block].free_block.next].free_block.prev
347         = mdp->heapinfo[block].free_block.prev;
348       mdp->heapinfo[mdp->heapinfo[block].free_block.prev].free_block.next
349         = mdp->heapindex = mdp->heapinfo[block].free_block.next;
    /* Mark every block of the run busy; only the first carries the sizes. */
352     for (it=0;it<blocks;it++){
353       mdp->heapinfo[block+it].type = MMALLOC_TYPE_UNFRAGMENTED;
354       mdp->heapinfo[block+it].busy_block.busy_size = 0;
355       mdp->heapinfo[block+it].busy_block.ignore = 0;
356       mdp->heapinfo[block+it].busy_block.size = 0;
358     mdp->heapinfo[block].busy_block.size = blocks;
359     mdp->heapinfo[block].busy_block.busy_size = requested_size;
360     //mdp->heapinfo[block].busy_block.bt_size = xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
361     //mdp->heapinfo[block].busy_block.bt_size = xbt_libunwind_backtrace(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
363     mdp -> heapstats.chunks_used++;
364     mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
365     mdp -> heapstats.bytes_free -= blocks * BLOCKSIZE;
368   //printf("(%s) Done mallocing. Result is %p\n",xbt_thread_self_name(),result);fflush(stdout);