X-Git-Url: https://git.kernelconcepts.de/?a=blobdiff_plain;f=common%2Fdlmalloc.c;h=3c70d5dedefad75cbf1f81cb6cbf0a004c0f113e;hb=e6bf18dba2a21bebf2c421b1c2e188225f6485a1;hp=0c0487228e500aaadb021352ad8eb9027949e897;hpb=8bde7f776c77b343aca29b8c7b58464d915ac245;p=karo-tx-uboot.git

diff --git a/common/dlmalloc.c b/common/dlmalloc.c
index 0c0487228e..3c70d5dede 100644
--- a/common/dlmalloc.c
+++ b/common/dlmalloc.c
@@ -1,3 +1,5 @@
+#include <common.h>
+
 #if 0 /* Moved to malloc.h */
 /* ---------- To make a malloc.h, start cutting here ------------ */
 
@@ -199,7 +201,7 @@
   MORECORE_FAILURE          (default: -1)
      The value returned upon failure of MORECORE.
   MORECORE_CLEARS           (default 1)
-     True (1) if the routine mapped to MORECORE zeroes out memory (which
+     true (1) if the routine mapped to MORECORE zeroes out memory (which
      holds for sbrk).
   DEFAULT_TRIM_THRESHOLD
   DEFAULT_TOP_PAD
@@ -220,7 +222,6 @@
 
 
 
-
 /* Preliminaries */
 
 #ifndef __STD_C
@@ -285,13 +286,6 @@ extern "C" {
 
 */
 
-#ifdef DEBUG
-#include <assert.h>
-#else
-#define assert(x) ((void)0)
-#endif
-
-
 /*
   INTERNAL_SIZE_T is the word-size used for internal bookkeeping
   of chunk sizes. On a 64-bit machine, you can reduce malloc
@@ -933,10 +927,10 @@ struct mallinfo mALLINFo();
 #endif
 
 /* ---------- To make a malloc.h, end cutting here ------------ */
-#else /* Moved to malloc.h */
+#endif /* 0 */ /* Moved to malloc.h */
 
 #include <malloc.h>
-#if 0
+#ifdef DEBUG
 #if __STD_C
 static void malloc_update_mallinfo (void);
 void malloc_stats (void);
@@ -944,10 +938,9 @@ void malloc_stats (void);
 static void malloc_update_mallinfo ();
 void malloc_stats();
 #endif
-#endif /* 0 */
+#endif /* DEBUG */
 
-#endif /* 0 */ /* Moved to malloc.h */
-#include <common.h>
+DECLARE_GLOBAL_DATA_PTR;
 
 /*
   Emulation of sbrk for WIN32
@@ -1152,7 +1145,7 @@ struct malloc_chunk
   INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
   struct malloc_chunk* fd;   /* double links -- used only if free. */
   struct malloc_chunk* bk;
-};
+} __attribute__((__may_alias__)) ;
 
 typedef struct malloc_chunk* mchunkptr;
 
@@ -1454,7 +1447,7 @@ typedef struct malloc_chunk* mbinptr;
    indexing, maintain locality, and avoid some initialization tests.
 */
 
-#define top (bin_at(0)->fd)   /* The topmost chunk */
+#define top (av_[2])          /* The topmost chunk */
 
 #define last_remainder (bin_at(1))       /* remainder from last split */
 
@@ -1472,7 +1465,7 @@ typedef struct malloc_chunk* mbinptr;
 #define IAV(i) bin_at(i), bin_at(i)
 
 static mbinptr av_[NAV * 2 + 2] = {
- 0, 0,
+ NULL, NULL,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
@@ -1491,17 +1484,53 @@ static mbinptr av_[NAV * 2 + 2] = {
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
 };
 
-void malloc_bin_reloc (void)
+#ifdef CONFIG_NEEDS_MANUAL_RELOC
+static void malloc_bin_reloc(void)
 {
-	DECLARE_GLOBAL_DATA_PTR;
+	mbinptr *p = &av_[2];
+	size_t i;
 
-	unsigned long *p = (unsigned long *)(&av_[2]);
-	int i;
-	for (i=2; i<(sizeof(av_)/sizeof(mbinptr)); ++i) {
-		*p++ += gd->reloc_off;
-	}
+	for (i = 2; i < ARRAY_SIZE(av_); ++i, ++p)
+		*p = (mbinptr)((ulong)*p + gd->reloc_off);
+}
+#else
+static inline void malloc_bin_reloc(void) {}
+#endif
+
+ulong mem_malloc_start = 0;
+ulong mem_malloc_end = 0;
+ulong mem_malloc_brk = 0;
+
+void *sbrk(ptrdiff_t increment)
+{
+	ulong old = mem_malloc_brk;
+	ulong new = old + increment;
+
+	/*
+	 * if we are giving memory back make sure we clear it out since
+	 * we set MORECORE_CLEARS to 1
+	 */
+	if (increment < 0)
+		memset((void *)new, 0, -increment);
+
+	if ((new < mem_malloc_start) || (new > mem_malloc_end))
+		return (void *)MORECORE_FAILURE;
+
+	mem_malloc_brk = new;
+
+	return (void *)old;
+}
+
+void mem_malloc_init(ulong start, ulong size)
+{
+	mem_malloc_start = start;
+	mem_malloc_end = start + size;
+	mem_malloc_brk = start;
+
+	memset((void *)mem_malloc_start, 0, size);
+
+	malloc_bin_reloc();
 }
-
 
 
 /* field-extraction macros */
@@ -1551,13 +1580,14 @@ void malloc_bin_reloc (void)
 
 #define BINBLOCKWIDTH 4 /* bins per block */
 
-#define binblocks (bin_at(0)->size) /* bitvector of nonempty blocks */
+#define binblocks_r ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */
+#define binblocks_w (av_[1])
 
 /* bin<->block macros */
 
 #define idx2binblock(ix)  ((unsigned)1 << (ix / BINBLOCKWIDTH))
-#define mark_binblock(ii)  (binblocks |= idx2binblock(ii))
-#define clear_binblock(ii) (binblocks &= ~(idx2binblock(ii)))
+#define mark_binblock(ii)  (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
+#define clear_binblock(ii) (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))
 
 
 
@@ -1589,9 +1619,9 @@ static struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
 
 /* Tracking mmaps */
 
-#if 0
+#ifdef DEBUG
 static unsigned int n_mmaps = 0;
-#endif /* 0 */
+#endif /* DEBUG */
 static unsigned long mmapped_mem = 0;
 #if HAVE_MMAP
 static unsigned int max_n_mmaps = 0;
@@ -1621,9 +1651,7 @@ static void do_check_chunk(mchunkptr p)
 static void do_check_chunk(p) mchunkptr p;
 #endif
 {
-#if 0 /* causes warnings because assert() is off */
   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
-#endif /* 0 */
 
   /* No checkable chunk is mmapped */
   assert(!chunk_is_mmapped(p));
@@ -1645,9 +1673,7 @@ static void do_check_free_chunk(p) mchunkptr p;
 #endif
 {
   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
-#if 0 /* causes warnings because assert() is off */
   mchunkptr next = chunk_at_offset(p, sz);
-#endif /* 0 */
 
   do_check_chunk(p);
 
@@ -1711,10 +1737,8 @@ static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
 static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
 #endif
 {
-#if 0 /* causes warnings because assert() is off */
   INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
   long room = sz - s;
-#endif /* 0 */
 
   do_check_inuse_chunk(p);
 
@@ -2150,7 +2174,13 @@ Void_t* mALLOc(bytes) size_t bytes;
 
   INTERNAL_SIZE_T nb;
 
-  if ((long)bytes < 0) return 0;
+  /* check if mem_malloc_init() was run */
+  if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) {
+	/* not initialized yet */
+	return NULL;
+  }
+
+  if ((long)bytes < 0) return NULL;
 
   nb = request2size(bytes);  /* padded request size; */
 
@@ -2249,17 +2279,17 @@ Void_t* mALLOc(bytes) size_t bytes;
      search for best fitting chunk by scanning bins in blockwidth units.
   */
 
-  if ( (block = idx2binblock(idx)) <= binblocks)
+  if ( (block = idx2binblock(idx)) <= binblocks_r)
   {
 
     /* Get to the first marked block */
 
-    if ( (block & binblocks) == 0)
+    if ( (block & binblocks_r) == 0)
     {
       /* force to an even block boundary */
       idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
       block <<= 1;
-      while ((block & binblocks) == 0)
+      while ((block & binblocks_r) == 0)
       {
         idx += BINBLOCKWIDTH;
         block <<= 1;
@@ -2314,7 +2344,7 @@ Void_t* mALLOc(bytes) size_t bytes;
       {
         if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
         {
-          binblocks &= ~block;
+          av_[1] = (mbinptr)(binblocks_r & ~block);
           break;
         }
         --startidx;
@@ -2323,9 +2353,9 @@ Void_t* mALLOc(bytes) size_t bytes;
 
       /* Get to the next possibly nonempty block */
 
-      if ( (block <<= 1) <= binblocks && (block != 0) )
+      if ( (block <<= 1) <= binblocks_r && (block != 0) )
       {
-        while ((block & binblocks) == 0)
+        while ((block & binblocks_r) == 0)
         {
           idx += BINBLOCKWIDTH;
           block <<= 1;
@@ -2353,7 +2383,7 @@ Void_t* mALLOc(bytes) size_t bytes;
     /* Try to extend */
     malloc_extend_top(nb);
     if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
-      return 0; /* propagate failure */
+      return NULL; /* propagate failure */
 
 
   victim = top;
@@ -2407,7 +2437,7 @@ void fREe(mem) Void_t* mem;
   mchunkptr fwd;       /* misc temp for linking */
   int islr;            /* track whether merging with last_remainder */
 
-  if (mem == 0)        /* free(0) has no effect */
+  if (mem == NULL)     /* free(0) has no effect */
     return;
 
   p = mem2chunk(mem);
@@ -2553,10 +2583,10 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
   if (bytes == 0) { fREe(oldmem); return 0; }
 #endif
 
-  if ((long)bytes < 0) return 0;
+  if ((long)bytes < 0) return NULL;
 
   /* realloc of null is supposed to be same as malloc */
-  if (oldmem == 0) return mALLOc(bytes);
+  if (oldmem == NULL) return mALLOc(bytes);
 
   newp = oldp = mem2chunk(oldmem);
   newsize = oldsize = chunksize(oldp);
@@ -2617,7 +2647,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
   }
   else
   {
-    next = 0;
+    next = NULL;
     nextsize = 0;
   }
 
@@ -2630,7 +2660,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
 
   /* try forward + backward first to save a later consolidation */
 
-  if (next != 0)
+  if (next != NULL)
   {
     /* into top */
     if (next == top)
@@ -2663,7 +2693,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
     }
 
     /* backward only */
-    if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
+    if (prev != NULL && (long)(prevsize + newsize) >= (long)nb)
     {
       unlink(prev, bck, fwd);
       newp = prev;
@@ -2678,8 +2708,8 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
 
   newmem = mALLOc (bytes);
 
-  if (newmem == 0)  /* propagate failure */
-    return 0;
+  if (newmem == NULL) /* propagate failure */
+    return NULL;
 
   /* Avoid copy if newp is next chunk after oldp. */
   /* (This can only happen when new chunk is sbrk'ed.) */
@@ -2757,7 +2787,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
   mchunkptr remainder;      /* spare room at end to split off */
   long      remainder_size; /* its size */
 
-  if ((long)bytes < 0) return 0;
+  if ((long)bytes < 0) return NULL;
 
   /* If need less alignment than we give anyway, just relay to malloc */
 
@@ -2772,7 +2802,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
   nb = request2size(bytes);
   m = (char*)(mALLOc(nb + alignment + MINSIZE));
 
-  if (m == 0) return 0; /* propagate failure */
+  if (m == NULL) return NULL; /* propagate failure */
 
   p = mem2chunk(m);
 
@@ -2897,10 +2927,10 @@ Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
 #endif
 
   Void_t* mem = mALLOc (sz);
-  if ((long)n < 0) return 0;
+  if ((long)n < 0) return NULL;
 
-  if (mem == 0)
-    return 0;
+  if (mem == NULL)
+    return NULL;
   else
   {
     p = mem2chunk(mem);
@@ -3046,7 +3076,7 @@ size_t malloc_usable_size(mem) Void_t* mem;
 #endif
 {
   mchunkptr p;
-  if (mem == 0)
+  if (mem == NULL)
     return 0;
   else
   {
@@ -3066,7 +3096,7 @@ size_t malloc_usable_size(mem) Void_t* mem;
 
 /* Utility to update current_mallinfo for malloc_stats and mallinfo() */
 
-#if 0
+#ifdef DEBUG
 static void malloc_update_mallinfo()
 {
   int i;
@@ -3104,7 +3134,7 @@ static void malloc_update_mallinfo()
   current_mallinfo.keepcost = chunksize(top);
 
 }
-#endif /* 0 */
+#endif /* DEBUG */
 
 
 
@@ -3123,7 +3153,7 @@ static void malloc_update_mallinfo()
 
 */
 
-#if 0
+#ifdef DEBUG
 void malloc_stats()
 {
   malloc_update_mallinfo();
@@ -3138,19 +3168,19 @@ void malloc_stats()
           (unsigned int)max_n_mmaps);
 #endif
 }
-#endif /* 0 */
+#endif /* DEBUG */
 
 
 
 /*
   mallinfo returns a copy of updated current mallinfo.
 */
 
-#if 0
+#ifdef DEBUG
 struct mallinfo mALLINFo()
 {
   malloc_update_mallinfo();
   return current_mallinfo;
 }
-#endif /* 0 */
+#endif /* DEBUG */
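
Note (not part of the patch): dlmalloc obtains memory through MORECORE, which U-Boot maps onto the sbrk() added above. mem_malloc_init() pins that emulated break to a fixed RAM region, clears it, relocates the bin pointers, and only after it has run will mALLOc() hand out memory (the new mem_malloc_start/mem_malloc_end check). The sketch below is a minimal, self-contained illustration of the same sbrk-over-a-fixed-region idea, including the clearing of released memory that MORECORE_CLEARS == 1 requires; the names demo_sbrk, arena, arena_brk and DEMO_MORECORE_FAILURE are invented for the example and do not exist in U-Boot.

#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define DEMO_MORECORE_FAILURE ((void *)(-1))

/* Fixed arena standing in for the RAM region reserved for malloc. */
static unsigned char arena[4096];
static size_t arena_brk;            /* current break, as an offset into arena[] */

/* sbrk-style allocator over the fixed arena (illustrative only) */
static void *demo_sbrk(ptrdiff_t increment)
{
	size_t old = arena_brk;
	ptrdiff_t new = (ptrdiff_t)old + increment;

	/* refuse to move the break outside the arena */
	if (new < 0 || (size_t)new > sizeof(arena))
		return DEMO_MORECORE_FAILURE;

	/* memory handed back must be cleared, matching MORECORE_CLEARS == 1 */
	if (increment < 0)
		memset(arena + new, 0, (size_t)-increment);

	arena_brk = (size_t)new;
	return arena + old;             /* sbrk returns the previous break */
}

int main(void)
{
	void *p = demo_sbrk(128);       /* grow, as malloc_extend_top() would via MORECORE */
	void *q = demo_sbrk(-64);       /* shrink: the released half is zeroed */

	printf("first block at %p, break now %zu, shrink returned %p\n",
	       p, arena_brk, q);
	return 0;
}

In the real code the same bounds check lives in sbrk(): an increment that would move mem_malloc_brk outside [mem_malloc_start, mem_malloc_end] returns MORECORE_FAILURE, which the dlmalloc core treats as a failed extension and mALLOc() then propagates as NULL.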