X-Git-Url: https://git.kernelconcepts.de/?a=blobdiff_plain;f=common%2Fdlmalloc.c;h=f9873393c183350795ed3ac7c15f5552afb5419b;hb=0043b1faa7f8a6b9a38ac949335cf3a5c3074459;hp=e9bab09b8eac7c0ec4900a3685f15a807b448c31;hpb=52eb2c79110151b9017a0829c4d44ee7b8e2ca04;p=karo-tx-uboot.git diff --git a/common/dlmalloc.c b/common/dlmalloc.c index e9bab09b8e..f9873393c1 100644 --- a/common/dlmalloc.c +++ b/common/dlmalloc.c @@ -1,5 +1,9 @@ #include +#ifdef CONFIG_SANDBOX +#define DEBUG +#endif + #if 0 /* Moved to malloc.h */ /* ---------- To make a malloc.h, start cutting here ------------ */ @@ -201,7 +205,7 @@ MORECORE_FAILURE (default: -1) The value returned upon failure of MORECORE. MORECORE_CLEARS (default 1) - True (1) if the routine mapped to MORECORE zeroes out memory (which + true (1) if the routine mapped to MORECORE zeroes out memory (which holds for sbrk). DEFAULT_TRIM_THRESHOLD DEFAULT_TOP_PAD @@ -220,7 +224,7 @@ */ - + /* Preliminaries */ @@ -286,13 +290,6 @@ extern "C" { */ -#ifdef DEBUG -#include -#else -#define assert(x) ((void)0) -#endif - - /* INTERNAL_SIZE_T is the word-size used for internal bookkeeping of chunk sizes. On a 64-bit machine, you can reduce malloc @@ -937,6 +934,8 @@ struct mallinfo mALLINFo(); #endif /* 0 */ /* Moved to malloc.h */ #include +#include + #ifdef DEBUG #if __STD_C static void malloc_update_mallinfo (void); @@ -1139,7 +1138,7 @@ gAllocatedSize)) #endif - + /* Type declarations @@ -1279,7 +1278,7 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ serviced via calls to mmap, and then later released via munmap. */ - + /* sizes, alignments */ #define SIZE_SZ (sizeof(INTERNAL_SIZE_T)) @@ -1304,7 +1303,7 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ #define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0) - + /* Physical chunk operations @@ -1339,7 +1338,7 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) - + /* Dealing with use bits @@ -1378,7 +1377,7 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) - + /* Dealing with size fields @@ -1401,7 +1400,7 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) - + /* @@ -1472,7 +1471,7 @@ typedef struct malloc_chunk* mbinptr; #define IAV(i) bin_at(i), bin_at(i) static mbinptr av_[NAV * 2 + 2] = { - 0, 0, + NULL, NULL, IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7), IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15), IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23), @@ -1492,14 +1491,16 @@ static mbinptr av_[NAV * 2 + 2] = { }; #ifdef CONFIG_NEEDS_MANUAL_RELOC -void malloc_bin_reloc (void) +static void malloc_bin_reloc(void) { - unsigned long *p = (unsigned long *)(&av_[2]); - int i; - for (i=2; i<(sizeof(av_)/sizeof(mbinptr)); ++i) { - *p++ += gd->reloc_off; - } + mbinptr *p = &av_[2]; + size_t i; + + for (i = 2; i < ARRAY_SIZE(av_); ++i, ++p) + *p = (mbinptr)((ulong)*p + gd->reloc_off); } +#else +static inline void malloc_bin_reloc(void) {} #endif ulong mem_malloc_start = 0; @@ -1533,6 +1534,8 @@ void mem_malloc_init(ulong start, ulong size) mem_malloc_brk = start; memset((void *)mem_malloc_start, 0, size); + + malloc_bin_reloc(); } /* field-extraction macros */ @@ -1569,7 +1572,7 @@ void 
mem_malloc_init(ulong start, ulong size) #define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH) - + /* To help compensate for the large number of bins, a one-level index @@ -1593,7 +1596,7 @@ void mem_malloc_init(ulong start, ulong size) #define clear_binblock(ii) (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii)))) - + /* Other static bookkeeping data */ @@ -1631,7 +1634,7 @@ static unsigned int max_n_mmaps = 0; static unsigned long max_mmapped_mem = 0; #endif - + /* Debugging support @@ -1654,9 +1657,7 @@ static void do_check_chunk(mchunkptr p) static void do_check_chunk(p) mchunkptr p; #endif { -#if 0 /* causes warnings because assert() is off */ INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; -#endif /* 0 */ /* No checkable chunk is mmapped */ assert(!chunk_is_mmapped(p)); @@ -1678,9 +1679,7 @@ static void do_check_free_chunk(p) mchunkptr p; #endif { INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; -#if 0 /* causes warnings because assert() is off */ mchunkptr next = chunk_at_offset(p, sz); -#endif /* 0 */ do_check_chunk(p); @@ -1744,10 +1743,8 @@ static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s) static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s; #endif { -#if 0 /* causes warnings because assert() is off */ INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; long room = sz - s; -#endif /* 0 */ do_check_inuse_chunk(p); @@ -1778,7 +1775,7 @@ static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s; #define check_malloced_chunk(P,N) #endif - + /* Macro-based internal utilities @@ -1850,7 +1847,7 @@ static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s; (last_remainder->fd = last_remainder->bk = last_remainder) - + /* Routines dealing with mmap(). */ @@ -1981,7 +1978,7 @@ static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size; #endif /* HAVE_MMAP */ - + /* Extend the top-most chunk by obtaining memory from system. 
@@ -2098,7 +2095,7 @@ static void malloc_extend_top(nb) INTERNAL_SIZE_T nb; } - + /* Main public routines */ @@ -2183,13 +2180,27 @@ Void_t* mALLOc(bytes) size_t bytes; INTERNAL_SIZE_T nb; +#ifdef CONFIG_SYS_MALLOC_F_LEN + if (!(gd->flags & GD_FLG_RELOC)) { + ulong new_ptr; + void *ptr; + + new_ptr = gd->malloc_ptr + bytes; + if (new_ptr > gd->malloc_limit) + panic("Out of pre-reloc memory"); + ptr = map_sysmem(gd->malloc_base + gd->malloc_ptr, bytes); + gd->malloc_ptr = ALIGN(new_ptr, sizeof(new_ptr)); + return ptr; + } +#endif + /* check if mem_malloc_init() was run */ if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) { /* not initialized yet */ - return 0; + return NULL; } - if ((long)bytes < 0) return 0; + if ((long)bytes < 0) return NULL; nb = request2size(bytes); /* padded request size; */ @@ -2392,7 +2403,7 @@ Void_t* mALLOc(bytes) size_t bytes; /* Try to extend */ malloc_extend_top(nb); if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE) - return 0; /* propagate failure */ + return NULL; /* propagate failure */ } victim = top; @@ -2405,7 +2416,7 @@ Void_t* mALLOc(bytes) size_t bytes; } - + /* @@ -2446,7 +2457,13 @@ void fREe(mem) Void_t* mem; mchunkptr fwd; /* misc temp for linking */ int islr; /* track whether merging with last_remainder */ - if (mem == 0) /* free(0) has no effect */ +#ifdef CONFIG_SYS_MALLOC_F_LEN + /* free() is a no-op - all the memory will be freed on relocation */ + if (!(gd->flags & GD_FLG_RELOC)) + return; +#endif + + if (mem == NULL) /* free(0) has no effect */ return; p = mem2chunk(mem); @@ -2522,7 +2539,7 @@ void fREe(mem) Void_t* mem; } - + /* @@ -2592,10 +2609,17 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; if (bytes == 0) { fREe(oldmem); return 0; } #endif - if ((long)bytes < 0) return 0; + if ((long)bytes < 0) return NULL; /* realloc of null is supposed to be same as malloc */ - if (oldmem == 0) return mALLOc(bytes); + if (oldmem == NULL) return mALLOc(bytes); + +#ifdef CONFIG_SYS_MALLOC_F_LEN + if (!(gd->flags & GD_FLG_RELOC)) { + /* This is harder to support and should not be needed */ + panic("pre-reloc realloc() is not supported"); + } +#endif newp = oldp = mem2chunk(oldmem); newsize = oldsize = chunksize(oldp); @@ -2656,7 +2680,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; } else { - next = 0; + next = NULL; nextsize = 0; } @@ -2669,7 +2693,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; /* try forward + backward first to save a later consolidation */ - if (next != 0) + if (next != NULL) { /* into top */ if (next == top) @@ -2702,7 +2726,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; } /* backward only */ - if (prev != 0 && (long)(prevsize + newsize) >= (long)nb) + if (prev != NULL && (long)(prevsize + newsize) >= (long)nb) { unlink(prev, bck, fwd); newp = prev; @@ -2717,8 +2741,8 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; newmem = mALLOc (bytes); - if (newmem == 0) /* propagate failure */ - return 0; + if (newmem == NULL) /* propagate failure */ + return NULL; /* Avoid copy if newp is next chunk after oldp. */ /* (This can only happen when new chunk is sbrk'ed.) 
*/ @@ -2759,7 +2783,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; } - + /* @@ -2796,7 +2820,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes; mchunkptr remainder; /* spare room at end to split off */ long remainder_size; /* its size */ - if ((long)bytes < 0) return 0; + if ((long)bytes < 0) return NULL; /* If need less alignment than we give anyway, just relay to malloc */ @@ -2811,7 +2835,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes; nb = request2size(bytes); m = (char*)(mALLOc(nb + alignment + MINSIZE)); - if (m == 0) return 0; /* propagate failure */ + if (m == NULL) return NULL; /* propagate failure */ p = mem2chunk(m); @@ -2877,7 +2901,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes; } - + /* @@ -2936,12 +2960,18 @@ Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size; #endif Void_t* mem = mALLOc (sz); - if ((long)n < 0) return 0; + if ((long)n < 0) return NULL; - if (mem == 0) - return 0; + if (mem == NULL) + return NULL; else { +#ifdef CONFIG_SYS_MALLOC_F_LEN + if (!(gd->flags & GD_FLG_RELOC)) { + MALLOC_ZERO(mem, sz); + return mem; + } +#endif p = mem2chunk(mem); /* Two optional cases in which clearing not necessary */ @@ -2984,7 +3014,7 @@ void cfree(mem) Void_t *mem; } #endif - + /* @@ -3065,7 +3095,7 @@ int malloc_trim(pad) size_t pad; } } - + /* malloc_usable_size: @@ -3085,7 +3115,7 @@ size_t malloc_usable_size(mem) Void_t* mem; #endif { mchunkptr p; - if (mem == 0) + if (mem == NULL) return 0; else { @@ -3101,7 +3131,7 @@ size_t malloc_usable_size(mem) Void_t* mem; } - + /* Utility to update current_mallinfo for malloc_stats and mallinfo() */ @@ -3145,7 +3175,7 @@ static void malloc_update_mallinfo() } #endif /* DEBUG */ - + /* @@ -3192,7 +3222,7 @@ struct mallinfo mALLINFo() #endif /* DEBUG */ - + /* mallopt:
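
Editorial note on the bin-relocation hunk above: the reworked malloc_bin_reloc() walks the av_[] bin table with ARRAY_SIZE() and adds gd->reloc_off to each entry, and the new empty inline stub lets mem_malloc_init() call it unconditionally whether or not CONFIG_NEEDS_MANUAL_RELOC is defined. The following is a minimal stand-alone sketch of that pattern, not U-Boot code; the names pointer_table, table_reloc and NEEDS_MANUAL_RELOC are invented for illustration.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TABLE_ENTRIES	8

    /* Hypothetical stand-in for the av_[] table of bin pointers */
    static void *pointer_table[TABLE_ENTRIES];

    #ifdef NEEDS_MANUAL_RELOC	/* stands in for CONFIG_NEEDS_MANUAL_RELOC */
    static void table_reloc(uintptr_t reloc_off)
    {
    	size_t i;

    	/* Skip the first two slots, which hold NULL rather than bin pointers */
    	for (i = 2; i < TABLE_ENTRIES; i++)
    		pointer_table[i] = (void *)((uintptr_t)pointer_table[i] + reloc_off);
    }
    #else
    /* Empty stub so callers need no #ifdef of their own */
    static inline void table_reloc(uintptr_t reloc_off) {}
    #endif

    int main(void)
    {
    	/* In the diff, mem_malloc_init() makes the equivalent call after
    	 * setting up the heap, instead of boards calling it separately. */
    	table_reloc(0x100000);
    	printf("table[2] = %p\n", pointer_table[2]);
    	return 0;
    }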
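
Editorial note on the CONFIG_SYS_MALLOC_F_LEN hunks in mALLOc(), fREe(), rEALLOc() and cALLOc(): they add a pre-relocation path. While GD_FLG_RELOC is still clear, malloc() serves requests from a simple bump pointer inside the global data area (gd->malloc_base / malloc_ptr / malloc_limit), free() does nothing, realloc() panics, and calloc() just zeroes the freshly bumped region. The sketch below models that scheme outside of U-Boot under stated assumptions: struct early_pool and its functions are hypothetical names, and it returns NULL on exhaustion where the real code calls panic().

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Hypothetical model of the pre-relocation allocator state that U-Boot
     * keeps in struct global_data (malloc_base, malloc_ptr, malloc_limit). */
    struct early_pool {
    	uintptr_t base;		/* start of the reserved early heap */
    	size_t ptr;		/* offset of the next free byte */
    	size_t limit;		/* size of the reserved area */
    };

    static void *early_malloc(struct early_pool *pool, size_t bytes)
    {
    	size_t new_ptr = pool->ptr + bytes;
    	void *mem;

    	if (new_ptr > pool->limit)
    		return NULL;		/* the real code panics instead */

    	mem = (void *)(pool->base + pool->ptr);
    	/* Round up to pointer size, like ALIGN(new_ptr, sizeof(new_ptr)) */
    	pool->ptr = (new_ptr + sizeof(new_ptr) - 1) & ~(sizeof(new_ptr) - 1);
    	return mem;
    }

    static void early_free(void *mem)
    {
    	/* No-op: the whole pool is discarded when U-Boot relocates */
    	(void)mem;
    }

    static void *early_calloc(struct early_pool *pool, size_t n, size_t size)
    {
    	void *mem = early_malloc(pool, n * size);

    	if (mem)
    		memset(mem, 0, n * size);	/* mirrors MALLOC_ZERO(mem, sz) */
    	return mem;
    }

    int main(void)
    {
    	static unsigned char area[1024];
    	struct early_pool pool = { (uintptr_t)area, 0, sizeof(area) };
    	void *a = early_malloc(&pool, 100);
    	void *b = early_calloc(&pool, 4, 16);

    	early_free(a);			/* intentionally does nothing */
    	printf("a=%p b=%p used=%zu\n", a, b, pool.ptr);
    	return 0;
    }

Growing an allocation in place has no cheap implementation on top of a bump pointer, which is consistent with the diff making the pre-relocation rEALLOc() path panic() rather than attempting to support it.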