author    Andrea Fioraldi <andreafioraldi@gmail.com>  2021-02-12 09:42:22 +0100
committer Andrea Fioraldi <andreafioraldi@gmail.com>  2021-02-12 09:42:22 +0100
commit    22a3c7f7d043d9dbf39c847061d88a4577537031 (patch)
tree      37f2e2e3b5561168e92c36a0b96f1caec42f99c6 /qemu_mode/libqasan/dlmalloc.c
parent    16ffbb37f5897ca318e747518fdae6b4e56b52ac (diff)
download  afl++-22a3c7f7d043d9dbf39c847061d88a4577537031.tar.gz
fix #736 (ty b1gr3db)
Diffstat (limited to 'qemu_mode/libqasan/dlmalloc.c')
-rw-r--r--  qemu_mode/libqasan/dlmalloc.c  5553
1 file changed, 3269 insertions(+), 2284 deletions(-)
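The header comment in the diff below documents running one independent mspace per thread through a thread-local handle, lazily created on first use. A minimal compilable sketch of that pattern, assuming dlmalloc is built with MSPACES=1 (the extern declarations stand in for dlmalloc.h; tlmalloc/tlfree are the names used in the dlmalloc documentation, not symbols exported by libqasan):

/* Per-thread mspace pattern from the dlmalloc header comment (sketch). */
#include <stddef.h>

typedef void *mspace;                 /* opaque handle, as in dlmalloc.h */
extern mspace create_mspace(size_t capacity, int locked);
extern void  *mspace_malloc(mspace msp, size_t bytes);
extern void   mspace_free(mspace msp, void *mem);

static __thread mspace tlms = 0;      /* one independent space per thread */

void *tlmalloc(size_t bytes) {
  /* Lazily create the space on first use; capacity 0 means "grow on
     demand", locked 0 skips internal locking since the space is
     thread-private. */
  if (tlms == 0) tlms = create_mspace(0, 0);
  return mspace_malloc(tlms, bytes);
}

void tlfree(void *mem) {
  mspace_free(tlms, mem);
}

Unless FOOTERS is defined each mspace is completely independent, so freed chunks never migrate between threads and no lock contention arises on the malloc fast path.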
diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index 7e3cb159..bace0ff6 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -207,9 +207,12 @@ mspaces as thread-locals. For example: static __thread mspace tlms = 0; void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } Unless FOOTERS is defined, each mspace is completely independent. @@ -525,201 +528,203 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP improvement at the expense of carrying around more memory. */ -#define USE_DL_PREFIX - -/* Version identifier to allow people to support multiple versions */ -#ifndef DLMALLOC_VERSION -#define DLMALLOC_VERSION 20806 -#endif /* DLMALLOC_VERSION */ - -#ifndef DLMALLOC_EXPORT -#define DLMALLOC_EXPORT extern -#endif - -#ifndef WIN32 -#ifdef _WIN32 -#define WIN32 1 -#endif /* _WIN32 */ -#ifdef _WIN32_WCE -#define LACKS_FCNTL_H -#define WIN32 1 -#endif /* _WIN32_WCE */ -#endif /* WIN32 */ -#ifdef WIN32 -#define WIN32_LEAN_AND_MEAN -#include <windows.h> -#include <tchar.h> -#define HAVE_MMAP 1 -#define HAVE_MORECORE 0 -#define LACKS_UNISTD_H -#define LACKS_SYS_PARAM_H -#define LACKS_SYS_MMAN_H -#define LACKS_STRING_H -#define LACKS_STRINGS_H -#define LACKS_SYS_TYPES_H -#define LACKS_ERRNO_H -#define LACKS_SCHED_H -#ifndef MALLOC_FAILURE_ACTION -#define MALLOC_FAILURE_ACTION -#endif /* MALLOC_FAILURE_ACTION */ -#ifndef MMAP_CLEARS -#ifdef _WIN32_WCE /* WINCE reportedly does not clear */ -#define MMAP_CLEARS 0 -#else -#define MMAP_CLEARS 1 -#endif /* _WIN32_WCE */ -#endif /*MMAP_CLEARS */ -#endif /* WIN32 */ - -#if defined(DARWIN) || defined(_DARWIN) -/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ -#ifndef HAVE_MORECORE -#define HAVE_MORECORE 0 -#define HAVE_MMAP 1 -/* OSX allocators provide 16 byte alignment */ -#ifndef MALLOC_ALIGNMENT -#define MALLOC_ALIGNMENT ((size_t)16U) -#endif -#endif /* HAVE_MORECORE */ -#endif /* DARWIN */ - -#ifndef LACKS_SYS_TYPES_H -#include <sys/types.h> /* For size_t */ -#endif /* LACKS_SYS_TYPES_H */ - -/* The maximum possible size_t value has all bits set */ -#define MAX_SIZE_T (~(size_t)0) - -#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ -#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ - (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) -#endif /* USE_LOCKS */ - -#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ -#if ((defined(__GNUC__) && \ - ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ - defined(__i386__) || defined(__x86_64__))) || \ - (defined(_MSC_VER) && _MSC_VER>=1310)) -#ifndef USE_SPIN_LOCKS -#define USE_SPIN_LOCKS 1 -#endif /* USE_SPIN_LOCKS */ -#elif USE_SPIN_LOCKS -#error "USE_SPIN_LOCKS defined without implementation" -#endif /* ... locks available... 
*/ -#elif !defined(USE_SPIN_LOCKS) -#define USE_SPIN_LOCKS 0 -#endif /* USE_LOCKS */ - -#ifndef ONLY_MSPACES -#define ONLY_MSPACES 0 -#endif /* ONLY_MSPACES */ -#ifndef MSPACES -#if ONLY_MSPACES -#define MSPACES 1 -#else /* ONLY_MSPACES */ -#define MSPACES 0 -#endif /* ONLY_MSPACES */ -#endif /* MSPACES */ -#ifndef MALLOC_ALIGNMENT -#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) -#endif /* MALLOC_ALIGNMENT */ -#ifndef FOOTERS -#define FOOTERS 0 -#endif /* FOOTERS */ -#ifndef ABORT -#define ABORT abort() -#endif /* ABORT */ -#ifndef ABORT_ON_ASSERT_FAILURE -#define ABORT_ON_ASSERT_FAILURE 1 -#endif /* ABORT_ON_ASSERT_FAILURE */ -#ifndef PROCEED_ON_ERROR -#define PROCEED_ON_ERROR 0 -#endif /* PROCEED_ON_ERROR */ - -#ifndef INSECURE -#define INSECURE 0 -#endif /* INSECURE */ -#ifndef MALLOC_INSPECT_ALL -#define MALLOC_INSPECT_ALL 0 -#endif /* MALLOC_INSPECT_ALL */ -#ifndef HAVE_MMAP -#define HAVE_MMAP 1 -#endif /* HAVE_MMAP */ -#ifndef MMAP_CLEARS -#define MMAP_CLEARS 1 -#endif /* MMAP_CLEARS */ -#ifndef HAVE_MREMAP -#ifdef linux -#define HAVE_MREMAP 1 -#define _GNU_SOURCE /* Turns on mremap() definition */ -#else /* linux */ -#define HAVE_MREMAP 0 -#endif /* linux */ -#endif /* HAVE_MREMAP */ -#ifndef MALLOC_FAILURE_ACTION -#define MALLOC_FAILURE_ACTION errno = ENOMEM; -#endif /* MALLOC_FAILURE_ACTION */ -#ifndef HAVE_MORECORE -#if ONLY_MSPACES -#define HAVE_MORECORE 0 -#else /* ONLY_MSPACES */ -#define HAVE_MORECORE 1 -#endif /* ONLY_MSPACES */ -#endif /* HAVE_MORECORE */ -#if !HAVE_MORECORE -#define MORECORE_CONTIGUOUS 0 -#else /* !HAVE_MORECORE */ -#define MORECORE_DEFAULT sbrk -#ifndef MORECORE_CONTIGUOUS -#define MORECORE_CONTIGUOUS 1 -#endif /* MORECORE_CONTIGUOUS */ -#endif /* HAVE_MORECORE */ -#ifndef DEFAULT_GRANULARITY -#if (MORECORE_CONTIGUOUS || defined(WIN32)) -#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ -#else /* MORECORE_CONTIGUOUS */ -#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) -#endif /* MORECORE_CONTIGUOUS */ -#endif /* DEFAULT_GRANULARITY */ -#ifndef DEFAULT_TRIM_THRESHOLD -#ifndef MORECORE_CANNOT_TRIM -#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) -#else /* MORECORE_CANNOT_TRIM */ -#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T -#endif /* MORECORE_CANNOT_TRIM */ -#endif /* DEFAULT_TRIM_THRESHOLD */ -#ifndef DEFAULT_MMAP_THRESHOLD -#if HAVE_MMAP -#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) -#else /* HAVE_MMAP */ -#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* DEFAULT_MMAP_THRESHOLD */ -#ifndef MAX_RELEASE_CHECK_RATE -#if HAVE_MMAP -#define MAX_RELEASE_CHECK_RATE 4095 -#else -#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* MAX_RELEASE_CHECK_RATE */ -#ifndef USE_BUILTIN_FFS -#define USE_BUILTIN_FFS 0 -#endif /* USE_BUILTIN_FFS */ -#ifndef USE_DEV_RANDOM -#define USE_DEV_RANDOM 0 -#endif /* USE_DEV_RANDOM */ -#ifndef NO_MALLINFO -#define NO_MALLINFO 0 -#endif /* NO_MALLINFO */ -#ifndef MALLINFO_FIELD_TYPE -#define MALLINFO_FIELD_TYPE size_t -#endif /* MALLINFO_FIELD_TYPE */ -#ifndef NO_MALLOC_STATS -#define NO_MALLOC_STATS 0 -#endif /* NO_MALLOC_STATS */ -#ifndef NO_SEGMENT_TRAVERSAL -#define NO_SEGMENT_TRAVERSAL 0 -#endif /* NO_SEGMENT_TRAVERSAL */ + #define USE_DL_PREFIX + + /* Version identifier to allow people to support multiple versions */ + #ifndef DLMALLOC_VERSION + #define DLMALLOC_VERSION 20806 + #endif /* DLMALLOC_VERSION */ + + #ifndef DLMALLOC_EXPORT + #define DLMALLOC_EXPORT extern + #endif + + #ifndef WIN32 + 
#ifdef _WIN32 + #define WIN32 1 + #endif /* _WIN32 */ + #ifdef _WIN32_WCE + #define LACKS_FCNTL_H + #define WIN32 1 + #endif /* _WIN32_WCE */ + #endif /* WIN32 */ + #ifdef WIN32 + #define WIN32_LEAN_AND_MEAN + #include <windows.h> + #include <tchar.h> + #define HAVE_MMAP 1 + #define HAVE_MORECORE 0 + #define LACKS_UNISTD_H + #define LACKS_SYS_PARAM_H + #define LACKS_SYS_MMAN_H + #define LACKS_STRING_H + #define LACKS_STRINGS_H + #define LACKS_SYS_TYPES_H + #define LACKS_ERRNO_H + #define LACKS_SCHED_H + #ifndef MALLOC_FAILURE_ACTION + #define MALLOC_FAILURE_ACTION + #endif /* MALLOC_FAILURE_ACTION */ + #ifndef MMAP_CLEARS + #ifdef _WIN32_WCE /* WINCE reportedly does not clear */ + #define MMAP_CLEARS 0 + #else + #define MMAP_CLEARS 1 + #endif /* _WIN32_WCE */ + #endif /*MMAP_CLEARS */ + #endif /* WIN32 */ + + #if defined(DARWIN) || defined(_DARWIN) + /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ + #ifndef HAVE_MORECORE + #define HAVE_MORECORE 0 + #define HAVE_MMAP 1 + /* OSX allocators provide 16 byte alignment */ + #ifndef MALLOC_ALIGNMENT + #define MALLOC_ALIGNMENT ((size_t)16U) + #endif + #endif /* HAVE_MORECORE */ + #endif /* DARWIN */ + + #ifndef LACKS_SYS_TYPES_H + #include <sys/types.h> /* For size_t */ + #endif /* LACKS_SYS_TYPES_H */ + + /* The maximum possible size_t value has all bits set */ + #define MAX_SIZE_T (~(size_t)0) + + #ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ + #define USE_LOCKS \ + ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ + (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) + #endif /* USE_LOCKS */ + + #if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ + #if ((defined(__GNUC__) && \ + ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ + defined(__i386__) || defined(__x86_64__))) || \ + (defined(_MSC_VER) && _MSC_VER >= 1310)) + #ifndef USE_SPIN_LOCKS + #define USE_SPIN_LOCKS 1 + #endif /* USE_SPIN_LOCKS */ + #elif USE_SPIN_LOCKS + #error "USE_SPIN_LOCKS defined without implementation" + #endif /* ... locks available... 
*/ + #elif !defined(USE_SPIN_LOCKS) + #define USE_SPIN_LOCKS 0 + #endif /* USE_LOCKS */ + + #ifndef ONLY_MSPACES + #define ONLY_MSPACES 0 + #endif /* ONLY_MSPACES */ + #ifndef MSPACES + #if ONLY_MSPACES + #define MSPACES 1 + #else /* ONLY_MSPACES */ + #define MSPACES 0 + #endif /* ONLY_MSPACES */ + #endif /* MSPACES */ + #ifndef MALLOC_ALIGNMENT + #define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) + #endif /* MALLOC_ALIGNMENT */ + #ifndef FOOTERS + #define FOOTERS 0 + #endif /* FOOTERS */ + #ifndef ABORT + #define ABORT abort() + #endif /* ABORT */ + #ifndef ABORT_ON_ASSERT_FAILURE + #define ABORT_ON_ASSERT_FAILURE 1 + #endif /* ABORT_ON_ASSERT_FAILURE */ + #ifndef PROCEED_ON_ERROR + #define PROCEED_ON_ERROR 0 + #endif /* PROCEED_ON_ERROR */ + + #ifndef INSECURE + #define INSECURE 0 + #endif /* INSECURE */ + #ifndef MALLOC_INSPECT_ALL + #define MALLOC_INSPECT_ALL 0 + #endif /* MALLOC_INSPECT_ALL */ + #ifndef HAVE_MMAP + #define HAVE_MMAP 1 + #endif /* HAVE_MMAP */ + #ifndef MMAP_CLEARS + #define MMAP_CLEARS 1 + #endif /* MMAP_CLEARS */ + #ifndef HAVE_MREMAP + #ifdef linux + #define HAVE_MREMAP 1 + #define _GNU_SOURCE /* Turns on mremap() definition */ + #else /* linux */ + #define HAVE_MREMAP 0 + #endif /* linux */ + #endif /* HAVE_MREMAP */ + #ifndef MALLOC_FAILURE_ACTION + #define MALLOC_FAILURE_ACTION errno = ENOMEM; + #endif /* MALLOC_FAILURE_ACTION */ + #ifndef HAVE_MORECORE + #if ONLY_MSPACES + #define HAVE_MORECORE 0 + #else /* ONLY_MSPACES */ + #define HAVE_MORECORE 1 + #endif /* ONLY_MSPACES */ + #endif /* HAVE_MORECORE */ + #if !HAVE_MORECORE + #define MORECORE_CONTIGUOUS 0 + #else /* !HAVE_MORECORE */ + #define MORECORE_DEFAULT sbrk + #ifndef MORECORE_CONTIGUOUS + #define MORECORE_CONTIGUOUS 1 + #endif /* MORECORE_CONTIGUOUS */ + #endif /* HAVE_MORECORE */ + #ifndef DEFAULT_GRANULARITY + #if (MORECORE_CONTIGUOUS || defined(WIN32)) + #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ + #else /* MORECORE_CONTIGUOUS */ + #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) + #endif /* MORECORE_CONTIGUOUS */ + #endif /* DEFAULT_GRANULARITY */ + #ifndef DEFAULT_TRIM_THRESHOLD + #ifndef MORECORE_CANNOT_TRIM + #define DEFAULT_TRIM_THRESHOLD \ + ((size_t)2U * (size_t)1024U * (size_t)1024U) + #else /* MORECORE_CANNOT_TRIM */ + #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T + #endif /* MORECORE_CANNOT_TRIM */ + #endif /* DEFAULT_TRIM_THRESHOLD */ + #ifndef DEFAULT_MMAP_THRESHOLD + #if HAVE_MMAP + #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) + #else /* HAVE_MMAP */ + #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T + #endif /* HAVE_MMAP */ + #endif /* DEFAULT_MMAP_THRESHOLD */ + #ifndef MAX_RELEASE_CHECK_RATE + #if HAVE_MMAP + #define MAX_RELEASE_CHECK_RATE 4095 + #else + #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T + #endif /* HAVE_MMAP */ + #endif /* MAX_RELEASE_CHECK_RATE */ + #ifndef USE_BUILTIN_FFS + #define USE_BUILTIN_FFS 0 + #endif /* USE_BUILTIN_FFS */ + #ifndef USE_DEV_RANDOM + #define USE_DEV_RANDOM 0 + #endif /* USE_DEV_RANDOM */ + #ifndef NO_MALLINFO + #define NO_MALLINFO 0 + #endif /* NO_MALLINFO */ + #ifndef MALLINFO_FIELD_TYPE + #define MALLINFO_FIELD_TYPE size_t + #endif /* MALLINFO_FIELD_TYPE */ + #ifndef NO_MALLOC_STATS + #define NO_MALLOC_STATS 0 + #endif /* NO_MALLOC_STATS */ + #ifndef NO_SEGMENT_TRAVERSAL + #define NO_SEGMENT_TRAVERSAL 0 + #endif /* NO_SEGMENT_TRAVERSAL */ /* mallopt tuning options. 
SVID/XPG defines four standard parameter @@ -728,123 +733,128 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP malloc does support the following options. */ -#undef M_TRIM_THRESHOLD -#undef M_GRANULARITY -#undef M_MMAP_THRESHOLD -#define M_TRIM_THRESHOLD (-1) -#define M_GRANULARITY (-2) -#define M_MMAP_THRESHOLD (-3) + #undef M_TRIM_THRESHOLD + #undef M_GRANULARITY + #undef M_MMAP_THRESHOLD + #define M_TRIM_THRESHOLD (-1) + #define M_GRANULARITY (-2) + #define M_MMAP_THRESHOLD (-3) /* ------------------------ Mallinfo declarations ------------------------ */ -#if !NO_MALLINFO -/* - This version of malloc supports the standard SVID/XPG mallinfo - routine that returns a struct containing usage properties and - statistics. It should work on any system that has a - /usr/include/malloc.h defining struct mallinfo. The main - declaration needed is the mallinfo struct that is returned (by-copy) - by mallinfo(). The malloinfo struct contains a bunch of fields that - are not even meaningful in this version of malloc. These fields are - are instead filled by mallinfo() with other numbers that might be of - interest. - - HAVE_USR_INCLUDE_MALLOC_H should be set if you have a - /usr/include/malloc.h file that includes a declaration of struct - mallinfo. If so, it is included; else a compliant version is - declared below. These must be precisely the same for mallinfo() to - work. The original SVID version of this struct, defined on most - systems with mallinfo, declares all fields as ints. But some others - define as unsigned long. If your system defines the fields using a - type of different width than listed here, you MUST #include your - system version and #define HAVE_USR_INCLUDE_MALLOC_H. -*/ + #if !NO_MALLINFO + /* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The malloinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + are instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. + */ -/* #define HAVE_USR_INCLUDE_MALLOC_H */ + /* #define HAVE_USR_INCLUDE_MALLOC_H */ -#ifdef HAVE_USR_INCLUDE_MALLOC_H -#include "/usr/include/malloc.h" -#else /* HAVE_USR_INCLUDE_MALLOC_H */ -#ifndef STRUCT_MALLINFO_DECLARED -/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */ -#define _STRUCT_MALLINFO -#define STRUCT_MALLINFO_DECLARED 1 + #ifdef HAVE_USR_INCLUDE_MALLOC_H + #include "/usr/include/malloc.h" + #else /* HAVE_USR_INCLUDE_MALLOC_H */ + #ifndef STRUCT_MALLINFO_DECLARED + /* HP-UX (and others?) 
redefines mallinfo unless _STRUCT_MALLINFO is + * defined */ + #define _STRUCT_MALLINFO + #define STRUCT_MALLINFO_DECLARED 1 struct mallinfo { - MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ - MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ - MALLINFO_FIELD_TYPE smblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ - MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ - MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ - MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ - MALLINFO_FIELD_TYPE fordblks; /* total free space */ - MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ + + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ + }; -#endif /* STRUCT_MALLINFO_DECLARED */ -#endif /* HAVE_USR_INCLUDE_MALLOC_H */ -#endif /* NO_MALLINFO */ + + #endif /* STRUCT_MALLINFO_DECLARED */ + #endif /* HAVE_USR_INCLUDE_MALLOC_H */ + #endif /* NO_MALLINFO */ /* Try to persuade compilers to inline. The most critical functions for inlining are defined as macros, so these aren't used for them. */ -#ifndef FORCEINLINE - #if defined(__GNUC__) -#define FORCEINLINE __inline __attribute__ ((always_inline)) - #elif defined(_MSC_VER) - #define FORCEINLINE __forceinline + #ifndef FORCEINLINE + #if defined(__GNUC__) + #define FORCEINLINE __inline __attribute__((always_inline)) + #elif defined(_MSC_VER) + #define FORCEINLINE __forceinline + #endif #endif -#endif -#ifndef NOINLINE - #if defined(__GNUC__) - #define NOINLINE __attribute__ ((noinline)) - #elif defined(_MSC_VER) - #define NOINLINE __declspec(noinline) - #else - #define NOINLINE + #ifndef NOINLINE + #if defined(__GNUC__) + #define NOINLINE __attribute__((noinline)) + #elif defined(_MSC_VER) + #define NOINLINE __declspec(noinline) + #else + #define NOINLINE + #endif #endif -#endif -#ifdef __cplusplus + #ifdef __cplusplus extern "C" { -#ifndef FORCEINLINE - #define FORCEINLINE inline -#endif -#endif /* __cplusplus */ -#ifndef FORCEINLINE - #define FORCEINLINE -#endif - -#if !ONLY_MSPACES - -/* ------------------- Declarations of public routines ------------------- */ - -#ifndef USE_DL_PREFIX -#define dlcalloc calloc -#define dlfree free -#define dlmalloc malloc -#define dlmemalign memalign -#define dlposix_memalign posix_memalign -#define dlrealloc realloc -#define dlrealloc_in_place realloc_in_place -#define dlvalloc valloc -#define dlpvalloc pvalloc -#define dlmallinfo mallinfo -#define dlmallopt mallopt -#define dlmalloc_trim malloc_trim -#define dlmalloc_stats malloc_stats -#define dlmalloc_usable_size malloc_usable_size -#define dlmalloc_footprint malloc_footprint -#define dlmalloc_max_footprint malloc_max_footprint -#define dlmalloc_footprint_limit malloc_footprint_limit -#define dlmalloc_set_footprint_limit malloc_set_footprint_limit -#define dlmalloc_inspect_all malloc_inspect_all -#define dlindependent_calloc independent_calloc -#define dlindependent_comalloc independent_comalloc 
-#define dlbulk_free bulk_free -#endif /* USE_DL_PREFIX */ + + #ifndef FORCEINLINE + #define FORCEINLINE inline + #endif + #endif /* __cplusplus */ + #ifndef FORCEINLINE + #define FORCEINLINE + #endif + + #if !ONLY_MSPACES + + /* ------------------- Declarations of public routines ------------------- */ + + #ifndef USE_DL_PREFIX + #define dlcalloc calloc + #define dlfree free + #define dlmalloc malloc + #define dlmemalign memalign + #define dlposix_memalign posix_memalign + #define dlrealloc realloc + #define dlrealloc_in_place realloc_in_place + #define dlvalloc valloc + #define dlpvalloc pvalloc + #define dlmallinfo mallinfo + #define dlmallopt mallopt + #define dlmalloc_trim malloc_trim + #define dlmalloc_stats malloc_stats + #define dlmalloc_usable_size malloc_usable_size + #define dlmalloc_footprint malloc_footprint + #define dlmalloc_max_footprint malloc_max_footprint + #define dlmalloc_footprint_limit malloc_footprint_limit + #define dlmalloc_set_footprint_limit malloc_set_footprint_limit + #define dlmalloc_inspect_all malloc_inspect_all + #define dlindependent_calloc independent_calloc + #define dlindependent_comalloc independent_comalloc + #define dlbulk_free bulk_free + #endif /* USE_DL_PREFIX */ /* malloc(size_t n) @@ -860,7 +870,7 @@ extern "C" { maximum supported value of n differs across systems, but is in all cases less than the maximum representable value of a size_t. */ -DLMALLOC_EXPORT void* dlmalloc(size_t); +DLMALLOC_EXPORT void *dlmalloc(size_t); /* free(void* p) @@ -869,14 +879,14 @@ DLMALLOC_EXPORT void* dlmalloc(size_t); It has no effect if p is null. If p was not malloced or already freed, free(p) will by default cause the current program to abort. */ -DLMALLOC_EXPORT void dlfree(void*); +DLMALLOC_EXPORT void dlfree(void *); /* calloc(size_t n_elements, size_t element_size); Returns a pointer to n_elements * element_size bytes, with all locations set to zero. */ -DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); +DLMALLOC_EXPORT void *dlcalloc(size_t, size_t); /* realloc(void* p, size_t n) @@ -900,7 +910,7 @@ DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); The old unix realloc convention of allowing the last-free'd chunk to be used as an argument to realloc is not supported. */ -DLMALLOC_EXPORT void* dlrealloc(void*, size_t); +DLMALLOC_EXPORT void *dlrealloc(void *, size_t); /* realloc_in_place(void* p, size_t n) @@ -915,7 +925,7 @@ DLMALLOC_EXPORT void* dlrealloc(void*, size_t); Returns p if successful; otherwise null. */ -DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); +DLMALLOC_EXPORT void *dlrealloc_in_place(void *, size_t); /* memalign(size_t alignment, size_t n); @@ -929,7 +939,7 @@ DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); Overreliance on memalign is a sure way to fragment space. */ -DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); +DLMALLOC_EXPORT void *dlmemalign(size_t, size_t); /* int posix_memalign(void** pp, size_t alignment, size_t n); @@ -939,14 +949,14 @@ DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); returns EINVAL if the alignment is not a power of two (3) fails and returns ENOMEM if memory cannot be allocated. */ -DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t); +DLMALLOC_EXPORT int dlposix_memalign(void **, size_t, size_t); /* valloc(size_t n); Equivalent to memalign(pagesize, n), where pagesize is the page size of the system. If the pagesize is unknown, 4096 is used. 
*/ -DLMALLOC_EXPORT void* dlvalloc(size_t); +DLMALLOC_EXPORT void *dlvalloc(size_t); /* mallopt(int parameter_number, int parameter_value) @@ -1021,7 +1031,7 @@ DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(); */ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); -#if MALLOC_INSPECT_ALL + #if MALLOC_INSPECT_ALL /* malloc_inspect_all(void(*handler)(void *start, void *end, @@ -1043,19 +1053,23 @@ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); than 1000, you could write: static int count = 0; void count_chunks(void* start, void* end, size_t used, void* arg) { + if (used >= 1000) ++count; + } + then: malloc_inspect_all(count_chunks, NULL); malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined. */ -DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), - void* arg); +DLMALLOC_EXPORT void dlmalloc_inspect_all(void (*handler)(void *, void *, + size_t, void *), + void *arg); -#endif /* MALLOC_INSPECT_ALL */ + #endif /* MALLOC_INSPECT_ALL */ -#if !NO_MALLINFO + #if !NO_MALLINFO /* mallinfo() Returns (by copy) a struct containing various summary statistics: @@ -1079,7 +1093,7 @@ DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, thus be inaccurate. */ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); -#endif /* NO_MALLINFO */ + #endif /* NO_MALLINFO */ /* independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); @@ -1117,6 +1131,7 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); struct Node { int item; struct Node* next; }; struct Node* build_list() { + struct Node** pool; int n = read_number_of_nodes_needed(); if (n <= 0) return 0; @@ -1128,9 +1143,11 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); pool[i]->next = pool[i+1]; free(pool); // Can now free the array (or not, if it is needed later) return first; + } + */ -DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); +DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **); /* independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); @@ -1169,6 +1186,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); struct Foot { ... } void send_message(char* msg) { + int msglen = strlen(msg); size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; void* chunks[3]; @@ -1178,6 +1196,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); char* body = (char*)(chunks[1]); struct Foot* foot = (struct Foot*)(chunks[2]); // ... + } In general though, independent_comalloc is worth using only for @@ -1188,7 +1207,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); since it cannot reuse existing noncontiguous small chunks that might be available for some of the elements. */ -DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); +DLMALLOC_EXPORT void **dlindependent_comalloc(size_t, size_t *, void **); /* bulk_free(void* array[], size_t n_elements) @@ -1199,14 +1218,14 @@ DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); is returned. For large arrays of pointers with poor locality, it may be worthwhile to sort this array before calling bulk_free. */ -DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements); +DLMALLOC_EXPORT size_t dlbulk_free(void **, size_t n_elements); /* pvalloc(size_t n); Equivalent to valloc(minimum-page-that-holds(n)), that is, round up n to nearest pagesize. 
*/ -DLMALLOC_EXPORT void* dlpvalloc(size_t); +DLMALLOC_EXPORT void *dlpvalloc(size_t); /* malloc_trim(size_t pad); @@ -1229,7 +1248,7 @@ DLMALLOC_EXPORT void* dlpvalloc(size_t); Malloc_trim returns 1 if it actually released any memory, else 0. */ -DLMALLOC_EXPORT int dlmalloc_trim(size_t); +DLMALLOC_EXPORT int dlmalloc_trim(size_t); /* malloc_stats(); @@ -1250,7 +1269,7 @@ DLMALLOC_EXPORT int dlmalloc_trim(size_t); malloc_stats prints only the most commonly interesting statistics. More information can be obtained by calling mallinfo. */ -DLMALLOC_EXPORT void dlmalloc_stats(void); +DLMALLOC_EXPORT void dlmalloc_stats(void); /* malloc_usable_size(void* p); @@ -1266,17 +1285,17 @@ DLMALLOC_EXPORT void dlmalloc_stats(void); p = malloc(n); assert(malloc_usable_size(p) >= 256); */ -size_t dlmalloc_usable_size(void*); +size_t dlmalloc_usable_size(void *); -#endif /* ONLY_MSPACES */ + #endif /* ONLY_MSPACES */ -#if MSPACES + #if MSPACES /* mspace is an opaque type representing an independent region of space that supports mspace_malloc, etc. */ -typedef void* mspace; +typedef void *mspace; /* create_mspace creates and returns a new independent space with the @@ -1308,7 +1327,8 @@ DLMALLOC_EXPORT size_t destroy_mspace(mspace msp); Destroying this space will deallocate all additionally allocated space (if possible) but not the initial base. */ -DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked); +DLMALLOC_EXPORT mspace create_mspace_with_base(void *base, size_t capacity, + int locked); /* mspace_track_large_chunks controls whether requests for large chunks @@ -1323,12 +1343,11 @@ DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int */ DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable); - /* mspace_malloc behaves as malloc, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); +DLMALLOC_EXPORT void *mspace_malloc(mspace msp, size_t bytes); /* mspace_free behaves as free, but operates within @@ -1338,7 +1357,7 @@ DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); free may be called instead of mspace_free because freed chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); +DLMALLOC_EXPORT void mspace_free(mspace msp, void *mem); /* mspace_realloc behaves as realloc, but operates within @@ -1349,33 +1368,38 @@ DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); realloced chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize); +DLMALLOC_EXPORT void *mspace_realloc(mspace msp, void *mem, size_t newsize); /* mspace_calloc behaves as calloc, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); +DLMALLOC_EXPORT void *mspace_calloc(mspace msp, size_t n_elements, + size_t elem_size); /* mspace_memalign behaves as memalign, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); +DLMALLOC_EXPORT void *mspace_memalign(mspace msp, size_t alignment, + size_t bytes); /* mspace_independent_calloc behaves as independent_calloc, but operates within the given space. 
*/ -DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]); +DLMALLOC_EXPORT void **mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, + void * chunks[]); /* mspace_independent_comalloc behaves as independent_comalloc, but operates within the given space. */ -DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]); +DLMALLOC_EXPORT void **mspace_independent_comalloc(mspace msp, + size_t n_elements, + size_t sizes[], + void * chunks[]); /* mspace_footprint() returns the number of bytes obtained from the @@ -1389,19 +1413,18 @@ DLMALLOC_EXPORT size_t mspace_footprint(mspace msp); */ DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp); - -#if !NO_MALLINFO + #if !NO_MALLINFO /* mspace_mallinfo behaves as mallinfo, but reports properties of the given space. */ DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp); -#endif /* NO_MALLINFO */ + #endif /* NO_MALLINFO */ /* malloc_usable_size(void* p) behaves the same as malloc_usable_size; */ -DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem); +DLMALLOC_EXPORT size_t mspace_usable_size(const void *mem); /* mspace_malloc_stats behaves as malloc_stats, but reports @@ -1420,11 +1443,13 @@ DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad); */ DLMALLOC_EXPORT int mspace_mallopt(int, int); -#endif /* MSPACES */ + #endif /* MSPACES */ + + #ifdef __cplusplus + +} /* end of extern "C" */ -#ifdef __cplusplus -} /* end of extern "C" */ -#endif /* __cplusplus */ + #endif /* __cplusplus */ /* ======================================================================== @@ -1438,392 +1463,413 @@ DLMALLOC_EXPORT int mspace_mallopt(int, int); /*------------------------------ internal #includes ---------------------- */ -#ifdef _MSC_VER -#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ -#endif /* _MSC_VER */ -#if !NO_MALLOC_STATS -#include <stdio.h> /* for printing in malloc_stats */ -#endif /* NO_MALLOC_STATS */ -#ifndef LACKS_ERRNO_H -#include <errno.h> /* for MALLOC_FAILURE_ACTION */ -#endif /* LACKS_ERRNO_H */ -#ifdef DEBUG -#if ABORT_ON_ASSERT_FAILURE -#undef assert -#define assert(x) if(!(x)) ABORT -#else /* ABORT_ON_ASSERT_FAILURE */ -#include <assert.h> -#endif /* ABORT_ON_ASSERT_FAILURE */ -#else /* DEBUG */ -#ifndef assert -#define assert(x) -#endif -#define DEBUG 0 -#endif /* DEBUG */ -#if !defined(WIN32) && !defined(LACKS_TIME_H) -#include <time.h> /* for magic initialization */ -#endif /* WIN32 */ -#ifndef LACKS_STDLIB_H -#include <stdlib.h> /* for abort() */ -#endif /* LACKS_STDLIB_H */ -#ifndef LACKS_STRING_H -#include <string.h> /* for memset etc */ -#endif /* LACKS_STRING_H */ -#if USE_BUILTIN_FFS -#ifndef LACKS_STRINGS_H -#include <strings.h> /* for ffs */ -#endif /* LACKS_STRINGS_H */ -#endif /* USE_BUILTIN_FFS */ -#if HAVE_MMAP -#ifndef LACKS_SYS_MMAN_H -/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ -#if (defined(linux) && !defined(__USE_GNU)) -#define __USE_GNU 1 -#include <sys/mman.h> /* for mmap */ -#undef __USE_GNU -#else -#include <sys/mman.h> /* for mmap */ -#endif /* linux */ -#endif /* LACKS_SYS_MMAN_H */ -#ifndef LACKS_FCNTL_H -#include <fcntl.h> -#endif /* LACKS_FCNTL_H */ -#endif /* HAVE_MMAP */ -#ifndef LACKS_UNISTD_H -#include <unistd.h> /* for sbrk, sysconf */ -#else /* LACKS_UNISTD_H */ -#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) -extern void* sbrk(ptrdiff_t); -#endif /* FreeBSD etc */ -#endif 
/* LACKS_UNISTD_H */ - -/* Declarations for locking */ -#if USE_LOCKS -#ifndef WIN32 -#if defined (__SVR4) && defined (__sun) /* solaris */ -#include <thread.h> -#elif !defined(LACKS_SCHED_H) -#include <sched.h> -#endif /* solaris or LACKS_SCHED_H */ -#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS -#include <pthread.h> -#endif /* USE_RECURSIVE_LOCKS ... */ -#elif defined(_MSC_VER) -#ifndef _M_AMD64 -/* These are already defined on AMD64 builds */ -#ifdef __cplusplus + #ifdef _MSC_VER + #pragma warning(disable : 4146) /* no "unsigned" warnings */ + #endif /* _MSC_VER */ + #if !NO_MALLOC_STATS + #include <stdio.h> /* for printing in malloc_stats */ + #endif /* NO_MALLOC_STATS */ + #ifndef LACKS_ERRNO_H + #include <errno.h> /* for MALLOC_FAILURE_ACTION */ + #endif /* LACKS_ERRNO_H */ + #ifdef DEBUG + #if ABORT_ON_ASSERT_FAILURE + #undef assert + #define assert(x) \ + if (!(x)) ABORT + #else /* ABORT_ON_ASSERT_FAILURE */ + #include <assert.h> + #endif /* ABORT_ON_ASSERT_FAILURE */ + #else /* DEBUG */ + #ifndef assert + #define assert(x) + #endif + #define DEBUG 0 + #endif /* DEBUG */ + #if !defined(WIN32) && !defined(LACKS_TIME_H) + #include <time.h> /* for magic initialization */ + #endif /* WIN32 */ + #ifndef LACKS_STDLIB_H + #include <stdlib.h> /* for abort() */ + #endif /* LACKS_STDLIB_H */ + #ifndef LACKS_STRING_H + #include <string.h> /* for memset etc */ + #endif /* LACKS_STRING_H */ + #if USE_BUILTIN_FFS + #ifndef LACKS_STRINGS_H + #include <strings.h> /* for ffs */ + #endif /* LACKS_STRINGS_H */ + #endif /* USE_BUILTIN_FFS */ + #if HAVE_MMAP + #ifndef LACKS_SYS_MMAN_H + /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ + #if (defined(linux) && !defined(__USE_GNU)) + #define __USE_GNU 1 + #include <sys/mman.h> /* for mmap */ + #undef __USE_GNU + #else + #include <sys/mman.h> /* for mmap */ + #endif /* linux */ + #endif /* LACKS_SYS_MMAN_H */ + #ifndef LACKS_FCNTL_H + #include <fcntl.h> + #endif /* LACKS_FCNTL_H */ + #endif /* HAVE_MMAP */ + #ifndef LACKS_UNISTD_H + #include <unistd.h> /* for sbrk, sysconf */ + #else /* LACKS_UNISTD_H */ + #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +extern void *sbrk(ptrdiff_t); + #endif /* FreeBSD etc */ + #endif /* LACKS_UNISTD_H */ + + /* Declarations for locking */ + #if USE_LOCKS + #ifndef WIN32 + #if defined(__SVR4) && defined(__sun) /* solaris */ + #include <thread.h> + #elif !defined(LACKS_SCHED_H) + #include <sched.h> + #endif /* solaris or LACKS_SCHED_H */ + #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || \ + !USE_SPIN_LOCKS + #include <pthread.h> + #endif /* USE_RECURSIVE_LOCKS ... 
*/ + #elif defined(_MSC_VER) + #ifndef _M_AMD64 + /* These are already defined on AMD64 builds */ + #ifdef __cplusplus extern "C" { -#endif /* __cplusplus */ -LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp); + + #endif /* __cplusplus */ +LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, + LONG Comp); LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value); -#ifdef __cplusplus + #ifdef __cplusplus + } -#endif /* __cplusplus */ -#endif /* _M_AMD64 */ -#pragma intrinsic (_InterlockedCompareExchange) -#pragma intrinsic (_InterlockedExchange) -#define interlockedcompareexchange _InterlockedCompareExchange -#define interlockedexchange _InterlockedExchange -#elif defined(WIN32) && defined(__GNUC__) -#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b) -#define interlockedexchange __sync_lock_test_and_set -#endif /* Win32 */ -#else /* USE_LOCKS */ -#endif /* USE_LOCKS */ - -#ifndef LOCK_AT_FORK -#define LOCK_AT_FORK 0 -#endif - -/* Declarations for bit scanning on win32 */ -#if defined(_MSC_VER) && _MSC_VER>=1300 -#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ -#ifdef __cplusplus + + #endif /* __cplusplus */ + #endif /* _M_AMD64 */ + #pragma intrinsic(_InterlockedCompareExchange) + #pragma intrinsic(_InterlockedExchange) + #define interlockedcompareexchange _InterlockedCompareExchange + #define interlockedexchange _InterlockedExchange + #elif defined(WIN32) && defined(__GNUC__) + #define interlockedcompareexchange(a, b, c) \ + __sync_val_compare_and_swap(a, c, b) + #define interlockedexchange __sync_lock_test_and_set + #endif /* Win32 */ + #else /* USE_LOCKS */ + #endif /* USE_LOCKS */ + + #ifndef LOCK_AT_FORK + #define LOCK_AT_FORK 0 + #endif + + /* Declarations for bit scanning on win32 */ + #if defined(_MSC_VER) && _MSC_VER >= 1300 + #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ + #ifdef __cplusplus extern "C" { -#endif /* __cplusplus */ + + #endif /* __cplusplus */ unsigned char _BitScanForward(unsigned long *index, unsigned long mask); unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); -#ifdef __cplusplus + #ifdef __cplusplus + } -#endif /* __cplusplus */ - -#define BitScanForward _BitScanForward -#define BitScanReverse _BitScanReverse -#pragma intrinsic(_BitScanForward) -#pragma intrinsic(_BitScanReverse) -#endif /* BitScanForward */ -#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ - -#ifndef WIN32 -#ifndef malloc_getpagesize -# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ -# ifndef _SC_PAGE_SIZE -# define _SC_PAGE_SIZE _SC_PAGESIZE -# endif -# endif -# ifdef _SC_PAGE_SIZE -# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) -# else -# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) - extern size_t getpagesize(); -# define malloc_getpagesize getpagesize() -# else -# ifdef WIN32 /* use supplied emulation of getpagesize */ -# define malloc_getpagesize getpagesize() -# else -# ifndef LACKS_SYS_PARAM_H -# include <sys/param.h> -# endif -# ifdef EXEC_PAGESIZE -# define malloc_getpagesize EXEC_PAGESIZE -# else -# ifdef NBPG -# ifndef CLSIZE -# define malloc_getpagesize NBPG -# else -# define malloc_getpagesize (NBPG * CLSIZE) -# endif -# else -# ifdef NBPC -# define malloc_getpagesize NBPC -# else -# ifdef PAGESIZE -# define malloc_getpagesize PAGESIZE -# else /* just guess */ -# define malloc_getpagesize ((size_t)4096U) -# endif -# endif -# endif -# endif -# endif -# endif -# endif -#endif -#endif - -/* 
------------------- size_t and alignment properties -------------------- */ - -/* The byte and bit size of a size_t */ -#define SIZE_T_SIZE (sizeof(size_t)) -#define SIZE_T_BITSIZE (sizeof(size_t) << 3) - -/* Some constants coerced to size_t */ -/* Annoying but necessary to avoid errors on some platforms */ -#define SIZE_T_ZERO ((size_t)0) -#define SIZE_T_ONE ((size_t)1) -#define SIZE_T_TWO ((size_t)2) -#define SIZE_T_FOUR ((size_t)4) -#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) -#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) -#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) -#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) - -/* The bit mask value corresponding to MALLOC_ALIGNMENT */ -#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) - -/* True if address a has acceptable alignment */ -#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) - -/* the number of bytes to offset an address to align it */ -#define align_offset(A)\ - ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ - ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) - -/* -------------------------- MMAP preliminaries ------------------------- */ -/* - If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and - checks to fail so compiler optimizer can delete code rather than - using so many "#if"s. -*/ + #endif /* __cplusplus */ + + #define BitScanForward _BitScanForward + #define BitScanReverse _BitScanReverse + #pragma intrinsic(_BitScanForward) + #pragma intrinsic(_BitScanReverse) + #endif /* BitScanForward */ + #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ + + #ifndef WIN32 + #ifndef malloc_getpagesize + #ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ + #ifndef _SC_PAGE_SIZE + #define _SC_PAGE_SIZE _SC_PAGESIZE + #endif + #endif + #ifdef _SC_PAGE_SIZE + #define malloc_getpagesize sysconf(_SC_PAGE_SIZE) + #else + #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) +extern size_t getpagesize(); + #define malloc_getpagesize getpagesize() + #else + #ifdef WIN32 /* use supplied emulation of getpagesize */ + #define malloc_getpagesize getpagesize() + #else + #ifndef LACKS_SYS_PARAM_H + #include <sys/param.h> + #endif + #ifdef EXEC_PAGESIZE + #define malloc_getpagesize EXEC_PAGESIZE + #else + #ifdef NBPG + #ifndef CLSIZE + #define malloc_getpagesize NBPG + #else + #define malloc_getpagesize (NBPG * CLSIZE) + #endif + #else + #ifdef NBPC + #define malloc_getpagesize NBPC + #else + #ifdef PAGESIZE + #define malloc_getpagesize PAGESIZE + #else /* just guess */ + #define malloc_getpagesize ((size_t)4096U) + #endif + #endif + #endif + #endif + #endif + #endif + #endif + #endif + #endif + /* ------------------- size_t and alignment properties -------------------- */ -/* MORECORE and MMAP must return MFAIL on failure */ -#define MFAIL ((void*)(MAX_SIZE_T)) -#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + /* The byte and bit size of a size_t */ + #define SIZE_T_SIZE (sizeof(size_t)) + #define SIZE_T_BITSIZE (sizeof(size_t) << 3) -#if HAVE_MMAP + /* Some constants coerced to size_t */ + /* Annoying but necessary to avoid errors on some platforms */ + #define SIZE_T_ZERO ((size_t)0) + #define SIZE_T_ONE ((size_t)1) + #define SIZE_T_TWO ((size_t)2) + #define SIZE_T_FOUR ((size_t)4) + #define TWO_SIZE_T_SIZES (SIZE_T_SIZE << 1) + #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE << 2) + #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES + TWO_SIZE_T_SIZES) + #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) -#ifndef WIN32 -#define MMAP_PROT (PROT_READ|PROT_WRITE) -#if 
!defined(MAP_ANONYMOUS) && defined(MAP_ANON) -#define MAP_ANONYMOUS MAP_ANON -#endif /* MAP_ANON */ -#ifdef MAP_ANONYMOUS + /* The bit mask value corresponding to MALLOC_ALIGNMENT */ + #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) -#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) + /* True if address a has acceptable alignment */ + #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) -static FORCEINLINE void* unixmmap(size_t size) { - void* result; + /* the number of bytes to offset an address to align it */ + #define align_offset(A) \ + ((((size_t)(A)&CHUNK_ALIGN_MASK) == 0) \ + ? 0 \ + : ((MALLOC_ALIGNMENT - ((size_t)(A)&CHUNK_ALIGN_MASK)) & \ + CHUNK_ALIGN_MASK)) + + /* -------------------------- MMAP preliminaries ------------------------- */ + + /* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. + */ + + /* MORECORE and MMAP must return MFAIL on failure */ + #define MFAIL ((void *)(MAX_SIZE_T)) + #define CMFAIL ((char *)(MFAIL)) /* defined for convenience */ + + #if HAVE_MMAP + + #ifndef WIN32 + #define MMAP_PROT (PROT_READ | PROT_WRITE) + #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) + #define MAP_ANONYMOUS MAP_ANON + #endif /* MAP_ANON */ + #ifdef MAP_ANONYMOUS + + #define MMAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) + +static FORCEINLINE void *unixmmap(size_t size) { + + void *result; result = mmap(0, size, MMAP_PROT, MMAP_FLAGS, -1, 0); - if (result == MFAIL) - return MFAIL; + if (result == MFAIL) return MFAIL; return result; + } -static FORCEINLINE int unixmunmap(void* ptr, size_t size) { +static FORCEINLINE int unixmunmap(void *ptr, size_t size) { + int result; result = munmap(ptr, size); - if (result != 0) - return result; + if (result != 0) return result; return result; + } -#define MMAP_DEFAULT(s) unixmmap(s) -#define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) + #define MMAP_DEFAULT(s) unixmmap(s) + #define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) -#else /* MAP_ANONYMOUS */ -/* - Nearly all versions of mmap support MAP_ANONYMOUS, so the following - is unlikely to be needed, but is supplied just in case. -*/ -#define MMAP_FLAGS (MAP_PRIVATE) -static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ -#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \ - (dev_zero_fd = open("/dev/zero", O_RDWR), \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) -#define MUNMAP_DEFAULT(a, s) munmap((a), (s)) -#endif /* MAP_ANONYMOUS */ + #else /* MAP_ANONYMOUS */ + /* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. + */ + #define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ + #define MMAP_DEFAULT(s) \ + ((dev_zero_fd < 0) \ + ? 
(dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) \ + : mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) + #define MUNMAP_DEFAULT(a, s) munmap((a), (s)) + #endif /* MAP_ANONYMOUS */ -#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) + #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) -#else /* WIN32 */ + #else /* WIN32 */ /* Win32 MMAP via VirtualAlloc */ -static FORCEINLINE void* win32mmap(size_t size) { - void* ptr; +static FORCEINLINE void *win32mmap(size_t size) { + + void *ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); - if (ptr == 0) - return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); + if (ptr == 0) return MFAIL; return ptr; + } /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ -static FORCEINLINE void* win32direct_mmap(size_t size) { - void* ptr; +static FORCEINLINE void *win32direct_mmap(size_t size) { + + void *ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, - PAGE_READWRITE); - if (ptr == 0) - return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, + PAGE_READWRITE); + if (ptr == 0) return MFAIL; return ptr; + } /* This function supports releasing coalesed segments */ -static FORCEINLINE int win32munmap(void* ptr, size_t size) { +static FORCEINLINE int win32munmap(void *ptr, size_t size) { + MEMORY_BASIC_INFORMATION minfo; - char* cptr = (char*)ptr; + char *cptr = (char *)ptr; while (size) { - if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) - return -1; + + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) return -1; if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || minfo.State != MEM_COMMIT || minfo.RegionSize > size) return -1; - if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) - return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) return -1; cptr += minfo.RegionSize; size -= minfo.RegionSize; + } return 0; + } -#define MMAP_DEFAULT(s) win32mmap(s) -#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) -#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) -#endif /* WIN32 */ -#endif /* HAVE_MMAP */ + #define MMAP_DEFAULT(s) win32mmap(s) + #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) + #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) + #endif /* WIN32 */ + #endif /* HAVE_MMAP */ + + #if HAVE_MREMAP + #ifndef WIN32 -#if HAVE_MREMAP -#ifndef WIN32 +static FORCEINLINE void *dlmremap(void *old_address, size_t old_size, + size_t new_size, int flags) { -static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new_size, int flags) { - void* result; + void *result; result = mremap(old_address, old_size, new_size, flags); - if (result == MFAIL) - return MFAIL; + if (result == MFAIL) return MFAIL; return result; + } -#define MREMAP_DEFAULT(addr, osz, nsz, mv) dlmremap((addr), (osz), (nsz), (mv)) -#endif /* WIN32 */ -#endif /* HAVE_MREMAP */ + #define MREMAP_DEFAULT(addr, osz, nsz, mv) \ + dlmremap((addr), (osz), (nsz), (mv)) + #endif /* WIN32 */ + #endif /* HAVE_MREMAP */ -/** - * Define CALL_MORECORE - */ -#if HAVE_MORECORE + /** + * Define CALL_MORECORE + */ + #if HAVE_MORECORE #ifdef MORECORE - #define CALL_MORECORE(S) MORECORE(S) - #else /* MORECORE */ - #define CALL_MORECORE(S) MORECORE_DEFAULT(S) - #endif /* MORECORE */ -#else /* HAVE_MORECORE */ - #define CALL_MORECORE(S) MFAIL -#endif /* HAVE_MORECORE */ - -/** - * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP - */ -#if HAVE_MMAP - #define USE_MMAP_BIT (SIZE_T_ONE) + #define CALL_MORECORE(S) MORECORE(S) + 
#else /* MORECORE */ + #define CALL_MORECORE(S) MORECORE_DEFAULT(S) + #endif /* MORECORE */ + #else /* HAVE_MORECORE */ + #define CALL_MORECORE(S) MFAIL + #endif /* HAVE_MORECORE */ + + /** + * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP + */ + #if HAVE_MMAP + #define USE_MMAP_BIT (SIZE_T_ONE) #ifdef MMAP - #define CALL_MMAP(s) MMAP(s) - #else /* MMAP */ - #define CALL_MMAP(s) MMAP_DEFAULT(s) - #endif /* MMAP */ + #define CALL_MMAP(s) MMAP(s) + #else /* MMAP */ + #define CALL_MMAP(s) MMAP_DEFAULT(s) + #endif /* MMAP */ #ifdef MUNMAP - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) - #else /* MUNMAP */ - #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) - #endif /* MUNMAP */ + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) + #else /* MUNMAP */ + #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) + #endif /* MUNMAP */ #ifdef DIRECT_MMAP - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #else /* DIRECT_MMAP */ - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) - #endif /* DIRECT_MMAP */ -#else /* HAVE_MMAP */ - #define USE_MMAP_BIT (SIZE_T_ZERO) - - #define MMAP(s) MFAIL - #define MUNMAP(a, s) (-1) - #define DIRECT_MMAP(s) MFAIL - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #define CALL_MMAP(s) MMAP(s) - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) -#endif /* HAVE_MMAP */ - -/** - * Define CALL_MREMAP - */ -#if HAVE_MMAP && HAVE_MREMAP + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #else /* DIRECT_MMAP */ + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) + #endif /* DIRECT_MMAP */ + #else /* HAVE_MMAP */ + #define USE_MMAP_BIT (SIZE_T_ZERO) + + #define MMAP(s) MFAIL + #define MUNMAP(a, s) (-1) + #define DIRECT_MMAP(s) MFAIL + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #define CALL_MMAP(s) MMAP(s) + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) + #endif /* HAVE_MMAP */ + + /** + * Define CALL_MREMAP + */ + #if HAVE_MMAP && HAVE_MREMAP #ifdef MREMAP - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) - #else /* MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) - #endif /* MREMAP */ -#else /* HAVE_MMAP && HAVE_MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL -#endif /* HAVE_MMAP && HAVE_MREMAP */ - -/* mstate bit set if continguous morecore disabled or failed */ -#define USE_NONCONTIGUOUS_BIT (4U) + #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) + #else /* MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) \ + MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) + #endif /* MREMAP */ + #else /* HAVE_MMAP && HAVE_MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL + #endif /* HAVE_MMAP && HAVE_MREMAP */ -/* segment bit set in create_mspace_with_base */ -#define EXTERN_BIT (8U) + /* mstate bit set if continguous morecore disabled or failed */ + #define USE_NONCONTIGUOUS_BIT (4U) + /* segment bit set in create_mspace_with_base */ + #define EXTERN_BIT (8U) /* --------------------------- Lock preliminaries ------------------------ */ @@ -1855,248 +1901,286 @@ static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new */ -#if !USE_LOCKS -#define USE_LOCK_BIT (0U) -#define INITIAL_LOCK(l) (0) -#define DESTROY_LOCK(l) (0) -#define ACQUIRE_MALLOC_GLOBAL_LOCK() -#define RELEASE_MALLOC_GLOBAL_LOCK() - -#else -#if USE_LOCKS > 1 -/* ----------------------- User-defined locks ------------------------ */ -/* Define your own lock implementation here */ -/* #define INITIAL_LOCK(lk) ... */ -/* #define DESTROY_LOCK(lk) ... */ -/* #define ACQUIRE_LOCK(lk) ... */ -/* #define RELEASE_LOCK(lk) ... 
*/ -/* #define TRY_LOCK(lk) ... */ -/* static MLOCK_T malloc_global_mutex = ... */ - -#elif USE_SPIN_LOCKS - -/* First, define CAS_LOCK and CLEAR_LOCK on ints */ -/* Note CAS_LOCK defined to return 0 on success */ - -#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) -#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) -#define CLEAR_LOCK(sl) __sync_lock_release(sl) - -#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) + #if !USE_LOCKS + #define USE_LOCK_BIT (0U) + #define INITIAL_LOCK(l) (0) + #define DESTROY_LOCK(l) (0) + #define ACQUIRE_MALLOC_GLOBAL_LOCK() + #define RELEASE_MALLOC_GLOBAL_LOCK() + + #else + #if USE_LOCKS > 1 + /* ----------------------- User-defined locks ------------------------ */ + /* Define your own lock implementation here */ + /* #define INITIAL_LOCK(lk) ... */ + /* #define DESTROY_LOCK(lk) ... */ + /* #define ACQUIRE_LOCK(lk) ... */ + /* #define RELEASE_LOCK(lk) ... */ + /* #define TRY_LOCK(lk) ... */ + /* static MLOCK_T malloc_global_mutex = ... */ + + #elif USE_SPIN_LOCKS + + /* First, define CAS_LOCK and CLEAR_LOCK on ints */ + /* Note CAS_LOCK defined to return 0 on success */ + + #if defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) + #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) + #define CLEAR_LOCK(sl) __sync_lock_release(sl) + + #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) /* Custom spin locks for older gcc on x86 */ static FORCEINLINE int x86_cas_lock(int *sl) { + int ret; int val = 1; int cmp = 0; - __asm__ __volatile__ ("lock; cmpxchgl %1, %2" - : "=a" (ret) - : "r" (val), "m" (*(sl)), "0"(cmp) - : "memory", "cc"); + __asm__ __volatile__("lock; cmpxchgl %1, %2" + : "=a"(ret) + : "r"(val), "m"(*(sl)), "0"(cmp) + : "memory", "cc"); return ret; + } -static FORCEINLINE void x86_clear_lock(int* sl) { +static FORCEINLINE void x86_clear_lock(int *sl) { + assert(*sl != 0); int prev = 0; int ret; - __asm__ __volatile__ ("lock; xchgl %0, %1" - : "=r" (ret) - : "m" (*(sl)), "0"(prev) - : "memory"); + __asm__ __volatile__("lock; xchgl %0, %1" + : "=r"(ret) + : "m"(*(sl)), "0"(prev) + : "memory"); + } -#define CAS_LOCK(sl) x86_cas_lock(sl) -#define CLEAR_LOCK(sl) x86_clear_lock(sl) - -#else /* Win32 MSC */ -#define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) -#define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) - -#endif /* ... gcc spins locks ... */ - -/* How to yield for a spin lock */ -#define SPINS_PER_YIELD 63 -#if defined(_MSC_VER) -#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ -#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) -#elif defined (__SVR4) && defined (__sun) /* solaris */ -#define SPIN_LOCK_YIELD thr_yield(); -#elif !defined(LACKS_SCHED_H) -#define SPIN_LOCK_YIELD sched_yield(); -#else -#define SPIN_LOCK_YIELD -#endif /* ... yield ... */ - -#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 + #define CAS_LOCK(sl) x86_cas_lock(sl) + #define CLEAR_LOCK(sl) x86_clear_lock(sl) + + #else /* Win32 MSC */ + #define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) + #define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) + + #endif /* ... gcc spins locks ... 
*/ + + /* How to yield for a spin lock */ + #define SPINS_PER_YIELD 63 + #if defined(_MSC_VER) + #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ + #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) + #elif defined(__SVR4) && defined(__sun) /* solaris */ + #define SPIN_LOCK_YIELD thr_yield(); + #elif !defined(LACKS_SCHED_H) + #define SPIN_LOCK_YIELD sched_yield(); + #else + #define SPIN_LOCK_YIELD + #endif /* ... yield ... */ + + #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 /* Plain spin locks use single word (embedded in malloc_states) */ static int spin_acquire_lock(int *sl) { + int spins = 0; while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) { - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } + + if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } + } + return 0; + } -#define MLOCK_T int -#define TRY_LOCK(sl) !CAS_LOCK(sl) -#define RELEASE_LOCK(sl) CLEAR_LOCK(sl) -#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0) -#define INITIAL_LOCK(sl) (*sl = 0) -#define DESTROY_LOCK(sl) (0) + #define MLOCK_T int + #define TRY_LOCK(sl) !CAS_LOCK(sl) + #define RELEASE_LOCK(sl) CLEAR_LOCK(sl) + #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl) ? spin_acquire_lock(sl) : 0) + #define INITIAL_LOCK(sl) (*sl = 0) + #define DESTROY_LOCK(sl) (0) static MLOCK_T malloc_global_mutex = 0; -#else /* USE_RECURSIVE_LOCKS */ -/* types for lock owners */ -#ifdef WIN32 -#define THREAD_ID_T DWORD -#define CURRENT_THREAD GetCurrentThreadId() -#define EQ_OWNER(X,Y) ((X) == (Y)) -#else -/* - Note: the following assume that pthread_t is a type that can be - initialized to (casted) zero. If this is not the case, you will need to - somehow redefine these or not use spin locks. -*/ -#define THREAD_ID_T pthread_t -#define CURRENT_THREAD pthread_self() -#define EQ_OWNER(X,Y) pthread_equal(X, Y) -#endif + #else /* USE_RECURSIVE_LOCKS */ + /* types for lock owners */ + #ifdef WIN32 + #define THREAD_ID_T DWORD + #define CURRENT_THREAD GetCurrentThreadId() + #define EQ_OWNER(X, Y) ((X) == (Y)) + #else + /* + Note: the following assume that pthread_t is a type that can be + initialized to (casted) zero. If this is not the case, you will need + to somehow redefine these or not use spin locks. 
+ */ + #define THREAD_ID_T pthread_t + #define CURRENT_THREAD pthread_self() + #define EQ_OWNER(X, Y) pthread_equal(X, Y) + #endif struct malloc_recursive_lock { - int sl; + + int sl; unsigned int c; - THREAD_ID_T threadid; + THREAD_ID_T threadid; + }; -#define MLOCK_T struct malloc_recursive_lock -static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0}; + #define MLOCK_T struct malloc_recursive_lock +static MLOCK_T malloc_global_mutex = {0, 0, (THREAD_ID_T)0}; static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) { + assert(lk->sl != 0); - if (--lk->c == 0) { - CLEAR_LOCK(&lk->sl); - } + if (--lk->c == 0) { CLEAR_LOCK(&lk->sl); } + } static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; - int spins = 0; + int spins = 0; for (;;) { + if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; lk->c = 1; return 0; + } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { + + } else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; return 0; + } - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } + + if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } + } + } static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; lk->c = 1; return 1; + } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { + + } else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; return 1; + } + return 0; + } -#define RELEASE_LOCK(lk) recursive_release_lock(lk) -#define TRY_LOCK(lk) recursive_try_lock(lk) -#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) -#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) -#define DESTROY_LOCK(lk) (0) -#endif /* USE_RECURSIVE_LOCKS */ - -#elif defined(WIN32) /* Win32 critical sections */ -#define MLOCK_T CRITICAL_SECTION -#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) -#define RELEASE_LOCK(lk) LeaveCriticalSection(lk) -#define TRY_LOCK(lk) TryEnterCriticalSection(lk) -#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000)) -#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) -#define NEED_GLOBAL_LOCK_INIT - -static MLOCK_T malloc_global_mutex; + #define RELEASE_LOCK(lk) recursive_release_lock(lk) + #define TRY_LOCK(lk) recursive_try_lock(lk) + #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) + #define INITIAL_LOCK(lk) \ + ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) + #define DESTROY_LOCK(lk) (0) + #endif /* USE_RECURSIVE_LOCKS */ + + #elif defined(WIN32) /* Win32 critical sections */ + #define MLOCK_T CRITICAL_SECTION + #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) + #define RELEASE_LOCK(lk) LeaveCriticalSection(lk) + #define TRY_LOCK(lk) TryEnterCriticalSection(lk) + #define INITIAL_LOCK(lk) \ + (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000 | 4000)) + #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) + #define NEED_GLOBAL_LOCK_INIT + +static MLOCK_T malloc_global_mutex; static volatile LONG malloc_global_mutex_status; /* Use spin loop to initialize global lock */ static void init_malloc_global_mutex() { + for (;;) { + long stat = malloc_global_mutex_status; - if (stat > 0) - return; + if (stat > 0) return; /* transition to < 0 while initializing, then to > 0) */ - if (stat == 0 && - interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) { + if (stat == 0 && 
interlockedcompareexchange(&malloc_global_mutex_status, + (LONG)-1, (LONG)0) == 0) { + InitializeCriticalSection(&malloc_global_mutex); interlockedexchange(&malloc_global_mutex_status, (LONG)1); return; + } + SleepEx(0, FALSE); + } + } -#else /* pthreads-based locks */ -#define MLOCK_T pthread_mutex_t -#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) -#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) -#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) -#define INITIAL_LOCK(lk) pthread_init_lock(lk) -#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) + #else /* pthreads-based locks */ + #define MLOCK_T pthread_mutex_t + #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) + #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) + #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) + #define INITIAL_LOCK(lk) pthread_init_lock(lk) + #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) + #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && \ + defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) /* Cope with old-style linux recursive lock initialization by adding */ /* skipped internal declaration from pthread.h */ -extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, - int __kind)); -#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP -#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) -#endif /* USE_RECURSIVE_LOCKS ... */ +extern int pthread_mutexattr_setkind_np __P((pthread_mutexattr_t * __attr, + int __kind)); + #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP + #define pthread_mutexattr_settype(x, y) \ + pthread_mutexattr_setkind_np(x, y) + #endif /* USE_RECURSIVE_LOCKS ... */ static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; -static int pthread_init_lock (MLOCK_T *lk) { +static int pthread_init_lock(MLOCK_T *lk) { + pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr)) return 1; -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 + #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; -#endif + #endif if (pthread_mutex_init(lk, &attr)) return 1; if (pthread_mutexattr_destroy(&attr)) return 1; return 0; + } -#endif /* ... lock types ... */ + #endif /* ... lock types ... */ -/* Common code for all lock types */ -#define USE_LOCK_BIT (2U) + /* Common code for all lock types */ + #define USE_LOCK_BIT (2U) -#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK -#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); -#endif + #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK + #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); + #endif -#ifndef RELEASE_MALLOC_GLOBAL_LOCK -#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); -#endif + #ifndef RELEASE_MALLOC_GLOBAL_LOCK + #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); + #endif -#endif /* USE_LOCKS */ + #endif /* USE_LOCKS */ /* ----------------------- Chunk representations ------------------------ */ @@ -2236,56 +2320,56 @@ static int pthread_init_lock (MLOCK_T *lk) { */ struct malloc_chunk { - size_t prev_foot; /* Size of previous chunk (if free). */ - size_t head; /* Size and inuse bits. */ - struct malloc_chunk* fd; /* double links -- used only if free. */ - struct malloc_chunk* bk; + + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. 
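   The low bits of head hold the PINUSE_BIT/CINUSE_BIT flags (and the
   unused FLAG4_BIT) defined under "Operations on head and foot fields"
   below; chunksize(p) masks them off via ~FLAG_BITS.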
*/ + struct malloc_chunk *fd; /* double links -- used only if free. */ + struct malloc_chunk *bk; + }; typedef struct malloc_chunk mchunk; -typedef struct malloc_chunk* mchunkptr; -typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ -typedef unsigned int bindex_t; /* Described below */ -typedef unsigned int binmap_t; /* Described below */ -typedef unsigned int flag_t; /* The type of various bit flag sets */ +typedef struct malloc_chunk *mchunkptr; +typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */ +typedef unsigned int bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ /* ------------------- Chunks sizes and alignments ----------------------- */ -#define MCHUNK_SIZE (sizeof(mchunk)) - -#if FOOTERS -#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -#else /* FOOTERS */ -#define CHUNK_OVERHEAD (SIZE_T_SIZE) -#endif /* FOOTERS */ + #define MCHUNK_SIZE (sizeof(mchunk)) -/* MMapped chunks need a second word of overhead ... */ -#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -/* ... and additional padding for fake next-chunk at foot */ -#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) + #if FOOTERS + #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) + #else /* FOOTERS */ + #define CHUNK_OVERHEAD (SIZE_T_SIZE) + #endif /* FOOTERS */ -/* The smallest size we can malloc is an aligned minimal chunk */ -#define MIN_CHUNK_SIZE\ - ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + /* MMapped chunks need a second word of overhead ... */ + #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) + /* ... and additional padding for fake next-chunk at foot */ + #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) -/* conversion from malloc headers to user pointers, and back */ -#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) -#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) -/* chunk associated with aligned address A */ -#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + /* The smallest size we can malloc is an aligned minimal chunk */ + #define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) -/* Bounds on request (not chunk) sizes. */ -#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) -#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + /* conversion from malloc headers to user pointers, and back */ + #define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES)) + #define mem2chunk(mem) ((mchunkptr)((char *)(mem)-TWO_SIZE_T_SIZES)) + /* chunk associated with aligned address A */ + #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) -/* pad request bytes into a usable size */ -#define pad_request(req) \ - (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + /* Bounds on request (not chunk) sizes. */ + #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) + #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) -/* pad request, checking for minimum (but not maximum) */ -#define request2size(req) \ - (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + /* pad request bytes into a usable size */ + #define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + /* pad request, checking for minimum (but not maximum) */ + #define request2size(req) \ + (((req) < MIN_REQUEST) ? 
MIN_CHUNK_SIZE : pad_request(req)) /* ------------------ Operations on head and foot fields ----------------- */ @@ -2297,61 +2381,60 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */ FLAG4_BIT is not used by this malloc, but might be useful in extensions. */ -#define PINUSE_BIT (SIZE_T_ONE) -#define CINUSE_BIT (SIZE_T_TWO) -#define FLAG4_BIT (SIZE_T_FOUR) -#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) -#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) + #define PINUSE_BIT (SIZE_T_ONE) + #define CINUSE_BIT (SIZE_T_TWO) + #define FLAG4_BIT (SIZE_T_FOUR) + #define INUSE_BITS (PINUSE_BIT | CINUSE_BIT) + #define FLAG_BITS (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT) -/* Head value for fenceposts */ -#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + /* Head value for fenceposts */ + #define FENCEPOST_HEAD (INUSE_BITS | SIZE_T_SIZE) -/* extraction of fields from head words */ -#define cinuse(p) ((p)->head & CINUSE_BIT) -#define pinuse(p) ((p)->head & PINUSE_BIT) -#define flag4inuse(p) ((p)->head & FLAG4_BIT) -#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) -#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) + /* extraction of fields from head words */ + #define cinuse(p) ((p)->head & CINUSE_BIT) + #define pinuse(p) ((p)->head & PINUSE_BIT) + #define flag4inuse(p) ((p)->head & FLAG4_BIT) + #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) + #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) -#define chunksize(p) ((p)->head & ~(FLAG_BITS)) + #define chunksize(p) ((p)->head & ~(FLAG_BITS)) -#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) -#define set_flag4(p) ((p)->head |= FLAG4_BIT) -#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) + #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) + #define set_flag4(p) ((p)->head |= FLAG4_BIT) + #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) -/* Treat space at ptr +/- offset as a chunk */ -#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) -#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + /* Treat space at ptr +/- offset as a chunk */ + #define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s))) + #define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s))) -/* Ptr to next or previous physical malloc_chunk. */ -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + /* Ptr to next or previous physical malloc_chunk. 
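   next_chunk(p) simply advances by chunksize(p); prev_chunk(p) steps
   back by p->prev_foot, which is only meaningful while the previous
   chunk is free (pinuse(p) clear). A minimal forward walk over one
   segment, sketched from the debug traversal later in this file
   (s is a segment pointer, m its mstate):

       mchunkptr q = align_as_chunk(s->base);
       while (segment_holds(s, q) && q != m->top &&
              q->head != FENCEPOST_HEAD)
         q = next_chunk(q);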
*/ + #define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~FLAG_BITS))) + #define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot))) -/* extract next chunk's pinuse bit */ -#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + /* extract next chunk's pinuse bit */ + #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) -/* Get/set size at footer */ -#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) -#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + /* Get/set size at footer */ + #define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot) + #define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s)) -/* Set size, pinuse bit, and foot */ -#define set_size_and_pinuse_of_free_chunk(p, s)\ - ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + /* Set size, pinuse bit, and foot */ + #define set_size_and_pinuse_of_free_chunk(p, s) \ + ((p)->head = (s | PINUSE_BIT), set_foot(p, s)) -/* Set size, pinuse bit, foot, and clear next pinuse */ -#define set_free_with_pinuse(p, s, n)\ - (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + /* Set size, pinuse bit, foot, and clear next pinuse */ + #define set_free_with_pinuse(p, s, n) \ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) -/* Get the internal overhead associated with chunk p */ -#define overhead_for(p)\ - (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + /* Get the internal overhead associated with chunk p */ + #define overhead_for(p) (is_mmapped(p) ? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) -/* Return true if malloced space is not necessarily cleared */ -#if MMAP_CLEARS -#define calloc_must_clear(p) (!is_mmapped(p)) -#else /* MMAP_CLEARS */ -#define calloc_must_clear(p) (1) -#endif /* MMAP_CLEARS */ + /* Return true if malloced space is not necessarily cleared */ + #if MMAP_CLEARS + #define calloc_must_clear(p) (!is_mmapped(p)) + #else /* MMAP_CLEARS */ + #define calloc_must_clear(p) (1) + #endif /* MMAP_CLEARS */ /* ---------------------- Overlaid data structures ----------------------- */ @@ -2445,23 +2528,25 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ size_t prev_foot; size_t head; - struct malloc_tree_chunk* fd; - struct malloc_tree_chunk* bk; + struct malloc_tree_chunk *fd; + struct malloc_tree_chunk *bk; - struct malloc_tree_chunk* child[2]; - struct malloc_tree_chunk* parent; + struct malloc_tree_chunk *child[2]; + struct malloc_tree_chunk *parent; bindex_t index; + }; typedef struct malloc_tree_chunk tchunk; -typedef struct malloc_tree_chunk* tchunkptr; -typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ +typedef struct malloc_tree_chunk *tchunkptr; +typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */ -/* A little helper macro for trees */ -#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + /* A little helper macro for trees */ + #define leftmost_child(t) ((t)->child[0] != 0 ? 
(t)->child[0] : (t)->child[1]) /* ----------------------------- Segments -------------------------------- */ @@ -2521,141 +2606,145 @@ typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ */ struct malloc_segment { - char* base; /* base address */ - size_t size; /* allocated size */ - struct malloc_segment* next; /* ptr to next segment */ - flag_t sflags; /* mmap and extern flag */ + + char * base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment *next; /* ptr to next segment */ + flag_t sflags; /* mmap and extern flag */ + }; -#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) -#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) + #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) + #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) typedef struct malloc_segment msegment; -typedef struct malloc_segment* msegmentptr; +typedef struct malloc_segment *msegmentptr; -/* ---------------------------- malloc_state ----------------------------- */ + /* ---------------------------- malloc_state ----------------------------- */ -/* - A malloc_state holds all of the bookkeeping for a space. - The main fields are: - - Top - The topmost chunk of the currently active segment. Its size is - cached in topsize. The actual size of topmost space is - topsize+TOP_FOOT_SIZE, which includes space reserved for adding - fenceposts and segment records if necessary when getting more - space from the system. The size at which to autotrim top is - cached from mparams in trim_check, except that it is disabled if - an autotrim fails. - - Designated victim (dv) - This is the preferred chunk for servicing small requests that - don't have exact fits. It is normally the chunk split off most - recently to service another small request. Its size is cached in - dvsize. The link fields of this chunk are not maintained since it - is not kept in a bin. - - SmallBins - An array of bin headers for free chunks. These bins hold chunks - with sizes less than MIN_LARGE_SIZE bytes. Each bin contains - chunks of all the same size, spaced 8 bytes apart. To simplify - use in double-linked lists, each bin header acts as a malloc_chunk - pointing to the real first node, if it exists (else pointing to - itself). This avoids special-casing for headers. But to avoid - waste, we allocate only the fd/bk pointers of bins, and then use - repositioning tricks to treat these as the fields of a chunk. - - TreeBins - Treebins are pointers to the roots of trees holding a range of - sizes. There are 2 equally spaced treebins for each power of two - from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything - larger. - - Bin maps - There is one bit map for small bins ("smallmap") and one for - treebins ("treemap). Each bin sets its bit when non-empty, and - clears the bit when empty. Bit operations are then used to avoid - bin-by-bin searching -- nearly all "search" is done without ever - looking at bins that won't be selected. The bit maps - conservatively use 32 bits per map word, even if on 64bit system. - For a good description of some of the bit-based techniques used - here, see Henry S. Warren Jr's book "Hacker's Delight" (and - supplement at http://hackersdelight.org/). Many of these are - intended to reduce the branchiness of paths through malloc etc, as - well as to reduce the number of memory locations read or written. - - Segments - A list of segments headed by an embedded malloc_segment record - representing the initial space. 
- - Address check support - The least_addr field is the least address ever obtained from - MORECORE or MMAP. Attempted frees and reallocs of any address less - than this are trapped (unless INSECURE is defined). - - Magic tag - A cross-check field that should always hold same value as mparams.magic. - - Max allowed footprint - The maximum allowed bytes to allocate from system (zero means no limit) - - Flags - Bits recording whether to use MMAP, locks, or contiguous MORECORE - - Statistics - Each space keeps track of current and maximum system memory - obtained via MORECORE or MMAP. - - Trim support - Fields holding the amount of unused topmost memory that should trigger - trimming, and a counter to force periodic scanning to release unused - non-topmost segments. - - Locking - If USE_LOCKS is defined, the "mutex" lock is acquired and released - around every public call using this mspace. - - Extension support - A void* pointer and a size_t field that can be used to help implement - extensions to this malloc. -*/ + /* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. 
Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold same value as mparams.magic. + + Max allowed footprint + The maximum allowed bytes to allocate from system (zero means no limit) + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. + + Trim support + Fields holding the amount of unused topmost memory that should trigger + trimming, and a counter to force periodic scanning to release unused + non-topmost segments. + + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. + + Extension support + A void* pointer and a size_t field that can be used to help implement + extensions to this malloc. + */ -/* Bin types, widths and sizes */ -#define NSMALLBINS (32U) -#define NTREEBINS (32U) -#define SMALLBIN_SHIFT (3U) -#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) -#define TREEBIN_SHIFT (8U) -#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) -#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) -#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + /* Bin types, widths and sizes */ + #define NSMALLBINS (32U) + #define NTREEBINS (32U) + #define SMALLBIN_SHIFT (3U) + #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) + #define TREEBIN_SHIFT (8U) + #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) + #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) + #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) struct malloc_state { - binmap_t smallmap; - binmap_t treemap; - size_t dvsize; - size_t topsize; - char* least_addr; - mchunkptr dv; - mchunkptr top; - size_t trim_check; - size_t release_checks; - size_t magic; - mchunkptr smallbins[(NSMALLBINS+1)*2]; - tbinptr treebins[NTREEBINS]; - size_t footprint; - size_t max_footprint; - size_t footprint_limit; /* zero means no limit */ - flag_t mflags; -#if USE_LOCKS - MLOCK_T mutex; /* locate lock among fields that rarely change */ -#endif /* USE_LOCKS */ - msegment seg; - void* extp; /* Unused but available for extensions */ - size_t exts; + + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char * least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t release_checks; + size_t magic; + mchunkptr smallbins[(NSMALLBINS + 1) * 2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + size_t footprint_limit; /* zero means no limit */ + flag_t mflags; + #if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ + #endif /* USE_LOCKS */ + msegment seg; + void * extp; /* Unused but available for extensions */ + size_t exts; + }; -typedef struct malloc_state* mstate; +typedef struct malloc_state *mstate; /* ------------- Global malloc_state and malloc_params ------------------- */ @@ -2667,123 +2756,128 @@ typedef struct malloc_state* mstate; */ struct malloc_params { + size_t magic; size_t page_size; size_t granularity; size_t mmap_threshold; size_t trim_threshold; flag_t default_mflags; + }; static struct malloc_params mparams; -/* Ensure mparams initialized */ -#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) + /* Ensure mparams initialized */ + #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) -#if !ONLY_MSPACES + #if !ONLY_MSPACES /* 
The global malloc_state used for all non-"mspace" calls */ static struct malloc_state _gm_; -#define gm (&_gm_) -#define is_global(M) ((M) == &_gm_) + #define gm (&_gm_) + #define is_global(M) ((M) == &_gm_) -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ -#define is_initialized(M) ((M)->top != 0) + #define is_initialized(M) ((M)->top != 0) /* -------------------------- system alloc setup ------------------------- */ /* Operations on mflags */ -#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) -#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) -#if USE_LOCKS -#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) -#else -#define disable_lock(M) -#endif - -#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) -#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) -#if HAVE_MMAP -#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) -#else -#define disable_mmap(M) -#endif - -#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) -#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) - -#define set_lock(M,L)\ - ((M)->mflags = (L)?\ - ((M)->mflags | USE_LOCK_BIT) :\ - ((M)->mflags & ~USE_LOCK_BIT)) - -/* page-align a size */ -#define page_align(S)\ - (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) - -/* granularity-align a size */ -#define granularity_align(S)\ - (((S) + (mparams.granularity - SIZE_T_ONE))\ - & ~(mparams.granularity - SIZE_T_ONE)) - - -/* For mmap, use granularity alignment on windows, else page-align */ -#ifdef WIN32 -#define mmap_align(S) granularity_align(S) -#else -#define mmap_align(S) page_align(S) -#endif - -/* For sys_alloc, enough padding to ensure can malloc request on success */ -#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) - -#define is_page_aligned(S)\ - (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) -#define is_granularity_aligned(S)\ - (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) - -/* True if segment S holds address A */ -#define segment_holds(S, A)\ - ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + #define use_lock(M) ((M)->mflags & USE_LOCK_BIT) + #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) + #if USE_LOCKS + #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) + #else + #define disable_lock(M) + #endif + + #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) + #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) + #if HAVE_MMAP + #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) + #else + #define disable_mmap(M) + #endif + + #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) + #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) + + #define set_lock(M, L) \ + ((M)->mflags = \ + (L) ? 
((M)->mflags | USE_LOCK_BIT) : ((M)->mflags & ~USE_LOCK_BIT)) + + /* page-align a size */ + #define page_align(S) \ + (((S) + (mparams.page_size - SIZE_T_ONE)) & \ + ~(mparams.page_size - SIZE_T_ONE)) + + /* granularity-align a size */ + #define granularity_align(S) \ + (((S) + (mparams.granularity - SIZE_T_ONE)) & \ + ~(mparams.granularity - SIZE_T_ONE)) + + /* For mmap, use granularity alignment on windows, else page-align */ + #ifdef WIN32 + #define mmap_align(S) granularity_align(S) + #else + #define mmap_align(S) page_align(S) + #endif + + /* For sys_alloc, enough padding to ensure can malloc request on success */ + #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) + + #define is_page_aligned(S) \ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) + #define is_granularity_aligned(S) \ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + + /* True if segment S holds address A */ + #define segment_holds(S, A) \ + ((char *)(A) >= S->base && (char *)(A) < S->base + S->size) /* Return segment holding given address */ -static msegmentptr segment_holding(mstate m, char* addr) { +static msegmentptr segment_holding(mstate m, char *addr) { + msegmentptr sp = &m->seg; for (;;) { - if (addr >= sp->base && addr < sp->base + sp->size) - return sp; - if ((sp = sp->next) == 0) - return 0; + + if (addr >= sp->base && addr < sp->base + sp->size) return sp; + if ((sp = sp->next) == 0) return 0; + } + } /* Return true if segment contains a segment link */ static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; for (;;) { - if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) - return 1; - if ((sp = sp->next) == 0) - return 0; + + if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) return 1; + if ((sp = sp->next) == 0) return 0; + } -} -#ifndef MORECORE_CANNOT_TRIM -#define should_trim(M,s) ((s) > (M)->trim_check) -#else /* MORECORE_CANNOT_TRIM */ -#define should_trim(M,s) (0) -#endif /* MORECORE_CANNOT_TRIM */ +} -/* - TOP_FOOT_SIZE is padding at the end of a segment, including space - that may be needed to place segment records and fenceposts when new - noncontiguous segments are added. -*/ -#define TOP_FOOT_SIZE\ - (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + #ifndef MORECORE_CANNOT_TRIM + #define should_trim(M, s) ((s) > (M)->trim_check) + #else /* MORECORE_CANNOT_TRIM */ + #define should_trim(M, s) (0) + #endif /* MORECORE_CANNOT_TRIM */ + /* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. + */ + #define TOP_FOOT_SIZE \ + (align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + \ + MIN_CHUNK_SIZE) /* ------------------------------- Hooks -------------------------------- */ @@ -2793,20 +2887,25 @@ static int has_segment_link(mstate m, msegmentptr ss) { anything you like. */ -#if USE_LOCKS -#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) -#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } -#else /* USE_LOCKS */ + #if USE_LOCKS + #define PREACTION(M) ((use_lock(M)) ? 
ACQUIRE_LOCK(&(M)->mutex) : 0) + #define POSTACTION(M) \ + { \ + \ + if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); \ + \ + } + #else /* USE_LOCKS */ -#ifndef PREACTION -#define PREACTION(M) (0) -#endif /* PREACTION */ + #ifndef PREACTION + #define PREACTION(M) (0) + #endif /* PREACTION */ -#ifndef POSTACTION -#define POSTACTION(M) -#endif /* POSTACTION */ + #ifndef POSTACTION + #define POSTACTION(M) + #endif /* POSTACTION */ -#endif /* USE_LOCKS */ + #endif /* USE_LOCKS */ /* CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. @@ -2816,7 +2915,7 @@ static int has_segment_link(mstate m, msegmentptr ss) { useful in custom actions that try to help diagnose errors. */ -#if PROCEED_ON_ERROR + #if PROCEED_ON_ERROR /* A count of the number of corruption errors causing resets */ int malloc_corruption_error_count; @@ -2824,211 +2923,240 @@ int malloc_corruption_error_count; /* default corruption action */ static void reset_on_error(mstate m); -#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) -#define USAGE_ERROR_ACTION(m, p) + #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) + #define USAGE_ERROR_ACTION(m, p) -#else /* PROCEED_ON_ERROR */ + #else /* PROCEED_ON_ERROR */ -#ifndef CORRUPTION_ERROR_ACTION -#define CORRUPTION_ERROR_ACTION(m) ABORT -#endif /* CORRUPTION_ERROR_ACTION */ + #ifndef CORRUPTION_ERROR_ACTION + #define CORRUPTION_ERROR_ACTION(m) ABORT + #endif /* CORRUPTION_ERROR_ACTION */ -#ifndef USAGE_ERROR_ACTION -#define USAGE_ERROR_ACTION(m,p) ABORT -#endif /* USAGE_ERROR_ACTION */ - -#endif /* PROCEED_ON_ERROR */ + #ifndef USAGE_ERROR_ACTION + #define USAGE_ERROR_ACTION(m, p) ABORT + #endif /* USAGE_ERROR_ACTION */ + #endif /* PROCEED_ON_ERROR */ /* -------------------------- Debugging setup ---------------------------- */ -#if ! 
DEBUG + #if !DEBUG -#define check_free_chunk(M,P) -#define check_inuse_chunk(M,P) -#define check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) -#define check_malloc_state(M) -#define check_top_chunk(M,P) + #define check_free_chunk(M, P) + #define check_inuse_chunk(M, P) + #define check_malloced_chunk(M, P, N) + #define check_mmapped_chunk(M, P) + #define check_malloc_state(M) + #define check_top_chunk(M, P) -#else /* DEBUG */ -#define check_free_chunk(M,P) do_check_free_chunk(M,P) -#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) -#define check_top_chunk(M,P) do_check_top_chunk(M,P) -#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) -#define check_malloc_state(M) do_check_malloc_state(M) + #else /* DEBUG */ + #define check_free_chunk(M, P) do_check_free_chunk(M, P) + #define check_inuse_chunk(M, P) do_check_inuse_chunk(M, P) + #define check_top_chunk(M, P) do_check_top_chunk(M, P) + #define check_malloced_chunk(M, P, N) do_check_malloced_chunk(M, P, N) + #define check_mmapped_chunk(M, P) do_check_mmapped_chunk(M, P) + #define check_malloc_state(M) do_check_malloc_state(M) static void do_check_any_chunk(mstate m, mchunkptr p); static void do_check_top_chunk(mstate m, mchunkptr p); static void do_check_mmapped_chunk(mstate m, mchunkptr p); static void do_check_inuse_chunk(mstate m, mchunkptr p); static void do_check_free_chunk(mstate m, mchunkptr p); -static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_malloced_chunk(mstate m, void *mem, size_t s); static void do_check_tree(mstate m, tchunkptr t); static void do_check_treebin(mstate m, bindex_t i); static void do_check_smallbin(mstate m, bindex_t i); static void do_check_malloc_state(mstate m); static int bin_find(mstate m, mchunkptr x); static size_t traverse_and_check(mstate m); -#endif /* DEBUG */ + #endif /* DEBUG */ /* ---------------------------- Indexing Bins ---------------------------- */ -#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) -#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) -#define small_index2size(i) ((i) << SMALLBIN_SHIFT) -#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) - -/* addressing by index. See above about smallbin repositioning */ -#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) -#define treebin_at(M,i) (&((M)->treebins[i])) - -/* assign tree index for size S to variable I. Use x86 asm if possible */ -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_tree_index(S, I)\ -{\ - unsigned int X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} - -#elif defined (__INTEL_COMPILER) -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = _bit_scan_reverse (X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} + #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) + #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) + #define small_index2size(i) ((i) << SMALLBIN_SHIFT) + #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + + /* addressing by index. 
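   Only the fd/bk words of each smallbin header are actually stored, so
   smallbin_at scales the bin index by two pointer-sized slots into the
   smallbins array.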
See above about smallbin repositioning */ + #define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i) << 1]))) + #define treebin_at(M, i) (&((M)->treebins[i])) + + /* assign tree index for size S to variable I. Use x86 asm if possible */ + #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + #define compute_tree_index(S, I) \ + { \ + \ + unsigned int X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K = (unsigned)sizeof(X) * __CHAR_BIT__ - 1 - \ + (unsigned)__builtin_clz(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K;\ - _BitScanReverse((DWORD *) &K, (DWORD) X);\ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} + #elif defined(__INTEL_COMPILER) + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K = _bit_scan_reverse(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -#else /* GNUC */ -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int Y = (unsigned int)X;\ - unsigned int N = ((Y - 0x100) >> 16) & 8;\ - unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ - N += K;\ - N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ - K = 14 - N + ((Y <<= K) >> 15);\ - I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ - }\ -} -#endif /* GNUC */ + #elif defined(_MSC_VER) && _MSC_VER >= 1300 + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K; \ + _BitScanReverse((DWORD *)&K, (DWORD)X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -/* Bit representing maximum resolved size in a treebin at i */ -#define bit_for_tree_index(i) \ - (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + #else /* GNUC */ + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int Y = (unsigned int)X; \ + unsigned int N = ((Y - 0x100) >> 16) & 8; \ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4; \ + N += K; \ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2; \ + K = 14 - N + ((Y <<= K) >> 15); \ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)); \ + \ + } \ + \ + } + #endif /* GNUC */ -/* Shift placing maximum resolved bit in a treebin at i as sign bit */ -#define leftshift_for_tree_index(i) \ - ((i == NTREEBINS-1)? 0 : \ - ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + /* Bit representing maximum resolved size in a treebin at i */ + #define bit_for_tree_index(i) \ + (i == NTREEBINS - 1) ? 
(SIZE_T_BITSIZE - 1) \ + : (((i) >> 1) + TREEBIN_SHIFT - 2) -/* The size of the smallest chunk held in bin with index i */ -#define minsize_for_tree_index(i) \ - ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ - (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + /* Shift placing maximum resolved bit in a treebin at i as sign bit */ + #define leftshift_for_tree_index(i) \ + ((i == NTREEBINS - 1) \ + ? 0 \ + : ((SIZE_T_BITSIZE - SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + /* The size of the smallest chunk held in bin with index i */ + #define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i)&SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) -/* ------------------------ Operations on bin maps ----------------------- */ + /* ------------------------ Operations on bin maps ----------------------- */ -/* bit corresponding to given index */ -#define idx2bit(i) ((binmap_t)(1) << (i)) + /* bit corresponding to given index */ + #define idx2bit(i) ((binmap_t)(1) << (i)) -/* Mark/Clear bits with given index */ -#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) -#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) -#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + /* Mark/Clear bits with given index */ + #define mark_smallmap(M, i) ((M)->smallmap |= idx2bit(i)) + #define clear_smallmap(M, i) ((M)->smallmap &= ~idx2bit(i)) + #define smallmap_is_marked(M, i) ((M)->smallmap & idx2bit(i)) -#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) -#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) -#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + #define mark_treemap(M, i) ((M)->treemap |= idx2bit(i)) + #define clear_treemap(M, i) ((M)->treemap &= ~idx2bit(i)) + #define treemap_is_marked(M, i) ((M)->treemap & idx2bit(i)) -/* isolate the least set bit of a bitmap */ -#define least_bit(x) ((x) & -(x)) + /* isolate the least set bit of a bitmap */ + #define least_bit(x) ((x) & -(x)) -/* mask with all bits to left of least bit of x on */ -#define left_bits(x) ((x<<1) | -(x<<1)) + /* mask with all bits to left of least bit of x on */ + #define left_bits(x) ((x << 1) | -(x << 1)) -/* mask with all bits to left of or equal to least bit of x on */ -#define same_or_left_bits(x) ((x) | -(x)) + /* mask with all bits to left of or equal to least bit of x on */ + #define same_or_left_bits(x) ((x) | -(x)) /* index corresponding to given bit. 
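   Its argument is expected to have exactly one bit set (callers
   typically pass the result of least_bit on a map word), and that
   bit's position is stored into I.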
Use x86 asm if possible */ -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = __builtin_ctz(X); \ - I = (bindex_t)J;\ -} + #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + J = __builtin_ctz(X); \ + I = (bindex_t)J; \ + \ + } -#elif defined (__INTEL_COMPILER) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = _bit_scan_forward (X); \ - I = (bindex_t)J;\ -} + #elif defined(__INTEL_COMPILER) + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + J = _bit_scan_forward(X); \ + I = (bindex_t)J; \ + \ + } -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - _BitScanForward((DWORD *) &J, X);\ - I = (bindex_t)J;\ -} + #elif defined(_MSC_VER) && _MSC_VER >= 1300 + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + _BitScanForward((DWORD *)&J, X); \ + I = (bindex_t)J; \ + \ + } -#elif USE_BUILTIN_FFS -#define compute_bit2idx(X, I) I = ffs(X)-1 - -#else -#define compute_bit2idx(X, I)\ -{\ - unsigned int Y = X - 1;\ - unsigned int K = Y >> (16-4) & 16;\ - unsigned int N = K; Y >>= K;\ - N += K = Y >> (8-3) & 8; Y >>= K;\ - N += K = Y >> (4-2) & 4; Y >>= K;\ - N += K = Y >> (2-1) & 2; Y >>= K;\ - N += K = Y >> (1-0) & 1; Y >>= K;\ - I = (bindex_t)(N + Y);\ -} -#endif /* GNUC */ + #elif USE_BUILTIN_FFS + #define compute_bit2idx(X, I) I = ffs(X) - 1 + #else + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int Y = X - 1; \ + unsigned int K = Y >> (16 - 4) & 16; \ + unsigned int N = K; \ + Y >>= K; \ + N += K = Y >> (8 - 3) & 8; \ + Y >>= K; \ + N += K = Y >> (4 - 2) & 4; \ + Y >>= K; \ + N += K = Y >> (2 - 1) & 2; \ + Y >>= K; \ + N += K = Y >> (1 - 0) & 1; \ + Y >>= K; \ + I = (bindex_t)(N + Y); \ + \ + } + #endif /* GNUC */ /* ----------------------- Runtime Check Support ------------------------- */ @@ -3058,122 +3186,142 @@ static size_t traverse_and_check(mstate m); next, etc). This turns out to be cheaper than relying on hashes. 
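   A representative consumer has roughly this shape (a sketch of the
   pattern used by the allocation and free paths later in this file,
   not a verbatim excerpt):

       if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
         ... proceed to unlink or coalesce the chunk ...
       } else {
         USAGE_ERROR_ACTION(m, p);
       }

   When INSECURE is defined, each of these checks collapses to the
   constant 1 and the failure branch disappears entirely.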
*/ -#if !INSECURE -/* Check if address a is at least as high as any from MORECORE or MMAP */ -#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) -/* Check if address of next chunk n is higher than base chunk p */ -#define ok_next(p, n) ((char*)(p) < (char*)(n)) -/* Check if p has inuse status */ -#define ok_inuse(p) is_inuse(p) -/* Check if p has its pinuse bit on */ -#define ok_pinuse(p) pinuse(p) - -#else /* !INSECURE */ -#define ok_address(M, a) (1) -#define ok_next(b, n) (1) -#define ok_inuse(p) (1) -#define ok_pinuse(p) (1) -#endif /* !INSECURE */ - -#if (FOOTERS && !INSECURE) -/* Check if (alleged) mstate m has expected magic field */ -#define ok_magic(M) ((M)->magic == mparams.magic) -#else /* (FOOTERS && !INSECURE) */ -#define ok_magic(M) (1) -#endif /* (FOOTERS && !INSECURE) */ - -/* In gcc, use __builtin_expect to minimize impact of checks */ -#if !INSECURE -#if defined(__GNUC__) && __GNUC__ >= 3 -#define RTCHECK(e) __builtin_expect(e, 1) -#else /* GNUC */ -#define RTCHECK(e) (e) -#endif /* GNUC */ -#else /* !INSECURE */ -#define RTCHECK(e) (1) -#endif /* !INSECURE */ + #if !INSECURE + /* Check if address a is at least as high as any from MORECORE or MMAP */ + #define ok_address(M, a) ((char *)(a) >= (M)->least_addr) + /* Check if address of next chunk n is higher than base chunk p */ + #define ok_next(p, n) ((char *)(p) < (char *)(n)) + /* Check if p has inuse status */ + #define ok_inuse(p) is_inuse(p) + /* Check if p has its pinuse bit on */ + #define ok_pinuse(p) pinuse(p) + + #else /* !INSECURE */ + #define ok_address(M, a) (1) + #define ok_next(b, n) (1) + #define ok_inuse(p) (1) + #define ok_pinuse(p) (1) + #endif /* !INSECURE */ + + #if (FOOTERS && !INSECURE) + /* Check if (alleged) mstate m has expected magic field */ + #define ok_magic(M) ((M)->magic == mparams.magic) + #else /* (FOOTERS && !INSECURE) */ + #define ok_magic(M) (1) + #endif /* (FOOTERS && !INSECURE) */ + + /* In gcc, use __builtin_expect to minimize impact of checks */ + #if !INSECURE + #if defined(__GNUC__) && __GNUC__ >= 3 + #define RTCHECK(e) __builtin_expect(e, 1) + #else /* GNUC */ + #define RTCHECK(e) (e) + #endif /* GNUC */ + #else /* !INSECURE */ + #define RTCHECK(e) (1) + #endif /* !INSECURE */ /* macros to set up inuse chunks with or without footers */ -#if !FOOTERS + #if !FOOTERS -#define mark_inuse_foot(M,p,s) + #define mark_inuse_foot(M, p, s) -/* Macros for setting head/foot of non-mmapped chunks */ + /* Macros for setting head/foot of non-mmapped chunks */ -/* Set cinuse bit and pinuse bit of next chunk */ -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + /* Set cinuse bit and pinuse bit of next chunk */ + #define set_inuse(M, p, s) \ + ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) -/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + /* Set cinuse and pinuse of this chunk and pinuse of next chunk */ + #define set_inuse_and_pinuse(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) -/* Set size, cinuse and pinuse bit of this chunk */ -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + /* Set size, cinuse and pinuse bit of this chunk */ + #define 
set_size_and_pinuse_of_inuse_chunk(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT)) -#else /* FOOTERS */ + #else /* FOOTERS */ -/* Set foot of inuse chunk to be xor of mstate and seed */ -#define mark_inuse_foot(M,p,s)\ - (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + /* Set foot of inuse chunk to be xor of mstate and seed */ + #define mark_inuse_foot(M, p, s) \ + (((mchunkptr)((char *)(p) + (s)))->prev_foot = \ + ((size_t)(M) ^ mparams.magic)) -#define get_mstate_for(p)\ - ((mstate)(((mchunkptr)((char*)(p) +\ - (chunksize(p))))->prev_foot ^ mparams.magic)) + #define get_mstate_for(p) \ + ((mstate)(((mchunkptr)((char *)(p) + (chunksize(p))))->prev_foot ^ \ + mparams.magic)) -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ - mark_inuse_foot(M,p,s)) + #define set_inuse(M, p, s) \ + ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ + (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M, p, s)) -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ - mark_inuse_foot(M,p,s)) + #define set_inuse_and_pinuse(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ + (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M, p, s)) -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - mark_inuse_foot(M, p, s)) + #define set_size_and_pinuse_of_inuse_chunk(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), mark_inuse_foot(M, p, s)) -#endif /* !FOOTERS */ + #endif /* !FOOTERS */ /* ---------------------------- setting mparams -------------------------- */ -#if LOCK_AT_FORK -static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } -static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } -static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } -#endif /* LOCK_AT_FORK */ + #if LOCK_AT_FORK +static void pre_fork(void) { + + ACQUIRE_LOCK(&(gm)->mutex); + +} + +static void post_fork_parent(void) { + + RELEASE_LOCK(&(gm)->mutex); + +} + +static void post_fork_child(void) { + + INITIAL_LOCK(&(gm)->mutex); + +} + + #endif /* LOCK_AT_FORK */ /* Initialize mparams */ static int init_mparams(void) { -#ifdef NEED_GLOBAL_LOCK_INIT - if (malloc_global_mutex_status <= 0) - init_malloc_global_mutex(); -#endif + + #ifdef NEED_GLOBAL_LOCK_INIT + if (malloc_global_mutex_status <= 0) init_malloc_global_mutex(); + #endif ACQUIRE_MALLOC_GLOBAL_LOCK(); if (mparams.magic == 0) { + size_t magic; size_t psize; size_t gsize; -#ifndef WIN32 + #ifndef WIN32 psize = malloc_getpagesize; - gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); -#else /* WIN32 */ + gsize = ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY : psize); + #else /* WIN32 */ { + SYSTEM_INFO system_info; GetSystemInfo(&system_info); psize = system_info.dwPageSize; - gsize = ((DEFAULT_GRANULARITY != 0)? - DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); + gsize = + ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY + : system_info.dwAllocationGranularity); + } -#endif /* WIN32 */ + + #endif /* WIN32 */ /* Sanity-check configuration: size_t must be unsigned and as wide as pointer type. @@ -3181,187 +3329,216 @@ static int init_mparams(void) { alignment must be at least 8. Alignment, min chunk size, and page size must all be powers of 2. 
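   The power-of-two tests below use the usual idiom: for nonzero x,
   x is a power of two exactly when (x & (x - 1)) == 0, because x - 1
   clears the lowest set bit and sets every bit below it. For example,
   4096 & 4095 == 0, while 4100 & 4099 == 4096 != 0.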
*/ - if ((sizeof(size_t) != sizeof(char*)) || - (MAX_SIZE_T < MIN_CHUNK_SIZE) || - (sizeof(int) < 4) || - (MALLOC_ALIGNMENT < (size_t)8U) || - ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || - ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || - ((gsize & (gsize-SIZE_T_ONE)) != 0) || - ((psize & (psize-SIZE_T_ONE)) != 0)) + if ((sizeof(size_t) != sizeof(char *)) || (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) || + ((gsize & (gsize - SIZE_T_ONE)) != 0) || + ((psize & (psize - SIZE_T_ONE)) != 0)) ABORT; mparams.granularity = gsize; mparams.page_size = psize; mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; -#if MORECORE_CONTIGUOUS - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; -#else /* MORECORE_CONTIGUOUS */ - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; -#endif /* MORECORE_CONTIGUOUS */ - -#if !ONLY_MSPACES + #if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT; + #else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = + USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT; + #endif /* MORECORE_CONTIGUOUS */ + + #if !ONLY_MSPACES /* Set up lock for main malloc area */ gm->mflags = mparams.default_mflags; (void)INITIAL_LOCK(&gm->mutex); -#endif -#if LOCK_AT_FORK + #endif + #if LOCK_AT_FORK pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child); -#endif + #endif { -#if USE_DEV_RANDOM - int fd; + + #if USE_DEV_RANDOM + int fd; unsigned char buf[sizeof(size_t)]; /* Try to use /dev/urandom, else fall back on using time */ if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && read(fd, buf, sizeof(buf)) == sizeof(buf)) { - magic = *((size_t *) buf); + + magic = *((size_t *)buf); close(fd); - } - else -#endif /* USE_DEV_RANDOM */ -#ifdef WIN32 - magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); -#elif defined(LACKS_TIME_H) + + } else + + #endif /* USE_DEV_RANDOM */ + #ifdef WIN32 + magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); + #elif defined(LACKS_TIME_H) magic = (size_t)&magic ^ (size_t)0x55555555U; -#else + #else magic = (size_t)(time(0) ^ (size_t)0x55555555U); -#endif - magic |= (size_t)8U; /* ensure nonzero */ - magic &= ~(size_t)7U; /* improve chances of fault for bad values */ + #endif + magic |= (size_t)8U; /* ensure nonzero */ + magic &= ~(size_t)7U; /* improve chances of fault for bad values */ /* Until memory modes commonly available, use volatile-write */ (*(volatile size_t *)(&(mparams.magic))) = magic; + } + } RELEASE_MALLOC_GLOBAL_LOCK(); return 1; + } /* support for mallopt */ static int change_mparam(int param_number, int value) { + size_t val; ensure_initialization(); - val = (value == -1)? MAX_SIZE_T : (size_t)value; - switch(param_number) { - case M_TRIM_THRESHOLD: - mparams.trim_threshold = val; - return 1; - case M_GRANULARITY: - if (val >= mparams.page_size && ((val & (val-1)) == 0)) { - mparams.granularity = val; + val = (value == -1) ? 
MAX_SIZE_T : (size_t)value; + switch (param_number) { + + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; return 1; - } - else + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val - 1)) == 0)) { + + mparams.granularity = val; + return 1; + + } else + + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: return 0; - case M_MMAP_THRESHOLD: - mparams.mmap_threshold = val; - return 1; - default: - return 0; + } + } -#if DEBUG + #if DEBUG /* ------------------------- Debugging Support --------------------------- */ /* Check properties of any chunk, whether free, inuse, mmapped etc */ static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); + } /* Check properties of top chunk */ static void do_check_top_chunk(mstate m, mchunkptr p) { - msegmentptr sp = segment_holding(m, (char*)p); - size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ + + msegmentptr sp = segment_holding(m, (char *)p); + size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ assert(sp != 0); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(sz == m->topsize); assert(sz > 0); - assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(sz == ((sp->base + sp->size) - (char *)p) - TOP_FOOT_SIZE); assert(pinuse(p)); assert(!pinuse(chunk_plus_offset(p, sz))); + } /* Check properties of (inuse) mmapped chunks */ static void do_check_mmapped_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); + + size_t sz = chunksize(p); size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); assert(is_mmapped(p)); assert(use_mmap(m)); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(!is_small(sz)); - assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert((len & (mparams.page_size - SIZE_T_ONE)) == 0); assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); - assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); + assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0); + } /* Check properties of inuse chunks */ static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); assert(is_inuse(p)); assert(next_pinuse(p)); /* If not pinuse and not mmapped, previous chunk has OK offset */ assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); - if (is_mmapped(p)) - do_check_mmapped_chunk(m, p); + if (is_mmapped(p)) do_check_mmapped_chunk(m, p); + } /* Check properties of free chunks */ static void do_check_free_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); + + size_t sz = chunksize(p); mchunkptr next = chunk_plus_offset(p, sz); do_check_any_chunk(m, p); assert(!is_inuse(p)); assert(!next_pinuse(p)); - assert (!is_mmapped(p)); + assert(!is_mmapped(p)); if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); assert(is_aligned(chunk2mem(p))); assert(next->prev_foot == sz); assert(pinuse(p)); - assert (next == m->top || is_inuse(next)); + assert(next == m->top || is_inuse(next)); assert(p->fd->bk == p); assert(p->bk->fd == p); - } - else /* markers are always of size SIZE_T_SIZE */ + + } else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } + } /* Check properties of malloced chunks at the point they are malloced */ -static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { +static void 
do_check_malloced_chunk(mstate m, void *mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - size_t sz = p->head & ~INUSE_BITS; + size_t sz = p->head & ~INUSE_BITS; do_check_inuse_chunk(m, p); assert((sz & CHUNK_ALIGN_MASK) == 0); assert(sz >= MIN_CHUNK_SIZE); assert(sz >= s); /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } + } /* Check a tree and its subtrees. */ static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; tchunkptr u = t; - bindex_t tindex = t->index; - size_t tsize = chunksize(t); - bindex_t idx; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; compute_tree_index(tsize, idx); assert(tindex == idx); assert(tsize >= MIN_LARGE_SIZE); assert(tsize >= minsize_for_tree_index(idx)); - assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index((idx + 1)))); - do { /* traverse through chain of same-sized nodes */ + do { /* traverse through chain of same-sized nodes */ do_check_any_chunk(m, ((mchunkptr)u)); assert(u->index == tindex); assert(chunksize(u) == tsize); @@ -3370,56 +3547,72 @@ static void do_check_tree(mstate m, tchunkptr t) { assert(u->fd->bk == u); assert(u->bk->fd == u); if (u->parent == 0) { + assert(u->child[0] == 0); assert(u->child[1] == 0); - } - else { - assert(head == 0); /* only one node on chain has parent */ + + } else { + + assert(head == 0); /* only one node on chain has parent */ head = u; assert(u->parent != u); - assert (u->parent->child[0] == u || - u->parent->child[1] == u || - *((tbinptr*)(u->parent)) == u); + assert(u->parent->child[0] == u || u->parent->child[1] == u || + *((tbinptr *)(u->parent)) == u); if (u->child[0] != 0) { + assert(u->child[0]->parent == u); assert(u->child[0] != u); do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); assert(u->child[1] != u); do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); + } /* Check all the chunks in a treebin. */ static void do_check_treebin(mstate m, bindex_t i) { - tbinptr* tb = treebin_at(m, i); + + tbinptr * tb = treebin_at(m, i); tchunkptr t = *tb; - int empty = (m->treemap & (1U << i)) == 0; - if (t == 0) - assert(empty); - if (!empty) - do_check_tree(m, t); + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) assert(empty); + if (!empty) do_check_tree(m, t); + } /* Check all the chunks in a smallbin. */ static void do_check_smallbin(mstate m, bindex_t i) { - sbinptr b = smallbin_at(m, i); - mchunkptr p = b->bk; + + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; unsigned int empty = (m->smallmap & (1U << i)) == 0; - if (p == b) - assert(empty); + if (p == b) assert(empty); if (!empty) { + for (; p != b; p = p->bk) { - size_t size = chunksize(p); + + size_t size = chunksize(p); mchunkptr q; /* each chunk claims to be free */ do_check_free_chunk(m, p); @@ -3428,324 +3621,435 @@ static void do_check_smallbin(mstate m, bindex_t i) { assert(p->bk == b || chunksize(p->bk) == chunksize(p)); /* chunk is followed by an inuse chunk */ q = next_chunk(p); - if (q->head != FENCEPOST_HEAD) - do_check_inuse_chunk(m, q); + if (q->head != FENCEPOST_HEAD) do_check_inuse_chunk(m, q); + } + } + } /* Find x in a bin. Used in other check functions. 
*/ static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); if (is_small(size)) { + bindex_t sidx = small_index(size); - sbinptr b = smallbin_at(m, sidx); + sbinptr b = smallbin_at(m, sidx); if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; do { - if (p == x) - return 1; + + if (p == x) return 1; + } while ((p = p->fd) != b); + } - } - else { + + } else { + bindex_t tidx; compute_tree_index(size, tidx); if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); - size_t sizebits = size << leftshift_for_tree_index(tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); while (t != 0 && chunksize(t) != size) { - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + + t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; do { - if (u == (tchunkptr)x) - return 1; + + if (u == (tchunkptr)x) return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; + } /* Traverse each chunk and check it; return total */ static size_t traverse_and_check(mstate m) { + size_t sum = 0; if (is_initialized(m)) { + msegmentptr s = &m->seg; sum += m->topsize + TOP_FOOT_SIZE; while (s != 0) { + mchunkptr q = align_as_chunk(s->base); mchunkptr lastq = 0; assert(pinuse(q)); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { + while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); if (is_inuse(q)) { + assert(!bin_find(m, q)); do_check_inuse_chunk(m, q); - } - else { + + } else { + assert(q == m->dv || bin_find(m, q)); - assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ + assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ do_check_free_chunk(m, q); + } + lastq = q; q = next_chunk(q); + } + s = s->next; + } + } + return sum; -} +} /* Check all properties of malloc_state. 
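[Editor's note] traverse_and_check above walks every segment chunk by chunk and enforces, among other invariants, that two free chunks are never adjacent (adjacent free chunks would have been coalesced at free time). A toy model of that invariant, with illustrative types rather than dlmalloc's:

#include <assert.h>
#include <stddef.h>

struct toy_chunk { size_t size; int inuse; };

static size_t check_segment(const struct toy_chunk *c, size_t n) {
  size_t sum = 0;
  int prev_free = 0;
  for (size_t i = 0; i < n; i++) {
    assert(!(prev_free && !c[i].inuse)); /* no two consecutive free chunks */
    prev_free = !c[i].inuse;
    sum += c[i].size;
  }
  return sum; /* caller checks sum <= footprint, as do_check_malloc_state does */
}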
*/ static void do_check_malloc_state(mstate m) { + bindex_t i; - size_t total; + size_t total; /* check bins */ for (i = 0; i < NSMALLBINS; ++i) do_check_smallbin(m, i); for (i = 0; i < NTREEBINS; ++i) do_check_treebin(m, i); - if (m->dvsize != 0) { /* check dv chunk */ + if (m->dvsize != 0) { /* check dv chunk */ do_check_any_chunk(m, m->dv); assert(m->dvsize == chunksize(m->dv)); assert(m->dvsize >= MIN_CHUNK_SIZE); assert(bin_find(m, m->dv) == 0); + } - if (m->top != 0) { /* check top chunk */ + if (m->top != 0) { /* check top chunk */ do_check_top_chunk(m, m->top); /*assert(m->topsize == chunksize(m->top)); redundant */ assert(m->topsize > 0); assert(bin_find(m, m->top) == 0); + } total = traverse_and_check(m); assert(total <= m->footprint); assert(m->footprint <= m->max_footprint); + } -#endif /* DEBUG */ + + #endif /* DEBUG */ /* ----------------------------- statistics ------------------------------ */ -#if !NO_MALLINFO + #if !NO_MALLINFO static struct mallinfo internal_mallinfo(mstate m) { - struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + struct mallinfo nm = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ensure_initialization(); if (!PREACTION(m)) { + check_malloc_state(m); if (is_initialized(m)) { - size_t nfree = SIZE_T_ONE; /* top always free */ - size_t mfree = m->topsize + TOP_FOOT_SIZE; - size_t sum = mfree; + + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; msegmentptr s = &m->seg; while (s != 0) { + mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { + while (segment_holds(s, q) && q != m->top && + q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); sum += sz; if (!is_inuse(q)) { + mfree += sz; ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } - nm.arena = sum; - nm.ordblks = nfree; - nm.hblkhd = m->footprint - sum; - nm.usmblks = m->max_footprint; + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; nm.uordblks = m->footprint - mfree; nm.fordblks = mfree; nm.keepcost = m->topsize; + } POSTACTION(m); + } + return nm; + } -#endif /* !NO_MALLINFO */ -#if !NO_MALLOC_STATS + #endif /* !NO_MALLINFO */ + + #if !NO_MALLOC_STATS static void internal_malloc_stats(mstate m) { + ensure_initialization(); if (!PREACTION(m)) { + size_t maxfp = 0; size_t fp = 0; size_t used = 0; check_malloc_state(m); if (is_initialized(m)) { + msegmentptr s = &m->seg; maxfp = m->max_footprint; fp = m->footprint; used = fp - (m->topsize + TOP_FOOT_SIZE); while (s != 0) { + mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { - if (!is_inuse(q)) - used -= chunksize(q); + while (segment_holds(s, q) && q != m->top && + q->head != FENCEPOST_HEAD) { + + if (!is_inuse(q)) used -= chunksize(q); q = next_chunk(q); + } + s = s->next; + } + } - POSTACTION(m); /* drop lock */ + + POSTACTION(m); /* drop lock */ fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + } + } -#endif /* NO_MALLOC_STATS */ -/* ----------------------- Operations on smallbins ----------------------- */ + #endif /* NO_MALLOC_STATS */ -/* - Various forms of linking and unlinking are defined as macros. Even - the ones for trees, which are very long but have very short typical - paths. 
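[Editor's note] The assignments in internal_mallinfo above give the exact meaning of each statistics field. Summarized as an annotated sketch of the conventional SVID-style layout (field type is int by default and may be overridden; fields dlmalloc leaves at zero, such as smblks and fsmblks, are omitted here):

struct mallinfo_model {
  int arena;    /* total bytes in all segments (the traversal's "sum") */
  int ordblks;  /* number of free chunks, top counted as one ("nfree") */
  int hblkhd;   /* footprint bytes not reached by the traversal */
  int usmblks;  /* historical maximum footprint (max_footprint) */
  int uordblks; /* bytes in use: footprint - mfree */
  int fordblks; /* free bytes ("mfree"), top included */
  int keepcost; /* releasable top space (topsize) */
};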
This is ugly but reduces reliance on inlining support of - compilers. -*/ + /* ----------------------- Operations on smallbins ----------------------- */ -/* Link a free chunk into a smallbin */ -#define insert_small_chunk(M, P, S) {\ - bindex_t I = small_index(S);\ - mchunkptr B = smallbin_at(M, I);\ - mchunkptr F = B;\ - assert(S >= MIN_CHUNK_SIZE);\ - if (!smallmap_is_marked(M, I))\ - mark_smallmap(M, I);\ - else if (RTCHECK(ok_address(M, B->fd)))\ - F = B->fd;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - B->fd = P;\ - F->bk = P;\ - P->fd = F;\ - P->bk = B;\ -} + /* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. + */ -/* Unlink a chunk from a smallbin */ -#define unlink_small_chunk(M, P, S) {\ - mchunkptr F = P->fd;\ - mchunkptr B = P->bk;\ - bindex_t I = small_index(S);\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(B == smallbin_at(M,I) ||\ - (ok_address(M, B) && B->fd == P))) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} + /* Link a free chunk into a smallbin */ + #define insert_small_chunk(M, P, S) \ + { \ + \ + bindex_t I = small_index(S); \ + mchunkptr B = smallbin_at(M, I); \ + mchunkptr F = B; \ + assert(S >= MIN_CHUNK_SIZE); \ + if (!smallmap_is_marked(M, I)) \ + mark_smallmap(M, I); \ + else if (RTCHECK(ok_address(M, B->fd))) \ + F = B->fd; \ + else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + B->fd = P; \ + F->bk = P; \ + P->fd = F; \ + P->bk = B; \ + \ + } -/* Unlink the first chunk from a smallbin */ -#define unlink_first_small_chunk(M, B, P, I) {\ - mchunkptr F = P->fd;\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} + /* Unlink a chunk from a smallbin */ + #define unlink_small_chunk(M, P, S) \ + { \ + \ + mchunkptr F = P->fd; \ + mchunkptr B = P->bk; \ + bindex_t I = small_index(S); \ + assert(P != B); \ + assert(P != F); \ + assert(chunksize(P) == small_index2size(I)); \ + if (RTCHECK(F == smallbin_at(M, I) || \ + (ok_address(M, F) && F->bk == P))) { \ + \ + if (B == F) { \ + \ + clear_smallmap(M, I); \ + \ + } else if (RTCHECK(B == smallbin_at(M, I) || \ + \ + (ok_address(M, B) && B->fd == P))) { \ + \ + F->bk = B; \ + B->fd = F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } -/* Replace dv node, binning the old one */ -/* Used only when dvsize known to be small */ -#define replace_dv(M, P, S) {\ - size_t DVS = M->dvsize;\ - assert(is_small(DVS));\ - if (DVS != 0) {\ - mchunkptr DV = M->dv;\ - insert_small_chunk(M, DV, DVS);\ - }\ - M->dvsize = S;\ - M->dv = P;\ -} + /* Unlink the first chunk from a smallbin */ + #define unlink_first_small_chunk(M, B, P, I) \ + { \ + \ + mchunkptr F = P->fd; \ + assert(P != B); \ + assert(P != F); \ + assert(chunksize(P) == small_index2size(I)); \ + if (B == F) { \ + \ + clear_smallmap(M, I); \ + \ + } else if (RTCHECK(ok_address(M, F) && F->bk == P)) { \ + \ + F->bk = B; \ + B->fd = 
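[Editor's note] The insert_small_chunk/unlink_small_chunk macros above are ordinary circular doubly-linked-list operations on a self-linked sentinel bin (init_bins links each bin to itself), written as macros only to avoid relying on compiler inlining, as the comment says. Restated as plain functions, keeping dlmalloc's fd/bk naming and eliding the RTCHECK corruption checks:

struct node { struct node *fd, *bk; };

static void bin_insert(struct node *bin, struct node *p) {
  struct node *f = bin->fd; /* equals bin itself when the bin is empty */
  bin->fd = p;
  f->bk = p;
  p->fd = f;
  p->bk = bin;
}

static void bin_unlink(struct node *p) {
  p->fd->bk = p->bk;
  p->bk->fd = p->fd;
}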
F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } -/* ------------------------- Operations on trees ------------------------- */ - -/* Insert chunk into tree */ -#define insert_large_chunk(M, X, S) {\ - tbinptr* H;\ - bindex_t I;\ - compute_tree_index(S, I);\ - H = treebin_at(M, I);\ - X->index = I;\ - X->child[0] = X->child[1] = 0;\ - if (!treemap_is_marked(M, I)) {\ - mark_treemap(M, I);\ - *H = X;\ - X->parent = (tchunkptr)H;\ - X->fd = X->bk = X;\ - }\ - else {\ - tchunkptr T = *H;\ - size_t K = S << leftshift_for_tree_index(I);\ - for (;;) {\ - if (chunksize(T) != S) {\ - tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ - K <<= 1;\ - if (*C != 0)\ - T = *C;\ - else if (RTCHECK(ok_address(M, C))) {\ - *C = X;\ - X->parent = T;\ - X->fd = X->bk = X;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - else {\ - tchunkptr F = T->fd;\ - if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ - T->fd = F->bk = X;\ - X->fd = F;\ - X->bk = T;\ - X->parent = 0;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - }\ - }\ -} + /* Replace dv node, binning the old one */ + /* Used only when dvsize known to be small */ + #define replace_dv(M, P, S) \ + { \ + \ + size_t DVS = M->dvsize; \ + assert(is_small(DVS)); \ + if (DVS != 0) { \ + \ + mchunkptr DV = M->dv; \ + insert_small_chunk(M, DV, DVS); \ + \ + } \ + M->dvsize = S; \ + M->dv = P; \ + \ + } + + /* ------------------------- Operations on trees ------------------------- */ + + /* Insert chunk into tree */ + #define insert_large_chunk(M, X, S) \ + { \ + \ + tbinptr *H; \ + bindex_t I; \ + compute_tree_index(S, I); \ + H = treebin_at(M, I); \ + X->index = I; \ + X->child[0] = X->child[1] = 0; \ + if (!treemap_is_marked(M, I)) { \ + \ + mark_treemap(M, I); \ + *H = X; \ + X->parent = (tchunkptr)H; \ + X->fd = X->bk = X; \ + \ + } else { \ + \ + tchunkptr T = *H; \ + size_t K = S << leftshift_for_tree_index(I); \ + for (;;) { \ + \ + if (chunksize(T) != S) { \ + \ + tchunkptr *C = \ + &(T->child[(K >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]); \ + K <<= 1; \ + if (*C != 0) \ + T = *C; \ + else if (RTCHECK(ok_address(M, C))) { \ + \ + *C = X; \ + X->parent = T; \ + X->fd = X->bk = X; \ + break; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + break; \ + \ + } \ + \ + } else { \ + \ + tchunkptr F = T->fd; \ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) { \ + \ + T->fd = F->bk = X; \ + X->fd = F; \ + X->bk = T; \ + X->parent = 0; \ + break; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + break; \ + \ + } \ + \ + } \ + \ + } \ + \ + } \ + \ + } /* Unlink steps: @@ -3764,104 +4068,145 @@ static void internal_malloc_stats(mstate m) { x's parent and children to x's replacement (or null if none). 
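[Editor's note] insert_large_chunk above descends the size-keyed tree by shifting the size key left once per level and branching on the current top bit; that is what the (K >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1 expression extracts. A standalone illustration of the descent rule:

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

int main(void) {
  const unsigned bits = sizeof(size_t) * CHAR_BIT;
  size_t key = (size_t)0x5 << (bits - 3); /* example key: top bits are 101 */
  for (int level = 0; level < 3; level++) {
    printf("level %d -> child[%u]\n", level,
           (unsigned)((key >> (bits - 1)) & 1)); /* prints 1, 0, 1 */
    key <<= 1;
  }
  return 0;
}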
*/ -#define unlink_large_chunk(M, X) {\ - tchunkptr XP = X->parent;\ - tchunkptr R;\ - if (X->bk != X) {\ - tchunkptr F = X->fd;\ - R = X->bk;\ - if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ - F->bk = R;\ - R->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - tchunkptr* RP;\ - if (((R = *(RP = &(X->child[1]))) != 0) ||\ - ((R = *(RP = &(X->child[0]))) != 0)) {\ - tchunkptr* CP;\ - while ((*(CP = &(R->child[1])) != 0) ||\ - (*(CP = &(R->child[0])) != 0)) {\ - R = *(RP = CP);\ - }\ - if (RTCHECK(ok_address(M, RP)))\ - *RP = 0;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - }\ - if (XP != 0) {\ - tbinptr* H = treebin_at(M, X->index);\ - if (X == *H) {\ - if ((*H = R) == 0) \ - clear_treemap(M, X->index);\ - }\ - else if (RTCHECK(ok_address(M, XP))) {\ - if (XP->child[0] == X) \ - XP->child[0] = R;\ - else \ - XP->child[1] = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - if (R != 0) {\ - if (RTCHECK(ok_address(M, R))) {\ - tchunkptr C0, C1;\ - R->parent = XP;\ - if ((C0 = X->child[0]) != 0) {\ - if (RTCHECK(ok_address(M, C0))) {\ - R->child[0] = C0;\ - C0->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - if ((C1 = X->child[1]) != 0) {\ - if (RTCHECK(ok_address(M, C1))) {\ - R->child[1] = C1;\ - C1->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ -} + #define unlink_large_chunk(M, X) \ + { \ + \ + tchunkptr XP = X->parent; \ + tchunkptr R; \ + if (X->bk != X) { \ + \ + tchunkptr F = X->fd; \ + R = X->bk; \ + if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) { \ + \ + F->bk = R; \ + R->fd = F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else { \ + \ + tchunkptr *RP; \ + if (((R = *(RP = &(X->child[1]))) != 0) || \ + ((R = *(RP = &(X->child[0]))) != 0)) { \ + \ + tchunkptr *CP; \ + while ((*(CP = &(R->child[1])) != 0) || \ + (*(CP = &(R->child[0])) != 0)) { \ + \ + R = *(RP = CP); \ + \ + } \ + if (RTCHECK(ok_address(M, RP))) \ + *RP = 0; \ + else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } \ + \ + } \ + if (XP != 0) { \ + \ + tbinptr *H = treebin_at(M, X->index); \ + if (X == *H) { \ + \ + if ((*H = R) == 0) clear_treemap(M, X->index); \ + \ + } else if (RTCHECK(ok_address(M, XP))) { \ + \ + if (XP->child[0] == X) \ + XP->child[0] = R; \ + else \ + XP->child[1] = R; \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + if (R != 0) { \ + \ + if (RTCHECK(ok_address(M, R))) { \ + \ + tchunkptr C0, C1; \ + R->parent = XP; \ + if ((C0 = X->child[0]) != 0) { \ + \ + if (RTCHECK(ok_address(M, C0))) { \ + \ + R->child[0] = C0; \ + C0->parent = R; \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + if ((C1 = X->child[1]) != 0) { \ + \ + if (RTCHECK(ok_address(M, C1))) { \ + \ + R->child[1] = C1; \ + C1->parent = R; \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } \ + \ + } /* Relays to large vs small bin operations */ -#define insert_chunk(M, P, S)\ - if (is_small(S)) insert_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } - -#define unlink_chunk(M, P, S)\ - if (is_small(S)) unlink_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + #define insert_chunk(M, P, S) \ + if (is_small(S)) insert_small_chunk(M, P, S) else { \ + \ + tchunkptr TP = (tchunkptr)(P); \ + insert_large_chunk(M, TP, S); \ + \ + } + #define unlink_chunk(M, P, S) \ + if 
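[Editor's note] Step 2 of the unlink recipe above (the case where X heads its chain but has children) selects the rightmost descendent leaf as the replacement node. A toy version of that search, mirroring the RP/CP walk in unlink_large_chunk below:

struct tnode { struct tnode *child[2]; };

static struct tnode *rightmost_leaf(struct tnode *x) {
  struct tnode *r = x->child[1] ? x->child[1] : x->child[0];
  while (r && (r->child[1] || r->child[0]))
    r = r->child[1] ? r->child[1] : r->child[0]; /* prefer the right child */
  return r; /* 0 when x has no children at all */
}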
(is_small(S)) unlink_small_chunk(M, P, S) else { \ + \ + tchunkptr TP = (tchunkptr)(P); \ + unlink_large_chunk(M, TP); \ + \ + } /* Relays to internal calls to malloc/free from realloc, memalign etc */ -#if ONLY_MSPACES -#define internal_malloc(m, b) mspace_malloc(m, b) -#define internal_free(m, mem) mspace_free(m,mem); -#else /* ONLY_MSPACES */ -#if MSPACES -#define internal_malloc(m, b)\ - ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) -#define internal_free(m, mem)\ - if (m == gm) dlfree(mem); else mspace_free(m,mem); -#else /* MSPACES */ -#define internal_malloc(m, b) dlmalloc(b) -#define internal_free(m, mem) dlfree(mem) -#endif /* MSPACES */ -#endif /* ONLY_MSPACES */ + #if ONLY_MSPACES + #define internal_malloc(m, b) mspace_malloc(m, b) + #define internal_free(m, mem) mspace_free(m, mem); + #else /* ONLY_MSPACES */ + #if MSPACES + #define internal_malloc(m, b) \ + ((m == gm) ? dlmalloc(b) : mspace_malloc(m, b)) + #define internal_free(m, mem) \ + if (m == gm) \ + dlfree(mem); \ + else \ + mspace_free(m, mem); + #else /* MSPACES */ + #define internal_malloc(m, b) dlmalloc(b) + #define internal_free(m, mem) dlfree(mem) + #endif /* MSPACES */ + #endif /* ONLY_MSPACES */ /* ----------------------- Direct-mmapping chunks ----------------------- */ @@ -3874,80 +4219,93 @@ static void internal_malloc_stats(mstate m) { */ /* Malloc using mmap */ -static void* mmap_alloc(mstate m, size_t nb) { +static void *mmap_alloc(mstate m, size_t nb) { + size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); if (m->footprint_limit != 0) { + size_t fp = m->footprint + mmsize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; + if (fp <= m->footprint || fp > m->footprint_limit) return 0; + } - if (mmsize > nb) { /* Check for wrap around 0 */ - char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); + + if (mmsize > nb) { /* Check for wrap around 0 */ + char *mm = (char *)(CALL_DIRECT_MMAP(mmsize)); if (mm != CMFAIL) { - size_t offset = align_offset(chunk2mem(mm)); - size_t psize = mmsize - offset - MMAP_FOOT_PAD; + + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; mchunkptr p = (mchunkptr)(mm + offset); p->prev_foot = offset; p->head = psize; mark_inuse_foot(m, p, psize); chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0; - if (m->least_addr == 0 || mm < m->least_addr) - m->least_addr = mm; + if (m->least_addr == 0 || mm < m->least_addr) m->least_addr = mm; if ((m->footprint += mmsize) > m->max_footprint) m->max_footprint = m->footprint; assert(is_aligned(chunk2mem(p))); check_mmapped_chunk(m, p); return chunk2mem(p); + } + } + return 0; + } /* Realloc using mmap */ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { + size_t oldsize = chunksize(oldp); - (void)flags; /* placate people compiling -Wunused */ - if (is_small(nb)) /* Can't shrink mmap regions below small size */ + (void)flags; /* placate people compiling -Wunused */ + if (is_small(nb)) /* Can't shrink mmap regions below small size */ return 0; /* Keep old chunk if big enough but not too big */ if (oldsize >= nb + SIZE_T_SIZE && (oldsize - nb) <= (mparams.granularity << 1)) return oldp; else { + size_t offset = oldp->prev_foot; size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - char* cp = (char*)CALL_MREMAP((char*)oldp - offset, - oldmmsize, newmmsize, flags); + char * 
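[Editor's note] mmap_alloc above performs two guards before touching mmap: the footprint-limit test, where fp <= m->footprint catches size_t wraparound, and mmsize > nb, which catches overflow in the size rounding. A minimal model with the page rounding written out (names are my own, not dlmalloc's):

#include <stddef.h>

static size_t round_up(size_t n, size_t granularity) {
  return (n + granularity - 1) & ~(granularity - 1); /* granularity: power of 2 */
}

static int mmap_request_ok(size_t footprint, size_t limit, size_t nb,
                           size_t mmsize) {
  if (mmsize <= nb) return 0;                      /* arithmetic wrapped */
  if (limit != 0) {
    size_t fp = footprint + mmsize;
    if (fp <= footprint || fp > limit) return 0;   /* wrap, or over limit */
  }
  return 1;
}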
cp = + (char *)CALL_MREMAP((char *)oldp - offset, oldmmsize, newmmsize, flags); if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); - size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; newp->head = psize; mark_inuse_foot(m, newp, psize); chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0; - if (cp < m->least_addr) - m->least_addr = cp; + if (cp < m->least_addr) m->least_addr = cp; if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) m->max_footprint = m->footprint; check_mmapped_chunk(m, newp); return newp; + } + } + return 0; -} +} /* -------------------------- mspace management -------------------------- */ /* Initialize top chunk and its size */ static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ size_t offset = align_offset(chunk2mem(p)); - p = (mchunkptr)((char*)p + offset); + p = (mchunkptr)((char *)p + offset); psize -= offset; m->top = p; @@ -3955,23 +4313,29 @@ static void init_top(mstate m, mchunkptr p, size_t psize) { p->head = psize | PINUSE_BIT; /* set size of fake trailing chunk holding overhead space only once */ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; - m->trim_check = mparams.trim_threshold; /* reset on each update */ + m->trim_check = mparams.trim_threshold; /* reset on each update */ + } /* Initialize bins for a new mstate that is otherwise zeroed out */ static void init_bins(mstate m) { + /* Establish circular links for smallbins */ bindex_t i; for (i = 0; i < NSMALLBINS; ++i) { - sbinptr bin = smallbin_at(m,i); + + sbinptr bin = smallbin_at(m, i); bin->fd = bin->bk = bin; + } + } -#if PROCEED_ON_ERROR + #if PROCEED_ON_ERROR /* default corruption action */ static void reset_on_error(mstate m) { + int i; ++malloc_corruption_error_count; /* Reinitialize fields to forget about all memory */ @@ -3984,67 +4348,78 @@ static void reset_on_error(mstate m) { for (i = 0; i < NTREEBINS; ++i) *treebin_at(m, i) = 0; init_bins(m); + } -#endif /* PROCEED_ON_ERROR */ + + #endif /* PROCEED_ON_ERROR */ /* Allocate chunk and prepend remainder with chunk in successor base. 
*/ -static void* prepend_alloc(mstate m, char* newbase, char* oldbase, - size_t nb) { +static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb) { + mchunkptr p = align_as_chunk(newbase); mchunkptr oldfirst = align_as_chunk(oldbase); - size_t psize = (char*)oldfirst - (char*)p; + size_t psize = (char *)oldfirst - (char *)p; mchunkptr q = chunk_plus_offset(p, nb); - size_t qsize = psize - nb; + size_t qsize = psize - nb; set_size_and_pinuse_of_inuse_chunk(m, p, nb); - assert((char*)oldfirst > (char*)q); + assert((char *)oldfirst > (char *)q); assert(pinuse(oldfirst)); assert(qsize >= MIN_CHUNK_SIZE); /* consolidate remainder with first chunk of old base */ if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; m->top = q; q->head = tsize | PINUSE_BIT; check_top_chunk(m, q); - } - else if (oldfirst == m->dv) { + + } else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; m->dv = q; set_size_and_pinuse_of_free_chunk(q, dsize); - } - else { + + } else { + if (!is_inuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); unlink_chunk(m, oldfirst, nsize); oldfirst = chunk_plus_offset(oldfirst, nsize); qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); insert_chunk(m, q, qsize); check_free_chunk(m, q); + } check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); + } /* Add a segment to hold a new noncontiguous region */ -static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { +static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ - char* old_top = (char*)m->top; + char * old_top = (char *)m->top; msegmentptr oldsp = segment_holding(m, old_top); - char* old_end = oldsp->base + oldsp->size; - size_t ssize = pad_request(sizeof(struct malloc_segment)); - char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - size_t offset = align_offset(chunk2mem(rawsp)); - char* asp = rawsp + offset; - char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; - mchunkptr sp = (mchunkptr)csp; + char * old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char * rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char * asp = rawsp + offset; + char * csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? 
old_top : asp; + mchunkptr sp = (mchunkptr)csp; msegmentptr ss = (msegmentptr)(chunk2mem(sp)); - mchunkptr tnext = chunk_plus_offset(sp, ssize); - mchunkptr p = tnext; - int nfences = 0; + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; /* reset top to new space */ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); @@ -4052,7 +4427,7 @@ static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Set up segment record */ assert(is_aligned(ss)); set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); - *ss = m->seg; /* Push current record */ + *ss = m->seg; /* Push current record */ m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmapped; @@ -4060,53 +4435,61 @@ static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Insert trailing fenceposts */ for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); p->head = FENCEPOST_HEAD; ++nfences; - if ((char*)(&(nextp->head)) < old_end) + if ((char *)(&(nextp->head)) < old_end) p = nextp; else break; + } + assert(nfences >= 2); /* Insert the rest of old top into a bin as an ordinary free chunk */ if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; - size_t psize = csp - old_top; + size_t psize = csp - old_top; mchunkptr tn = chunk_plus_offset(q, psize); set_free_with_pinuse(q, psize, tn); insert_chunk(m, q, psize); + } check_top_chunk(m, m->top); + } /* -------------------------- System allocation -------------------------- */ /* Get memory from system using MORECORE or MMAP */ -static void* sys_alloc(mstate m, size_t nb) { - char* tbase = CMFAIL; +static void *sys_alloc(mstate m, size_t nb) { + + char * tbase = CMFAIL; size_t tsize = 0; flag_t mmap_flag = 0; - size_t asize; /* allocation size */ + size_t asize; /* allocation size */ ensure_initialization(); /* Directly map large chunks, but only if already initialized */ if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { - void* mem = mmap_alloc(m, nb); - if (mem != 0) - return mem; + + void *mem = mmap_alloc(m, nb); + if (mem != 0) return mem; + } asize = granularity_align(nb + SYS_ALLOC_PADDING); - if (asize <= nb) - return 0; /* wraparound */ + if (asize <= nb) return 0; /* wraparound */ if (m->footprint_limit != 0) { + size_t fp = m->footprint + asize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; + if (fp <= m->footprint || fp > m->footprint_limit) return 0; + } /* @@ -4132,91 +4515,119 @@ static void* sys_alloc(mstate m, size_t nb) { */ if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { - char* br = CMFAIL; - size_t ssize = asize; /* sbrk call size */ - msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); + + char * br = CMFAIL; + size_t ssize = asize; /* sbrk call size */ + msegmentptr ss = (m->top == 0) ? 
0 : segment_holding(m, (char *)m->top); ACQUIRE_MALLOC_GLOBAL_LOCK(); - if (ss == 0) { /* First time through or recovery */ - char* base = (char*)CALL_MORECORE(0); + if (ss == 0) { /* First time through or recovery */ + char *base = (char *)CALL_MORECORE(0); if (base != CMFAIL) { + size_t fp; /* Adjust to end on a page boundary */ if (!is_page_aligned(base)) ssize += (page_align((size_t)base) - (size_t)base); - fp = m->footprint + ssize; /* recheck limits */ + fp = m->footprint + ssize; /* recheck limits */ if (ssize > nb && ssize < HALF_MAX_SIZE_T && (m->footprint_limit == 0 || (fp > m->footprint && fp <= m->footprint_limit)) && - (br = (char*)(CALL_MORECORE(ssize))) == base) { + (br = (char *)(CALL_MORECORE(ssize))) == base) { + tbase = base; tsize = ssize; + } + } - } - else { + + } else { + /* Subtract out existing available top space from MORECORE request. */ ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); /* Use mem here only if it did continuously extend old space */ if (ssize < HALF_MAX_SIZE_T && - (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { + (br = (char *)(CALL_MORECORE(ssize))) == ss->base + ss->size) { + tbase = br; tsize = ssize; + } + } - if (tbase == CMFAIL) { /* Cope with partial failure */ - if (br != CMFAIL) { /* Try to use/extend the space we did get */ - if (ssize < HALF_MAX_SIZE_T && - ssize < nb + SYS_ALLOC_PADDING) { + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (ssize < HALF_MAX_SIZE_T && ssize < nb + SYS_ALLOC_PADDING) { + size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); if (esize < HALF_MAX_SIZE_T) { - char* end = (char*)CALL_MORECORE(esize); + + char *end = (char *)CALL_MORECORE(esize); if (end != CMFAIL) ssize += esize; - else { /* Can't use; try to release */ - (void) CALL_MORECORE(-ssize); + else { /* Can't use; try to release */ + (void)CALL_MORECORE(-ssize); br = CMFAIL; + } + } + } + } - if (br != CMFAIL) { /* Use the space we did get */ + + if (br != CMFAIL) { /* Use the space we did get */ tbase = br; tsize = ssize; - } - else - disable_contiguous(m); /* Don't try contiguous path in the future */ + + } else + + disable_contiguous(m); /* Don't try contiguous path in the future */ + } RELEASE_MALLOC_GLOBAL_LOCK(); + } - if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ - char* mp = (char*)(CALL_MMAP(asize)); + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + char *mp = (char *)(CALL_MMAP(asize)); if (mp != CMFAIL) { + tbase = mp; tsize = asize; mmap_flag = USE_MMAP_BIT; + } + } - if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ if (asize < HALF_MAX_SIZE_T) { - char* br = CMFAIL; - char* end = CMFAIL; + + char *br = CMFAIL; + char *end = CMFAIL; ACQUIRE_MALLOC_GLOBAL_LOCK(); - br = (char*)(CALL_MORECORE(asize)); - end = (char*)(CALL_MORECORE(0)); + br = (char *)(CALL_MORECORE(asize)); + end = (char *)(CALL_MORECORE(0)); RELEASE_MALLOC_GLOBAL_LOCK(); if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; tsize = ssize; + } + } + } + } if (tbase != CMFAIL) { @@ -4224,61 +4635,66 @@ static void* sys_alloc(mstate m, size_t nb) { if ((m->footprint += tsize) > m->max_footprint) m->max_footprint = m->footprint; - if (!is_initialized(m)) { /* first-time initialization */ - if (m->least_addr == 0 || tbase < m->least_addr) - m->least_addr = tbase; + if 
(!is_initialized(m)) { /* first-time initialization */ + if (m->least_addr == 0 || tbase < m->least_addr) m->least_addr = tbase; m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmap_flag; m->magic = mparams.magic; m->release_checks = MAX_RELEASE_CHECK_RATE; init_bins(m); -#if !ONLY_MSPACES + #if !ONLY_MSPACES if (is_global(m)) init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); else -#endif + #endif { + /* Offset top by embedded malloc_state */ mchunkptr mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); + } + } else { + /* Try to merge with an existing segment */ msegmentptr sp = &m->seg; /* Only consider most recent segment if traversal suppressed */ while (sp != 0 && tbase != sp->base + sp->size) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && + if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag && - segment_holds(sp, m->top)) { /* append */ + segment_holds(sp, m->top)) { /* append */ sp->size += tsize; init_top(m, m->top, m->topsize + tsize); - } - else { - if (tbase < m->least_addr) - m->least_addr = tbase; + + } else { + + if (tbase < m->least_addr) m->least_addr = tbase; sp = &m->seg; while (sp != 0 && sp->base != tbase + tsize) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && + if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag) { - char* oldbase = sp->base; + + char *oldbase = sp->base; sp->base = tbase; sp->size += tsize; return prepend_alloc(m, tbase, oldbase, nb); - } - else + + } else + add_segment(m, tbase, tsize, mmap_flag); + } + } - if (nb < m->topsize) { /* Allocate from new or extended top space */ - size_t rsize = m->topsize -= nb; + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; mchunkptr p = m->top; mchunkptr r = m->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -4286,353 +4702,463 @@ static void* sys_alloc(mstate m, size_t nb) { check_top_chunk(m, m->top); check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); + } + } MALLOC_FAILURE_ACTION; return 0; + } /* ----------------------- system deallocation -------------------------- */ /* Unmap and unlink any mmapped segments that don't contain used chunks */ static size_t release_unused_segments(mstate m) { - size_t released = 0; - int nsegs = 0; + + size_t released = 0; + int nsegs = 0; msegmentptr pred = &m->seg; msegmentptr sp = pred->next; while (sp != 0) { - char* base = sp->base; - size_t size = sp->size; + + char * base = sp->base; + size_t size = sp->size; msegmentptr next = sp->next; ++nsegs; if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); - size_t psize = chunksize(p); + size_t psize = chunksize(p); /* Can unmap if first chunk holds entire segment and not pinned */ - if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + if (!is_inuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; - assert(segment_holds(sp, (char*)sp)); + assert(segment_holds(sp, (char *)sp)); if (p == m->dv) { + m->dv = 0; m->dvsize = 0; - } - else { + + } else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; m->footprint -= size; /* unlink obsoleted record */ sp = pred; sp->next = next; - } - else { /* back out if cannot unmap */ + + } 
else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } - if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ + + if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ break; pred = sp; sp = next; + } + /* Reset check counter */ - m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? - (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); + m->release_checks = (((size_t)nsegs > (size_t)MAX_RELEASE_CHECK_RATE) + ? (size_t)nsegs + : (size_t)MAX_RELEASE_CHECK_RATE); return released; + } static int sys_trim(mstate m, size_t pad) { + size_t released = 0; ensure_initialization(); if (pad < MAX_REQUEST && is_initialized(m)) { - pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ size_t unit = mparams.granularity; - size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - - SIZE_T_ONE) * unit; - msegmentptr sp = segment_holding(m, (char*)m->top); + size_t extra = + ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char *)m->top); if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { - if (HAVE_MMAP && - sp->size >= extra && - !has_segment_link(m, sp)) { /* can't shrink if pinned */ + + if (HAVE_MMAP && sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ size_t newsize = sp->size - extra; - (void)newsize; /* placate people compiling -Wunused-variable */ + (void)newsize; /* placate people compiling -Wunused-variable */ /* Prefer mremap, fall back to munmap */ if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } - } - else if (HAVE_MORECORE) { - if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + + } else if (HAVE_MORECORE) { + + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; ACQUIRE_MALLOC_GLOBAL_LOCK(); { + /* Make sure end of memory is where we last set it. */ - char* old_br = (char*)(CALL_MORECORE(0)); + char *old_br = (char *)(CALL_MORECORE(0)); if (old_br == sp->base + sp->size) { - char* rel_br = (char*)(CALL_MORECORE(-extra)); - char* new_br = (char*)(CALL_MORECORE(0)); + + char *rel_br = (char *)(CALL_MORECORE(-extra)); + char *new_br = (char *)(CALL_MORECORE(0)); if (rel_br != CMFAIL && new_br < old_br) released = old_br - new_br; + } + } + RELEASE_MALLOC_GLOBAL_LOCK(); + } + } if (released != 0) { + sp->size -= released; m->footprint -= released; init_top(m, m->top, m->topsize - released); check_top_chunk(m, m->top); + } + } /* Unmap any unused mmapped segments */ - if (HAVE_MMAP) - released += release_unused_segments(m); + if (HAVE_MMAP) released += release_unused_segments(m); /* On failure, disable autotrim to avoid repeated failed future calls */ - if (released == 0 && m->topsize > m->trim_check) - m->trim_check = MAX_SIZE_T; + if (released == 0 && m->topsize > m->trim_check) m->trim_check = MAX_SIZE_T; + } - return (released != 0)? 1 : 0; + return (released != 0) ? 1 : 0; + } /* Consolidate and bin a chunk. Differs from exported versions of free mainly in that the chunk need not be marked as inuse. 
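[Editor's note] The shrink computation in sys_trim above rounds the releasable top space down to whole granularity units, leaving one unit's worth of slack. The same formula as a standalone helper, with a worked example:

#include <stddef.h>

static size_t trim_extra(size_t topsize, size_t pad, size_t unit) {
  if (topsize <= pad) return 0;
  return ((topsize - pad + (unit - 1)) / unit - 1) * unit;
}

/* trim_extra(200 << 10, 0, 64 << 10) == 192 KiB: three whole 64 KiB units
   are released and the remainder stays mapped; a top smaller than one
   unit yields 0 and nothing is trimmed. */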
*/ static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { + mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + mchunkptr prev; - size_t prevsize = p->prev_foot; + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) - m->footprint -= psize; + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) m->footprint -= psize; return; + } + prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ if (p != m->dv) { + unlink_chunk(m, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + m->dvsize = psize; set_free_with_pinuse(p, psize, next); return; + } - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); return; + } + } + if (RTCHECK(ok_address(m, next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == m->top) { + size_t tsize = m->topsize += psize; m->top = p; p->head = tsize | PINUSE_BIT; if (p == m->dv) { + m->dv = 0; m->dvsize = 0; + } + return; - } - else if (next == m->dv) { + + } else if (next == m->dv) { + size_t dsize = m->dvsize += psize; m->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); return; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(m, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == m->dv) { + m->dvsize = psize; return; + } + } - } - else { + + } else { + set_free_with_pinuse(p, psize, next); + } + insert_chunk(m, p, psize); - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); + } + } /* ---------------------------- malloc --------------------------- */ /* allocate a large request from the best fitting chunk in a treebin */ -static void* tmalloc_large(mstate m, size_t nb) { +static void *tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; - size_t rsize = -nb; /* Unsigned negation */ + size_t rsize = -nb; /* Unsigned negation */ tchunkptr t; - bindex_t idx; + bindex_t idx; compute_tree_index(nb, idx); if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ - size_t sizebits = nb << leftshift_for_tree_index(idx); - tchunkptr rst = 0; /* The deepest untaken right subtree */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ for (;;) { + tchunkptr rt; - size_t trem = chunksize(t) - nb; + size_t trem = chunksize(t) - nb; if (trem < rsize) { + v = t; - if ((rsize = trem) == 0) - break; + if ((rsize = trem) == 0) break; + } + rt = t->child[1]; - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; - if (rt != 0 && rt != t) - rst = rt; + t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) rst = rt; if (t == 0) { - t = rst; /* set t to least subtree holding sizes > nb */ + + t = rst; /* set t to least subtree holding sizes > nb */ break; + } + sizebits <<= 1; + } + } - if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; if (leftbits != 0) { + bindex_t i; binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); t = *treebin_at(m, i); + } + } - while (t != 0) { /* find smallest of tree or subtree */ + 
while (t != 0) { /* find smallest of tree or subtree */ size_t trem = chunksize(t) - nb; if (trem < rsize) { + rsize = trem; v = t; + } + t = leftmost_child(t); + } /* If dv is a better fit, return 0 so malloc will use it */ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { - if (RTCHECK(ok_address(m, v))) { /* split */ + + if (RTCHECK(ok_address(m, v))) { /* split */ mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; + } /* allocate a small request from the best fitting chunk in a treebin */ -static void* tmalloc_small(mstate m, size_t nb) { +static void *tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; - size_t rsize; - bindex_t i; - binmap_t leastbit = least_bit(m->treemap); + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); compute_bit2idx(leastbit, i); v = t = *treebin_at(m, i); rsize = chunksize(t) - nb; while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; if (trem < rsize) { + rsize = trem; v = t; + } + } if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } CORRUPTION_ERROR_ACTION(m); return 0; -} -#if !ONLY_MSPACES +} -void* dlmalloc(size_t bytes) { - /* - Basic algorithm: - If a small request (< 256 bytes minus per-chunk overhead): - 1. If one exists, use a remainderless chunk in associated smallbin. - (Remainderless means that there are too few excess bytes to - represent as a chunk.) - 2. If it is big enough, use the dv chunk, which is normally the - chunk adjacent to the one used for the most recent small request. - 3. If one exists, split the smallest available chunk in a bin, - saving remainder in dv. - 4. If it is big enough, use the top chunk. - 5. If available, get memory from system and use it - Otherwise, for a large request: - 1. Find the smallest available binned chunk that fits, and use it - if it is better fitting than dv chunk, splitting if necessary. - 2. If better fitting than any binned chunk, use the dv chunk. - 3. If it is big enough, use the top chunk. - 4. If request size >= mmap threshold, try to directly mmap this chunk. - 5. If available, get memory from system and use it - - The ugly goto's here ensure that postaction occurs along all paths. - */ + #if !ONLY_MSPACES + +void *dlmalloc(size_t bytes) { + + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. 
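[Editor's note] tmalloc_large/tmalloc_small above apply the allocator's standard remainder rule: a best-fit remainder smaller than MIN_CHUNK_SIZE cannot stand on its own as a free chunk, so the whole chunk is granted instead of splitting. Schematically (the 32-byte constant is a typical 64-bit value, not universal):

#include <stddef.h>

#define MIN_CHUNK_SIZE_MODEL 32 /* illustrative; real value is config-dependent */

static size_t grant_size(size_t chunk_size, size_t request) {
  size_t rsize = chunk_size - request;            /* candidate remainder */
  return (rsize < MIN_CHUNK_SIZE_MODEL) ? chunk_size /* exhaust: no split */
                                        : request;   /* split; rest is freed */
}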
Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ -#if USE_LOCKS - ensure_initialization(); /* initialize in sys_alloc if not using locks */ -#endif + #if USE_LOCKS + ensure_initialization(); /* initialize in sys_alloc if not using locks */ + #endif if (!PREACTION(gm)) { - void* mem; + + void * mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = gm->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(gm, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -4641,15 +5167,17 @@ void* dlmalloc(size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } else if (nb > gm->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(gm, i); p = b->fd; @@ -4660,54 +5188,71 @@ void* dlmalloc(size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(gm, p, small_index2size(i)); else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); goto postaction; + } + } - } - else if (bytes >= MAX_REQUEST) + + } else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
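[Editor's note] The small-request fast path above leans on three bitmap idioms: shifting smallmap right by the bin index so bit 0 is the exact-fit bin, testing that bin and the next in one go with & 0x3U, and bumping the index with idx += ~smallbits & 1 when only the next bin is nonempty. Worked through on example values:

#include <stdio.h>

int main(void) {
  unsigned smallmap = 0x30;             /* bins 4 and 5 nonempty (example) */
  unsigned idx = 3;                     /* exact-fit bin for the request */
  unsigned smallbits = smallmap >> idx; /* bit 0: bin 3, bit 1: bin 4 */
  if ((smallbits & 0x3U) != 0) {
    idx += ~smallbits & 1;              /* bin 3 empty -> use bin 4 */
    printf("remainderless fit from bin %u\n", idx); /* prints 4 */
  }
  printf("lowest set bit of map: 0x%x\n", smallmap & (0 - smallmap)); /* 0x10 */
  return 0;
}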
Force failure (in sys alloc) */ else { + nb = pad_request(bytes); if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); goto postaction; + } + } if (nb <= gm->dvsize) { - size_t rsize = gm->dvsize - nb; + + size_t rsize = gm->dvsize - nb; mchunkptr p = gm->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = gm->dv = chunk_plus_offset(p, nb); gm->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t dvs = gm->dvsize; gm->dvsize = 0; gm->dv = 0; set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } - else if (nb < gm->topsize) { /* Split top */ - size_t rsize = gm->topsize -= nb; + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; mchunkptr p = gm->top; mchunkptr r = gm->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -4716,6 +5261,7 @@ void* dlmalloc(size_t bytes) { check_top_chunk(gm, gm->top); check_malloced_chunk(gm, mem, nb); goto postaction; + } mem = sys_alloc(gm, nb); @@ -4723,14 +5269,17 @@ void* dlmalloc(size_t bytes) { postaction: POSTACTION(gm); return mem; + } return 0; + } /* ---------------------------- free --------------------------- */ -void dlfree(void* mem) { +void dlfree(void *mem) { + /* Consolidate freed chunks with preceeding or succeeding bordering free chunks, if they exist, and then place in a bin. Intermixed @@ -4738,164 +5287,216 @@ void dlfree(void* mem) { */ if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS + + mchunkptr p = mem2chunk(mem); + #if FOOTERS mstate fm = get_mstate_for(p); if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); return; + } -#else /* FOOTERS */ -#define fm gm -#endif /* FOOTERS */ + + #else /* FOOTERS */ + #define fm gm + #endif /* FOOTERS */ if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = chunksize(p); + + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - } - else { + + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; + } - } - else + + } else + goto erroraction; + } + } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { + size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { + fm->dv = 0; fm->dvsize = 0; + } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); + + if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; - } - else if (next == fm->dv) { + + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, 
dsize); goto postaction; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { + fm->dvsize = psize; goto postaction; + } + } - } - else + + } else + set_free_with_pinuse(p, psize, next); if (is_small(psize)) { + insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - } - else { + + } else { + tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); + if (--fm->release_checks == 0) release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); + } + } -#if !FOOTERS -#undef fm -#endif /* FOOTERS */ + + #if !FOOTERS + #undef fm + #endif /* FOOTERS */ + } -void* dlcalloc(size_t n_elements, size_t elem_size) { - void* mem; +void *dlcalloc(size_t n_elements, size_t elem_size) { + + void * mem; size_t req = 0; if (n_elements != 0) { + req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; + } -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ /* ------------ Internal support for realloc, memalign, etc -------------- */ /* Try to realloc; only in-place unless can_move true */ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, int can_move) { + mchunkptr newp = 0; - size_t oldsize = chunksize(p); + size_t oldsize = chunksize(p); mchunkptr next = chunk_plus_offset(p, oldsize); - if (RTCHECK(ok_address(m, p) && ok_inuse(p) && - ok_next(p, next) && ok_pinuse(next))) { + if (RTCHECK(ok_address(m, p) && ok_inuse(p) && ok_next(p, next) && + ok_pinuse(next))) { + if (is_mmapped(p)) { + newp = mmap_resize(m, p, nb, can_move); - } - else if (oldsize >= nb) { /* already big enough */ + + } else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; - if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ + if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); + } + newp = p; - } - else if (next == m->top) { /* extend into top */ + + } else if (next == m->top) { /* extend into top */ + if (oldsize + m->topsize > nb) { - size_t newsize = oldsize + m->topsize; - size_t newtopsize = newsize - nb; + + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; mchunkptr newtop = chunk_plus_offset(p, nb); set_inuse(m, p, nb); - newtop->head = newtopsize |PINUSE_BIT; + newtop->head = newtopsize | PINUSE_BIT; m->top = newtop; m->topsize = newtopsize; newp = p; + } - } - else if (next == m->dv) { /* extend into dv */ + + } else if (next == m->dv) { /* extend into dv */ + size_t dvs = m->dvsize; if (oldsize + dvs >= nb) { + size_t dsize = oldsize + dvs - nb; if (dsize >= MIN_CHUNK_SIZE) { + mchunkptr r = chunk_plus_offset(p, nb); mchunkptr n = chunk_plus_offset(r, dsize); set_inuse(m, p, nb); @@ -4903,64 +5504,87 @@ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, clear_pinuse(n); m->dvsize = dsize; m->dv = r; - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t newsize = oldsize + dvs; set_inuse(m, p, newsize); m->dvsize = 
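[Editor's note] The dlcalloc hunk above ends with the classic multiplication-overflow guard: if either operand exceeds 16 bits, verify the product by division, and on overflow request MAX_SIZE_T so allocation fails downstream. As a standalone helper (the n != 0 test here stands in for dlcalloc's earlier n_elements != 0 check):

#include <stddef.h>

static size_t checked_mul(size_t n, size_t sz) {
  size_t req = n * sz;
  /* Cheap path: when both operands fit in 16 bits, their product cannot
     wrap a 32-bit-or-wider size_t, so the division is skipped. */
  if (((n | sz) & ~(size_t)0xffff) && n != 0 && req / n != sz)
    req = (size_t)-1; /* MAX_SIZE_T: force downstream failure */
  return req;
}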
0; m->dv = 0; + } + newp = p; + } - } - else if (!cinuse(next)) { /* extend into next free chunk */ + + } else if (!cinuse(next)) { /* extend into next free chunk */ + size_t nextsize = chunksize(next); if (oldsize + nextsize >= nb) { + size_t rsize = oldsize + nextsize - nb; unlink_chunk(m, next, nextsize); if (rsize < MIN_CHUNK_SIZE) { + size_t newsize = oldsize + nextsize; set_inuse(m, p, newsize); - } - else { + + } else { + mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); + } + newp = p; + } + } - } - else { + + } else { + USAGE_ERROR_ACTION(m, chunk2mem(p)); + } + return newp; + } -static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { - void* mem = 0; - if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ +static void *internal_memalign(mstate m, size_t alignment, size_t bytes) { + + void *mem = 0; + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ alignment = MIN_CHUNK_SIZE; - if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */ size_t a = MALLOC_ALIGNMENT << 1; - while (a < alignment) a <<= 1; + while (a < alignment) + a <<= 1; alignment = a; + } + if (bytes >= MAX_REQUEST - alignment) { - if (m != 0) { /* Test isn't needed but avoids compiler warning */ + + if (m != 0) { /* Test isn't needed but avoids compiler warning */ MALLOC_FAILURE_ACTION; + } - } - else { + + } else { + size_t nb = request2size(bytes); size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; mem = internal_malloc(m, req); if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (PREACTION(m)) - return 0; - if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ /* Find an aligned spot inside chunk. Since we need to give back leading space in a chunk of at least MIN_CHUNK_SIZE, if @@ -4969,47 +5593,59 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { We've allocated enough total room so that this is always possible. */ - char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - - SIZE_T_ONE)) & - -alignment)); - char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? - br : br+alignment; + char * br = (char *)mem2chunk((size_t)( + ((size_t)((char *)mem + alignment - SIZE_T_ONE)) & -alignment)); + char * pos = ((size_t)(br - (char *)(p)) >= MIN_CHUNK_SIZE) + ? 
br + : br + alignment; mchunkptr newp = (mchunkptr)pos; - size_t leadsize = pos - (char*)(p); - size_t newsize = chunksize(p) - leadsize; + size_t leadsize = pos - (char *)(p); + size_t newsize = chunksize(p) - leadsize; - if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ newp->prev_foot = p->prev_foot + leadsize; newp->head = newsize; - } - else { /* Otherwise, give back leader, use the rest */ + + } else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); set_inuse(m, p, leadsize); dispose_chunk(m, p, leadsize); + } + p = newp; + } /* Give back spare room at the end */ if (!is_mmapped(p)) { + size_t size = chunksize(p); if (size > nb + MIN_CHUNK_SIZE) { - size_t remainder_size = size - nb; + + size_t remainder_size = size - nb; mchunkptr remainder = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, remainder, remainder_size); dispose_chunk(m, remainder, remainder_size); + } + } mem = chunk2mem(p); - assert (chunksize(p) >= nb); + assert(chunksize(p) >= nb); assert(((size_t)mem & (alignment - 1)) == 0); check_inuse_chunk(m, p); POSTACTION(m); + } + } + return mem; + } /* @@ -5019,50 +5655,50 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { bit 0 set if all elements are same size (using sizes[0]) bit 1 set if elements should be zeroed */ -static void** ialloc(mstate m, - size_t n_elements, - size_t* sizes, - int opts, - void* chunks[]) { - - size_t element_size; /* chunksize of each element, if all same */ - size_t contents_size; /* total size of elements */ - size_t array_size; /* request size of pointer array */ - void* mem; /* malloced aggregate space */ - mchunkptr p; /* corresponding chunk */ - size_t remainder_size; /* remaining bytes while splitting */ - void** marray; /* either "chunks" or malloced ptr array */ - mchunkptr array_chunk; /* chunk for malloced ptr array */ - flag_t was_enabled; /* to disable mmap */ +static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, + void *chunks[]) { + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void * mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void ** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ size_t size; size_t i; ensure_initialization(); /* compute array length, if needed */ if (chunks != 0) { - if (n_elements == 0) - return chunks; /* nothing to do */ + + if (n_elements == 0) return chunks; /* nothing to do */ marray = chunks; array_size = 0; - } - else { + + } else { + /* if empty req, must still return chunk representing empty array */ - if (n_elements == 0) - return (void**)internal_malloc(m, 0); + if (n_elements == 0) return (void **)internal_malloc(m, 0); marray = 0; - array_size = request2size(n_elements * (sizeof(void*))); + array_size = request2size(n_elements * (sizeof(void *))); + } /* compute total element size */ - if (opts & 0x1) { /* all-same-size */ + if (opts & 0x1) { /* all-same-size */ element_size = request2size(*sizes); contents_size = n_elements * element_size; - } - else { /* add up all the sizes */ + + } else { /* add up all the sizes */ + element_size = 0; contents_size = 0; for (i = 0; i != n_elements; 
++i) contents_size += request2size(sizes[i]); + } size = contents_size + array_size; @@ -5075,10 +5711,8 @@ static void** ialloc(mstate m, was_enabled = use_mmap(m); disable_mmap(m); mem = internal_malloc(m, size - CHUNK_OVERHEAD); - if (was_enabled) - enable_mmap(m); - if (mem == 0) - return 0; + if (was_enabled) enable_mmap(m); + if (mem == 0) return 0; if (PREACTION(m)) return 0; p = mem2chunk(mem); @@ -5086,24 +5720,30 @@ static void** ialloc(mstate m, assert(!is_mmapped(p)); - if (opts & 0x2) { /* optionally clear the elements */ - __builtin_memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + if (opts & 0x2) { /* optionally clear the elements */ + __builtin_memset((size_t *)mem, 0, + remainder_size - SIZE_T_SIZE - array_size); + } /* If not provided, allocate the pointer array as final part of chunk */ if (marray == 0) { - size_t array_chunk_size; + + size_t array_chunk_size; array_chunk = chunk_plus_offset(p, contents_size); array_chunk_size = remainder_size - contents_size; - marray = (void**) (chunk2mem(array_chunk)); + marray = (void **)(chunk2mem(array_chunk)); set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); remainder_size = contents_size; + } /* split out elements */ - for (i = 0; ; ++i) { + for (i = 0;; ++i) { + marray[i] = chunk2mem(p); - if (i != n_elements-1) { + if (i != n_elements - 1) { + if (element_size != 0) size = element_size; else @@ -5111,31 +5751,42 @@ static void** ialloc(mstate m, remainder_size -= size; set_size_and_pinuse_of_inuse_chunk(m, p, size); p = chunk_plus_offset(p, size); - } - else { /* the final element absorbs any overallocation slop */ + + } else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); break; + } + } -#if DEBUG + #if DEBUG if (marray != chunks) { + /* final element must have exactly exhausted chunk */ if (element_size != 0) { + assert(remainder_size == element_size); - } - else { + + } else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) check_inuse_chunk(m, mem2chunk(marray[i])); -#endif /* DEBUG */ + #endif /* DEBUG */ POSTACTION(m); return marray; + } /* Try to free all pointers in the given array. @@ -5145,316 +5796,431 @@ static void** ialloc(mstate m, chunks before freeing, which will occur often if allocated with ialloc or the array is sorted. 
*/ -static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) { +static size_t internal_bulk_free(mstate m, void *array[], size_t nelem) { + size_t unfreed = 0; if (!PREACTION(m)) { - void** a; - void** fence = &(array[nelem]); + + void **a; + void **fence = &(array[nelem]); for (a = array; a != fence; ++a) { - void* mem = *a; + + void *mem = *a; if (mem != 0) { + mchunkptr p = mem2chunk(mem); - size_t psize = chunksize(p); -#if FOOTERS + size_t psize = chunksize(p); + #if FOOTERS if (get_mstate_for(p) != m) { + ++unfreed; continue; + } -#endif + + #endif check_inuse_chunk(m, p); *a = 0; if (RTCHECK(ok_address(m, p) && ok_inuse(p))) { - void ** b = a + 1; /* try to merge with next chunk */ + + void ** b = a + 1; /* try to merge with next chunk */ mchunkptr next = next_chunk(p); if (b != fence && *b == chunk2mem(next)) { + size_t newsize = chunksize(next) + psize; set_inuse(m, p, newsize); *b = chunk2mem(p); - } - else + + } else + dispose_chunk(m, p, psize); - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); break; + } + } + } - if (should_trim(m, m->topsize)) - sys_trim(m, 0); + + if (should_trim(m, m->topsize)) sys_trim(m, 0); POSTACTION(m); + } + return unfreed; + } -/* Traversal */ -#if MALLOC_INSPECT_ALL + /* Traversal */ + #if MALLOC_INSPECT_ALL static void internal_inspect_all(mstate m, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + void (*handler)(void *start, void *end, + size_t used_bytes, + void * callback_arg), + void *arg) { + if (is_initialized(m)) { - mchunkptr top = m->top; + + mchunkptr top = m->top; msegmentptr s; for (s = &m->seg; s != 0; s = s->next) { + mchunkptr q = align_as_chunk(s->base); while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) { + mchunkptr next = next_chunk(q); - size_t sz = chunksize(q); - size_t used; - void* start; + size_t sz = chunksize(q); + size_t used; + void * start; if (is_inuse(q)) { - used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ + + used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ start = chunk2mem(q); - } - else { + + } else { + used = 0; - if (is_small(sz)) { /* offset by possible bookkeeping */ - start = (void*)((char*)q + sizeof(struct malloc_chunk)); - } - else { - start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); + if (is_small(sz)) { /* offset by possible bookkeeping */ + start = (void *)((char *)q + sizeof(struct malloc_chunk)); + + } else { + + start = (void *)((char *)q + sizeof(struct malloc_tree_chunk)); + } + } - if (start < (void*)next) /* skip if all space is bookkeeping */ + + if (start < (void *)next) /* skip if all space is bookkeeping */ handler(start, next, used, arg); - if (q == top) - break; + if (q == top) break; q = next; + } + } + } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ /* ------------------ Exported realloc, memalign, etc -------------------- */ -#if !ONLY_MSPACES + #if !ONLY_MSPACES -void* dlrealloc(void* oldmem, size_t bytes) { - void* mem = 0; +void *dlrealloc(void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem == 0) { + mem = dlmalloc(bytes); - } - else if (bytes >= MAX_REQUEST) { + + } else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } -#ifdef REALLOC_ZERO_BYTES_FREES + + #ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { + dlfree(oldmem); + } -#endif /* REALLOC_ZERO_BYTES_FREES */ + + #endif /* REALLOC_ZERO_BYTES_FREES */ else { - size_t nb = request2size(bytes); + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! 
FOOTERS + #if !FOOTERS mstate m = gm; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { + check_inuse_chunk(m, newp); mem = chunk2mem(newp); - } - else { + + } else { + mem = internal_malloc(m, bytes); if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); internal_free(m, oldmem); + } + } + } + } + return mem; + } -void* dlrealloc_in_place(void* oldmem, size_t bytes) { - void* mem = 0; +void *dlrealloc_in_place(void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); + + } else { + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = gm; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { + check_inuse_chunk(m, newp); mem = oldmem; + } + } + } + } + return mem; + } -void* dlmemalign(size_t alignment, size_t bytes) { - if (alignment <= MALLOC_ALIGNMENT) { - return dlmalloc(bytes); - } +void *dlmemalign(size_t alignment, size_t bytes) { + + if (alignment <= MALLOC_ALIGNMENT) { return dlmalloc(bytes); } return internal_memalign(gm, alignment, bytes); + } -int dlposix_memalign(void** pp, size_t alignment, size_t bytes) { - void* mem = 0; +int dlposix_memalign(void **pp, size_t alignment, size_t bytes) { + + void *mem = 0; if (alignment == MALLOC_ALIGNMENT) mem = dlmalloc(bytes); else { - size_t d = alignment / sizeof(void*); - size_t r = alignment % sizeof(void*); - if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0) + + size_t d = alignment / sizeof(void *); + size_t r = alignment % sizeof(void *); + if (r != 0 || d == 0 || (d & (d - SIZE_T_ONE)) != 0) return EINVAL; else if (bytes <= MAX_REQUEST - alignment) { - if (alignment < MIN_CHUNK_SIZE) - alignment = MIN_CHUNK_SIZE; + + if (alignment < MIN_CHUNK_SIZE) alignment = MIN_CHUNK_SIZE; mem = internal_memalign(gm, alignment, bytes); + } + } + if (mem == 0) return ENOMEM; else { + *pp = mem; return 0; + } + } -void* dlvalloc(size_t bytes) { +void *dlvalloc(size_t bytes) { + size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; return dlmemalign(pagesz, bytes); + } -void* dlpvalloc(size_t bytes) { +void *dlpvalloc(size_t bytes) { + size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; - return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); + return dlmemalign(pagesz, + (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); + } -void** dlindependent_calloc(size_t n_elements, size_t elem_size, - void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ +void **dlindependent_calloc(size_t n_elements, size_t elem_size, + void *chunks[]) { + + size_t sz = elem_size; /* serves as 1-element array */ return ialloc(gm, n_elements, &sz, 3, chunks); + } -void** dlindependent_comalloc(size_t n_elements, size_t sizes[], - void* chunks[]) { +void **dlindependent_comalloc(size_t n_elements, size_t 
sizes[], + void *chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); + } -size_t dlbulk_free(void* array[], size_t nelem) { +size_t dlbulk_free(void *array[], size_t nelem) { + return internal_bulk_free(gm, array, nelem); + } -#if MALLOC_INSPECT_ALL -void dlmalloc_inspect_all(void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + #if MALLOC_INSPECT_ALL +void dlmalloc_inspect_all(void (*handler)(void *start, void *end, + size_t used_bytes, + void * callback_arg), + void *arg) { + ensure_initialization(); if (!PREACTION(gm)) { + internal_inspect_all(gm, handler, arg); POSTACTION(gm); + } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ int dlmalloc_trim(size_t pad) { + int result = 0; ensure_initialization(); if (!PREACTION(gm)) { + result = sys_trim(gm, pad); POSTACTION(gm); + } + return result; + } size_t dlmalloc_footprint(void) { + return gm->footprint; + } size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; + } size_t dlmalloc_footprint_limit(void) { + size_t maf = gm->footprint_limit; return maf == 0 ? MAX_SIZE_T : maf; + } size_t dlmalloc_set_footprint_limit(size_t bytes) { - size_t result; /* invert sense of 0 */ - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ + + size_t result; /* invert sense of 0 */ + if (bytes == 0) result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); return gm->footprint_limit = result; + } -#if !NO_MALLINFO + #if !NO_MALLINFO struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); + } -#endif /* NO_MALLINFO */ -#if !NO_MALLOC_STATS + #endif /* NO_MALLINFO */ + + #if !NO_MALLOC_STATS void dlmalloc_stats() { + internal_malloc_stats(gm); + } -#endif /* NO_MALLOC_STATS */ + + #endif /* NO_MALLOC_STATS */ int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); + } -size_t dlmalloc_usable_size(void* mem) { +size_t dlmalloc_usable_size(void *mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); + if (is_inuse(p)) return chunksize(p) - overhead_for(p); + } + return 0; + } -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ /* ----------------------------- user mspaces ---------------------------- */ -#if MSPACES + #if MSPACES + +static mstate init_user_mstate(char *tbase, size_t tsize) { -static mstate init_user_mstate(char* tbase, size_t tsize) { - size_t msize = pad_request(sizeof(struct malloc_state)); + size_t msize = pad_request(sizeof(struct malloc_state)); mchunkptr mn; mchunkptr msp = align_as_chunk(tbase); - mstate m = (mstate)(chunk2mem(msp)); + mstate m = (mstate)(chunk2mem(msp)); __builtin_memset(m, 0, msize); (void)INITIAL_LOCK(&m->mutex); - msp->head = (msize|INUSE_BITS); + msp->head = (msize | INUSE_BITS); m->seg.base = m->least_addr = tbase; m->seg.size = m->footprint = m->max_footprint = tsize; m->magic = mparams.magic; @@ -5465,82 +6231,111 @@ static mstate init_user_mstate(char* tbase, size_t tsize) { disable_contiguous(m); init_bins(m); mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); check_top_chunk(m, m->top); return m; + } mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct 
malloc_state)); - if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - size_t rs = ((capacity == 0)? mparams.granularity : - (capacity + TOP_FOOT_SIZE + msize)); + if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { + + size_t rs = ((capacity == 0) ? mparams.granularity + : (capacity + TOP_FOOT_SIZE + msize)); size_t tsize = granularity_align(rs); - char* tbase = (char*)(CALL_MMAP(tsize)); + char * tbase = (char *)(CALL_MMAP(tsize)); if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); m->seg.sflags = USE_MMAP_BIT; set_lock(m, locked); + } + } + return (mspace)m; + } -mspace create_mspace_with_base(void* base, size_t capacity, int locked) { +mspace create_mspace_with_base(void *base, size_t capacity, int locked) { + mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct malloc_state)); if (capacity > msize + TOP_FOOT_SIZE && - capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - m = init_user_mstate((char*)base, capacity); + capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { + + m = init_user_mstate((char *)base, capacity); m->seg.sflags = EXTERN_BIT; set_lock(m, locked); + } + return (mspace)m; + } int mspace_track_large_chunks(mspace msp, int enable) { - int ret = 0; + + int ret = 0; mstate ms = (mstate)msp; if (!PREACTION(ms)) { - if (!use_mmap(ms)) { - ret = 1; - } + + if (!use_mmap(ms)) { ret = 1; } if (!enable) { + enable_mmap(ms); + } else { + disable_mmap(ms); + } + POSTACTION(ms); + } + return ret; + } size_t destroy_mspace(mspace msp) { + size_t freed = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; - (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ + (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ while (sp != 0) { - char* base = sp->base; + + char * base = sp->base; size_t size = sp->size; flag_t flag = sp->sflags; - (void)base; /* placate people compiling -Wunused-variable */ + (void)base; /* placate people compiling -Wunused-variable */ sp = sp->next; if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && CALL_MUNMAP(base, size) == 0) freed += size; + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return freed; + } /* @@ -5548,25 +6343,31 @@ size_t destroy_mspace(mspace msp) { versions. This is not so nice but better than the alternatives. */ -void* mspace_malloc(mspace msp, size_t bytes) { +void *mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + if (!PREACTION(ms)) { - void* mem; + + void * mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = ms->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(ms, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -5575,15 +6376,17 @@ void* mspace_malloc(mspace msp, size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } else if (nb > ms->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(ms, i); p = b->fd; @@ -5594,54 +6397,71 @@ void* mspace_malloc(mspace msp, size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(ms, p, small_index2size(i)); else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); goto postaction; + } + } - } - else if (bytes >= MAX_REQUEST) + + } else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ else { + nb = pad_request(bytes); if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); goto postaction; + } + } if (nb <= ms->dvsize) { - size_t rsize = ms->dvsize - nb; + + size_t rsize = ms->dvsize - nb; mchunkptr p = ms->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = ms->dv = chunk_plus_offset(p, nb); ms->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t dvs = ms->dvsize; ms->dvsize = 0; ms->dv = 0; set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } - else if (nb < ms->topsize) { /* Split top */ - size_t rsize = ms->topsize -= nb; + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; mchunkptr p = ms->top; mchunkptr r = ms->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -5650,6 +6470,7 @@ void* mspace_malloc(mspace msp, size_t bytes) { check_top_chunk(ms, ms->top); check_malloced_chunk(ms, mem, nb); goto postaction; + } mem = sys_alloc(ms, nb); @@ -5657,372 +6478,519 @@ void* mspace_malloc(mspace msp, size_t bytes) { postaction: POSTACTION(ms); return mem; + } return 0; + } -void mspace_free(mspace msp, void* mem) { +void mspace_free(mspace msp, void *mem) { + if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS + + mchunkptr p = mem2chunk(mem); + #if FOOTERS mstate fm = get_mstate_for(p); - (void)msp; /* placate people compiling -Wunused */ -#else /* FOOTERS */ + (void)msp; /* placate people compiling -Wunused */ + #else /* FOOTERS */ mstate fm = (mstate)msp; -#endif /* FOOTERS */ + #endif /* FOOTERS */ if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = 
chunksize(p); + + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - } - else { + + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; + } - } - else + + } else + goto erroraction; + } + } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { + size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { + fm->dv = 0; fm->dvsize = 0; + } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); + + if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; - } - else if (next == fm->dv) { + + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); goto postaction; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { + fm->dvsize = psize; goto postaction; + } + } - } - else + + } else + set_free_with_pinuse(p, psize, next); if (is_small(psize)) { + insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - } - else { + + } else { + tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); + if (--fm->release_checks == 0) release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); + } + } + } -void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { - void* mem; +void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + + void * mem; size_t req = 0; mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; + } -void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; +void *mspace_realloc(mspace msp, void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem == 0) { + mem = mspace_malloc(msp, bytes); - } - else if (bytes >= MAX_REQUEST) { + + } else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } -#ifdef REALLOC_ZERO_BYTES_FREES + + #ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { + mspace_free(msp, oldmem); + } -#endif /* REALLOC_ZERO_BYTES_FREES */ + + #endif /* REALLOC_ZERO_BYTES_FREES */ else { - size_t nb = request2size(bytes); + + size_t nb = request2size(bytes); mchunkptr oldp = 
mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = (mstate)msp; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { + check_inuse_chunk(m, newp); mem = chunk2mem(newp); - } - else { + + } else { + mem = mspace_malloc(m, bytes); if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); mspace_free(m, oldmem); + } + } + } + } + return mem; + } -void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; +void *mspace_realloc_in_place(mspace msp, void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); + + } else { + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = (mstate)msp; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); - (void)msp; /* placate people compiling -Wunused */ + (void)msp; /* placate people compiling -Wunused */ if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { + check_inuse_chunk(m, newp); mem = oldmem; + } + } + } + } + return mem; + } -void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { +void *mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } - if (alignment <= MALLOC_ALIGNMENT) - return mspace_malloc(msp, bytes); + + if (alignment <= MALLOC_ALIGNMENT) return mspace_malloc(msp, bytes); return internal_memalign(ms, alignment, bytes); + } -void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ +void **mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void *chunks[]) { + + size_t sz = elem_size; /* serves as 1-element array */ mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); + } -void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]) { +void **mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void *chunks[]) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); + } -size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) { +size_t mspace_bulk_free(mspace msp, void *array[], size_t nelem) { + return internal_bulk_free((mstate)msp, array, nelem); + } -#if MALLOC_INSPECT_ALL + #if MALLOC_INSPECT_ALL void mspace_inspect_all(mspace msp, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + void (*handler)(void *start, void *end, + size_t used_bytes, void *callback_arg), + void *arg) { + mstate ms = (mstate)msp; if (ok_magic(ms)) { + if (!PREACTION(ms)) { + 
internal_inspect_all(ms, handler, arg); POSTACTION(ms); + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ int mspace_trim(mspace msp, size_t pad) { - int result = 0; + + int result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); POSTACTION(ms); + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } -#if !NO_MALLOC_STATS + #if !NO_MALLOC_STATS void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; if (ok_magic(ms)) { + internal_malloc_stats(ms); + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + } -#endif /* NO_MALLOC_STATS */ + + #endif /* NO_MALLOC_STATS */ size_t mspace_footprint(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + result = ms->footprint; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_max_footprint(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + result = ms->max_footprint; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_footprint_limit(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + size_t maf = ms->footprint_limit; result = (maf == 0) ? MAX_SIZE_T : maf; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_set_footprint_limit(mspace msp, size_t bytes) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ + + if (bytes == 0) result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); ms->footprint_limit = result; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } -#if !NO_MALLINFO + #if !NO_MALLINFO struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - } + if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms, ms); } return internal_mallinfo(ms); + } -#endif /* NO_MALLINFO */ -size_t mspace_usable_size(const void* mem) { + #endif /* NO_MALLINFO */ + +size_t mspace_usable_size(const void *mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); + if (is_inuse(p)) return chunksize(p) - overhead_for(p); + } + return 0; + } int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); -} -#endif /* MSPACES */ +} + #endif /* MSPACES */ /* -------------------- Alternative MORECORE functions ------------------- */ @@ -6067,35 +7035,48 @@ int mspace_mallopt(int param_number, int value) { void *osMoreCore(int size) { + void *ptr = 0; static void *sbrk_top = 0; if (size > 0) { + if (size < MINIMUM_MORECORE_SIZE) size = MINIMUM_MORECORE_SIZE; if (CurrentExecutionLevel() == kTaskLevel) ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); if (ptr == 0) { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup our_os_pools[next_os_pool] = ptr; next_os_pool++; ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); sbrk_top = (char *) ptr 
+ size; return ptr; + } + else if (size < 0) { + // we don't currently support shrink behavior return (void *) MFAIL; + } + else { + return sbrk_top; + } + } // cleanup any allocated memory pools @@ -6103,19 +7084,22 @@ int mspace_mallopt(int param_number, int value) { void osCleanupMem(void) { + void **ptr; for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) if (*ptr) { + PoolDeallocate(*ptr); *ptr = 0; + } + } */ - /* ----------------------------------------------------------------------- History: v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea @@ -6335,4 +7319,5 @@ History: */ -#endif // __GLIBC__ +#endif // __GLIBC__ +
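The reindented hunks above carry three small pieces of arithmetic whose intent is easy to lose in the reflow. First, the request computation shared by dlcalloc() and mspace_calloc(): the division that detects multiplication overflow is only paid when either factor has bits above 0xffff, because two 16-bit values cannot overflow a 32-bit-or-wider size_t product. A standalone sketch (the wrapper name checked_calloc_size is hypothetical; the body mirrors the patched code):

  #include <stddef.h>

  #define MAX_SIZE_T (~(size_t)0)

  /* Sketch of the dlcalloc()/mspace_calloc() request computation. */
  size_t checked_calloc_size(size_t n_elements, size_t elem_size) {
    size_t req = 0;
    if (n_elements != 0) {
      req = n_elements * elem_size;
      /* Divide only if either factor exceeds 16 bits; otherwise the
         product cannot wrap a >= 32-bit size_t. */
      if (((n_elements | elem_size) & ~(size_t)0xffff) &&
          (req / n_elements != elem_size))
        req = MAX_SIZE_T; /* force downstream failure on overflow */
    }
    return req;
  }

A request forced to MAX_SIZE_T is then guaranteed to fail inside dlmalloc()/mspace_malloc() via the "Too big to allocate" path visible in the hunks above, so the caller sees NULL rather than a short allocation.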
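Second, the misaligned branch of internal_memalign() rounds the malloc'd address up to a boundary with the mask expression ((char *)mem + alignment - SIZE_T_ONE) & -alignment, after first promoting any non-power-of-two alignment request to a power of two. A minimal sketch of both steps, assuming the usual MALLOC_ALIGNMENT of 2 * sizeof(void *) and omitting the MIN_CHUNK_SIZE floor (helper names are illustrative, not from the patch):

  #include <assert.h>
  #include <stddef.h>

  #define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))

  /* Promote a non-power-of-two alignment, as internal_memalign() does. */
  static size_t normalize_alignment(size_t alignment) {
    if ((alignment & (alignment - 1)) != 0) { /* not a power of 2 */
      size_t a = MALLOC_ALIGNMENT << 1;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }
    return alignment;
  }

  /* Round addr up to a power-of-two boundary; -alignment (well-defined
     on unsigned types) is the mask clearing the low log2(alignment) bits. */
  static size_t align_up(size_t addr, size_t alignment) {
    return (addr + alignment - 1) & -alignment;
  }

  int main(void) {
    assert(normalize_alignment(48) == 64);   /* next power of two */
    assert(align_up(0x1003, 16) == 0x1010);  /* next 16-byte boundary */
    return 0;
  }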
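Third, dlposix_memalign() validates its alignment argument before delegating: POSIX requires a power-of-two multiple of sizeof(void *), which the patched code checks with one division, one remainder, and one bit test. Restated as a hypothetical predicate:

  #include <errno.h>
  #include <stddef.h>

  /* Returns 0 if posix_memalign() would accept this alignment,
     EINVAL otherwise (mirrors the d/r check in dlposix_memalign). */
  int posix_alignment_ok(size_t alignment) {
    size_t d = alignment / sizeof(void *);
    size_t r = alignment % sizeof(void *);
    if (r != 0 || d == 0 || (d & (d - 1)) != 0) return EINVAL;
    return 0;
  }

Because sizeof(void *) is itself a power of two, requiring the quotient d to be a power of two makes the whole alignment a power of two no smaller than a pointer; anything else earns EINVAL, matching the hunk above.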