From 267b085f80074e61bdacf1e85e99014b6b2cdad2 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Wed, 10 Feb 2021 15:15:16 +0100 Subject: dlmalloc only for non glibc qasan and AFL_QEMU_FORCE_DFL --- qemu_mode/qemuafl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'qemu_mode/qemuafl') diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl index 6ab6bf28..47722f64 160000 --- a/qemu_mode/qemuafl +++ b/qemu_mode/qemuafl @@ -1 +1 @@ -Subproject commit 6ab6bf28decb3e36eee43ffbd4a3bfd052dbbb50 +Subproject commit 47722f64e4c1662bad97dc25f3e4cc63959ff5f3 -- cgit 1.4.1 From 22a3c7f7d043d9dbf39c847061d88a4577537031 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 12 Feb 2021 09:42:22 +0100 Subject: fix #736 (ty b1gr3db) --- qemu_mode/QEMUAFL_VERSION | 2 +- qemu_mode/libqasan/dlmalloc.c | 5553 ++++++++++++++++++++++++----------------- qemu_mode/libqasan/hooks.c | 2 + qemu_mode/libqasan/libqasan.c | 2 +- qemu_mode/libqasan/malloc.c | 109 +- qemu_mode/libqasan/string.c | 2 +- qemu_mode/qemuafl | 2 +- src/afl-fuzz-redqueen.c | 3 +- 8 files changed, 3332 insertions(+), 2343 deletions(-) (limited to 'qemu_mode/qemuafl') diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION index d9f0ec33..e73a9588 100644 --- a/qemu_mode/QEMUAFL_VERSION +++ b/qemu_mode/QEMUAFL_VERSION @@ -1 +1 @@ -47722f64e4 +9a258d5b7a diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index 7e3cb159..bace0ff6 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -207,9 +207,12 @@ mspaces as thread-locals. For example: static __thread mspace tlms = 0; void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } Unless FOOTERS is defined, each mspace is completely independent. @@ -525,201 +528,203 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP improvement at the expense of carrying around more memory. 
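  As a concrete illustration of how these compile-time knobs are meant
  to be consumed (an editorial sketch; the macro names are dlmalloc's
  own, but the values below are illustrative and not taken from this
  commit), a build defines its overrides before this file is compiled:

    #define USE_DL_PREFIX                                  // dlmalloc()/dlfree() instead of malloc()/free()
    #define DEFAULT_MMAP_THRESHOLD ((size_t)1024U * 1024U) // direct-mmap requests of 1 MiB and up
    #define USE_LOCKS 1                                    // serialize heap access with a lock

  The first definition after this comment is exactly such an override:
  qemuafl builds this file with USE_DL_PREFIX, presumably so libqasan's
  malloc/free hooks can forward to the dl-prefixed entry points without
  recursing into themselves.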
*/
-#define USE_DL_PREFIX
-
-/* Version identifier to allow people to support multiple versions */
-#ifndef DLMALLOC_VERSION
-#define DLMALLOC_VERSION 20806
-#endif /* DLMALLOC_VERSION */
-
-#ifndef DLMALLOC_EXPORT
-#define DLMALLOC_EXPORT extern
-#endif
-
-#ifndef WIN32
-#ifdef _WIN32
-#define WIN32 1
-#endif /* _WIN32 */
-#ifdef _WIN32_WCE
-#define LACKS_FCNTL_H
-#define WIN32 1
-#endif /* _WIN32_WCE */
-#endif /* WIN32 */
-#ifdef WIN32
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#include <tchar.h>
-#define HAVE_MMAP 1
-#define HAVE_MORECORE 0
-#define LACKS_UNISTD_H
-#define LACKS_SYS_PARAM_H
-#define LACKS_SYS_MMAN_H
-#define LACKS_STRING_H
-#define LACKS_STRINGS_H
-#define LACKS_SYS_TYPES_H
-#define LACKS_ERRNO_H
-#define LACKS_SCHED_H
-#ifndef MALLOC_FAILURE_ACTION
-#define MALLOC_FAILURE_ACTION
-#endif /* MALLOC_FAILURE_ACTION */
-#ifndef MMAP_CLEARS
-#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
-#define MMAP_CLEARS 0
-#else
-#define MMAP_CLEARS 1
-#endif /* _WIN32_WCE */
-#endif /*MMAP_CLEARS */
-#endif /* WIN32 */
-
-#if defined(DARWIN) || defined(_DARWIN)
-/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
-#ifndef HAVE_MORECORE
-#define HAVE_MORECORE 0
-#define HAVE_MMAP 1
-/* OSX allocators provide 16 byte alignment */
-#ifndef MALLOC_ALIGNMENT
-#define MALLOC_ALIGNMENT ((size_t)16U)
-#endif
-#endif /* HAVE_MORECORE */
-#endif /* DARWIN */
-
-#ifndef LACKS_SYS_TYPES_H
-#include <sys/types.h> /* For size_t */
-#endif /* LACKS_SYS_TYPES_H */
-
-/* The maximum possible size_t value has all bits set */
-#define MAX_SIZE_T (~(size_t)0)
-
-#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
-#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
- (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
-#endif /* USE_LOCKS */
-
-#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
-#if ((defined(__GNUC__) && \
- ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \
- defined(__i386__) || defined(__x86_64__))) || \
- (defined(_MSC_VER) && _MSC_VER>=1310))
-#ifndef USE_SPIN_LOCKS
-#define USE_SPIN_LOCKS 1
-#endif /* USE_SPIN_LOCKS */
-#elif USE_SPIN_LOCKS
-#error "USE_SPIN_LOCKS defined without implementation"
-#endif /* ... locks available...
*/ -#elif !defined(USE_SPIN_LOCKS) -#define USE_SPIN_LOCKS 0 -#endif /* USE_LOCKS */ - -#ifndef ONLY_MSPACES -#define ONLY_MSPACES 0 -#endif /* ONLY_MSPACES */ -#ifndef MSPACES -#if ONLY_MSPACES -#define MSPACES 1 -#else /* ONLY_MSPACES */ -#define MSPACES 0 -#endif /* ONLY_MSPACES */ -#endif /* MSPACES */ -#ifndef MALLOC_ALIGNMENT -#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) -#endif /* MALLOC_ALIGNMENT */ -#ifndef FOOTERS -#define FOOTERS 0 -#endif /* FOOTERS */ -#ifndef ABORT -#define ABORT abort() -#endif /* ABORT */ -#ifndef ABORT_ON_ASSERT_FAILURE -#define ABORT_ON_ASSERT_FAILURE 1 -#endif /* ABORT_ON_ASSERT_FAILURE */ -#ifndef PROCEED_ON_ERROR -#define PROCEED_ON_ERROR 0 -#endif /* PROCEED_ON_ERROR */ - -#ifndef INSECURE -#define INSECURE 0 -#endif /* INSECURE */ -#ifndef MALLOC_INSPECT_ALL -#define MALLOC_INSPECT_ALL 0 -#endif /* MALLOC_INSPECT_ALL */ -#ifndef HAVE_MMAP -#define HAVE_MMAP 1 -#endif /* HAVE_MMAP */ -#ifndef MMAP_CLEARS -#define MMAP_CLEARS 1 -#endif /* MMAP_CLEARS */ -#ifndef HAVE_MREMAP -#ifdef linux -#define HAVE_MREMAP 1 -#define _GNU_SOURCE /* Turns on mremap() definition */ -#else /* linux */ -#define HAVE_MREMAP 0 -#endif /* linux */ -#endif /* HAVE_MREMAP */ -#ifndef MALLOC_FAILURE_ACTION -#define MALLOC_FAILURE_ACTION errno = ENOMEM; -#endif /* MALLOC_FAILURE_ACTION */ -#ifndef HAVE_MORECORE -#if ONLY_MSPACES -#define HAVE_MORECORE 0 -#else /* ONLY_MSPACES */ -#define HAVE_MORECORE 1 -#endif /* ONLY_MSPACES */ -#endif /* HAVE_MORECORE */ -#if !HAVE_MORECORE -#define MORECORE_CONTIGUOUS 0 -#else /* !HAVE_MORECORE */ -#define MORECORE_DEFAULT sbrk -#ifndef MORECORE_CONTIGUOUS -#define MORECORE_CONTIGUOUS 1 -#endif /* MORECORE_CONTIGUOUS */ -#endif /* HAVE_MORECORE */ -#ifndef DEFAULT_GRANULARITY -#if (MORECORE_CONTIGUOUS || defined(WIN32)) -#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ -#else /* MORECORE_CONTIGUOUS */ -#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) -#endif /* MORECORE_CONTIGUOUS */ -#endif /* DEFAULT_GRANULARITY */ -#ifndef DEFAULT_TRIM_THRESHOLD -#ifndef MORECORE_CANNOT_TRIM -#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) -#else /* MORECORE_CANNOT_TRIM */ -#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T -#endif /* MORECORE_CANNOT_TRIM */ -#endif /* DEFAULT_TRIM_THRESHOLD */ -#ifndef DEFAULT_MMAP_THRESHOLD -#if HAVE_MMAP -#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) -#else /* HAVE_MMAP */ -#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* DEFAULT_MMAP_THRESHOLD */ -#ifndef MAX_RELEASE_CHECK_RATE -#if HAVE_MMAP -#define MAX_RELEASE_CHECK_RATE 4095 -#else -#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T -#endif /* HAVE_MMAP */ -#endif /* MAX_RELEASE_CHECK_RATE */ -#ifndef USE_BUILTIN_FFS -#define USE_BUILTIN_FFS 0 -#endif /* USE_BUILTIN_FFS */ -#ifndef USE_DEV_RANDOM -#define USE_DEV_RANDOM 0 -#endif /* USE_DEV_RANDOM */ -#ifndef NO_MALLINFO -#define NO_MALLINFO 0 -#endif /* NO_MALLINFO */ -#ifndef MALLINFO_FIELD_TYPE -#define MALLINFO_FIELD_TYPE size_t -#endif /* MALLINFO_FIELD_TYPE */ -#ifndef NO_MALLOC_STATS -#define NO_MALLOC_STATS 0 -#endif /* NO_MALLOC_STATS */ -#ifndef NO_SEGMENT_TRAVERSAL -#define NO_SEGMENT_TRAVERSAL 0 -#endif /* NO_SEGMENT_TRAVERSAL */ + #define USE_DL_PREFIX + + /* Version identifier to allow people to support multiple versions */ + #ifndef DLMALLOC_VERSION + #define DLMALLOC_VERSION 20806 + #endif /* DLMALLOC_VERSION */ + + #ifndef DLMALLOC_EXPORT + #define DLMALLOC_EXPORT extern + #endif + + #ifndef WIN32 + 
#ifdef _WIN32
+ #define WIN32 1
+ #endif /* _WIN32 */
+ #ifdef _WIN32_WCE
+ #define LACKS_FCNTL_H
+ #define WIN32 1
+ #endif /* _WIN32_WCE */
+ #endif /* WIN32 */
+ #ifdef WIN32
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ #include <tchar.h>
+ #define HAVE_MMAP 1
+ #define HAVE_MORECORE 0
+ #define LACKS_UNISTD_H
+ #define LACKS_SYS_PARAM_H
+ #define LACKS_SYS_MMAN_H
+ #define LACKS_STRING_H
+ #define LACKS_STRINGS_H
+ #define LACKS_SYS_TYPES_H
+ #define LACKS_ERRNO_H
+ #define LACKS_SCHED_H
+ #ifndef MALLOC_FAILURE_ACTION
+ #define MALLOC_FAILURE_ACTION
+ #endif /* MALLOC_FAILURE_ACTION */
+ #ifndef MMAP_CLEARS
+ #ifdef _WIN32_WCE /* WINCE reportedly does not clear */
+ #define MMAP_CLEARS 0
+ #else
+ #define MMAP_CLEARS 1
+ #endif /* _WIN32_WCE */
+ #endif /*MMAP_CLEARS */
+ #endif /* WIN32 */
+
+ #if defined(DARWIN) || defined(_DARWIN)
+ /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
+ #ifndef HAVE_MORECORE
+ #define HAVE_MORECORE 0
+ #define HAVE_MMAP 1
+ /* OSX allocators provide 16 byte alignment */
+ #ifndef MALLOC_ALIGNMENT
+ #define MALLOC_ALIGNMENT ((size_t)16U)
+ #endif
+ #endif /* HAVE_MORECORE */
+ #endif /* DARWIN */
+
+ #ifndef LACKS_SYS_TYPES_H
+ #include <sys/types.h> /* For size_t */
+ #endif /* LACKS_SYS_TYPES_H */
+
+ /* The maximum possible size_t value has all bits set */
+ #define MAX_SIZE_T (~(size_t)0)
+
+ #ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
+ #define USE_LOCKS \
+ ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
+ (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
+ #endif /* USE_LOCKS */
+
+ #if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
+ #if ((defined(__GNUC__) && \
+ ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \
+ defined(__i386__) || defined(__x86_64__))) || \
+ (defined(_MSC_VER) && _MSC_VER >= 1310))
+ #ifndef USE_SPIN_LOCKS
+ #define USE_SPIN_LOCKS 1
+ #endif /* USE_SPIN_LOCKS */
+ #elif USE_SPIN_LOCKS
+ #error "USE_SPIN_LOCKS defined without implementation"
+ #endif /* ... locks available...
*/ + #elif !defined(USE_SPIN_LOCKS) + #define USE_SPIN_LOCKS 0 + #endif /* USE_LOCKS */ + + #ifndef ONLY_MSPACES + #define ONLY_MSPACES 0 + #endif /* ONLY_MSPACES */ + #ifndef MSPACES + #if ONLY_MSPACES + #define MSPACES 1 + #else /* ONLY_MSPACES */ + #define MSPACES 0 + #endif /* ONLY_MSPACES */ + #endif /* MSPACES */ + #ifndef MALLOC_ALIGNMENT + #define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) + #endif /* MALLOC_ALIGNMENT */ + #ifndef FOOTERS + #define FOOTERS 0 + #endif /* FOOTERS */ + #ifndef ABORT + #define ABORT abort() + #endif /* ABORT */ + #ifndef ABORT_ON_ASSERT_FAILURE + #define ABORT_ON_ASSERT_FAILURE 1 + #endif /* ABORT_ON_ASSERT_FAILURE */ + #ifndef PROCEED_ON_ERROR + #define PROCEED_ON_ERROR 0 + #endif /* PROCEED_ON_ERROR */ + + #ifndef INSECURE + #define INSECURE 0 + #endif /* INSECURE */ + #ifndef MALLOC_INSPECT_ALL + #define MALLOC_INSPECT_ALL 0 + #endif /* MALLOC_INSPECT_ALL */ + #ifndef HAVE_MMAP + #define HAVE_MMAP 1 + #endif /* HAVE_MMAP */ + #ifndef MMAP_CLEARS + #define MMAP_CLEARS 1 + #endif /* MMAP_CLEARS */ + #ifndef HAVE_MREMAP + #ifdef linux + #define HAVE_MREMAP 1 + #define _GNU_SOURCE /* Turns on mremap() definition */ + #else /* linux */ + #define HAVE_MREMAP 0 + #endif /* linux */ + #endif /* HAVE_MREMAP */ + #ifndef MALLOC_FAILURE_ACTION + #define MALLOC_FAILURE_ACTION errno = ENOMEM; + #endif /* MALLOC_FAILURE_ACTION */ + #ifndef HAVE_MORECORE + #if ONLY_MSPACES + #define HAVE_MORECORE 0 + #else /* ONLY_MSPACES */ + #define HAVE_MORECORE 1 + #endif /* ONLY_MSPACES */ + #endif /* HAVE_MORECORE */ + #if !HAVE_MORECORE + #define MORECORE_CONTIGUOUS 0 + #else /* !HAVE_MORECORE */ + #define MORECORE_DEFAULT sbrk + #ifndef MORECORE_CONTIGUOUS + #define MORECORE_CONTIGUOUS 1 + #endif /* MORECORE_CONTIGUOUS */ + #endif /* HAVE_MORECORE */ + #ifndef DEFAULT_GRANULARITY + #if (MORECORE_CONTIGUOUS || defined(WIN32)) + #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ + #else /* MORECORE_CONTIGUOUS */ + #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) + #endif /* MORECORE_CONTIGUOUS */ + #endif /* DEFAULT_GRANULARITY */ + #ifndef DEFAULT_TRIM_THRESHOLD + #ifndef MORECORE_CANNOT_TRIM + #define DEFAULT_TRIM_THRESHOLD \ + ((size_t)2U * (size_t)1024U * (size_t)1024U) + #else /* MORECORE_CANNOT_TRIM */ + #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T + #endif /* MORECORE_CANNOT_TRIM */ + #endif /* DEFAULT_TRIM_THRESHOLD */ + #ifndef DEFAULT_MMAP_THRESHOLD + #if HAVE_MMAP + #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) + #else /* HAVE_MMAP */ + #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T + #endif /* HAVE_MMAP */ + #endif /* DEFAULT_MMAP_THRESHOLD */ + #ifndef MAX_RELEASE_CHECK_RATE + #if HAVE_MMAP + #define MAX_RELEASE_CHECK_RATE 4095 + #else + #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T + #endif /* HAVE_MMAP */ + #endif /* MAX_RELEASE_CHECK_RATE */ + #ifndef USE_BUILTIN_FFS + #define USE_BUILTIN_FFS 0 + #endif /* USE_BUILTIN_FFS */ + #ifndef USE_DEV_RANDOM + #define USE_DEV_RANDOM 0 + #endif /* USE_DEV_RANDOM */ + #ifndef NO_MALLINFO + #define NO_MALLINFO 0 + #endif /* NO_MALLINFO */ + #ifndef MALLINFO_FIELD_TYPE + #define MALLINFO_FIELD_TYPE size_t + #endif /* MALLINFO_FIELD_TYPE */ + #ifndef NO_MALLOC_STATS + #define NO_MALLOC_STATS 0 + #endif /* NO_MALLOC_STATS */ + #ifndef NO_SEGMENT_TRAVERSAL + #define NO_SEGMENT_TRAVERSAL 0 + #endif /* NO_SEGMENT_TRAVERSAL */ /* mallopt tuning options. 
SVID/XPG defines four standard parameter @@ -728,123 +733,128 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP malloc does support the following options. */ -#undef M_TRIM_THRESHOLD -#undef M_GRANULARITY -#undef M_MMAP_THRESHOLD -#define M_TRIM_THRESHOLD (-1) -#define M_GRANULARITY (-2) -#define M_MMAP_THRESHOLD (-3) + #undef M_TRIM_THRESHOLD + #undef M_GRANULARITY + #undef M_MMAP_THRESHOLD + #define M_TRIM_THRESHOLD (-1) + #define M_GRANULARITY (-2) + #define M_MMAP_THRESHOLD (-3) /* ------------------------ Mallinfo declarations ------------------------ */ -#if !NO_MALLINFO -/* - This version of malloc supports the standard SVID/XPG mallinfo - routine that returns a struct containing usage properties and - statistics. It should work on any system that has a - /usr/include/malloc.h defining struct mallinfo. The main - declaration needed is the mallinfo struct that is returned (by-copy) - by mallinfo(). The malloinfo struct contains a bunch of fields that - are not even meaningful in this version of malloc. These fields are - are instead filled by mallinfo() with other numbers that might be of - interest. - - HAVE_USR_INCLUDE_MALLOC_H should be set if you have a - /usr/include/malloc.h file that includes a declaration of struct - mallinfo. If so, it is included; else a compliant version is - declared below. These must be precisely the same for mallinfo() to - work. The original SVID version of this struct, defined on most - systems with mallinfo, declares all fields as ints. But some others - define as unsigned long. If your system defines the fields using a - type of different width than listed here, you MUST #include your - system version and #define HAVE_USR_INCLUDE_MALLOC_H. -*/ + #if !NO_MALLINFO + /* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The malloinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + are instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. + */ -/* #define HAVE_USR_INCLUDE_MALLOC_H */ + /* #define HAVE_USR_INCLUDE_MALLOC_H */ -#ifdef HAVE_USR_INCLUDE_MALLOC_H -#include "/usr/include/malloc.h" -#else /* HAVE_USR_INCLUDE_MALLOC_H */ -#ifndef STRUCT_MALLINFO_DECLARED -/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */ -#define _STRUCT_MALLINFO -#define STRUCT_MALLINFO_DECLARED 1 + #ifdef HAVE_USR_INCLUDE_MALLOC_H + #include "/usr/include/malloc.h" + #else /* HAVE_USR_INCLUDE_MALLOC_H */ + #ifndef STRUCT_MALLINFO_DECLARED + /* HP-UX (and others?) 
redefines mallinfo unless _STRUCT_MALLINFO is + * defined */ + #define _STRUCT_MALLINFO + #define STRUCT_MALLINFO_DECLARED 1 struct mallinfo { - MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ - MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ - MALLINFO_FIELD_TYPE smblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblks; /* always 0 */ - MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ - MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ - MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ - MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ - MALLINFO_FIELD_TYPE fordblks; /* total free space */ - MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ + + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ + }; -#endif /* STRUCT_MALLINFO_DECLARED */ -#endif /* HAVE_USR_INCLUDE_MALLOC_H */ -#endif /* NO_MALLINFO */ + + #endif /* STRUCT_MALLINFO_DECLARED */ + #endif /* HAVE_USR_INCLUDE_MALLOC_H */ + #endif /* NO_MALLINFO */ /* Try to persuade compilers to inline. The most critical functions for inlining are defined as macros, so these aren't used for them. */ -#ifndef FORCEINLINE - #if defined(__GNUC__) -#define FORCEINLINE __inline __attribute__ ((always_inline)) - #elif defined(_MSC_VER) - #define FORCEINLINE __forceinline + #ifndef FORCEINLINE + #if defined(__GNUC__) + #define FORCEINLINE __inline __attribute__((always_inline)) + #elif defined(_MSC_VER) + #define FORCEINLINE __forceinline + #endif #endif -#endif -#ifndef NOINLINE - #if defined(__GNUC__) - #define NOINLINE __attribute__ ((noinline)) - #elif defined(_MSC_VER) - #define NOINLINE __declspec(noinline) - #else - #define NOINLINE + #ifndef NOINLINE + #if defined(__GNUC__) + #define NOINLINE __attribute__((noinline)) + #elif defined(_MSC_VER) + #define NOINLINE __declspec(noinline) + #else + #define NOINLINE + #endif #endif -#endif -#ifdef __cplusplus + #ifdef __cplusplus extern "C" { -#ifndef FORCEINLINE - #define FORCEINLINE inline -#endif -#endif /* __cplusplus */ -#ifndef FORCEINLINE - #define FORCEINLINE -#endif - -#if !ONLY_MSPACES - -/* ------------------- Declarations of public routines ------------------- */ - -#ifndef USE_DL_PREFIX -#define dlcalloc calloc -#define dlfree free -#define dlmalloc malloc -#define dlmemalign memalign -#define dlposix_memalign posix_memalign -#define dlrealloc realloc -#define dlrealloc_in_place realloc_in_place -#define dlvalloc valloc -#define dlpvalloc pvalloc -#define dlmallinfo mallinfo -#define dlmallopt mallopt -#define dlmalloc_trim malloc_trim -#define dlmalloc_stats malloc_stats -#define dlmalloc_usable_size malloc_usable_size -#define dlmalloc_footprint malloc_footprint -#define dlmalloc_max_footprint malloc_max_footprint -#define dlmalloc_footprint_limit malloc_footprint_limit -#define dlmalloc_set_footprint_limit malloc_set_footprint_limit -#define dlmalloc_inspect_all malloc_inspect_all -#define dlindependent_calloc independent_calloc -#define dlindependent_comalloc independent_comalloc 
-#define dlbulk_free bulk_free -#endif /* USE_DL_PREFIX */ + + #ifndef FORCEINLINE + #define FORCEINLINE inline + #endif + #endif /* __cplusplus */ + #ifndef FORCEINLINE + #define FORCEINLINE + #endif + + #if !ONLY_MSPACES + + /* ------------------- Declarations of public routines ------------------- */ + + #ifndef USE_DL_PREFIX + #define dlcalloc calloc + #define dlfree free + #define dlmalloc malloc + #define dlmemalign memalign + #define dlposix_memalign posix_memalign + #define dlrealloc realloc + #define dlrealloc_in_place realloc_in_place + #define dlvalloc valloc + #define dlpvalloc pvalloc + #define dlmallinfo mallinfo + #define dlmallopt mallopt + #define dlmalloc_trim malloc_trim + #define dlmalloc_stats malloc_stats + #define dlmalloc_usable_size malloc_usable_size + #define dlmalloc_footprint malloc_footprint + #define dlmalloc_max_footprint malloc_max_footprint + #define dlmalloc_footprint_limit malloc_footprint_limit + #define dlmalloc_set_footprint_limit malloc_set_footprint_limit + #define dlmalloc_inspect_all malloc_inspect_all + #define dlindependent_calloc independent_calloc + #define dlindependent_comalloc independent_comalloc + #define dlbulk_free bulk_free + #endif /* USE_DL_PREFIX */ /* malloc(size_t n) @@ -860,7 +870,7 @@ extern "C" { maximum supported value of n differs across systems, but is in all cases less than the maximum representable value of a size_t. */ -DLMALLOC_EXPORT void* dlmalloc(size_t); +DLMALLOC_EXPORT void *dlmalloc(size_t); /* free(void* p) @@ -869,14 +879,14 @@ DLMALLOC_EXPORT void* dlmalloc(size_t); It has no effect if p is null. If p was not malloced or already freed, free(p) will by default cause the current program to abort. */ -DLMALLOC_EXPORT void dlfree(void*); +DLMALLOC_EXPORT void dlfree(void *); /* calloc(size_t n_elements, size_t element_size); Returns a pointer to n_elements * element_size bytes, with all locations set to zero. */ -DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); +DLMALLOC_EXPORT void *dlcalloc(size_t, size_t); /* realloc(void* p, size_t n) @@ -900,7 +910,7 @@ DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); The old unix realloc convention of allowing the last-free'd chunk to be used as an argument to realloc is not supported. */ -DLMALLOC_EXPORT void* dlrealloc(void*, size_t); +DLMALLOC_EXPORT void *dlrealloc(void *, size_t); /* realloc_in_place(void* p, size_t n) @@ -915,7 +925,7 @@ DLMALLOC_EXPORT void* dlrealloc(void*, size_t); Returns p if successful; otherwise null. */ -DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); +DLMALLOC_EXPORT void *dlrealloc_in_place(void *, size_t); /* memalign(size_t alignment, size_t n); @@ -929,7 +939,7 @@ DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); Overreliance on memalign is a sure way to fragment space. */ -DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); +DLMALLOC_EXPORT void *dlmemalign(size_t, size_t); /* int posix_memalign(void** pp, size_t alignment, size_t n); @@ -939,14 +949,14 @@ DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); returns EINVAL if the alignment is not a power of two (3) fails and returns ENOMEM if memory cannot be allocated. */ -DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t); +DLMALLOC_EXPORT int dlposix_memalign(void **, size_t, size_t); /* valloc(size_t n); Equivalent to memalign(pagesize, n), where pagesize is the page size of the system. If the pagesize is unknown, 4096 is used. 
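  A short usage sketch of the prefixed entry points declared above (an
  editorial illustration, not part of the patch; error handling kept
  minimal):

    void *p = dlmemalign(64, 256);          // 256 bytes on a 64-byte boundary
    p = dlrealloc(p, 512);                  // like realloc: may move the block
    dlfree(p);

    void *q = NULL;
    if (dlposix_memalign(&q, 64, 256) == 0) // returns 0 on success, as described above
      dlfree(q);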
*/ -DLMALLOC_EXPORT void* dlvalloc(size_t); +DLMALLOC_EXPORT void *dlvalloc(size_t); /* mallopt(int parameter_number, int parameter_value) @@ -1021,7 +1031,7 @@ DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(); */ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); -#if MALLOC_INSPECT_ALL + #if MALLOC_INSPECT_ALL /* malloc_inspect_all(void(*handler)(void *start, void *end, @@ -1043,19 +1053,23 @@ DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); than 1000, you could write: static int count = 0; void count_chunks(void* start, void* end, size_t used, void* arg) { + if (used >= 1000) ++count; + } + then: malloc_inspect_all(count_chunks, NULL); malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined. */ -DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), - void* arg); +DLMALLOC_EXPORT void dlmalloc_inspect_all(void (*handler)(void *, void *, + size_t, void *), + void *arg); -#endif /* MALLOC_INSPECT_ALL */ + #endif /* MALLOC_INSPECT_ALL */ -#if !NO_MALLINFO + #if !NO_MALLINFO /* mallinfo() Returns (by copy) a struct containing various summary statistics: @@ -1079,7 +1093,7 @@ DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, thus be inaccurate. */ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); -#endif /* NO_MALLINFO */ + #endif /* NO_MALLINFO */ /* independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); @@ -1117,6 +1131,7 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); struct Node { int item; struct Node* next; }; struct Node* build_list() { + struct Node** pool; int n = read_number_of_nodes_needed(); if (n <= 0) return 0; @@ -1128,9 +1143,11 @@ DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); pool[i]->next = pool[i+1]; free(pool); // Can now free the array (or not, if it is needed later) return first; + } + */ -DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); +DLMALLOC_EXPORT void **dlindependent_calloc(size_t, size_t, void **); /* independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); @@ -1169,6 +1186,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); struct Foot { ... } void send_message(char* msg) { + int msglen = strlen(msg); size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; void* chunks[3]; @@ -1178,6 +1196,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); char* body = (char*)(chunks[1]); struct Foot* foot = (struct Foot*)(chunks[2]); // ... + } In general though, independent_comalloc is worth using only for @@ -1188,7 +1207,7 @@ DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); since it cannot reuse existing noncontiguous small chunks that might be available for some of the elements. */ -DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); +DLMALLOC_EXPORT void **dlindependent_comalloc(size_t, size_t *, void **); /* bulk_free(void* array[], size_t n_elements) @@ -1199,14 +1218,14 @@ DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); is returned. For large arrays of pointers with poor locality, it may be worthwhile to sort this array before calling bulk_free. */ -DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements); +DLMALLOC_EXPORT size_t dlbulk_free(void **, size_t n_elements); /* pvalloc(size_t n); Equivalent to valloc(minimum-page-that-holds(n)), that is, round up n to nearest pagesize. 
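  Two pocket examples for the routines documented above (a sketch; the
  sizes and the 4096-byte page are illustrative assumptions):

    void *a[2] = { dlmalloc(16), dlmalloc(32) };
    size_t unfreed = dlbulk_free(a, 2);     // frees both and NULLs the slots;
                                            // a nonzero return counts unfreeable entries

    void *pg = dlpvalloc(4097);             // with 4096-byte pages: 8192 bytes, page-aligned
    dlfree(pg);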
*/ -DLMALLOC_EXPORT void* dlpvalloc(size_t); +DLMALLOC_EXPORT void *dlpvalloc(size_t); /* malloc_trim(size_t pad); @@ -1229,7 +1248,7 @@ DLMALLOC_EXPORT void* dlpvalloc(size_t); Malloc_trim returns 1 if it actually released any memory, else 0. */ -DLMALLOC_EXPORT int dlmalloc_trim(size_t); +DLMALLOC_EXPORT int dlmalloc_trim(size_t); /* malloc_stats(); @@ -1250,7 +1269,7 @@ DLMALLOC_EXPORT int dlmalloc_trim(size_t); malloc_stats prints only the most commonly interesting statistics. More information can be obtained by calling mallinfo. */ -DLMALLOC_EXPORT void dlmalloc_stats(void); +DLMALLOC_EXPORT void dlmalloc_stats(void); /* malloc_usable_size(void* p); @@ -1266,17 +1285,17 @@ DLMALLOC_EXPORT void dlmalloc_stats(void); p = malloc(n); assert(malloc_usable_size(p) >= 256); */ -size_t dlmalloc_usable_size(void*); +size_t dlmalloc_usable_size(void *); -#endif /* ONLY_MSPACES */ + #endif /* ONLY_MSPACES */ -#if MSPACES + #if MSPACES /* mspace is an opaque type representing an independent region of space that supports mspace_malloc, etc. */ -typedef void* mspace; +typedef void *mspace; /* create_mspace creates and returns a new independent space with the @@ -1308,7 +1327,8 @@ DLMALLOC_EXPORT size_t destroy_mspace(mspace msp); Destroying this space will deallocate all additionally allocated space (if possible) but not the initial base. */ -DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked); +DLMALLOC_EXPORT mspace create_mspace_with_base(void *base, size_t capacity, + int locked); /* mspace_track_large_chunks controls whether requests for large chunks @@ -1323,12 +1343,11 @@ DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int */ DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable); - /* mspace_malloc behaves as malloc, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); +DLMALLOC_EXPORT void *mspace_malloc(mspace msp, size_t bytes); /* mspace_free behaves as free, but operates within @@ -1338,7 +1357,7 @@ DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); free may be called instead of mspace_free because freed chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); +DLMALLOC_EXPORT void mspace_free(mspace msp, void *mem); /* mspace_realloc behaves as realloc, but operates within @@ -1349,33 +1368,38 @@ DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); realloced chunks from any space are handled by their originating spaces. */ -DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize); +DLMALLOC_EXPORT void *mspace_realloc(mspace msp, void *mem, size_t newsize); /* mspace_calloc behaves as calloc, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); +DLMALLOC_EXPORT void *mspace_calloc(mspace msp, size_t n_elements, + size_t elem_size); /* mspace_memalign behaves as memalign, but operates within the given space. */ -DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); +DLMALLOC_EXPORT void *mspace_memalign(mspace msp, size_t alignment, + size_t bytes); /* mspace_independent_calloc behaves as independent_calloc, but operates within the given space. 
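  A minimal lifecycle sketch tying the mspace calls above together (the
  qasan_* names are invented for illustration; the wiring this commit
  actually adds lives in qemu_mode/libqasan/malloc.c):

    static mspace qasan_ms;                          // single-threaded sketch

    void *qasan_alloc(size_t n) {
      if (!qasan_ms) qasan_ms = create_mspace(0, 0); // default capacity, unlocked,
                                                     // as in the tlmalloc example earlier
      return mspace_malloc(qasan_ms, n);
    }

    void qasan_dealloc(void *p) {
      if (p) mspace_free(qasan_ms, p);
    }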
*/
-DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
- size_t elem_size, void* chunks[]);
+DLMALLOC_EXPORT void **mspace_independent_calloc(mspace msp, size_t n_elements,
+ size_t elem_size,
+ void * chunks[]);
/* mspace_independent_comalloc behaves as independent_comalloc, but operates within the given space. */
-DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
- size_t sizes[], void* chunks[]);
+DLMALLOC_EXPORT void **mspace_independent_comalloc(mspace msp,
+ size_t n_elements,
+ size_t sizes[],
+ void * chunks[]);
/* mspace_footprint() returns the number of bytes obtained from the
@@ -1389,19 +1413,18 @@ DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);
*/ DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);
-
-#if !NO_MALLINFO
+ #if !NO_MALLINFO
/* mspace_mallinfo behaves as mallinfo, but reports properties of the given space. */
DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);
-#endif /* NO_MALLINFO */
+ #endif /* NO_MALLINFO */
/* malloc_usable_size(void* p) behaves the same as malloc_usable_size; */
-DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);
+DLMALLOC_EXPORT size_t mspace_usable_size(const void *mem);
/* mspace_malloc_stats behaves as malloc_stats, but reports
@@ -1420,11 +1443,13 @@ DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);
*/ DLMALLOC_EXPORT int mspace_mallopt(int, int);
-#endif /* MSPACES */
+ #endif /* MSPACES */
-#ifdef __cplusplus
-} /* end of extern "C" */
-#endif /* __cplusplus */
+ #ifdef __cplusplus
+
+} /* end of extern "C" */
+
+ #endif /* __cplusplus */
/* ========================================================================
@@ -1438,392 +1463,413 @@ DLMALLOC_EXPORT int mspace_mallopt(int, int);
/*------------------------------ internal #includes ---------------------- */
-#ifdef _MSC_VER
-#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
-#endif /* _MSC_VER */
-#if !NO_MALLOC_STATS
-#include <stdio.h> /* for printing in malloc_stats */
-#endif /* NO_MALLOC_STATS */
-#ifndef LACKS_ERRNO_H
-#include <errno.h> /* for MALLOC_FAILURE_ACTION */
-#endif /* LACKS_ERRNO_H */
-#ifdef DEBUG
-#if ABORT_ON_ASSERT_FAILURE
-#undef assert
-#define assert(x) if(!(x)) ABORT
-#else /* ABORT_ON_ASSERT_FAILURE */
-#include <assert.h>
-#endif /* ABORT_ON_ASSERT_FAILURE */
-#else /* DEBUG */
-#ifndef assert
-#define assert(x)
-#endif
-#define DEBUG 0
-#endif /* DEBUG */
-#if !defined(WIN32) && !defined(LACKS_TIME_H)
-#include <time.h> /* for magic initialization */
-#endif /* WIN32 */
-#ifndef LACKS_STDLIB_H
-#include <stdlib.h> /* for abort() */
-#endif /* LACKS_STDLIB_H */
-#ifndef LACKS_STRING_H
-#include <string.h> /* for memset etc */
-#endif /* LACKS_STRING_H */
-#if USE_BUILTIN_FFS
-#ifndef LACKS_STRINGS_H
-#include <strings.h> /* for ffs */
-#endif /* LACKS_STRINGS_H */
-#endif /* USE_BUILTIN_FFS */
-#if HAVE_MMAP
-#ifndef LACKS_SYS_MMAN_H
-/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
-#if (defined(linux) && !defined(__USE_GNU))
-#define __USE_GNU 1
-#include <sys/mman.h> /* for mmap */
-#undef __USE_GNU
-#else
-#include <sys/mman.h> /* for mmap */
-#endif /* linux */
-#endif /* LACKS_SYS_MMAN_H */
-#ifndef LACKS_FCNTL_H
-#include <fcntl.h>
-#endif /* LACKS_FCNTL_H */
-#endif /* HAVE_MMAP */
-#ifndef LACKS_UNISTD_H
-#include <unistd.h> /* for sbrk, sysconf */
-#else /* LACKS_UNISTD_H */
-#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
-extern void* sbrk(ptrdiff_t);
-#endif /* FreeBSD etc */
-#endif /* LACKS_UNISTD_H */
-
-/* Declarations for locking */
-#if USE_LOCKS
-#ifndef WIN32
-#if defined (__SVR4) && defined (__sun) /* solaris */
-#include <thread.h>
-#elif !defined(LACKS_SCHED_H)
-#include <sched.h>
-#endif /* solaris or LACKS_SCHED_H */
-#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
-#include <pthread.h>
-#endif /* USE_RECURSIVE_LOCKS ... */
-#elif defined(_MSC_VER)
-#ifndef _M_AMD64
-/* These are already defined on AMD64 builds */
-#ifdef __cplusplus
+ #ifdef _MSC_VER
+ #pragma warning(disable : 4146) /* no "unsigned" warnings */
+ #endif /* _MSC_VER */
+ #if !NO_MALLOC_STATS
+ #include <stdio.h> /* for printing in malloc_stats */
+ #endif /* NO_MALLOC_STATS */
+ #ifndef LACKS_ERRNO_H
+ #include <errno.h> /* for MALLOC_FAILURE_ACTION */
+ #endif /* LACKS_ERRNO_H */
+ #ifdef DEBUG
+ #if ABORT_ON_ASSERT_FAILURE
+ #undef assert
+ #define assert(x) \
+ if (!(x)) ABORT
+ #else /* ABORT_ON_ASSERT_FAILURE */
+ #include <assert.h>
+ #endif /* ABORT_ON_ASSERT_FAILURE */
+ #else /* DEBUG */
+ #ifndef assert
+ #define assert(x)
+ #endif
+ #define DEBUG 0
+ #endif /* DEBUG */
+ #if !defined(WIN32) && !defined(LACKS_TIME_H)
+ #include <time.h> /* for magic initialization */
+ #endif /* WIN32 */
+ #ifndef LACKS_STDLIB_H
+ #include <stdlib.h> /* for abort() */
+ #endif /* LACKS_STDLIB_H */
+ #ifndef LACKS_STRING_H
+ #include <string.h> /* for memset etc */
+ #endif /* LACKS_STRING_H */
+ #if USE_BUILTIN_FFS
+ #ifndef LACKS_STRINGS_H
+ #include <strings.h> /* for ffs */
+ #endif /* LACKS_STRINGS_H */
+ #endif /* USE_BUILTIN_FFS */
+ #if HAVE_MMAP
+ #ifndef LACKS_SYS_MMAN_H
+ /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
+ #if (defined(linux) && !defined(__USE_GNU))
+ #define __USE_GNU 1
+ #include <sys/mman.h> /* for mmap */
+ #undef __USE_GNU
+ #else
+ #include <sys/mman.h> /* for mmap */
+ #endif /* linux */
+ #endif /* LACKS_SYS_MMAN_H */
+ #ifndef LACKS_FCNTL_H
+ #include <fcntl.h>
+ #endif /* LACKS_FCNTL_H */
+ #endif /* HAVE_MMAP */
+ #ifndef LACKS_UNISTD_H
+ #include <unistd.h> /* for sbrk, sysconf */
+ #else /* LACKS_UNISTD_H */
+ #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+extern void *sbrk(ptrdiff_t);
+ #endif /* FreeBSD etc */
+ #endif /* LACKS_UNISTD_H */
+
+ /* Declarations for locking */
+ #if USE_LOCKS
+ #ifndef WIN32
+ #if defined(__SVR4) && defined(__sun) /* solaris */
+ #include <thread.h>
+ #elif !defined(LACKS_SCHED_H)
+ #include <sched.h>
+ #endif /* solaris or LACKS_SCHED_H */
+ #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || \
+ !USE_SPIN_LOCKS
+ #include <pthread.h>
+ #endif /* USE_RECURSIVE_LOCKS ...
*/ + #elif defined(_MSC_VER) + #ifndef _M_AMD64 + /* These are already defined on AMD64 builds */ + #ifdef __cplusplus extern "C" { -#endif /* __cplusplus */ -LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp); + + #endif /* __cplusplus */ +LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, + LONG Comp); LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value); -#ifdef __cplusplus + #ifdef __cplusplus + } -#endif /* __cplusplus */ -#endif /* _M_AMD64 */ -#pragma intrinsic (_InterlockedCompareExchange) -#pragma intrinsic (_InterlockedExchange) -#define interlockedcompareexchange _InterlockedCompareExchange -#define interlockedexchange _InterlockedExchange -#elif defined(WIN32) && defined(__GNUC__) -#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b) -#define interlockedexchange __sync_lock_test_and_set -#endif /* Win32 */ -#else /* USE_LOCKS */ -#endif /* USE_LOCKS */ - -#ifndef LOCK_AT_FORK -#define LOCK_AT_FORK 0 -#endif - -/* Declarations for bit scanning on win32 */ -#if defined(_MSC_VER) && _MSC_VER>=1300 -#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ -#ifdef __cplusplus + + #endif /* __cplusplus */ + #endif /* _M_AMD64 */ + #pragma intrinsic(_InterlockedCompareExchange) + #pragma intrinsic(_InterlockedExchange) + #define interlockedcompareexchange _InterlockedCompareExchange + #define interlockedexchange _InterlockedExchange + #elif defined(WIN32) && defined(__GNUC__) + #define interlockedcompareexchange(a, b, c) \ + __sync_val_compare_and_swap(a, c, b) + #define interlockedexchange __sync_lock_test_and_set + #endif /* Win32 */ + #else /* USE_LOCKS */ + #endif /* USE_LOCKS */ + + #ifndef LOCK_AT_FORK + #define LOCK_AT_FORK 0 + #endif + + /* Declarations for bit scanning on win32 */ + #if defined(_MSC_VER) && _MSC_VER >= 1300 + #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ + #ifdef __cplusplus extern "C" { -#endif /* __cplusplus */ + + #endif /* __cplusplus */ unsigned char _BitScanForward(unsigned long *index, unsigned long mask); unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); -#ifdef __cplusplus + #ifdef __cplusplus + } -#endif /* __cplusplus */ - -#define BitScanForward _BitScanForward -#define BitScanReverse _BitScanReverse -#pragma intrinsic(_BitScanForward) -#pragma intrinsic(_BitScanReverse) -#endif /* BitScanForward */ -#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ - -#ifndef WIN32 -#ifndef malloc_getpagesize -# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ -# ifndef _SC_PAGE_SIZE -# define _SC_PAGE_SIZE _SC_PAGESIZE -# endif -# endif -# ifdef _SC_PAGE_SIZE -# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) -# else -# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) - extern size_t getpagesize(); -# define malloc_getpagesize getpagesize() -# else -# ifdef WIN32 /* use supplied emulation of getpagesize */ -# define malloc_getpagesize getpagesize() -# else -# ifndef LACKS_SYS_PARAM_H -# include -# endif -# ifdef EXEC_PAGESIZE -# define malloc_getpagesize EXEC_PAGESIZE -# else -# ifdef NBPG -# ifndef CLSIZE -# define malloc_getpagesize NBPG -# else -# define malloc_getpagesize (NBPG * CLSIZE) -# endif -# else -# ifdef NBPC -# define malloc_getpagesize NBPC -# else -# ifdef PAGESIZE -# define malloc_getpagesize PAGESIZE -# else /* just guess */ -# define malloc_getpagesize ((size_t)4096U) -# endif -# endif -# endif -# endif -# endif -# endif -# endif -#endif -#endif - -/* ------------------- 
size_t and alignment properties -------------------- */ - -/* The byte and bit size of a size_t */ -#define SIZE_T_SIZE (sizeof(size_t)) -#define SIZE_T_BITSIZE (sizeof(size_t) << 3) - -/* Some constants coerced to size_t */ -/* Annoying but necessary to avoid errors on some platforms */ -#define SIZE_T_ZERO ((size_t)0) -#define SIZE_T_ONE ((size_t)1) -#define SIZE_T_TWO ((size_t)2) -#define SIZE_T_FOUR ((size_t)4) -#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) -#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) -#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) -#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) - -/* The bit mask value corresponding to MALLOC_ALIGNMENT */ -#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) - -/* True if address a has acceptable alignment */ -#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) - -/* the number of bytes to offset an address to align it */ -#define align_offset(A)\ - ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ - ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) - -/* -------------------------- MMAP preliminaries ------------------------- */ -/* - If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and - checks to fail so compiler optimizer can delete code rather than - using so many "#if"s. -*/ + #endif /* __cplusplus */ + + #define BitScanForward _BitScanForward + #define BitScanReverse _BitScanReverse + #pragma intrinsic(_BitScanForward) + #pragma intrinsic(_BitScanReverse) + #endif /* BitScanForward */ + #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ + + #ifndef WIN32 + #ifndef malloc_getpagesize + #ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ + #ifndef _SC_PAGE_SIZE + #define _SC_PAGE_SIZE _SC_PAGESIZE + #endif + #endif + #ifdef _SC_PAGE_SIZE + #define malloc_getpagesize sysconf(_SC_PAGE_SIZE) + #else + #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) +extern size_t getpagesize(); + #define malloc_getpagesize getpagesize() + #else + #ifdef WIN32 /* use supplied emulation of getpagesize */ + #define malloc_getpagesize getpagesize() + #else + #ifndef LACKS_SYS_PARAM_H + #include + #endif + #ifdef EXEC_PAGESIZE + #define malloc_getpagesize EXEC_PAGESIZE + #else + #ifdef NBPG + #ifndef CLSIZE + #define malloc_getpagesize NBPG + #else + #define malloc_getpagesize (NBPG * CLSIZE) + #endif + #else + #ifdef NBPC + #define malloc_getpagesize NBPC + #else + #ifdef PAGESIZE + #define malloc_getpagesize PAGESIZE + #else /* just guess */ + #define malloc_getpagesize ((size_t)4096U) + #endif + #endif + #endif + #endif + #endif + #endif + #endif + #endif + #endif + + /* ------------------- size_t and alignment properties -------------------- */ + + /* The byte and bit size of a size_t */ + #define SIZE_T_SIZE (sizeof(size_t)) + #define SIZE_T_BITSIZE (sizeof(size_t) << 3) + + /* Some constants coerced to size_t */ + /* Annoying but necessary to avoid errors on some platforms */ + #define SIZE_T_ZERO ((size_t)0) + #define SIZE_T_ONE ((size_t)1) + #define SIZE_T_TWO ((size_t)2) + #define SIZE_T_FOUR ((size_t)4) + #define TWO_SIZE_T_SIZES (SIZE_T_SIZE << 1) + #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE << 2) + #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES + TWO_SIZE_T_SIZES) + #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) + + /* The bit mask value corresponding to MALLOC_ALIGNMENT */ + #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + + /* True if address a has acceptable alignment */ + #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) + + /* the number of 
bytes to offset an address to align it */ + #define align_offset(A) \ + ((((size_t)(A)&CHUNK_ALIGN_MASK) == 0) \ + ? 0 \ + : ((MALLOC_ALIGNMENT - ((size_t)(A)&CHUNK_ALIGN_MASK)) & \ + CHUNK_ALIGN_MASK)) + + /* -------------------------- MMAP preliminaries ------------------------- */ + + /* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. + */ + /* MORECORE and MMAP must return MFAIL on failure */ + #define MFAIL ((void *)(MAX_SIZE_T)) + #define CMFAIL ((char *)(MFAIL)) /* defined for convenience */ -/* MORECORE and MMAP must return MFAIL on failure */ -#define MFAIL ((void*)(MAX_SIZE_T)) -#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + #if HAVE_MMAP -#if HAVE_MMAP + #ifndef WIN32 + #define MMAP_PROT (PROT_READ | PROT_WRITE) + #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) + #define MAP_ANONYMOUS MAP_ANON + #endif /* MAP_ANON */ + #ifdef MAP_ANONYMOUS -#ifndef WIN32 -#define MMAP_PROT (PROT_READ|PROT_WRITE) -#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) -#define MAP_ANONYMOUS MAP_ANON -#endif /* MAP_ANON */ -#ifdef MAP_ANONYMOUS + #define MMAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) -#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) +static FORCEINLINE void *unixmmap(size_t size) { -static FORCEINLINE void* unixmmap(size_t size) { - void* result; + void *result; result = mmap(0, size, MMAP_PROT, MMAP_FLAGS, -1, 0); - if (result == MFAIL) - return MFAIL; + if (result == MFAIL) return MFAIL; return result; + } -static FORCEINLINE int unixmunmap(void* ptr, size_t size) { +static FORCEINLINE int unixmunmap(void *ptr, size_t size) { + int result; result = munmap(ptr, size); - if (result != 0) - return result; + if (result != 0) return result; return result; + } -#define MMAP_DEFAULT(s) unixmmap(s) -#define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) + #define MMAP_DEFAULT(s) unixmmap(s) + #define MUNMAP_DEFAULT(a, s) unixmunmap((a), (s)) -#else /* MAP_ANONYMOUS */ -/* - Nearly all versions of mmap support MAP_ANONYMOUS, so the following - is unlikely to be needed, but is supplied just in case. -*/ -#define MMAP_FLAGS (MAP_PRIVATE) -static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ -#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \ - (dev_zero_fd = open("/dev/zero", O_RDWR), \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ - mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) -#define MUNMAP_DEFAULT(a, s) munmap((a), (s)) -#endif /* MAP_ANONYMOUS */ + #else /* MAP_ANONYMOUS */ + /* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. + */ + #define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ + #define MMAP_DEFAULT(s) \ + ((dev_zero_fd < 0) \ + ? 
(dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) \ + : mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) + #define MUNMAP_DEFAULT(a, s) munmap((a), (s)) + #endif /* MAP_ANONYMOUS */ -#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) + #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) -#else /* WIN32 */ + #else /* WIN32 */ /* Win32 MMAP via VirtualAlloc */ -static FORCEINLINE void* win32mmap(size_t size) { - void* ptr; +static FORCEINLINE void *win32mmap(size_t size) { + + void *ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); - if (ptr == 0) - return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); + if (ptr == 0) return MFAIL; return ptr; + } /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ -static FORCEINLINE void* win32direct_mmap(size_t size) { - void* ptr; +static FORCEINLINE void *win32direct_mmap(size_t size) { + + void *ptr; - ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, - PAGE_READWRITE); - if (ptr == 0) - return MFAIL; + ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, + PAGE_READWRITE); + if (ptr == 0) return MFAIL; return ptr; + } /* This function supports releasing coalesed segments */ -static FORCEINLINE int win32munmap(void* ptr, size_t size) { +static FORCEINLINE int win32munmap(void *ptr, size_t size) { + MEMORY_BASIC_INFORMATION minfo; - char* cptr = (char*)ptr; + char *cptr = (char *)ptr; while (size) { - if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) - return -1; + + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) return -1; if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || minfo.State != MEM_COMMIT || minfo.RegionSize > size) return -1; - if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) - return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) return -1; cptr += minfo.RegionSize; size -= minfo.RegionSize; + } return 0; + } -#define MMAP_DEFAULT(s) win32mmap(s) -#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) -#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) -#endif /* WIN32 */ -#endif /* HAVE_MMAP */ + #define MMAP_DEFAULT(s) win32mmap(s) + #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) + #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) + #endif /* WIN32 */ + #endif /* HAVE_MMAP */ + + #if HAVE_MREMAP + #ifndef WIN32 -#if HAVE_MREMAP -#ifndef WIN32 +static FORCEINLINE void *dlmremap(void *old_address, size_t old_size, + size_t new_size, int flags) { -static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new_size, int flags) { - void* result; + void *result; result = mremap(old_address, old_size, new_size, flags); - if (result == MFAIL) - return MFAIL; + if (result == MFAIL) return MFAIL; return result; + } -#define MREMAP_DEFAULT(addr, osz, nsz, mv) dlmremap((addr), (osz), (nsz), (mv)) -#endif /* WIN32 */ -#endif /* HAVE_MREMAP */ + #define MREMAP_DEFAULT(addr, osz, nsz, mv) \ + dlmremap((addr), (osz), (nsz), (mv)) + #endif /* WIN32 */ + #endif /* HAVE_MREMAP */ -/** - * Define CALL_MORECORE - */ -#if HAVE_MORECORE + /** + * Define CALL_MORECORE + */ + #if HAVE_MORECORE #ifdef MORECORE - #define CALL_MORECORE(S) MORECORE(S) - #else /* MORECORE */ - #define CALL_MORECORE(S) MORECORE_DEFAULT(S) - #endif /* MORECORE */ -#else /* HAVE_MORECORE */ - #define CALL_MORECORE(S) MFAIL -#endif /* HAVE_MORECORE */ - -/** - * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP - */ -#if HAVE_MMAP - #define USE_MMAP_BIT (SIZE_T_ONE) + #define CALL_MORECORE(S) MORECORE(S) + 
#else /* MORECORE */ + #define CALL_MORECORE(S) MORECORE_DEFAULT(S) + #endif /* MORECORE */ + #else /* HAVE_MORECORE */ + #define CALL_MORECORE(S) MFAIL + #endif /* HAVE_MORECORE */ + + /** + * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP + */ + #if HAVE_MMAP + #define USE_MMAP_BIT (SIZE_T_ONE) #ifdef MMAP - #define CALL_MMAP(s) MMAP(s) - #else /* MMAP */ - #define CALL_MMAP(s) MMAP_DEFAULT(s) - #endif /* MMAP */ + #define CALL_MMAP(s) MMAP(s) + #else /* MMAP */ + #define CALL_MMAP(s) MMAP_DEFAULT(s) + #endif /* MMAP */ #ifdef MUNMAP - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) - #else /* MUNMAP */ - #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) - #endif /* MUNMAP */ + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) + #else /* MUNMAP */ + #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) + #endif /* MUNMAP */ #ifdef DIRECT_MMAP - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #else /* DIRECT_MMAP */ - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) - #endif /* DIRECT_MMAP */ -#else /* HAVE_MMAP */ - #define USE_MMAP_BIT (SIZE_T_ZERO) - - #define MMAP(s) MFAIL - #define MUNMAP(a, s) (-1) - #define DIRECT_MMAP(s) MFAIL - #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) - #define CALL_MMAP(s) MMAP(s) - #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) -#endif /* HAVE_MMAP */ - -/** - * Define CALL_MREMAP - */ -#if HAVE_MMAP && HAVE_MREMAP + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #else /* DIRECT_MMAP */ + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) + #endif /* DIRECT_MMAP */ + #else /* HAVE_MMAP */ + #define USE_MMAP_BIT (SIZE_T_ZERO) + + #define MMAP(s) MFAIL + #define MUNMAP(a, s) (-1) + #define DIRECT_MMAP(s) MFAIL + #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) + #define CALL_MMAP(s) MMAP(s) + #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) + #endif /* HAVE_MMAP */ + + /** + * Define CALL_MREMAP + */ + #if HAVE_MMAP && HAVE_MREMAP #ifdef MREMAP - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) - #else /* MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) - #endif /* MREMAP */ -#else /* HAVE_MMAP && HAVE_MREMAP */ - #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL -#endif /* HAVE_MMAP && HAVE_MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) + #else /* MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) \ + MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) + #endif /* MREMAP */ + #else /* HAVE_MMAP && HAVE_MREMAP */ + #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL + #endif /* HAVE_MMAP && HAVE_MREMAP */ -/* mstate bit set if continguous morecore disabled or failed */ -#define USE_NONCONTIGUOUS_BIT (4U) - -/* segment bit set in create_mspace_with_base */ -#define EXTERN_BIT (8U) + /* mstate bit set if continguous morecore disabled or failed */ + #define USE_NONCONTIGUOUS_BIT (4U) + /* segment bit set in create_mspace_with_base */ + #define EXTERN_BIT (8U) /* --------------------------- Lock preliminaries ------------------------ */ @@ -1855,248 +1901,286 @@ static FORCEINLINE void* dlmremap(void* old_address, size_t old_size, size_t new */ -#if !USE_LOCKS -#define USE_LOCK_BIT (0U) -#define INITIAL_LOCK(l) (0) -#define DESTROY_LOCK(l) (0) -#define ACQUIRE_MALLOC_GLOBAL_LOCK() -#define RELEASE_MALLOC_GLOBAL_LOCK() - -#else -#if USE_LOCKS > 1 -/* ----------------------- User-defined locks ------------------------ */ -/* Define your own lock implementation here */ -/* #define INITIAL_LOCK(lk) ... */ -/* #define DESTROY_LOCK(lk) ... */ -/* #define ACQUIRE_LOCK(lk) ... */ -/* #define RELEASE_LOCK(lk) ... 
*/ -/* #define TRY_LOCK(lk) ... */ -/* static MLOCK_T malloc_global_mutex = ... */ - -#elif USE_SPIN_LOCKS - -/* First, define CAS_LOCK and CLEAR_LOCK on ints */ -/* Note CAS_LOCK defined to return 0 on success */ - -#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) -#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) -#define CLEAR_LOCK(sl) __sync_lock_release(sl) - -#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) + #if !USE_LOCKS + #define USE_LOCK_BIT (0U) + #define INITIAL_LOCK(l) (0) + #define DESTROY_LOCK(l) (0) + #define ACQUIRE_MALLOC_GLOBAL_LOCK() + #define RELEASE_MALLOC_GLOBAL_LOCK() + + #else + #if USE_LOCKS > 1 + /* ----------------------- User-defined locks ------------------------ */ + /* Define your own lock implementation here */ + /* #define INITIAL_LOCK(lk) ... */ + /* #define DESTROY_LOCK(lk) ... */ + /* #define ACQUIRE_LOCK(lk) ... */ + /* #define RELEASE_LOCK(lk) ... */ + /* #define TRY_LOCK(lk) ... */ + /* static MLOCK_T malloc_global_mutex = ... */ + + #elif USE_SPIN_LOCKS + + /* First, define CAS_LOCK and CLEAR_LOCK on ints */ + /* Note CAS_LOCK defined to return 0 on success */ + + #if defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) + #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) + #define CLEAR_LOCK(sl) __sync_lock_release(sl) + + #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) /* Custom spin locks for older gcc on x86 */ static FORCEINLINE int x86_cas_lock(int *sl) { + int ret; int val = 1; int cmp = 0; - __asm__ __volatile__ ("lock; cmpxchgl %1, %2" - : "=a" (ret) - : "r" (val), "m" (*(sl)), "0"(cmp) - : "memory", "cc"); + __asm__ __volatile__("lock; cmpxchgl %1, %2" + : "=a"(ret) + : "r"(val), "m"(*(sl)), "0"(cmp) + : "memory", "cc"); return ret; + } -static FORCEINLINE void x86_clear_lock(int* sl) { +static FORCEINLINE void x86_clear_lock(int *sl) { + assert(*sl != 0); int prev = 0; int ret; - __asm__ __volatile__ ("lock; xchgl %0, %1" - : "=r" (ret) - : "m" (*(sl)), "0"(prev) - : "memory"); + __asm__ __volatile__("lock; xchgl %0, %1" + : "=r"(ret) + : "m"(*(sl)), "0"(prev) + : "memory"); + } -#define CAS_LOCK(sl) x86_cas_lock(sl) -#define CLEAR_LOCK(sl) x86_clear_lock(sl) - -#else /* Win32 MSC */ -#define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) -#define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) - -#endif /* ... gcc spins locks ... */ - -/* How to yield for a spin lock */ -#define SPINS_PER_YIELD 63 -#if defined(_MSC_VER) -#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ -#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) -#elif defined (__SVR4) && defined (__sun) /* solaris */ -#define SPIN_LOCK_YIELD thr_yield(); -#elif !defined(LACKS_SCHED_H) -#define SPIN_LOCK_YIELD sched_yield(); -#else -#define SPIN_LOCK_YIELD -#endif /* ... yield ... */ - -#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 + #define CAS_LOCK(sl) x86_cas_lock(sl) + #define CLEAR_LOCK(sl) x86_clear_lock(sl) + + #else /* Win32 MSC */ + #define CAS_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)1) + #define CLEAR_LOCK(sl) interlockedexchange((volatile LONG *)sl, (LONG)0) + + #endif /* ... gcc spins locks ... 
*/ + + /* How to yield for a spin lock */ + #define SPINS_PER_YIELD 63 + #if defined(_MSC_VER) + #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ + #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) + #elif defined(__SVR4) && defined(__sun) /* solaris */ + #define SPIN_LOCK_YIELD thr_yield(); + #elif !defined(LACKS_SCHED_H) + #define SPIN_LOCK_YIELD sched_yield(); + #else + #define SPIN_LOCK_YIELD + #endif /* ... yield ... */ + + #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 /* Plain spin locks use single word (embedded in malloc_states) */ static int spin_acquire_lock(int *sl) { + int spins = 0; while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) { - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } + + if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } + } + return 0; + } -#define MLOCK_T int -#define TRY_LOCK(sl) !CAS_LOCK(sl) -#define RELEASE_LOCK(sl) CLEAR_LOCK(sl) -#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0) -#define INITIAL_LOCK(sl) (*sl = 0) -#define DESTROY_LOCK(sl) (0) + #define MLOCK_T int + #define TRY_LOCK(sl) !CAS_LOCK(sl) + #define RELEASE_LOCK(sl) CLEAR_LOCK(sl) + #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl) ? spin_acquire_lock(sl) : 0) + #define INITIAL_LOCK(sl) (*sl = 0) + #define DESTROY_LOCK(sl) (0) static MLOCK_T malloc_global_mutex = 0; -#else /* USE_RECURSIVE_LOCKS */ -/* types for lock owners */ -#ifdef WIN32 -#define THREAD_ID_T DWORD -#define CURRENT_THREAD GetCurrentThreadId() -#define EQ_OWNER(X,Y) ((X) == (Y)) -#else -/* - Note: the following assume that pthread_t is a type that can be - initialized to (casted) zero. If this is not the case, you will need to - somehow redefine these or not use spin locks. -*/ -#define THREAD_ID_T pthread_t -#define CURRENT_THREAD pthread_self() -#define EQ_OWNER(X,Y) pthread_equal(X, Y) -#endif + #else /* USE_RECURSIVE_LOCKS */ + /* types for lock owners */ + #ifdef WIN32 + #define THREAD_ID_T DWORD + #define CURRENT_THREAD GetCurrentThreadId() + #define EQ_OWNER(X, Y) ((X) == (Y)) + #else + /* + Note: the following assume that pthread_t is a type that can be + initialized to (casted) zero. If this is not the case, you will need + to somehow redefine these or not use spin locks. 
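  In sketch form, the owner bookkeeping these macros enable lets the
  recursive lock below treat re-entry as a counter bump instead of a
  self-deadlock (a paraphrase of recursive_acquire_lock, which follows):

    if (EQ_OWNER(lk->threadid, CURRENT_THREAD)) {
      ++lk->c;                       // same thread: just count the nesting level
    } else {
      // spin until CAS_LOCK(&lk->sl) succeeds, then record
      // lk->threadid = CURRENT_THREAD and set lk->c = 1
    }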
+ */ + #define THREAD_ID_T pthread_t + #define CURRENT_THREAD pthread_self() + #define EQ_OWNER(X, Y) pthread_equal(X, Y) + #endif struct malloc_recursive_lock { - int sl; + + int sl; unsigned int c; - THREAD_ID_T threadid; + THREAD_ID_T threadid; + }; -#define MLOCK_T struct malloc_recursive_lock -static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0}; + #define MLOCK_T struct malloc_recursive_lock +static MLOCK_T malloc_global_mutex = {0, 0, (THREAD_ID_T)0}; static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) { + assert(lk->sl != 0); - if (--lk->c == 0) { - CLEAR_LOCK(&lk->sl); - } + if (--lk->c == 0) { CLEAR_LOCK(&lk->sl); } + } static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; - int spins = 0; + int spins = 0; for (;;) { + if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; lk->c = 1; return 0; + } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { + + } else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; return 0; + } - if ((++spins & SPINS_PER_YIELD) == 0) { - SPIN_LOCK_YIELD; - } + + if ((++spins & SPINS_PER_YIELD) == 0) { SPIN_LOCK_YIELD; } + } + } static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) { + THREAD_ID_T mythreadid = CURRENT_THREAD; if (*((volatile int *)(&lk->sl)) == 0) { + if (!CAS_LOCK(&lk->sl)) { + lk->threadid = mythreadid; lk->c = 1; return 1; + } - } - else if (EQ_OWNER(lk->threadid, mythreadid)) { + + } else if (EQ_OWNER(lk->threadid, mythreadid)) { + ++lk->c; return 1; + } + return 0; + } -#define RELEASE_LOCK(lk) recursive_release_lock(lk) -#define TRY_LOCK(lk) recursive_try_lock(lk) -#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) -#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) -#define DESTROY_LOCK(lk) (0) -#endif /* USE_RECURSIVE_LOCKS */ - -#elif defined(WIN32) /* Win32 critical sections */ -#define MLOCK_T CRITICAL_SECTION -#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) -#define RELEASE_LOCK(lk) LeaveCriticalSection(lk) -#define TRY_LOCK(lk) TryEnterCriticalSection(lk) -#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000)) -#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) -#define NEED_GLOBAL_LOCK_INIT - -static MLOCK_T malloc_global_mutex; + #define RELEASE_LOCK(lk) recursive_release_lock(lk) + #define TRY_LOCK(lk) recursive_try_lock(lk) + #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) + #define INITIAL_LOCK(lk) \ + ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) + #define DESTROY_LOCK(lk) (0) + #endif /* USE_RECURSIVE_LOCKS */ + + #elif defined(WIN32) /* Win32 critical sections */ + #define MLOCK_T CRITICAL_SECTION + #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) + #define RELEASE_LOCK(lk) LeaveCriticalSection(lk) + #define TRY_LOCK(lk) TryEnterCriticalSection(lk) + #define INITIAL_LOCK(lk) \ + (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000 | 4000)) + #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) + #define NEED_GLOBAL_LOCK_INIT + +static MLOCK_T malloc_global_mutex; static volatile LONG malloc_global_mutex_status; /* Use spin loop to initialize global lock */ static void init_malloc_global_mutex() { + for (;;) { + long stat = malloc_global_mutex_status; - if (stat > 0) - return; + if (stat > 0) return; /* transition to < 0 while initializing, then to > 0) */ - if (stat == 0 && - interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) { + if (stat == 0 && 
interlockedcompareexchange(&malloc_global_mutex_status, + (LONG)-1, (LONG)0) == 0) { + InitializeCriticalSection(&malloc_global_mutex); interlockedexchange(&malloc_global_mutex_status, (LONG)1); return; + } + SleepEx(0, FALSE); + } + } -#else /* pthreads-based locks */ -#define MLOCK_T pthread_mutex_t -#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) -#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) -#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) -#define INITIAL_LOCK(lk) pthread_init_lock(lk) -#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) + #else /* pthreads-based locks */ + #define MLOCK_T pthread_mutex_t + #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) + #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) + #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) + #define INITIAL_LOCK(lk) pthread_init_lock(lk) + #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) + #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && \ + defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) /* Cope with old-style linux recursive lock initialization by adding */ /* skipped internal declaration from pthread.h */ -extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, - int __kind)); -#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP -#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) -#endif /* USE_RECURSIVE_LOCKS ... */ +extern int pthread_mutexattr_setkind_np __P((pthread_mutexattr_t * __attr, + int __kind)); + #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP + #define pthread_mutexattr_settype(x, y) \ + pthread_mutexattr_setkind_np(x, y) + #endif /* USE_RECURSIVE_LOCKS ... */ static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; -static int pthread_init_lock (MLOCK_T *lk) { +static int pthread_init_lock(MLOCK_T *lk) { + pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr)) return 1; -#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 + #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; -#endif + #endif if (pthread_mutex_init(lk, &attr)) return 1; if (pthread_mutexattr_destroy(&attr)) return 1; return 0; + } -#endif /* ... lock types ... */ + #endif /* ... lock types ... */ -/* Common code for all lock types */ -#define USE_LOCK_BIT (2U) + /* Common code for all lock types */ + #define USE_LOCK_BIT (2U) -#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK -#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); -#endif + #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK + #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); + #endif -#ifndef RELEASE_MALLOC_GLOBAL_LOCK -#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); -#endif + #ifndef RELEASE_MALLOC_GLOBAL_LOCK + #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); + #endif -#endif /* USE_LOCKS */ + #endif /* USE_LOCKS */ /* ----------------------- Chunk representations ------------------------ */ @@ -2236,56 +2320,56 @@ static int pthread_init_lock (MLOCK_T *lk) { */ struct malloc_chunk { - size_t prev_foot; /* Size of previous chunk (if free). */ - size_t head; /* Size and inuse bits. */ - struct malloc_chunk* fd; /* double links -- used only if free. */ - struct malloc_chunk* bk; + + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. 
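The low-order bits
                            hold the PINUSE/CINUSE/FLAG4 flag bits
                            defined below; chunksize() masks them off.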
*/ + struct malloc_chunk *fd; /* double links -- used only if free. */ + struct malloc_chunk *bk; + }; typedef struct malloc_chunk mchunk; -typedef struct malloc_chunk* mchunkptr; -typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ -typedef unsigned int bindex_t; /* Described below */ -typedef unsigned int binmap_t; /* Described below */ -typedef unsigned int flag_t; /* The type of various bit flag sets */ +typedef struct malloc_chunk *mchunkptr; +typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */ +typedef unsigned int bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ /* ------------------- Chunks sizes and alignments ----------------------- */ -#define MCHUNK_SIZE (sizeof(mchunk)) + #define MCHUNK_SIZE (sizeof(mchunk)) -#if FOOTERS -#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -#else /* FOOTERS */ -#define CHUNK_OVERHEAD (SIZE_T_SIZE) -#endif /* FOOTERS */ + #if FOOTERS + #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) + #else /* FOOTERS */ + #define CHUNK_OVERHEAD (SIZE_T_SIZE) + #endif /* FOOTERS */ -/* MMapped chunks need a second word of overhead ... */ -#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) -/* ... and additional padding for fake next-chunk at foot */ -#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) + /* MMapped chunks need a second word of overhead ... */ + #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) + /* ... and additional padding for fake next-chunk at foot */ + #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) -/* The smallest size we can malloc is an aligned minimal chunk */ -#define MIN_CHUNK_SIZE\ - ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + /* The smallest size we can malloc is an aligned minimal chunk */ + #define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) -/* conversion from malloc headers to user pointers, and back */ -#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) -#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) -/* chunk associated with aligned address A */ -#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + /* conversion from malloc headers to user pointers, and back */ + #define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES)) + #define mem2chunk(mem) ((mchunkptr)((char *)(mem)-TWO_SIZE_T_SIZES)) + /* chunk associated with aligned address A */ + #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) -/* Bounds on request (not chunk) sizes. */ -#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) -#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + /* Bounds on request (not chunk) sizes. */ + #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) + #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) -/* pad request bytes into a usable size */ -#define pad_request(req) \ - (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) - -/* pad request, checking for minimum (but not maximum) */ -#define request2size(req) \ - (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + /* pad request bytes into a usable size */ + #define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + /* pad request, checking for minimum (but not maximum) */ + #define request2size(req) \ + (((req) < MIN_REQUEST) ? 
MIN_CHUNK_SIZE : pad_request(req)) /* ------------------ Operations on head and foot fields ----------------- */ @@ -2297,61 +2381,60 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */ FLAG4_BIT is not used by this malloc, but might be useful in extensions. */ -#define PINUSE_BIT (SIZE_T_ONE) -#define CINUSE_BIT (SIZE_T_TWO) -#define FLAG4_BIT (SIZE_T_FOUR) -#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) -#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) + #define PINUSE_BIT (SIZE_T_ONE) + #define CINUSE_BIT (SIZE_T_TWO) + #define FLAG4_BIT (SIZE_T_FOUR) + #define INUSE_BITS (PINUSE_BIT | CINUSE_BIT) + #define FLAG_BITS (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT) -/* Head value for fenceposts */ -#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + /* Head value for fenceposts */ + #define FENCEPOST_HEAD (INUSE_BITS | SIZE_T_SIZE) -/* extraction of fields from head words */ -#define cinuse(p) ((p)->head & CINUSE_BIT) -#define pinuse(p) ((p)->head & PINUSE_BIT) -#define flag4inuse(p) ((p)->head & FLAG4_BIT) -#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) -#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) + /* extraction of fields from head words */ + #define cinuse(p) ((p)->head & CINUSE_BIT) + #define pinuse(p) ((p)->head & PINUSE_BIT) + #define flag4inuse(p) ((p)->head & FLAG4_BIT) + #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) + #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) -#define chunksize(p) ((p)->head & ~(FLAG_BITS)) + #define chunksize(p) ((p)->head & ~(FLAG_BITS)) -#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) -#define set_flag4(p) ((p)->head |= FLAG4_BIT) -#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) + #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) + #define set_flag4(p) ((p)->head |= FLAG4_BIT) + #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) -/* Treat space at ptr +/- offset as a chunk */ -#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) -#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + /* Treat space at ptr +/- offset as a chunk */ + #define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s))) + #define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s))) -/* Ptr to next or previous physical malloc_chunk. */ -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + /* Ptr to next or previous physical malloc_chunk. 
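next_chunk() steps
     forward by the size stored in head; prev_chunk() steps back by
     prev_foot, which holds the previous chunk's size only while that
     chunk is free.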
*/ + #define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~FLAG_BITS))) + #define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot))) -/* extract next chunk's pinuse bit */ -#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + /* extract next chunk's pinuse bit */ + #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) -/* Get/set size at footer */ -#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) -#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + /* Get/set size at footer */ + #define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot) + #define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s)) -/* Set size, pinuse bit, and foot */ -#define set_size_and_pinuse_of_free_chunk(p, s)\ - ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + /* Set size, pinuse bit, and foot */ + #define set_size_and_pinuse_of_free_chunk(p, s) \ + ((p)->head = (s | PINUSE_BIT), set_foot(p, s)) -/* Set size, pinuse bit, foot, and clear next pinuse */ -#define set_free_with_pinuse(p, s, n)\ - (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + /* Set size, pinuse bit, foot, and clear next pinuse */ + #define set_free_with_pinuse(p, s, n) \ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) -/* Get the internal overhead associated with chunk p */ -#define overhead_for(p)\ - (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + /* Get the internal overhead associated with chunk p */ + #define overhead_for(p) (is_mmapped(p) ? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) -/* Return true if malloced space is not necessarily cleared */ -#if MMAP_CLEARS -#define calloc_must_clear(p) (!is_mmapped(p)) -#else /* MMAP_CLEARS */ -#define calloc_must_clear(p) (1) -#endif /* MMAP_CLEARS */ + /* Return true if malloced space is not necessarily cleared */ + #if MMAP_CLEARS + #define calloc_must_clear(p) (!is_mmapped(p)) + #else /* MMAP_CLEARS */ + #define calloc_must_clear(p) (1) + #endif /* MMAP_CLEARS */ /* ---------------------- Overlaid data structures ----------------------- */ @@ -2445,23 +2528,25 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ size_t prev_foot; size_t head; - struct malloc_tree_chunk* fd; - struct malloc_tree_chunk* bk; + struct malloc_tree_chunk *fd; + struct malloc_tree_chunk *bk; - struct malloc_tree_chunk* child[2]; - struct malloc_tree_chunk* parent; + struct malloc_tree_chunk *child[2]; + struct malloc_tree_chunk *parent; bindex_t index; + }; typedef struct malloc_tree_chunk tchunk; -typedef struct malloc_tree_chunk* tchunkptr; -typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ +typedef struct malloc_tree_chunk *tchunkptr; +typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */ -/* A little helper macro for trees */ -#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + /* A little helper macro for trees */ + #define leftmost_child(t) ((t)->child[0] != 0 ? 
(t)->child[0] : (t)->child[1]) /* ----------------------------- Segments -------------------------------- */ @@ -2521,141 +2606,145 @@ typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ */ struct malloc_segment { - char* base; /* base address */ - size_t size; /* allocated size */ - struct malloc_segment* next; /* ptr to next segment */ - flag_t sflags; /* mmap and extern flag */ + + char * base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment *next; /* ptr to next segment */ + flag_t sflags; /* mmap and extern flag */ + }; -#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) -#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) + #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) + #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) typedef struct malloc_segment msegment; -typedef struct malloc_segment* msegmentptr; +typedef struct malloc_segment *msegmentptr; -/* ---------------------------- malloc_state ----------------------------- */ + /* ---------------------------- malloc_state ----------------------------- */ -/* - A malloc_state holds all of the bookkeeping for a space. - The main fields are: - - Top - The topmost chunk of the currently active segment. Its size is - cached in topsize. The actual size of topmost space is - topsize+TOP_FOOT_SIZE, which includes space reserved for adding - fenceposts and segment records if necessary when getting more - space from the system. The size at which to autotrim top is - cached from mparams in trim_check, except that it is disabled if - an autotrim fails. - - Designated victim (dv) - This is the preferred chunk for servicing small requests that - don't have exact fits. It is normally the chunk split off most - recently to service another small request. Its size is cached in - dvsize. The link fields of this chunk are not maintained since it - is not kept in a bin. - - SmallBins - An array of bin headers for free chunks. These bins hold chunks - with sizes less than MIN_LARGE_SIZE bytes. Each bin contains - chunks of all the same size, spaced 8 bytes apart. To simplify - use in double-linked lists, each bin header acts as a malloc_chunk - pointing to the real first node, if it exists (else pointing to - itself). This avoids special-casing for headers. But to avoid - waste, we allocate only the fd/bk pointers of bins, and then use - repositioning tricks to treat these as the fields of a chunk. - - TreeBins - Treebins are pointers to the roots of trees holding a range of - sizes. There are 2 equally spaced treebins for each power of two - from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything - larger. - - Bin maps - There is one bit map for small bins ("smallmap") and one for - treebins ("treemap). Each bin sets its bit when non-empty, and - clears the bit when empty. Bit operations are then used to avoid - bin-by-bin searching -- nearly all "search" is done without ever - looking at bins that won't be selected. The bit maps - conservatively use 32 bits per map word, even if on 64bit system. - For a good description of some of the bit-based techniques used - here, see Henry S. Warren Jr's book "Hacker's Delight" (and - supplement at http://hackersdelight.org/). Many of these are - intended to reduce the branchiness of paths through malloc etc, as - well as to reduce the number of memory locations read or written. - - Segments - A list of segments headed by an embedded malloc_segment record - representing the initial space. 
- - Address check support - The least_addr field is the least address ever obtained from - MORECORE or MMAP. Attempted frees and reallocs of any address less - than this are trapped (unless INSECURE is defined). - - Magic tag - A cross-check field that should always hold same value as mparams.magic. - - Max allowed footprint - The maximum allowed bytes to allocate from system (zero means no limit) - - Flags - Bits recording whether to use MMAP, locks, or contiguous MORECORE - - Statistics - Each space keeps track of current and maximum system memory - obtained via MORECORE or MMAP. - - Trim support - Fields holding the amount of unused topmost memory that should trigger - trimming, and a counter to force periodic scanning to release unused - non-topmost segments. - - Locking - If USE_LOCKS is defined, the "mutex" lock is acquired and released - around every public call using this mspace. - - Extension support - A void* pointer and a size_t field that can be used to help implement - extensions to this malloc. -*/ + /* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. 
Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold same value as mparams.magic. + + Max allowed footprint + The maximum allowed bytes to allocate from system (zero means no limit) + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. + + Trim support + Fields holding the amount of unused topmost memory that should trigger + trimming, and a counter to force periodic scanning to release unused + non-topmost segments. + + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. + + Extension support + A void* pointer and a size_t field that can be used to help implement + extensions to this malloc. + */ -/* Bin types, widths and sizes */ -#define NSMALLBINS (32U) -#define NTREEBINS (32U) -#define SMALLBIN_SHIFT (3U) -#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) -#define TREEBIN_SHIFT (8U) -#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) -#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) -#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + /* Bin types, widths and sizes */ + #define NSMALLBINS (32U) + #define NTREEBINS (32U) + #define SMALLBIN_SHIFT (3U) + #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) + #define TREEBIN_SHIFT (8U) + #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) + #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) + #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) struct malloc_state { - binmap_t smallmap; - binmap_t treemap; - size_t dvsize; - size_t topsize; - char* least_addr; - mchunkptr dv; - mchunkptr top; - size_t trim_check; - size_t release_checks; - size_t magic; - mchunkptr smallbins[(NSMALLBINS+1)*2]; - tbinptr treebins[NTREEBINS]; - size_t footprint; - size_t max_footprint; - size_t footprint_limit; /* zero means no limit */ - flag_t mflags; -#if USE_LOCKS - MLOCK_T mutex; /* locate lock among fields that rarely change */ -#endif /* USE_LOCKS */ - msegment seg; - void* extp; /* Unused but available for extensions */ - size_t exts; + + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char * least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t release_checks; + size_t magic; + mchunkptr smallbins[(NSMALLBINS + 1) * 2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + size_t footprint_limit; /* zero means no limit */ + flag_t mflags; + #if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ + #endif /* USE_LOCKS */ + msegment seg; + void * extp; /* Unused but available for extensions */ + size_t exts; + }; -typedef struct malloc_state* mstate; +typedef struct malloc_state *mstate; /* ------------- Global malloc_state and malloc_params ------------------- */ @@ -2667,123 +2756,128 @@ typedef struct malloc_state* mstate; */ struct malloc_params { + size_t magic; size_t page_size; size_t granularity; size_t mmap_threshold; size_t trim_threshold; flag_t default_mflags; + }; static struct malloc_params mparams; -/* Ensure mparams initialized */ -#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) + /* Ensure mparams initialized */ + #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) -#if !ONLY_MSPACES + #if !ONLY_MSPACES /* 
The global malloc_state used for all non-"mspace" calls */ static struct malloc_state _gm_; -#define gm (&_gm_) -#define is_global(M) ((M) == &_gm_) + #define gm (&_gm_) + #define is_global(M) ((M) == &_gm_) -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ -#define is_initialized(M) ((M)->top != 0) + #define is_initialized(M) ((M)->top != 0) /* -------------------------- system alloc setup ------------------------- */ /* Operations on mflags */ -#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) -#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) -#if USE_LOCKS -#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) -#else -#define disable_lock(M) -#endif - -#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) -#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) -#if HAVE_MMAP -#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) -#else -#define disable_mmap(M) -#endif - -#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) -#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) - -#define set_lock(M,L)\ - ((M)->mflags = (L)?\ - ((M)->mflags | USE_LOCK_BIT) :\ - ((M)->mflags & ~USE_LOCK_BIT)) - -/* page-align a size */ -#define page_align(S)\ - (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) - -/* granularity-align a size */ -#define granularity_align(S)\ - (((S) + (mparams.granularity - SIZE_T_ONE))\ - & ~(mparams.granularity - SIZE_T_ONE)) - - -/* For mmap, use granularity alignment on windows, else page-align */ -#ifdef WIN32 -#define mmap_align(S) granularity_align(S) -#else -#define mmap_align(S) page_align(S) -#endif - -/* For sys_alloc, enough padding to ensure can malloc request on success */ -#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) - -#define is_page_aligned(S)\ - (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) -#define is_granularity_aligned(S)\ - (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) - -/* True if segment S holds address A */ -#define segment_holds(S, A)\ - ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + #define use_lock(M) ((M)->mflags & USE_LOCK_BIT) + #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) + #if USE_LOCKS + #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) + #else + #define disable_lock(M) + #endif + + #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) + #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) + #if HAVE_MMAP + #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) + #else + #define disable_mmap(M) + #endif + + #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) + #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) + + #define set_lock(M, L) \ + ((M)->mflags = \ + (L) ? 
((M)->mflags | USE_LOCK_BIT) : ((M)->mflags & ~USE_LOCK_BIT)) + + /* page-align a size */ + #define page_align(S) \ + (((S) + (mparams.page_size - SIZE_T_ONE)) & \ + ~(mparams.page_size - SIZE_T_ONE)) + + /* granularity-align a size */ + #define granularity_align(S) \ + (((S) + (mparams.granularity - SIZE_T_ONE)) & \ + ~(mparams.granularity - SIZE_T_ONE)) + + /* For mmap, use granularity alignment on windows, else page-align */ + #ifdef WIN32 + #define mmap_align(S) granularity_align(S) + #else + #define mmap_align(S) page_align(S) + #endif + + /* For sys_alloc, enough padding to ensure can malloc request on success */ + #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) + + #define is_page_aligned(S) \ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) + #define is_granularity_aligned(S) \ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + + /* True if segment S holds address A */ + #define segment_holds(S, A) \ + ((char *)(A) >= S->base && (char *)(A) < S->base + S->size) /* Return segment holding given address */ -static msegmentptr segment_holding(mstate m, char* addr) { +static msegmentptr segment_holding(mstate m, char *addr) { + msegmentptr sp = &m->seg; for (;;) { - if (addr >= sp->base && addr < sp->base + sp->size) - return sp; - if ((sp = sp->next) == 0) - return 0; + + if (addr >= sp->base && addr < sp->base + sp->size) return sp; + if ((sp = sp->next) == 0) return 0; + } + } /* Return true if segment contains a segment link */ static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; for (;;) { - if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) - return 1; - if ((sp = sp->next) == 0) - return 0; + + if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) return 1; + if ((sp = sp->next) == 0) return 0; + } -} -#ifndef MORECORE_CANNOT_TRIM -#define should_trim(M,s) ((s) > (M)->trim_check) -#else /* MORECORE_CANNOT_TRIM */ -#define should_trim(M,s) (0) -#endif /* MORECORE_CANNOT_TRIM */ +} -/* - TOP_FOOT_SIZE is padding at the end of a segment, including space - that may be needed to place segment records and fenceposts when new - noncontiguous segments are added. -*/ -#define TOP_FOOT_SIZE\ - (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + #ifndef MORECORE_CANNOT_TRIM + #define should_trim(M, s) ((s) > (M)->trim_check) + #else /* MORECORE_CANNOT_TRIM */ + #define should_trim(M, s) (0) + #endif /* MORECORE_CANNOT_TRIM */ + /* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. + */ + #define TOP_FOOT_SIZE \ + (align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + \ + MIN_CHUNK_SIZE) /* ------------------------------- Hooks -------------------------------- */ @@ -2793,20 +2887,25 @@ static int has_segment_link(mstate m, msegmentptr ss) { anything you like. */ -#if USE_LOCKS -#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) -#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } -#else /* USE_LOCKS */ + #if USE_LOCKS + #define PREACTION(M) ((use_lock(M)) ? 
ACQUIRE_LOCK(&(M)->mutex) : 0) + #define POSTACTION(M) \ + { \ + \ + if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); \ + \ + } + #else /* USE_LOCKS */ -#ifndef PREACTION -#define PREACTION(M) (0) -#endif /* PREACTION */ + #ifndef PREACTION + #define PREACTION(M) (0) + #endif /* PREACTION */ -#ifndef POSTACTION -#define POSTACTION(M) -#endif /* POSTACTION */ + #ifndef POSTACTION + #define POSTACTION(M) + #endif /* POSTACTION */ -#endif /* USE_LOCKS */ + #endif /* USE_LOCKS */ /* CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. @@ -2816,7 +2915,7 @@ static int has_segment_link(mstate m, msegmentptr ss) { useful in custom actions that try to help diagnose errors. */ -#if PROCEED_ON_ERROR + #if PROCEED_ON_ERROR /* A count of the number of corruption errors causing resets */ int malloc_corruption_error_count; @@ -2824,211 +2923,240 @@ int malloc_corruption_error_count; /* default corruption action */ static void reset_on_error(mstate m); -#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) -#define USAGE_ERROR_ACTION(m, p) - -#else /* PROCEED_ON_ERROR */ + #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) + #define USAGE_ERROR_ACTION(m, p) -#ifndef CORRUPTION_ERROR_ACTION -#define CORRUPTION_ERROR_ACTION(m) ABORT -#endif /* CORRUPTION_ERROR_ACTION */ + #else /* PROCEED_ON_ERROR */ -#ifndef USAGE_ERROR_ACTION -#define USAGE_ERROR_ACTION(m,p) ABORT -#endif /* USAGE_ERROR_ACTION */ + #ifndef CORRUPTION_ERROR_ACTION + #define CORRUPTION_ERROR_ACTION(m) ABORT + #endif /* CORRUPTION_ERROR_ACTION */ -#endif /* PROCEED_ON_ERROR */ + #ifndef USAGE_ERROR_ACTION + #define USAGE_ERROR_ACTION(m, p) ABORT + #endif /* USAGE_ERROR_ACTION */ + #endif /* PROCEED_ON_ERROR */ /* -------------------------- Debugging setup ---------------------------- */ -#if ! 
DEBUG + #if !DEBUG -#define check_free_chunk(M,P) -#define check_inuse_chunk(M,P) -#define check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) -#define check_malloc_state(M) -#define check_top_chunk(M,P) + #define check_free_chunk(M, P) + #define check_inuse_chunk(M, P) + #define check_malloced_chunk(M, P, N) + #define check_mmapped_chunk(M, P) + #define check_malloc_state(M) + #define check_top_chunk(M, P) -#else /* DEBUG */ -#define check_free_chunk(M,P) do_check_free_chunk(M,P) -#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) -#define check_top_chunk(M,P) do_check_top_chunk(M,P) -#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) -#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) -#define check_malloc_state(M) do_check_malloc_state(M) + #else /* DEBUG */ + #define check_free_chunk(M, P) do_check_free_chunk(M, P) + #define check_inuse_chunk(M, P) do_check_inuse_chunk(M, P) + #define check_top_chunk(M, P) do_check_top_chunk(M, P) + #define check_malloced_chunk(M, P, N) do_check_malloced_chunk(M, P, N) + #define check_mmapped_chunk(M, P) do_check_mmapped_chunk(M, P) + #define check_malloc_state(M) do_check_malloc_state(M) static void do_check_any_chunk(mstate m, mchunkptr p); static void do_check_top_chunk(mstate m, mchunkptr p); static void do_check_mmapped_chunk(mstate m, mchunkptr p); static void do_check_inuse_chunk(mstate m, mchunkptr p); static void do_check_free_chunk(mstate m, mchunkptr p); -static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_malloced_chunk(mstate m, void *mem, size_t s); static void do_check_tree(mstate m, tchunkptr t); static void do_check_treebin(mstate m, bindex_t i); static void do_check_smallbin(mstate m, bindex_t i); static void do_check_malloc_state(mstate m); static int bin_find(mstate m, mchunkptr x); static size_t traverse_and_check(mstate m); -#endif /* DEBUG */ + #endif /* DEBUG */ /* ---------------------------- Indexing Bins ---------------------------- */ -#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) -#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) -#define small_index2size(i) ((i) << SMALLBIN_SHIFT) -#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) - -/* addressing by index. See above about smallbin repositioning */ -#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) -#define treebin_at(M,i) (&((M)->treebins[i])) - -/* assign tree index for size S to variable I. Use x86 asm if possible */ -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_tree_index(S, I)\ -{\ - unsigned int X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} - -#elif defined (__INTEL_COMPILER) -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K = _bit_scan_reverse (X); \ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} + #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) + #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) + #define small_index2size(i) ((i) << SMALLBIN_SHIFT) + #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + + /* addressing by index. 
See above about smallbin repositioning */ + #define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i) << 1]))) + #define treebin_at(M, i) (&((M)->treebins[i])) + + /* assign tree index for size S to variable I. Use x86 asm if possible */ + #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + #define compute_tree_index(S, I) \ + { \ + \ + unsigned int X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K = (unsigned)sizeof(X) * __CHAR_BIT__ - 1 - \ + (unsigned)__builtin_clz(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int K;\ - _BitScanReverse((DWORD *) &K, (DWORD) X);\ - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ - }\ -} + #elif defined(__INTEL_COMPILER) + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K = _bit_scan_reverse(X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -#else /* GNUC */ -#define compute_tree_index(S, I)\ -{\ - size_t X = S >> TREEBIN_SHIFT;\ - if (X == 0)\ - I = 0;\ - else if (X > 0xFFFF)\ - I = NTREEBINS-1;\ - else {\ - unsigned int Y = (unsigned int)X;\ - unsigned int N = ((Y - 0x100) >> 16) & 8;\ - unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ - N += K;\ - N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ - K = 14 - N + ((Y <<= K) >> 15);\ - I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ - }\ -} -#endif /* GNUC */ + #elif defined(_MSC_VER) && _MSC_VER >= 1300 + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int K; \ + _BitScanReverse((DWORD *)&K, (DWORD)X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \ + \ + } \ + \ + } -/* Bit representing maximum resolved size in a treebin at i */ -#define bit_for_tree_index(i) \ - (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + #else /* GNUC */ + #define compute_tree_index(S, I) \ + { \ + \ + size_t X = S >> TREEBIN_SHIFT; \ + if (X == 0) \ + I = 0; \ + else if (X > 0xFFFF) \ + I = NTREEBINS - 1; \ + else { \ + \ + unsigned int Y = (unsigned int)X; \ + unsigned int N = ((Y - 0x100) >> 16) & 8; \ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4; \ + N += K; \ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2; \ + K = 14 - N + ((Y <<= K) >> 15); \ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)); \ + \ + } \ + \ + } + #endif /* GNUC */ -/* Shift placing maximum resolved bit in a treebin at i as sign bit */ -#define leftshift_for_tree_index(i) \ - ((i == NTREEBINS-1)? 0 : \ - ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + /* Bit representing maximum resolved size in a treebin at i */ + #define bit_for_tree_index(i) \ + (i == NTREEBINS - 1) ? 
(SIZE_T_BITSIZE - 1) \ + : (((i) >> 1) + TREEBIN_SHIFT - 2) -/* The size of the smallest chunk held in bin with index i */ -#define minsize_for_tree_index(i) \ - ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ - (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + /* Shift placing maximum resolved bit in a treebin at i as sign bit */ + #define leftshift_for_tree_index(i) \ + ((i == NTREEBINS - 1) \ + ? 0 \ + : ((SIZE_T_BITSIZE - SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + /* The size of the smallest chunk held in bin with index i */ + #define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i)&SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) -/* ------------------------ Operations on bin maps ----------------------- */ + /* ------------------------ Operations on bin maps ----------------------- */ -/* bit corresponding to given index */ -#define idx2bit(i) ((binmap_t)(1) << (i)) + /* bit corresponding to given index */ + #define idx2bit(i) ((binmap_t)(1) << (i)) -/* Mark/Clear bits with given index */ -#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) -#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) -#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + /* Mark/Clear bits with given index */ + #define mark_smallmap(M, i) ((M)->smallmap |= idx2bit(i)) + #define clear_smallmap(M, i) ((M)->smallmap &= ~idx2bit(i)) + #define smallmap_is_marked(M, i) ((M)->smallmap & idx2bit(i)) -#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) -#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) -#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + #define mark_treemap(M, i) ((M)->treemap |= idx2bit(i)) + #define clear_treemap(M, i) ((M)->treemap &= ~idx2bit(i)) + #define treemap_is_marked(M, i) ((M)->treemap & idx2bit(i)) -/* isolate the least set bit of a bitmap */ -#define least_bit(x) ((x) & -(x)) + /* isolate the least set bit of a bitmap */ + #define least_bit(x) ((x) & -(x)) -/* mask with all bits to left of least bit of x on */ -#define left_bits(x) ((x<<1) | -(x<<1)) + /* mask with all bits to left of least bit of x on */ + #define left_bits(x) ((x << 1) | -(x << 1)) -/* mask with all bits to left of or equal to least bit of x on */ -#define same_or_left_bits(x) ((x) | -(x)) + /* mask with all bits to left of or equal to least bit of x on */ + #define same_or_left_bits(x) ((x) | -(x)) /* index corresponding to given bit. 
Use x86 asm if possible */ -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = __builtin_ctz(X); \ - I = (bindex_t)J;\ -} + #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + J = __builtin_ctz(X); \ + I = (bindex_t)J; \ + \ + } -#elif defined (__INTEL_COMPILER) -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - J = _bit_scan_forward (X); \ - I = (bindex_t)J;\ -} + #elif defined(__INTEL_COMPILER) + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + J = _bit_scan_forward(X); \ + I = (bindex_t)J; \ + \ + } -#elif defined(_MSC_VER) && _MSC_VER>=1300 -#define compute_bit2idx(X, I)\ -{\ - unsigned int J;\ - _BitScanForward((DWORD *) &J, X);\ - I = (bindex_t)J;\ -} + #elif defined(_MSC_VER) && _MSC_VER >= 1300 + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int J; \ + _BitScanForward((DWORD *)&J, X); \ + I = (bindex_t)J; \ + \ + } -#elif USE_BUILTIN_FFS -#define compute_bit2idx(X, I) I = ffs(X)-1 - -#else -#define compute_bit2idx(X, I)\ -{\ - unsigned int Y = X - 1;\ - unsigned int K = Y >> (16-4) & 16;\ - unsigned int N = K; Y >>= K;\ - N += K = Y >> (8-3) & 8; Y >>= K;\ - N += K = Y >> (4-2) & 4; Y >>= K;\ - N += K = Y >> (2-1) & 2; Y >>= K;\ - N += K = Y >> (1-0) & 1; Y >>= K;\ - I = (bindex_t)(N + Y);\ -} -#endif /* GNUC */ + #elif USE_BUILTIN_FFS + #define compute_bit2idx(X, I) I = ffs(X) - 1 + #else + #define compute_bit2idx(X, I) \ + { \ + \ + unsigned int Y = X - 1; \ + unsigned int K = Y >> (16 - 4) & 16; \ + unsigned int N = K; \ + Y >>= K; \ + N += K = Y >> (8 - 3) & 8; \ + Y >>= K; \ + N += K = Y >> (4 - 2) & 4; \ + Y >>= K; \ + N += K = Y >> (2 - 1) & 2; \ + Y >>= K; \ + N += K = Y >> (1 - 0) & 1; \ + Y >>= K; \ + I = (bindex_t)(N + Y); \ + \ + } + #endif /* GNUC */ /* ----------------------- Runtime Check Support ------------------------- */ @@ -3058,122 +3186,142 @@ static size_t traverse_and_check(mstate m); next, etc). This turns out to be cheaper than relying on hashes. 
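
  For example (a sketch of the intent only; the address is invented):

    void *p = (void *)0x10;   // below least_addr, no chunk lives here
    free(p);                  // trapped by the ok_address() check below
                              // instead of corrupting bin lists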
*/ -#if !INSECURE -/* Check if address a is at least as high as any from MORECORE or MMAP */ -#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) -/* Check if address of next chunk n is higher than base chunk p */ -#define ok_next(p, n) ((char*)(p) < (char*)(n)) -/* Check if p has inuse status */ -#define ok_inuse(p) is_inuse(p) -/* Check if p has its pinuse bit on */ -#define ok_pinuse(p) pinuse(p) - -#else /* !INSECURE */ -#define ok_address(M, a) (1) -#define ok_next(b, n) (1) -#define ok_inuse(p) (1) -#define ok_pinuse(p) (1) -#endif /* !INSECURE */ - -#if (FOOTERS && !INSECURE) -/* Check if (alleged) mstate m has expected magic field */ -#define ok_magic(M) ((M)->magic == mparams.magic) -#else /* (FOOTERS && !INSECURE) */ -#define ok_magic(M) (1) -#endif /* (FOOTERS && !INSECURE) */ - -/* In gcc, use __builtin_expect to minimize impact of checks */ -#if !INSECURE -#if defined(__GNUC__) && __GNUC__ >= 3 -#define RTCHECK(e) __builtin_expect(e, 1) -#else /* GNUC */ -#define RTCHECK(e) (e) -#endif /* GNUC */ -#else /* !INSECURE */ -#define RTCHECK(e) (1) -#endif /* !INSECURE */ + #if !INSECURE + /* Check if address a is at least as high as any from MORECORE or MMAP */ + #define ok_address(M, a) ((char *)(a) >= (M)->least_addr) + /* Check if address of next chunk n is higher than base chunk p */ + #define ok_next(p, n) ((char *)(p) < (char *)(n)) + /* Check if p has inuse status */ + #define ok_inuse(p) is_inuse(p) + /* Check if p has its pinuse bit on */ + #define ok_pinuse(p) pinuse(p) + + #else /* !INSECURE */ + #define ok_address(M, a) (1) + #define ok_next(b, n) (1) + #define ok_inuse(p) (1) + #define ok_pinuse(p) (1) + #endif /* !INSECURE */ + + #if (FOOTERS && !INSECURE) + /* Check if (alleged) mstate m has expected magic field */ + #define ok_magic(M) ((M)->magic == mparams.magic) + #else /* (FOOTERS && !INSECURE) */ + #define ok_magic(M) (1) + #endif /* (FOOTERS && !INSECURE) */ + + /* In gcc, use __builtin_expect to minimize impact of checks */ + #if !INSECURE + #if defined(__GNUC__) && __GNUC__ >= 3 + #define RTCHECK(e) __builtin_expect(e, 1) + #else /* GNUC */ + #define RTCHECK(e) (e) + #endif /* GNUC */ + #else /* !INSECURE */ + #define RTCHECK(e) (1) + #endif /* !INSECURE */ /* macros to set up inuse chunks with or without footers */ -#if !FOOTERS + #if !FOOTERS -#define mark_inuse_foot(M,p,s) + #define mark_inuse_foot(M, p, s) -/* Macros for setting head/foot of non-mmapped chunks */ + /* Macros for setting head/foot of non-mmapped chunks */ -/* Set cinuse bit and pinuse bit of next chunk */ -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + /* Set cinuse bit and pinuse bit of next chunk */ + #define set_inuse(M, p, s) \ + ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) -/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + /* Set cinuse and pinuse of this chunk and pinuse of next chunk */ + #define set_inuse_and_pinuse(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ + ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT) -/* Set size, cinuse and pinuse bit of this chunk */ -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + /* Set size, cinuse and pinuse bit of this chunk */ + #define 
set_size_and_pinuse_of_inuse_chunk(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT)) -#else /* FOOTERS */ + #else /* FOOTERS */ -/* Set foot of inuse chunk to be xor of mstate and seed */ -#define mark_inuse_foot(M,p,s)\ - (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + /* Set foot of inuse chunk to be xor of mstate and seed */ + #define mark_inuse_foot(M, p, s) \ + (((mchunkptr)((char *)(p) + (s)))->prev_foot = \ + ((size_t)(M) ^ mparams.magic)) -#define get_mstate_for(p)\ - ((mstate)(((mchunkptr)((char*)(p) +\ - (chunksize(p))))->prev_foot ^ mparams.magic)) + #define get_mstate_for(p) \ + ((mstate)(((mchunkptr)((char *)(p) + (chunksize(p))))->prev_foot ^ \ + mparams.magic)) -#define set_inuse(M,p,s)\ - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ - mark_inuse_foot(M,p,s)) + #define set_inuse(M, p, s) \ + ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \ + (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M, p, s)) -#define set_inuse_and_pinuse(M,p,s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ - mark_inuse_foot(M,p,s)) + #define set_inuse_and_pinuse(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \ + (((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M, p, s)) -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ - mark_inuse_foot(M, p, s)) + #define set_size_and_pinuse_of_inuse_chunk(M, p, s) \ + ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), mark_inuse_foot(M, p, s)) -#endif /* !FOOTERS */ + #endif /* !FOOTERS */ /* ---------------------------- setting mparams -------------------------- */ -#if LOCK_AT_FORK -static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } -static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } -static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } -#endif /* LOCK_AT_FORK */ + #if LOCK_AT_FORK +static void pre_fork(void) { + + ACQUIRE_LOCK(&(gm)->mutex); + +} + +static void post_fork_parent(void) { + + RELEASE_LOCK(&(gm)->mutex); + +} + +static void post_fork_child(void) { + + INITIAL_LOCK(&(gm)->mutex); + +} + + #endif /* LOCK_AT_FORK */ /* Initialize mparams */ static int init_mparams(void) { -#ifdef NEED_GLOBAL_LOCK_INIT - if (malloc_global_mutex_status <= 0) - init_malloc_global_mutex(); -#endif + + #ifdef NEED_GLOBAL_LOCK_INIT + if (malloc_global_mutex_status <= 0) init_malloc_global_mutex(); + #endif ACQUIRE_MALLOC_GLOBAL_LOCK(); if (mparams.magic == 0) { + size_t magic; size_t psize; size_t gsize; -#ifndef WIN32 + #ifndef WIN32 psize = malloc_getpagesize; - gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); -#else /* WIN32 */ + gsize = ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY : psize); + #else /* WIN32 */ { + SYSTEM_INFO system_info; GetSystemInfo(&system_info); psize = system_info.dwPageSize; - gsize = ((DEFAULT_GRANULARITY != 0)? - DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); + gsize = + ((DEFAULT_GRANULARITY != 0) ? DEFAULT_GRANULARITY + : system_info.dwAllocationGranularity); + } -#endif /* WIN32 */ + + #endif /* WIN32 */ /* Sanity-check configuration: size_t must be unsigned and as wide as pointer type. @@ -3181,187 +3329,216 @@ static int init_mparams(void) { alignment must be at least 8. Alignment, min chunk size, and page size must all be powers of 2. 
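(The power-of-two checks
       below use the (x & (x - 1)) == 0 idiom.)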
*/ - if ((sizeof(size_t) != sizeof(char*)) || - (MAX_SIZE_T < MIN_CHUNK_SIZE) || - (sizeof(int) < 4) || - (MALLOC_ALIGNMENT < (size_t)8U) || - ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || - ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || - ((gsize & (gsize-SIZE_T_ONE)) != 0) || - ((psize & (psize-SIZE_T_ONE)) != 0)) + if ((sizeof(size_t) != sizeof(char *)) || (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) || + ((gsize & (gsize - SIZE_T_ONE)) != 0) || + ((psize & (psize - SIZE_T_ONE)) != 0)) ABORT; mparams.granularity = gsize; mparams.page_size = psize; mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; -#if MORECORE_CONTIGUOUS - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; -#else /* MORECORE_CONTIGUOUS */ - mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; -#endif /* MORECORE_CONTIGUOUS */ - -#if !ONLY_MSPACES + #if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT; + #else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = + USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT; + #endif /* MORECORE_CONTIGUOUS */ + + #if !ONLY_MSPACES /* Set up lock for main malloc area */ gm->mflags = mparams.default_mflags; (void)INITIAL_LOCK(&gm->mutex); -#endif -#if LOCK_AT_FORK + #endif + #if LOCK_AT_FORK pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child); -#endif + #endif { -#if USE_DEV_RANDOM - int fd; + + #if USE_DEV_RANDOM + int fd; unsigned char buf[sizeof(size_t)]; /* Try to use /dev/urandom, else fall back on using time */ if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && read(fd, buf, sizeof(buf)) == sizeof(buf)) { - magic = *((size_t *) buf); + + magic = *((size_t *)buf); close(fd); - } - else -#endif /* USE_DEV_RANDOM */ -#ifdef WIN32 - magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); -#elif defined(LACKS_TIME_H) + + } else + + #endif /* USE_DEV_RANDOM */ + #ifdef WIN32 + magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); + #elif defined(LACKS_TIME_H) magic = (size_t)&magic ^ (size_t)0x55555555U; -#else + #else magic = (size_t)(time(0) ^ (size_t)0x55555555U); -#endif - magic |= (size_t)8U; /* ensure nonzero */ - magic &= ~(size_t)7U; /* improve chances of fault for bad values */ + #endif + magic |= (size_t)8U; /* ensure nonzero */ + magic &= ~(size_t)7U; /* improve chances of fault for bad values */ /* Until memory modes commonly available, use volatile-write */ (*(volatile size_t *)(&(mparams.magic))) = magic; + } + } RELEASE_MALLOC_GLOBAL_LOCK(); return 1; + } /* support for mallopt */ static int change_mparam(int param_number, int value) { + size_t val; ensure_initialization(); - val = (value == -1)? MAX_SIZE_T : (size_t)value; - switch(param_number) { - case M_TRIM_THRESHOLD: - mparams.trim_threshold = val; - return 1; - case M_GRANULARITY: - if (val >= mparams.page_size && ((val & (val-1)) == 0)) { - mparams.granularity = val; + val = (value == -1) ? 
MAX_SIZE_T : (size_t)value; + switch (param_number) { + + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; return 1; - } - else + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val - 1)) == 0)) { + + mparams.granularity = val; + return 1; + + } else + + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: return 0; - case M_MMAP_THRESHOLD: - mparams.mmap_threshold = val; - return 1; - default: - return 0; + } + } -#if DEBUG + #if DEBUG /* ------------------------- Debugging Support --------------------------- */ /* Check properties of any chunk, whether free, inuse, mmapped etc */ static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); + } /* Check properties of top chunk */ static void do_check_top_chunk(mstate m, mchunkptr p) { - msegmentptr sp = segment_holding(m, (char*)p); - size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ + + msegmentptr sp = segment_holding(m, (char *)p); + size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ assert(sp != 0); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(sz == m->topsize); assert(sz > 0); - assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(sz == ((sp->base + sp->size) - (char *)p) - TOP_FOOT_SIZE); assert(pinuse(p)); assert(!pinuse(chunk_plus_offset(p, sz))); + } /* Check properties of (inuse) mmapped chunks */ static void do_check_mmapped_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); + + size_t sz = chunksize(p); size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); assert(is_mmapped(p)); assert(use_mmap(m)); assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); assert(ok_address(m, p)); assert(!is_small(sz)); - assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert((len & (mparams.page_size - SIZE_T_ONE)) == 0); assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); - assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); + assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0); + } /* Check properties of inuse chunks */ static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); assert(is_inuse(p)); assert(next_pinuse(p)); /* If not pinuse and not mmapped, previous chunk has OK offset */ assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); - if (is_mmapped(p)) - do_check_mmapped_chunk(m, p); + if (is_mmapped(p)) do_check_mmapped_chunk(m, p); + } /* Check properties of free chunks */ static void do_check_free_chunk(mstate m, mchunkptr p) { - size_t sz = chunksize(p); + + size_t sz = chunksize(p); mchunkptr next = chunk_plus_offset(p, sz); do_check_any_chunk(m, p); assert(!is_inuse(p)); assert(!next_pinuse(p)); - assert (!is_mmapped(p)); + assert(!is_mmapped(p)); if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); assert(is_aligned(chunk2mem(p))); assert(next->prev_foot == sz); assert(pinuse(p)); - assert (next == m->top || is_inuse(next)); + assert(next == m->top || is_inuse(next)); assert(p->fd->bk == p); assert(p->bk->fd == p); - } - else /* markers are always of size SIZE_T_SIZE */ + + } else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } + } /* Check properties of malloced chunks at the point they are malloced */ -static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { +static void 
do_check_malloced_chunk(mstate m, void *mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - size_t sz = p->head & ~INUSE_BITS; + size_t sz = p->head & ~INUSE_BITS; do_check_inuse_chunk(m, p); assert((sz & CHUNK_ALIGN_MASK) == 0); assert(sz >= MIN_CHUNK_SIZE); assert(sz >= s); /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } + } /* Check a tree and its subtrees. */ static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; tchunkptr u = t; - bindex_t tindex = t->index; - size_t tsize = chunksize(t); - bindex_t idx; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; compute_tree_index(tsize, idx); assert(tindex == idx); assert(tsize >= MIN_LARGE_SIZE); assert(tsize >= minsize_for_tree_index(idx)); - assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index((idx + 1)))); - do { /* traverse through chain of same-sized nodes */ + do { /* traverse through chain of same-sized nodes */ do_check_any_chunk(m, ((mchunkptr)u)); assert(u->index == tindex); assert(chunksize(u) == tsize); @@ -3370,56 +3547,72 @@ static void do_check_tree(mstate m, tchunkptr t) { assert(u->fd->bk == u); assert(u->bk->fd == u); if (u->parent == 0) { + assert(u->child[0] == 0); assert(u->child[1] == 0); - } - else { - assert(head == 0); /* only one node on chain has parent */ + + } else { + + assert(head == 0); /* only one node on chain has parent */ head = u; assert(u->parent != u); - assert (u->parent->child[0] == u || - u->parent->child[1] == u || - *((tbinptr*)(u->parent)) == u); + assert(u->parent->child[0] == u || u->parent->child[1] == u || + *((tbinptr *)(u->parent)) == u); if (u->child[0] != 0) { + assert(u->child[0]->parent == u); assert(u->child[0] != u); do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); assert(u->child[1] != u); do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); + } /* Check all the chunks in a treebin. */ static void do_check_treebin(mstate m, bindex_t i) { - tbinptr* tb = treebin_at(m, i); + + tbinptr * tb = treebin_at(m, i); tchunkptr t = *tb; - int empty = (m->treemap & (1U << i)) == 0; - if (t == 0) - assert(empty); - if (!empty) - do_check_tree(m, t); + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) assert(empty); + if (!empty) do_check_tree(m, t); + } /* Check all the chunks in a smallbin. */ static void do_check_smallbin(mstate m, bindex_t i) { - sbinptr b = smallbin_at(m, i); - mchunkptr p = b->bk; + + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; unsigned int empty = (m->smallmap & (1U << i)) == 0; - if (p == b) - assert(empty); + if (p == b) assert(empty); if (!empty) { + for (; p != b; p = p->bk) { - size_t size = chunksize(p); + + size_t size = chunksize(p); mchunkptr q; /* each chunk claims to be free */ do_check_free_chunk(m, p); @@ -3428,324 +3621,435 @@ static void do_check_smallbin(mstate m, bindex_t i) { assert(p->bk == b || chunksize(p->bk) == chunksize(p)); /* chunk is followed by an inuse chunk */ q = next_chunk(p); - if (q->head != FENCEPOST_HEAD) - do_check_inuse_chunk(m, q); + if (q->head != FENCEPOST_HEAD) do_check_inuse_chunk(m, q); + } + } + } /* Find x in a bin. Used in other check functions. 
*/ static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); if (is_small(size)) { + bindex_t sidx = small_index(size); - sbinptr b = smallbin_at(m, sidx); + sbinptr b = smallbin_at(m, sidx); if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; do { - if (p == x) - return 1; + + if (p == x) return 1; + } while ((p = p->fd) != b); + } - } - else { + + } else { + bindex_t tidx; compute_tree_index(size, tidx); if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); - size_t sizebits = size << leftshift_for_tree_index(tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); while (t != 0 && chunksize(t) != size) { - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + + t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; do { - if (u == (tchunkptr)x) - return 1; + + if (u == (tchunkptr)x) return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; + } /* Traverse each chunk and check it; return total */ static size_t traverse_and_check(mstate m) { + size_t sum = 0; if (is_initialized(m)) { + msegmentptr s = &m->seg; sum += m->topsize + TOP_FOOT_SIZE; while (s != 0) { + mchunkptr q = align_as_chunk(s->base); mchunkptr lastq = 0; assert(pinuse(q)); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { + while (segment_holds(s, q) && q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); if (is_inuse(q)) { + assert(!bin_find(m, q)); do_check_inuse_chunk(m, q); - } - else { + + } else { + assert(q == m->dv || bin_find(m, q)); - assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ + assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ do_check_free_chunk(m, q); + } + lastq = q; q = next_chunk(q); + } + s = s->next; + } + } + return sum; -} +} /* Check all properties of malloc_state. 
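
   (Hedged note, based on upstream dlmalloc rather than on this hunk:
   the check_* macros used throughout the file map onto these
   do_check_* helpers only when DEBUG is set, roughly

     #if DEBUG
       #define check_malloc_state(M) do_check_malloc_state(M)
     #else
       #define check_malloc_state(M)
     #endif

   so release builds compile all of these checks away.)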
*/ static void do_check_malloc_state(mstate m) { + bindex_t i; - size_t total; + size_t total; /* check bins */ for (i = 0; i < NSMALLBINS; ++i) do_check_smallbin(m, i); for (i = 0; i < NTREEBINS; ++i) do_check_treebin(m, i); - if (m->dvsize != 0) { /* check dv chunk */ + if (m->dvsize != 0) { /* check dv chunk */ do_check_any_chunk(m, m->dv); assert(m->dvsize == chunksize(m->dv)); assert(m->dvsize >= MIN_CHUNK_SIZE); assert(bin_find(m, m->dv) == 0); + } - if (m->top != 0) { /* check top chunk */ + if (m->top != 0) { /* check top chunk */ do_check_top_chunk(m, m->top); /*assert(m->topsize == chunksize(m->top)); redundant */ assert(m->topsize > 0); assert(bin_find(m, m->top) == 0); + } total = traverse_and_check(m); assert(total <= m->footprint); assert(m->footprint <= m->max_footprint); + } -#endif /* DEBUG */ + + #endif /* DEBUG */ /* ----------------------------- statistics ------------------------------ */ -#if !NO_MALLINFO + #if !NO_MALLINFO static struct mallinfo internal_mallinfo(mstate m) { - struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + struct mallinfo nm = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ensure_initialization(); if (!PREACTION(m)) { + check_malloc_state(m); if (is_initialized(m)) { - size_t nfree = SIZE_T_ONE; /* top always free */ - size_t mfree = m->topsize + TOP_FOOT_SIZE; - size_t sum = mfree; + + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; msegmentptr s = &m->seg; while (s != 0) { + mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { + while (segment_holds(s, q) && q != m->top && + q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); sum += sz; if (!is_inuse(q)) { + mfree += sz; ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } - nm.arena = sum; - nm.ordblks = nfree; - nm.hblkhd = m->footprint - sum; - nm.usmblks = m->max_footprint; + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; nm.uordblks = m->footprint - mfree; nm.fordblks = mfree; nm.keepcost = m->topsize; + } POSTACTION(m); + } + return nm; + } -#endif /* !NO_MALLINFO */ -#if !NO_MALLOC_STATS + #endif /* !NO_MALLINFO */ + + #if !NO_MALLOC_STATS static void internal_malloc_stats(mstate m) { + ensure_initialization(); if (!PREACTION(m)) { + size_t maxfp = 0; size_t fp = 0; size_t used = 0; check_malloc_state(m); if (is_initialized(m)) { + msegmentptr s = &m->seg; maxfp = m->max_footprint; fp = m->footprint; used = fp - (m->topsize + TOP_FOOT_SIZE); while (s != 0) { + mchunkptr q = align_as_chunk(s->base); - while (segment_holds(s, q) && - q != m->top && q->head != FENCEPOST_HEAD) { - if (!is_inuse(q)) - used -= chunksize(q); + while (segment_holds(s, q) && q != m->top && + q->head != FENCEPOST_HEAD) { + + if (!is_inuse(q)) used -= chunksize(q); q = next_chunk(q); + } + s = s->next; + } + } - POSTACTION(m); /* drop lock */ + + POSTACTION(m); /* drop lock */ fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + } + } -#endif /* NO_MALLOC_STATS */ -/* ----------------------- Operations on smallbins ----------------------- */ + #endif /* NO_MALLOC_STATS */ -/* - Various forms of linking and unlinking are defined as macros. Even - the ones for trees, which are very long but have very short typical - paths. 
This is ugly but reduces reliance on inlining support of - compilers. -*/ + /* ----------------------- Operations on smallbins ----------------------- */ -/* Link a free chunk into a smallbin */ -#define insert_small_chunk(M, P, S) {\ - bindex_t I = small_index(S);\ - mchunkptr B = smallbin_at(M, I);\ - mchunkptr F = B;\ - assert(S >= MIN_CHUNK_SIZE);\ - if (!smallmap_is_marked(M, I))\ - mark_smallmap(M, I);\ - else if (RTCHECK(ok_address(M, B->fd)))\ - F = B->fd;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - B->fd = P;\ - F->bk = P;\ - P->fd = F;\ - P->bk = B;\ -} + /* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. + */ -/* Unlink a chunk from a smallbin */ -#define unlink_small_chunk(M, P, S) {\ - mchunkptr F = P->fd;\ - mchunkptr B = P->bk;\ - bindex_t I = small_index(S);\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(B == smallbin_at(M,I) ||\ - (ok_address(M, B) && B->fd == P))) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} + /* Link a free chunk into a smallbin */ + #define insert_small_chunk(M, P, S) \ + { \ + \ + bindex_t I = small_index(S); \ + mchunkptr B = smallbin_at(M, I); \ + mchunkptr F = B; \ + assert(S >= MIN_CHUNK_SIZE); \ + if (!smallmap_is_marked(M, I)) \ + mark_smallmap(M, I); \ + else if (RTCHECK(ok_address(M, B->fd))) \ + F = B->fd; \ + else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + B->fd = P; \ + F->bk = P; \ + P->fd = F; \ + P->bk = B; \ + \ + } -/* Unlink the first chunk from a smallbin */ -#define unlink_first_small_chunk(M, B, P, I) {\ - mchunkptr F = P->fd;\ - assert(P != B);\ - assert(P != F);\ - assert(chunksize(P) == small_index2size(I));\ - if (B == F) {\ - clear_smallmap(M, I);\ - }\ - else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ - F->bk = B;\ - B->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ -} + /* Unlink a chunk from a smallbin */ + #define unlink_small_chunk(M, P, S) \ + { \ + \ + mchunkptr F = P->fd; \ + mchunkptr B = P->bk; \ + bindex_t I = small_index(S); \ + assert(P != B); \ + assert(P != F); \ + assert(chunksize(P) == small_index2size(I)); \ + if (RTCHECK(F == smallbin_at(M, I) || \ + (ok_address(M, F) && F->bk == P))) { \ + \ + if (B == F) { \ + \ + clear_smallmap(M, I); \ + \ + } else if (RTCHECK(B == smallbin_at(M, I) || \ + \ + (ok_address(M, B) && B->fd == P))) { \ + \ + F->bk = B; \ + B->fd = F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } -/* Replace dv node, binning the old one */ -/* Used only when dvsize known to be small */ -#define replace_dv(M, P, S) {\ - size_t DVS = M->dvsize;\ - assert(is_small(DVS));\ - if (DVS != 0) {\ - mchunkptr DV = M->dv;\ - insert_small_chunk(M, DV, DVS);\ - }\ - M->dvsize = S;\ - M->dv = P;\ -} + /* Unlink the first chunk from a smallbin */ + #define unlink_first_small_chunk(M, B, P, I) \ + { \ + \ + mchunkptr F = P->fd; \ + assert(P != B); \ + assert(P != F); \ + assert(chunksize(P) == small_index2size(I)); \ + if (B == F) { \ + \ + clear_smallmap(M, I); \ + \ + } else if (RTCHECK(ok_address(M, F) && F->bk == P)) { \ + \ + F->bk = B; \ + B->fd = 
F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } -/* ------------------------- Operations on trees ------------------------- */ - -/* Insert chunk into tree */ -#define insert_large_chunk(M, X, S) {\ - tbinptr* H;\ - bindex_t I;\ - compute_tree_index(S, I);\ - H = treebin_at(M, I);\ - X->index = I;\ - X->child[0] = X->child[1] = 0;\ - if (!treemap_is_marked(M, I)) {\ - mark_treemap(M, I);\ - *H = X;\ - X->parent = (tchunkptr)H;\ - X->fd = X->bk = X;\ - }\ - else {\ - tchunkptr T = *H;\ - size_t K = S << leftshift_for_tree_index(I);\ - for (;;) {\ - if (chunksize(T) != S) {\ - tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ - K <<= 1;\ - if (*C != 0)\ - T = *C;\ - else if (RTCHECK(ok_address(M, C))) {\ - *C = X;\ - X->parent = T;\ - X->fd = X->bk = X;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - else {\ - tchunkptr F = T->fd;\ - if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ - T->fd = F->bk = X;\ - X->fd = F;\ - X->bk = T;\ - X->parent = 0;\ - break;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - break;\ - }\ - }\ - }\ - }\ -} + /* Replace dv node, binning the old one */ + /* Used only when dvsize known to be small */ + #define replace_dv(M, P, S) \ + { \ + \ + size_t DVS = M->dvsize; \ + assert(is_small(DVS)); \ + if (DVS != 0) { \ + \ + mchunkptr DV = M->dv; \ + insert_small_chunk(M, DV, DVS); \ + \ + } \ + M->dvsize = S; \ + M->dv = P; \ + \ + } + + /* ------------------------- Operations on trees ------------------------- */ + + /* Insert chunk into tree */ + #define insert_large_chunk(M, X, S) \ + { \ + \ + tbinptr *H; \ + bindex_t I; \ + compute_tree_index(S, I); \ + H = treebin_at(M, I); \ + X->index = I; \ + X->child[0] = X->child[1] = 0; \ + if (!treemap_is_marked(M, I)) { \ + \ + mark_treemap(M, I); \ + *H = X; \ + X->parent = (tchunkptr)H; \ + X->fd = X->bk = X; \ + \ + } else { \ + \ + tchunkptr T = *H; \ + size_t K = S << leftshift_for_tree_index(I); \ + for (;;) { \ + \ + if (chunksize(T) != S) { \ + \ + tchunkptr *C = \ + &(T->child[(K >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]); \ + K <<= 1; \ + if (*C != 0) \ + T = *C; \ + else if (RTCHECK(ok_address(M, C))) { \ + \ + *C = X; \ + X->parent = T; \ + X->fd = X->bk = X; \ + break; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + break; \ + \ + } \ + \ + } else { \ + \ + tchunkptr F = T->fd; \ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) { \ + \ + T->fd = F->bk = X; \ + X->fd = F; \ + X->bk = T; \ + X->parent = 0; \ + break; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + break; \ + \ + } \ + \ + } \ + \ + } \ + \ + } \ + \ + } /* Unlink steps: @@ -3764,104 +4068,145 @@ static void internal_malloc_stats(mstate m) { x's parent and children to x's replacement (or null if none). 
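
   As a sketch of the macro below, the replacement R is chosen so:

     if (X->bk != X)      X is on a same-size chain: splice it out
                          and let R = X->bk;
     else                 walk down from X's children, taking
                          child[1] where possible, to the last node;
                          detach it and let R be that node;
     finally rewire X->parent and both X->child slots onto R.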
*/ -#define unlink_large_chunk(M, X) {\ - tchunkptr XP = X->parent;\ - tchunkptr R;\ - if (X->bk != X) {\ - tchunkptr F = X->fd;\ - R = X->bk;\ - if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ - F->bk = R;\ - R->fd = F;\ - }\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else {\ - tchunkptr* RP;\ - if (((R = *(RP = &(X->child[1]))) != 0) ||\ - ((R = *(RP = &(X->child[0]))) != 0)) {\ - tchunkptr* CP;\ - while ((*(CP = &(R->child[1])) != 0) ||\ - (*(CP = &(R->child[0])) != 0)) {\ - R = *(RP = CP);\ - }\ - if (RTCHECK(ok_address(M, RP)))\ - *RP = 0;\ - else {\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - }\ - if (XP != 0) {\ - tbinptr* H = treebin_at(M, X->index);\ - if (X == *H) {\ - if ((*H = R) == 0) \ - clear_treemap(M, X->index);\ - }\ - else if (RTCHECK(ok_address(M, XP))) {\ - if (XP->child[0] == X) \ - XP->child[0] = R;\ - else \ - XP->child[1] = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - if (R != 0) {\ - if (RTCHECK(ok_address(M, R))) {\ - tchunkptr C0, C1;\ - R->parent = XP;\ - if ((C0 = X->child[0]) != 0) {\ - if (RTCHECK(ok_address(M, C0))) {\ - R->child[0] = C0;\ - C0->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - if ((C1 = X->child[1]) != 0) {\ - if (RTCHECK(ok_address(M, C1))) {\ - R->child[1] = C1;\ - C1->parent = R;\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ - else\ - CORRUPTION_ERROR_ACTION(M);\ - }\ - }\ -} + #define unlink_large_chunk(M, X) \ + { \ + \ + tchunkptr XP = X->parent; \ + tchunkptr R; \ + if (X->bk != X) { \ + \ + tchunkptr F = X->fd; \ + R = X->bk; \ + if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) { \ + \ + F->bk = R; \ + R->fd = F; \ + \ + } else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else { \ + \ + tchunkptr *RP; \ + if (((R = *(RP = &(X->child[1]))) != 0) || \ + ((R = *(RP = &(X->child[0]))) != 0)) { \ + \ + tchunkptr *CP; \ + while ((*(CP = &(R->child[1])) != 0) || \ + (*(CP = &(R->child[0])) != 0)) { \ + \ + R = *(RP = CP); \ + \ + } \ + if (RTCHECK(ok_address(M, RP))) \ + *RP = 0; \ + else { \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } \ + \ + } \ + if (XP != 0) { \ + \ + tbinptr *H = treebin_at(M, X->index); \ + if (X == *H) { \ + \ + if ((*H = R) == 0) clear_treemap(M, X->index); \ + \ + } else if (RTCHECK(ok_address(M, XP))) { \ + \ + if (XP->child[0] == X) \ + XP->child[0] = R; \ + else \ + XP->child[1] = R; \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + if (R != 0) { \ + \ + if (RTCHECK(ok_address(M, R))) { \ + \ + tchunkptr C0, C1; \ + R->parent = XP; \ + if ((C0 = X->child[0]) != 0) { \ + \ + if (RTCHECK(ok_address(M, C0))) { \ + \ + R->child[0] = C0; \ + C0->parent = R; \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + if ((C1 = X->child[1]) != 0) { \ + \ + if (RTCHECK(ok_address(M, C1))) { \ + \ + R->child[1] = C1; \ + C1->parent = R; \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } else \ + \ + CORRUPTION_ERROR_ACTION(M); \ + \ + } \ + \ + } \ + \ + } /* Relays to large vs small bin operations */ -#define insert_chunk(M, P, S)\ - if (is_small(S)) insert_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } - -#define unlink_chunk(M, P, S)\ - if (is_small(S)) unlink_small_chunk(M, P, S)\ - else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + #define insert_chunk(M, P, S) \ + if (is_small(S)) insert_small_chunk(M, P, S) else { \ + \ + tchunkptr TP = (tchunkptr)(P); \ + insert_large_chunk(M, TP, S); \ + \ + } + #define unlink_chunk(M, P, S) \ + if 
(is_small(S)) unlink_small_chunk(M, P, S) else { \ + \ + tchunkptr TP = (tchunkptr)(P); \ + unlink_large_chunk(M, TP); \ + \ + } /* Relays to internal calls to malloc/free from realloc, memalign etc */ -#if ONLY_MSPACES -#define internal_malloc(m, b) mspace_malloc(m, b) -#define internal_free(m, mem) mspace_free(m,mem); -#else /* ONLY_MSPACES */ -#if MSPACES -#define internal_malloc(m, b)\ - ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) -#define internal_free(m, mem)\ - if (m == gm) dlfree(mem); else mspace_free(m,mem); -#else /* MSPACES */ -#define internal_malloc(m, b) dlmalloc(b) -#define internal_free(m, mem) dlfree(mem) -#endif /* MSPACES */ -#endif /* ONLY_MSPACES */ + #if ONLY_MSPACES + #define internal_malloc(m, b) mspace_malloc(m, b) + #define internal_free(m, mem) mspace_free(m, mem); + #else /* ONLY_MSPACES */ + #if MSPACES + #define internal_malloc(m, b) \ + ((m == gm) ? dlmalloc(b) : mspace_malloc(m, b)) + #define internal_free(m, mem) \ + if (m == gm) \ + dlfree(mem); \ + else \ + mspace_free(m, mem); + #else /* MSPACES */ + #define internal_malloc(m, b) dlmalloc(b) + #define internal_free(m, mem) dlfree(mem) + #endif /* MSPACES */ + #endif /* ONLY_MSPACES */ /* ----------------------- Direct-mmapping chunks ----------------------- */ @@ -3874,80 +4219,93 @@ static void internal_malloc_stats(mstate m) { */ /* Malloc using mmap */ -static void* mmap_alloc(mstate m, size_t nb) { +static void *mmap_alloc(mstate m, size_t nb) { + size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); if (m->footprint_limit != 0) { + size_t fp = m->footprint + mmsize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; + if (fp <= m->footprint || fp > m->footprint_limit) return 0; + } - if (mmsize > nb) { /* Check for wrap around 0 */ - char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); + + if (mmsize > nb) { /* Check for wrap around 0 */ + char *mm = (char *)(CALL_DIRECT_MMAP(mmsize)); if (mm != CMFAIL) { - size_t offset = align_offset(chunk2mem(mm)); - size_t psize = mmsize - offset - MMAP_FOOT_PAD; + + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; mchunkptr p = (mchunkptr)(mm + offset); p->prev_foot = offset; p->head = psize; mark_inuse_foot(m, p, psize); chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0; - if (m->least_addr == 0 || mm < m->least_addr) - m->least_addr = mm; + if (m->least_addr == 0 || mm < m->least_addr) m->least_addr = mm; if ((m->footprint += mmsize) > m->max_footprint) m->max_footprint = m->footprint; assert(is_aligned(chunk2mem(p))); check_mmapped_chunk(m, p); return chunk2mem(p); + } + } + return 0; + } /* Realloc using mmap */ static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { + size_t oldsize = chunksize(oldp); - (void)flags; /* placate people compiling -Wunused */ - if (is_small(nb)) /* Can't shrink mmap regions below small size */ + (void)flags; /* placate people compiling -Wunused */ + if (is_small(nb)) /* Can't shrink mmap regions below small size */ return 0; /* Keep old chunk if big enough but not too big */ if (oldsize >= nb + SIZE_T_SIZE && (oldsize - nb) <= (mparams.granularity << 1)) return oldp; else { + size_t offset = oldp->prev_foot; size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - char* cp = (char*)CALL_MREMAP((char*)oldp - offset, - oldmmsize, newmmsize, flags); + char * 
cp = + (char *)CALL_MREMAP((char *)oldp - offset, oldmmsize, newmmsize, flags); if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); - size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; newp->head = psize; mark_inuse_foot(m, newp, psize); chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; - chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0; - if (cp < m->least_addr) - m->least_addr = cp; + if (cp < m->least_addr) m->least_addr = cp; if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) m->max_footprint = m->footprint; check_mmapped_chunk(m, newp); return newp; + } + } + return 0; -} +} /* -------------------------- mspace management -------------------------- */ /* Initialize top chunk and its size */ static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ size_t offset = align_offset(chunk2mem(p)); - p = (mchunkptr)((char*)p + offset); + p = (mchunkptr)((char *)p + offset); psize -= offset; m->top = p; @@ -3955,23 +4313,29 @@ static void init_top(mstate m, mchunkptr p, size_t psize) { p->head = psize | PINUSE_BIT; /* set size of fake trailing chunk holding overhead space only once */ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; - m->trim_check = mparams.trim_threshold; /* reset on each update */ + m->trim_check = mparams.trim_threshold; /* reset on each update */ + } /* Initialize bins for a new mstate that is otherwise zeroed out */ static void init_bins(mstate m) { + /* Establish circular links for smallbins */ bindex_t i; for (i = 0; i < NSMALLBINS; ++i) { - sbinptr bin = smallbin_at(m,i); + + sbinptr bin = smallbin_at(m, i); bin->fd = bin->bk = bin; + } + } -#if PROCEED_ON_ERROR + #if PROCEED_ON_ERROR /* default corruption action */ static void reset_on_error(mstate m) { + int i; ++malloc_corruption_error_count; /* Reinitialize fields to forget about all memory */ @@ -3984,67 +4348,78 @@ static void reset_on_error(mstate m) { for (i = 0; i < NTREEBINS; ++i) *treebin_at(m, i) = 0; init_bins(m); + } -#endif /* PROCEED_ON_ERROR */ + + #endif /* PROCEED_ON_ERROR */ /* Allocate chunk and prepend remainder with chunk in successor base. 
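
   (Sketch of the cases handled below: the request p of size nb is
   carved from newbase, and the remainder q is consolidated with
   oldfirst, the first chunk of the old base:

     oldfirst == m->top   ->  q becomes the new top chunk
     oldfirst == m->dv    ->  q becomes the new dv chunk
     oldfirst is free     ->  coalesce q with oldfirst, then bin it
     oldfirst is in use   ->  bin q as an ordinary free chunk)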
*/ -static void* prepend_alloc(mstate m, char* newbase, char* oldbase, - size_t nb) { +static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb) { + mchunkptr p = align_as_chunk(newbase); mchunkptr oldfirst = align_as_chunk(oldbase); - size_t psize = (char*)oldfirst - (char*)p; + size_t psize = (char *)oldfirst - (char *)p; mchunkptr q = chunk_plus_offset(p, nb); - size_t qsize = psize - nb; + size_t qsize = psize - nb; set_size_and_pinuse_of_inuse_chunk(m, p, nb); - assert((char*)oldfirst > (char*)q); + assert((char *)oldfirst > (char *)q); assert(pinuse(oldfirst)); assert(qsize >= MIN_CHUNK_SIZE); /* consolidate remainder with first chunk of old base */ if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; m->top = q; q->head = tsize | PINUSE_BIT; check_top_chunk(m, q); - } - else if (oldfirst == m->dv) { + + } else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; m->dv = q; set_size_and_pinuse_of_free_chunk(q, dsize); - } - else { + + } else { + if (!is_inuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); unlink_chunk(m, oldfirst, nsize); oldfirst = chunk_plus_offset(oldfirst, nsize); qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); insert_chunk(m, q, qsize); check_free_chunk(m, q); + } check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); + } /* Add a segment to hold a new noncontiguous region */ -static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { +static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ - char* old_top = (char*)m->top; + char * old_top = (char *)m->top; msegmentptr oldsp = segment_holding(m, old_top); - char* old_end = oldsp->base + oldsp->size; - size_t ssize = pad_request(sizeof(struct malloc_segment)); - char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); - size_t offset = align_offset(chunk2mem(rawsp)); - char* asp = rawsp + offset; - char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; - mchunkptr sp = (mchunkptr)csp; + char * old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char * rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char * asp = rawsp + offset; + char * csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? 
old_top : asp; + mchunkptr sp = (mchunkptr)csp; msegmentptr ss = (msegmentptr)(chunk2mem(sp)); - mchunkptr tnext = chunk_plus_offset(sp, ssize); - mchunkptr p = tnext; - int nfences = 0; + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; /* reset top to new space */ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); @@ -4052,7 +4427,7 @@ static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Set up segment record */ assert(is_aligned(ss)); set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); - *ss = m->seg; /* Push current record */ + *ss = m->seg; /* Push current record */ m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmapped; @@ -4060,53 +4435,61 @@ static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { /* Insert trailing fenceposts */ for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); p->head = FENCEPOST_HEAD; ++nfences; - if ((char*)(&(nextp->head)) < old_end) + if ((char *)(&(nextp->head)) < old_end) p = nextp; else break; + } + assert(nfences >= 2); /* Insert the rest of old top into a bin as an ordinary free chunk */ if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; - size_t psize = csp - old_top; + size_t psize = csp - old_top; mchunkptr tn = chunk_plus_offset(q, psize); set_free_with_pinuse(q, psize, tn); insert_chunk(m, q, psize); + } check_top_chunk(m, m->top); + } /* -------------------------- System allocation -------------------------- */ /* Get memory from system using MORECORE or MMAP */ -static void* sys_alloc(mstate m, size_t nb) { - char* tbase = CMFAIL; +static void *sys_alloc(mstate m, size_t nb) { + + char * tbase = CMFAIL; size_t tsize = 0; flag_t mmap_flag = 0; - size_t asize; /* allocation size */ + size_t asize; /* allocation size */ ensure_initialization(); /* Directly map large chunks, but only if already initialized */ if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { - void* mem = mmap_alloc(m, nb); - if (mem != 0) - return mem; + + void *mem = mmap_alloc(m, nb); + if (mem != 0) return mem; + } asize = granularity_align(nb + SYS_ALLOC_PADDING); - if (asize <= nb) - return 0; /* wraparound */ + if (asize <= nb) return 0; /* wraparound */ if (m->footprint_limit != 0) { + size_t fp = m->footprint + asize; - if (fp <= m->footprint || fp > m->footprint_limit) - return 0; + if (fp <= m->footprint || fp > m->footprint_limit) return 0; + } /* @@ -4132,91 +4515,119 @@ static void* sys_alloc(mstate m, size_t nb) { */ if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { - char* br = CMFAIL; - size_t ssize = asize; /* sbrk call size */ - msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); + + char * br = CMFAIL; + size_t ssize = asize; /* sbrk call size */ + msegmentptr ss = (m->top == 0) ? 
0 : segment_holding(m, (char *)m->top); ACQUIRE_MALLOC_GLOBAL_LOCK(); - if (ss == 0) { /* First time through or recovery */ - char* base = (char*)CALL_MORECORE(0); + if (ss == 0) { /* First time through or recovery */ + char *base = (char *)CALL_MORECORE(0); if (base != CMFAIL) { + size_t fp; /* Adjust to end on a page boundary */ if (!is_page_aligned(base)) ssize += (page_align((size_t)base) - (size_t)base); - fp = m->footprint + ssize; /* recheck limits */ + fp = m->footprint + ssize; /* recheck limits */ if (ssize > nb && ssize < HALF_MAX_SIZE_T && (m->footprint_limit == 0 || (fp > m->footprint && fp <= m->footprint_limit)) && - (br = (char*)(CALL_MORECORE(ssize))) == base) { + (br = (char *)(CALL_MORECORE(ssize))) == base) { + tbase = base; tsize = ssize; + } + } - } - else { + + } else { + /* Subtract out existing available top space from MORECORE request. */ ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); /* Use mem here only if it did continuously extend old space */ if (ssize < HALF_MAX_SIZE_T && - (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { + (br = (char *)(CALL_MORECORE(ssize))) == ss->base + ss->size) { + tbase = br; tsize = ssize; + } + } - if (tbase == CMFAIL) { /* Cope with partial failure */ - if (br != CMFAIL) { /* Try to use/extend the space we did get */ - if (ssize < HALF_MAX_SIZE_T && - ssize < nb + SYS_ALLOC_PADDING) { + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (ssize < HALF_MAX_SIZE_T && ssize < nb + SYS_ALLOC_PADDING) { + size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); if (esize < HALF_MAX_SIZE_T) { - char* end = (char*)CALL_MORECORE(esize); + + char *end = (char *)CALL_MORECORE(esize); if (end != CMFAIL) ssize += esize; - else { /* Can't use; try to release */ - (void) CALL_MORECORE(-ssize); + else { /* Can't use; try to release */ + (void)CALL_MORECORE(-ssize); br = CMFAIL; + } + } + } + } - if (br != CMFAIL) { /* Use the space we did get */ + + if (br != CMFAIL) { /* Use the space we did get */ tbase = br; tsize = ssize; - } - else - disable_contiguous(m); /* Don't try contiguous path in the future */ + + } else + + disable_contiguous(m); /* Don't try contiguous path in the future */ + } RELEASE_MALLOC_GLOBAL_LOCK(); + } - if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ - char* mp = (char*)(CALL_MMAP(asize)); + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + char *mp = (char *)(CALL_MMAP(asize)); if (mp != CMFAIL) { + tbase = mp; tsize = asize; mmap_flag = USE_MMAP_BIT; + } + } - if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ if (asize < HALF_MAX_SIZE_T) { - char* br = CMFAIL; - char* end = CMFAIL; + + char *br = CMFAIL; + char *end = CMFAIL; ACQUIRE_MALLOC_GLOBAL_LOCK(); - br = (char*)(CALL_MORECORE(asize)); - end = (char*)(CALL_MORECORE(0)); + br = (char *)(CALL_MORECORE(asize)); + end = (char *)(CALL_MORECORE(0)); RELEASE_MALLOC_GLOBAL_LOCK(); if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; tsize = ssize; + } + } + } + } if (tbase != CMFAIL) { @@ -4224,61 +4635,66 @@ static void* sys_alloc(mstate m, size_t nb) { if ((m->footprint += tsize) > m->max_footprint) m->max_footprint = m->footprint; - if (!is_initialized(m)) { /* first-time initialization */ - if (m->least_addr == 0 || tbase < m->least_addr) - m->least_addr = tbase; + if 
(!is_initialized(m)) { /* first-time initialization */ + if (m->least_addr == 0 || tbase < m->least_addr) m->least_addr = tbase; m->seg.base = tbase; m->seg.size = tsize; m->seg.sflags = mmap_flag; m->magic = mparams.magic; m->release_checks = MAX_RELEASE_CHECK_RATE; init_bins(m); -#if !ONLY_MSPACES + #if !ONLY_MSPACES if (is_global(m)) init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); else -#endif + #endif { + /* Offset top by embedded malloc_state */ mchunkptr mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); + } + } else { + /* Try to merge with an existing segment */ msegmentptr sp = &m->seg; /* Only consider most recent segment if traversal suppressed */ while (sp != 0 && tbase != sp->base + sp->size) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && + if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag && - segment_holds(sp, m->top)) { /* append */ + segment_holds(sp, m->top)) { /* append */ sp->size += tsize; init_top(m, m->top, m->topsize + tsize); - } - else { - if (tbase < m->least_addr) - m->least_addr = tbase; + + } else { + + if (tbase < m->least_addr) m->least_addr = tbase; sp = &m->seg; while (sp != 0 && sp->base != tbase + tsize) sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; - if (sp != 0 && - !is_extern_segment(sp) && + if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & USE_MMAP_BIT) == mmap_flag) { - char* oldbase = sp->base; + + char *oldbase = sp->base; sp->base = tbase; sp->size += tsize; return prepend_alloc(m, tbase, oldbase, nb); - } - else + + } else + add_segment(m, tbase, tsize, mmap_flag); + } + } - if (nb < m->topsize) { /* Allocate from new or extended top space */ - size_t rsize = m->topsize -= nb; + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; mchunkptr p = m->top; mchunkptr r = m->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -4286,353 +4702,463 @@ static void* sys_alloc(mstate m, size_t nb) { check_top_chunk(m, m->top); check_malloced_chunk(m, chunk2mem(p), nb); return chunk2mem(p); + } + } MALLOC_FAILURE_ACTION; return 0; + } /* ----------------------- system deallocation -------------------------- */ /* Unmap and unlink any mmapped segments that don't contain used chunks */ static size_t release_unused_segments(mstate m) { - size_t released = 0; - int nsegs = 0; + + size_t released = 0; + int nsegs = 0; msegmentptr pred = &m->seg; msegmentptr sp = pred->next; while (sp != 0) { - char* base = sp->base; - size_t size = sp->size; + + char * base = sp->base; + size_t size = sp->size; msegmentptr next = sp->next; ++nsegs; if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); - size_t psize = chunksize(p); + size_t psize = chunksize(p); /* Can unmap if first chunk holds entire segment and not pinned */ - if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + if (!is_inuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; - assert(segment_holds(sp, (char*)sp)); + assert(segment_holds(sp, (char *)sp)); if (p == m->dv) { + m->dv = 0; m->dvsize = 0; - } - else { + + } else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; m->footprint -= size; /* unlink obsoleted record */ sp = pred; sp->next = next; - } - else { /* back out if cannot unmap */ + + } 
else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } - if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ + + if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ break; pred = sp; sp = next; + } + /* Reset check counter */ - m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? - (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); + m->release_checks = (((size_t)nsegs > (size_t)MAX_RELEASE_CHECK_RATE) + ? (size_t)nsegs + : (size_t)MAX_RELEASE_CHECK_RATE); return released; + } static int sys_trim(mstate m, size_t pad) { + size_t released = 0; ensure_initialization(); if (pad < MAX_REQUEST && is_initialized(m)) { - pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ size_t unit = mparams.granularity; - size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - - SIZE_T_ONE) * unit; - msegmentptr sp = segment_holding(m, (char*)m->top); + size_t extra = + ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char *)m->top); if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { - if (HAVE_MMAP && - sp->size >= extra && - !has_segment_link(m, sp)) { /* can't shrink if pinned */ + + if (HAVE_MMAP && sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ size_t newsize = sp->size - extra; - (void)newsize; /* placate people compiling -Wunused-variable */ + (void)newsize; /* placate people compiling -Wunused-variable */ /* Prefer mremap, fall back to munmap */ if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } - } - else if (HAVE_MORECORE) { - if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + + } else if (HAVE_MORECORE) { + + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; ACQUIRE_MALLOC_GLOBAL_LOCK(); { + /* Make sure end of memory is where we last set it. */ - char* old_br = (char*)(CALL_MORECORE(0)); + char *old_br = (char *)(CALL_MORECORE(0)); if (old_br == sp->base + sp->size) { - char* rel_br = (char*)(CALL_MORECORE(-extra)); - char* new_br = (char*)(CALL_MORECORE(0)); + + char *rel_br = (char *)(CALL_MORECORE(-extra)); + char *new_br = (char *)(CALL_MORECORE(0)); if (rel_br != CMFAIL && new_br < old_br) released = old_br - new_br; + } + } + RELEASE_MALLOC_GLOBAL_LOCK(); + } + } if (released != 0) { + sp->size -= released; m->footprint -= released; init_top(m, m->top, m->topsize - released); check_top_chunk(m, m->top); + } + } /* Unmap any unused mmapped segments */ - if (HAVE_MMAP) - released += release_unused_segments(m); + if (HAVE_MMAP) released += release_unused_segments(m); /* On failure, disable autotrim to avoid repeated failed future calls */ - if (released == 0 && m->topsize > m->trim_check) - m->trim_check = MAX_SIZE_T; + if (released == 0 && m->topsize > m->trim_check) m->trim_check = MAX_SIZE_T; + } - return (released != 0)? 1 : 0; + return (released != 0) ? 1 : 0; + } /* Consolidate and bin a chunk. Differs from exported versions of free mainly in that the chunk need not be marked as inuse. 
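
   (Usage sketch: the realloc/memalign paths later in this file split
   an oversized chunk in place and hand the tail straight here, e.g.
   in try_realloc_chunk():

     mchunkptr r = chunk_plus_offset(p, nb);
     set_inuse(m, p, nb);
     set_inuse(m, r, rsize);
     dispose_chunk(m, r, rsize);

   bypassing the must-be-inuse validation that dlfree() applies.)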
*/ static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { + mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + mchunkptr prev; - size_t prevsize = p->prev_foot; + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) - m->footprint -= psize; + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) m->footprint -= psize; return; + } + prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ if (p != m->dv) { + unlink_chunk(m, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + m->dvsize = psize; set_free_with_pinuse(p, psize, next); return; + } - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); return; + } + } + if (RTCHECK(ok_address(m, next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == m->top) { + size_t tsize = m->topsize += psize; m->top = p; p->head = tsize | PINUSE_BIT; if (p == m->dv) { + m->dv = 0; m->dvsize = 0; + } + return; - } - else if (next == m->dv) { + + } else if (next == m->dv) { + size_t dsize = m->dvsize += psize; m->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); return; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(m, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == m->dv) { + m->dvsize = psize; return; + } + } - } - else { + + } else { + set_free_with_pinuse(p, psize, next); + } + insert_chunk(m, p, psize); - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); + } + } /* ---------------------------- malloc --------------------------- */ /* allocate a large request from the best fitting chunk in a treebin */ -static void* tmalloc_large(mstate m, size_t nb) { +static void *tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; - size_t rsize = -nb; /* Unsigned negation */ + size_t rsize = -nb; /* Unsigned negation */ tchunkptr t; - bindex_t idx; + bindex_t idx; compute_tree_index(nb, idx); if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ - size_t sizebits = nb << leftshift_for_tree_index(idx); - tchunkptr rst = 0; /* The deepest untaken right subtree */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ for (;;) { + tchunkptr rt; - size_t trem = chunksize(t) - nb; + size_t trem = chunksize(t) - nb; if (trem < rsize) { + v = t; - if ((rsize = trem) == 0) - break; + if ((rsize = trem) == 0) break; + } + rt = t->child[1]; - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; - if (rt != 0 && rt != t) - rst = rt; + t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) rst = rt; if (t == 0) { - t = rst; /* set t to least subtree holding sizes > nb */ + + t = rst; /* set t to least subtree holding sizes > nb */ break; + } + sizebits <<= 1; + } + } - if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; if (leftbits != 0) { + bindex_t i; binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); t = *treebin_at(m, i); + } + } - while (t != 0) { /* find smallest of tree or subtree */ + 
while (t != 0) { /* find smallest of tree or subtree */ size_t trem = chunksize(t) - nb; if (trem < rsize) { + rsize = trem; v = t; + } + t = leftmost_child(t); + } /* If dv is a better fit, return 0 so malloc will use it */ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { - if (RTCHECK(ok_address(m, v))) { /* split */ + + if (RTCHECK(ok_address(m, v))) { /* split */ mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; + } /* allocate a small request from the best fitting chunk in a treebin */ -static void* tmalloc_small(mstate m, size_t nb) { +static void *tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; - size_t rsize; - bindex_t i; - binmap_t leastbit = least_bit(m->treemap); + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); compute_bit2idx(leastbit, i); v = t = *treebin_at(m, i); rsize = chunksize(t) - nb; while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; if (trem < rsize) { + rsize = trem; v = t; + } + } if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); assert(chunksize(v) == rsize + nb); if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); if (rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(m, v, (rsize + nb)); else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } CORRUPTION_ERROR_ACTION(m); return 0; -} -#if !ONLY_MSPACES +} -void* dlmalloc(size_t bytes) { - /* - Basic algorithm: - If a small request (< 256 bytes minus per-chunk overhead): - 1. If one exists, use a remainderless chunk in associated smallbin. - (Remainderless means that there are too few excess bytes to - represent as a chunk.) - 2. If it is big enough, use the dv chunk, which is normally the - chunk adjacent to the one used for the most recent small request. - 3. If one exists, split the smallest available chunk in a bin, - saving remainder in dv. - 4. If it is big enough, use the top chunk. - 5. If available, get memory from system and use it - Otherwise, for a large request: - 1. Find the smallest available binned chunk that fits, and use it - if it is better fitting than dv chunk, splitting if necessary. - 2. If better fitting than any binned chunk, use the dv chunk. - 3. If it is big enough, use the top chunk. - 4. If request size >= mmap threshold, try to directly mmap this chunk. - 5. If available, get memory from system and use it - - The ugly goto's here ensure that postaction occurs along all paths. - */ + #if !ONLY_MSPACES + +void *dlmalloc(size_t bytes) { + + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. 
Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ -#if USE_LOCKS - ensure_initialization(); /* initialize in sys_alloc if not using locks */ -#endif + #if USE_LOCKS + ensure_initialization(); /* initialize in sys_alloc if not using locks */ + #endif if (!PREACTION(gm)) { - void* mem; + + void * mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = gm->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(gm, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -4641,15 +5167,17 @@ void* dlmalloc(size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } else if (nb > gm->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(gm, i); p = b->fd; @@ -4660,54 +5188,71 @@ void* dlmalloc(size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(gm, p, small_index2size(i)); else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); goto postaction; + } + } - } - else if (bytes >= MAX_REQUEST) + + } else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ else { + nb = pad_request(bytes); if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); goto postaction; + } + } if (nb <= gm->dvsize) { - size_t rsize = gm->dvsize - nb; + + size_t rsize = gm->dvsize - nb; mchunkptr p = gm->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = gm->dv = chunk_plus_offset(p, nb); gm->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(gm, p, nb); - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t dvs = gm->dvsize; gm->dvsize = 0; gm->dv = 0; set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); check_malloced_chunk(gm, mem, nb); goto postaction; + } - else if (nb < gm->topsize) { /* Split top */ - size_t rsize = gm->topsize -= nb; + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; mchunkptr p = gm->top; mchunkptr r = gm->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -4716,6 +5261,7 @@ void* dlmalloc(size_t bytes) { check_top_chunk(gm, gm->top); check_malloced_chunk(gm, mem, nb); goto postaction; + } mem = sys_alloc(gm, nb); @@ -4723,14 +5269,17 @@ void* dlmalloc(size_t bytes) { postaction: POSTACTION(gm); return mem; + } return 0; + } /* ---------------------------- free --------------------------- */ -void dlfree(void* mem) { +void dlfree(void *mem) { + /* Consolidate freed chunks with preceeding or succeeding bordering free chunks, if they exist, and then place in a bin. Intermixed @@ -4738,164 +5287,216 @@ void dlfree(void* mem) { */ if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS + + mchunkptr p = mem2chunk(mem); + #if FOOTERS mstate fm = get_mstate_for(p); if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); return; + } -#else /* FOOTERS */ -#define fm gm -#endif /* FOOTERS */ + + #else /* FOOTERS */ + #define fm gm + #endif /* FOOTERS */ if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = chunksize(p); + + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - } - else { + + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; + } - } - else + + } else + goto erroraction; + } + } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { + size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { + fm->dv = 0; fm->dvsize = 0; + } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); + + if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; - } - else if (next == fm->dv) { + + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, 
dsize); goto postaction; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { + fm->dvsize = psize; goto postaction; + } + } - } - else + + } else + set_free_with_pinuse(p, psize, next); if (is_small(psize)) { + insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - } - else { + + } else { + tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); + if (--fm->release_checks == 0) release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); + } + } -#if !FOOTERS -#undef fm -#endif /* FOOTERS */ + + #if !FOOTERS + #undef fm + #endif /* FOOTERS */ + } -void* dlcalloc(size_t n_elements, size_t elem_size) { - void* mem; +void *dlcalloc(size_t n_elements, size_t elem_size) { + + void * mem; size_t req = 0; if (n_elements != 0) { + req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; + } -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ /* ------------ Internal support for realloc, memalign, etc -------------- */ /* Try to realloc; only in-place unless can_move true */ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, int can_move) { + mchunkptr newp = 0; - size_t oldsize = chunksize(p); + size_t oldsize = chunksize(p); mchunkptr next = chunk_plus_offset(p, oldsize); - if (RTCHECK(ok_address(m, p) && ok_inuse(p) && - ok_next(p, next) && ok_pinuse(next))) { + if (RTCHECK(ok_address(m, p) && ok_inuse(p) && ok_next(p, next) && + ok_pinuse(next))) { + if (is_mmapped(p)) { + newp = mmap_resize(m, p, nb, can_move); - } - else if (oldsize >= nb) { /* already big enough */ + + } else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; - if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ + if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); + } + newp = p; - } - else if (next == m->top) { /* extend into top */ + + } else if (next == m->top) { /* extend into top */ + if (oldsize + m->topsize > nb) { - size_t newsize = oldsize + m->topsize; - size_t newtopsize = newsize - nb; + + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; mchunkptr newtop = chunk_plus_offset(p, nb); set_inuse(m, p, nb); - newtop->head = newtopsize |PINUSE_BIT; + newtop->head = newtopsize | PINUSE_BIT; m->top = newtop; m->topsize = newtopsize; newp = p; + } - } - else if (next == m->dv) { /* extend into dv */ + + } else if (next == m->dv) { /* extend into dv */ + size_t dvs = m->dvsize; if (oldsize + dvs >= nb) { + size_t dsize = oldsize + dvs - nb; if (dsize >= MIN_CHUNK_SIZE) { + mchunkptr r = chunk_plus_offset(p, nb); mchunkptr n = chunk_plus_offset(r, dsize); set_inuse(m, p, nb); @@ -4903,64 +5504,87 @@ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, clear_pinuse(n); m->dvsize = dsize; m->dv = r; - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t newsize = oldsize + dvs; set_inuse(m, p, newsize); m->dvsize = 
0; m->dv = 0; + } + newp = p; + } - } - else if (!cinuse(next)) { /* extend into next free chunk */ + + } else if (!cinuse(next)) { /* extend into next free chunk */ + size_t nextsize = chunksize(next); if (oldsize + nextsize >= nb) { + size_t rsize = oldsize + nextsize - nb; unlink_chunk(m, next, nextsize); if (rsize < MIN_CHUNK_SIZE) { + size_t newsize = oldsize + nextsize; set_inuse(m, p, newsize); - } - else { + + } else { + mchunkptr r = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, r, rsize); dispose_chunk(m, r, rsize); + } + newp = p; + } + } - } - else { + + } else { + USAGE_ERROR_ACTION(m, chunk2mem(p)); + } + return newp; + } -static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { - void* mem = 0; - if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ +static void *internal_memalign(mstate m, size_t alignment, size_t bytes) { + + void *mem = 0; + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ alignment = MIN_CHUNK_SIZE; - if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */ size_t a = MALLOC_ALIGNMENT << 1; - while (a < alignment) a <<= 1; + while (a < alignment) + a <<= 1; alignment = a; + } + if (bytes >= MAX_REQUEST - alignment) { - if (m != 0) { /* Test isn't needed but avoids compiler warning */ + + if (m != 0) { /* Test isn't needed but avoids compiler warning */ MALLOC_FAILURE_ACTION; + } - } - else { + + } else { + size_t nb = request2size(bytes); size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; mem = internal_malloc(m, req); if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (PREACTION(m)) - return 0; - if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ /* Find an aligned spot inside chunk. Since we need to give back leading space in a chunk of at least MIN_CHUNK_SIZE, if @@ -4969,47 +5593,59 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { We've allocated enough total room so that this is always possible. */ - char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - - SIZE_T_ONE)) & - -alignment)); - char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? - br : br+alignment; + char * br = (char *)mem2chunk((size_t)( + ((size_t)((char *)mem + alignment - SIZE_T_ONE)) & -alignment)); + char * pos = ((size_t)(br - (char *)(p)) >= MIN_CHUNK_SIZE) + ? 
br + : br + alignment; mchunkptr newp = (mchunkptr)pos; - size_t leadsize = pos - (char*)(p); - size_t newsize = chunksize(p) - leadsize; + size_t leadsize = pos - (char *)(p); + size_t newsize = chunksize(p) - leadsize; - if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ newp->prev_foot = p->prev_foot + leadsize; newp->head = newsize; - } - else { /* Otherwise, give back leader, use the rest */ + + } else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); set_inuse(m, p, leadsize); dispose_chunk(m, p, leadsize); + } + p = newp; + } /* Give back spare room at the end */ if (!is_mmapped(p)) { + size_t size = chunksize(p); if (size > nb + MIN_CHUNK_SIZE) { - size_t remainder_size = size - nb; + + size_t remainder_size = size - nb; mchunkptr remainder = chunk_plus_offset(p, nb); set_inuse(m, p, nb); set_inuse(m, remainder, remainder_size); dispose_chunk(m, remainder, remainder_size); + } + } mem = chunk2mem(p); - assert (chunksize(p) >= nb); + assert(chunksize(p) >= nb); assert(((size_t)mem & (alignment - 1)) == 0); check_inuse_chunk(m, p); POSTACTION(m); + } + } + return mem; + } /* @@ -5019,50 +5655,50 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { bit 0 set if all elements are same size (using sizes[0]) bit 1 set if elements should be zeroed */ -static void** ialloc(mstate m, - size_t n_elements, - size_t* sizes, - int opts, - void* chunks[]) { - - size_t element_size; /* chunksize of each element, if all same */ - size_t contents_size; /* total size of elements */ - size_t array_size; /* request size of pointer array */ - void* mem; /* malloced aggregate space */ - mchunkptr p; /* corresponding chunk */ - size_t remainder_size; /* remaining bytes while splitting */ - void** marray; /* either "chunks" or malloced ptr array */ - mchunkptr array_chunk; /* chunk for malloced ptr array */ - flag_t was_enabled; /* to disable mmap */ +static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, + void *chunks[]) { + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void * mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void ** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ size_t size; size_t i; ensure_initialization(); /* compute array length, if needed */ if (chunks != 0) { - if (n_elements == 0) - return chunks; /* nothing to do */ + + if (n_elements == 0) return chunks; /* nothing to do */ marray = chunks; array_size = 0; - } - else { + + } else { + /* if empty req, must still return chunk representing empty array */ - if (n_elements == 0) - return (void**)internal_malloc(m, 0); + if (n_elements == 0) return (void **)internal_malloc(m, 0); marray = 0; - array_size = request2size(n_elements * (sizeof(void*))); + array_size = request2size(n_elements * (sizeof(void *))); + } /* compute total element size */ - if (opts & 0x1) { /* all-same-size */ + if (opts & 0x1) { /* all-same-size */ element_size = request2size(*sizes); contents_size = n_elements * element_size; - } - else { /* add up all the sizes */ + + } else { /* add up all the sizes */ + element_size = 0; contents_size = 0; for (i = 0; i != n_elements; 
++i) contents_size += request2size(sizes[i]); + } size = contents_size + array_size; @@ -5075,10 +5711,8 @@ static void** ialloc(mstate m, was_enabled = use_mmap(m); disable_mmap(m); mem = internal_malloc(m, size - CHUNK_OVERHEAD); - if (was_enabled) - enable_mmap(m); - if (mem == 0) - return 0; + if (was_enabled) enable_mmap(m); + if (mem == 0) return 0; if (PREACTION(m)) return 0; p = mem2chunk(mem); @@ -5086,24 +5720,30 @@ static void** ialloc(mstate m, assert(!is_mmapped(p)); - if (opts & 0x2) { /* optionally clear the elements */ - __builtin_memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + if (opts & 0x2) { /* optionally clear the elements */ + __builtin_memset((size_t *)mem, 0, + remainder_size - SIZE_T_SIZE - array_size); + } /* If not provided, allocate the pointer array as final part of chunk */ if (marray == 0) { - size_t array_chunk_size; + + size_t array_chunk_size; array_chunk = chunk_plus_offset(p, contents_size); array_chunk_size = remainder_size - contents_size; - marray = (void**) (chunk2mem(array_chunk)); + marray = (void **)(chunk2mem(array_chunk)); set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); remainder_size = contents_size; + } /* split out elements */ - for (i = 0; ; ++i) { + for (i = 0;; ++i) { + marray[i] = chunk2mem(p); - if (i != n_elements-1) { + if (i != n_elements - 1) { + if (element_size != 0) size = element_size; else @@ -5111,31 +5751,42 @@ static void** ialloc(mstate m, remainder_size -= size; set_size_and_pinuse_of_inuse_chunk(m, p, size); p = chunk_plus_offset(p, size); - } - else { /* the final element absorbs any overallocation slop */ + + } else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); break; + } + } -#if DEBUG + #if DEBUG if (marray != chunks) { + /* final element must have exactly exhausted chunk */ if (element_size != 0) { + assert(remainder_size == element_size); - } - else { + + } else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) check_inuse_chunk(m, mem2chunk(marray[i])); -#endif /* DEBUG */ + #endif /* DEBUG */ POSTACTION(m); return marray; + } /* Try to free all pointers in the given array. @@ -5145,316 +5796,431 @@ static void** ialloc(mstate m, chunks before freeing, which will occur often if allocated with ialloc or the array is sorted. 
*/ -static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) { +static size_t internal_bulk_free(mstate m, void *array[], size_t nelem) { + size_t unfreed = 0; if (!PREACTION(m)) { - void** a; - void** fence = &(array[nelem]); + + void **a; + void **fence = &(array[nelem]); for (a = array; a != fence; ++a) { - void* mem = *a; + + void *mem = *a; if (mem != 0) { + mchunkptr p = mem2chunk(mem); - size_t psize = chunksize(p); -#if FOOTERS + size_t psize = chunksize(p); + #if FOOTERS if (get_mstate_for(p) != m) { + ++unfreed; continue; + } -#endif + + #endif check_inuse_chunk(m, p); *a = 0; if (RTCHECK(ok_address(m, p) && ok_inuse(p))) { - void ** b = a + 1; /* try to merge with next chunk */ + + void ** b = a + 1; /* try to merge with next chunk */ mchunkptr next = next_chunk(p); if (b != fence && *b == chunk2mem(next)) { + size_t newsize = chunksize(next) + psize; set_inuse(m, p, newsize); *b = chunk2mem(p); - } - else + + } else + dispose_chunk(m, p, psize); - } - else { + + } else { + CORRUPTION_ERROR_ACTION(m); break; + } + } + } - if (should_trim(m, m->topsize)) - sys_trim(m, 0); + + if (should_trim(m, m->topsize)) sys_trim(m, 0); POSTACTION(m); + } + return unfreed; + } -/* Traversal */ -#if MALLOC_INSPECT_ALL + /* Traversal */ + #if MALLOC_INSPECT_ALL static void internal_inspect_all(mstate m, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + void (*handler)(void *start, void *end, + size_t used_bytes, + void * callback_arg), + void *arg) { + if (is_initialized(m)) { - mchunkptr top = m->top; + + mchunkptr top = m->top; msegmentptr s; for (s = &m->seg; s != 0; s = s->next) { + mchunkptr q = align_as_chunk(s->base); while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) { + mchunkptr next = next_chunk(q); - size_t sz = chunksize(q); - size_t used; - void* start; + size_t sz = chunksize(q); + size_t used; + void * start; if (is_inuse(q)) { - used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ + + used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ start = chunk2mem(q); - } - else { + + } else { + used = 0; - if (is_small(sz)) { /* offset by possible bookkeeping */ - start = (void*)((char*)q + sizeof(struct malloc_chunk)); - } - else { - start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); + if (is_small(sz)) { /* offset by possible bookkeeping */ + start = (void *)((char *)q + sizeof(struct malloc_chunk)); + + } else { + + start = (void *)((char *)q + sizeof(struct malloc_tree_chunk)); + } + } - if (start < (void*)next) /* skip if all space is bookkeeping */ + + if (start < (void *)next) /* skip if all space is bookkeeping */ handler(start, next, used, arg); - if (q == top) - break; + if (q == top) break; q = next; + } + } + } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ /* ------------------ Exported realloc, memalign, etc -------------------- */ -#if !ONLY_MSPACES + #if !ONLY_MSPACES -void* dlrealloc(void* oldmem, size_t bytes) { - void* mem = 0; +void *dlrealloc(void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem == 0) { + mem = dlmalloc(bytes); - } - else if (bytes >= MAX_REQUEST) { + + } else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } -#ifdef REALLOC_ZERO_BYTES_FREES + + #ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { + dlfree(oldmem); + } -#endif /* REALLOC_ZERO_BYTES_FREES */ + + #endif /* REALLOC_ZERO_BYTES_FREES */ else { - size_t nb = request2size(bytes); + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! 
FOOTERS + #if !FOOTERS mstate m = gm; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { + check_inuse_chunk(m, newp); mem = chunk2mem(newp); - } - else { + + } else { + mem = internal_malloc(m, bytes); if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); internal_free(m, oldmem); + } + } + } + } + return mem; + } -void* dlrealloc_in_place(void* oldmem, size_t bytes) { - void* mem = 0; +void *dlrealloc_in_place(void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); + + } else { + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = gm; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { + check_inuse_chunk(m, newp); mem = oldmem; + } + } + } + } + return mem; + } -void* dlmemalign(size_t alignment, size_t bytes) { - if (alignment <= MALLOC_ALIGNMENT) { - return dlmalloc(bytes); - } +void *dlmemalign(size_t alignment, size_t bytes) { + + if (alignment <= MALLOC_ALIGNMENT) { return dlmalloc(bytes); } return internal_memalign(gm, alignment, bytes); + } -int dlposix_memalign(void** pp, size_t alignment, size_t bytes) { - void* mem = 0; +int dlposix_memalign(void **pp, size_t alignment, size_t bytes) { + + void *mem = 0; if (alignment == MALLOC_ALIGNMENT) mem = dlmalloc(bytes); else { - size_t d = alignment / sizeof(void*); - size_t r = alignment % sizeof(void*); - if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0) + + size_t d = alignment / sizeof(void *); + size_t r = alignment % sizeof(void *); + if (r != 0 || d == 0 || (d & (d - SIZE_T_ONE)) != 0) return EINVAL; else if (bytes <= MAX_REQUEST - alignment) { - if (alignment < MIN_CHUNK_SIZE) - alignment = MIN_CHUNK_SIZE; + + if (alignment < MIN_CHUNK_SIZE) alignment = MIN_CHUNK_SIZE; mem = internal_memalign(gm, alignment, bytes); + } + } + if (mem == 0) return ENOMEM; else { + *pp = mem; return 0; + } + } -void* dlvalloc(size_t bytes) { +void *dlvalloc(size_t bytes) { + size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; return dlmemalign(pagesz, bytes); + } -void* dlpvalloc(size_t bytes) { +void *dlpvalloc(size_t bytes) { + size_t pagesz; ensure_initialization(); pagesz = mparams.page_size; - return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); + return dlmemalign(pagesz, + (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); + } -void** dlindependent_calloc(size_t n_elements, size_t elem_size, - void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ +void **dlindependent_calloc(size_t n_elements, size_t elem_size, + void *chunks[]) { + + size_t sz = elem_size; /* serves as 1-element array */ return ialloc(gm, n_elements, &sz, 3, chunks); + } -void** dlindependent_comalloc(size_t n_elements, size_t sizes[], - void* chunks[]) { +void **dlindependent_comalloc(size_t n_elements, size_t 
sizes[], + void *chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); + } -size_t dlbulk_free(void* array[], size_t nelem) { +size_t dlbulk_free(void *array[], size_t nelem) { + return internal_bulk_free(gm, array, nelem); + } -#if MALLOC_INSPECT_ALL -void dlmalloc_inspect_all(void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + #if MALLOC_INSPECT_ALL +void dlmalloc_inspect_all(void (*handler)(void *start, void *end, + size_t used_bytes, + void * callback_arg), + void *arg) { + ensure_initialization(); if (!PREACTION(gm)) { + internal_inspect_all(gm, handler, arg); POSTACTION(gm); + } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ int dlmalloc_trim(size_t pad) { + int result = 0; ensure_initialization(); if (!PREACTION(gm)) { + result = sys_trim(gm, pad); POSTACTION(gm); + } + return result; + } size_t dlmalloc_footprint(void) { + return gm->footprint; + } size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; + } size_t dlmalloc_footprint_limit(void) { + size_t maf = gm->footprint_limit; return maf == 0 ? MAX_SIZE_T : maf; + } size_t dlmalloc_set_footprint_limit(size_t bytes) { - size_t result; /* invert sense of 0 */ - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ + + size_t result; /* invert sense of 0 */ + if (bytes == 0) result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); return gm->footprint_limit = result; + } -#if !NO_MALLINFO + #if !NO_MALLINFO struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); + } -#endif /* NO_MALLINFO */ -#if !NO_MALLOC_STATS + #endif /* NO_MALLINFO */ + + #if !NO_MALLOC_STATS void dlmalloc_stats() { + internal_malloc_stats(gm); + } -#endif /* NO_MALLOC_STATS */ + + #endif /* NO_MALLOC_STATS */ int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); + } -size_t dlmalloc_usable_size(void* mem) { +size_t dlmalloc_usable_size(void *mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); + if (is_inuse(p)) return chunksize(p) - overhead_for(p); + } + return 0; + } -#endif /* !ONLY_MSPACES */ + #endif /* !ONLY_MSPACES */ /* ----------------------------- user mspaces ---------------------------- */ -#if MSPACES + #if MSPACES + +static mstate init_user_mstate(char *tbase, size_t tsize) { -static mstate init_user_mstate(char* tbase, size_t tsize) { - size_t msize = pad_request(sizeof(struct malloc_state)); + size_t msize = pad_request(sizeof(struct malloc_state)); mchunkptr mn; mchunkptr msp = align_as_chunk(tbase); - mstate m = (mstate)(chunk2mem(msp)); + mstate m = (mstate)(chunk2mem(msp)); __builtin_memset(m, 0, msize); (void)INITIAL_LOCK(&m->mutex); - msp->head = (msize|INUSE_BITS); + msp->head = (msize | INUSE_BITS); m->seg.base = m->least_addr = tbase; m->seg.size = m->footprint = m->max_footprint = tsize; m->magic = mparams.magic; @@ -5465,82 +6231,111 @@ static mstate init_user_mstate(char* tbase, size_t tsize) { disable_contiguous(m); init_bins(m); mn = next_chunk(mem2chunk(m)); - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE); check_top_chunk(m, m->top); return m; + } mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct 
malloc_state)); - if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - size_t rs = ((capacity == 0)? mparams.granularity : - (capacity + TOP_FOOT_SIZE + msize)); + if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { + + size_t rs = ((capacity == 0) ? mparams.granularity + : (capacity + TOP_FOOT_SIZE + msize)); size_t tsize = granularity_align(rs); - char* tbase = (char*)(CALL_MMAP(tsize)); + char * tbase = (char *)(CALL_MMAP(tsize)); if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); m->seg.sflags = USE_MMAP_BIT; set_lock(m, locked); + } + } + return (mspace)m; + } -mspace create_mspace_with_base(void* base, size_t capacity, int locked) { +mspace create_mspace_with_base(void *base, size_t capacity, int locked) { + mstate m = 0; size_t msize; ensure_initialization(); msize = pad_request(sizeof(struct malloc_state)); if (capacity > msize + TOP_FOOT_SIZE && - capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { - m = init_user_mstate((char*)base, capacity); + capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { + + m = init_user_mstate((char *)base, capacity); m->seg.sflags = EXTERN_BIT; set_lock(m, locked); + } + return (mspace)m; + } int mspace_track_large_chunks(mspace msp, int enable) { - int ret = 0; + + int ret = 0; mstate ms = (mstate)msp; if (!PREACTION(ms)) { - if (!use_mmap(ms)) { - ret = 1; - } + + if (!use_mmap(ms)) { ret = 1; } if (!enable) { + enable_mmap(ms); + } else { + disable_mmap(ms); + } + POSTACTION(ms); + } + return ret; + } size_t destroy_mspace(mspace msp) { + size_t freed = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; - (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ + (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ while (sp != 0) { - char* base = sp->base; + + char * base = sp->base; size_t size = sp->size; flag_t flag = sp->sflags; - (void)base; /* placate people compiling -Wunused-variable */ + (void)base; /* placate people compiling -Wunused-variable */ sp = sp->next; if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && CALL_MUNMAP(base, size) == 0) freed += size; + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return freed; + } /* @@ -5548,25 +6343,31 @@ size_t destroy_mspace(mspace msp) { versions. This is not so nice but better than the alternatives. */ -void* mspace_malloc(mspace msp, size_t bytes) { +void *mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + if (!PREACTION(ms)) { - void* mem; + + void * mem; size_t nb; if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; binmap_t smallbits; - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); idx = small_index(nb); smallbits = ms->smallmap >> idx; - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ mchunkptr b, p; - idx += ~smallbits & 1; /* Uses next bin if idx empty */ + idx += ~smallbits & 1; /* Uses next bin if idx empty */ b = smallbin_at(ms, idx); p = b->fd; assert(chunksize(p) == small_index2size(idx)); @@ -5575,15 +6376,17 @@ void* mspace_malloc(mspace msp, size_t bytes) { mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } else if (nb > ms->dvsize) { - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ mchunkptr b, p, r; - size_t rsize; - bindex_t i; - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); - binmap_t leastbit = least_bit(leftbits); + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); compute_bit2idx(leastbit, i); b = smallbin_at(ms, i); p = b->fd; @@ -5594,54 +6397,71 @@ void* mspace_malloc(mspace msp, size_t bytes) { if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) set_inuse_and_pinuse(ms, p, small_index2size(i)); else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); r = chunk_plus_offset(p, nb); set_size_and_pinuse_of_free_chunk(r, rsize); replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); goto postaction; + } + } - } - else if (bytes >= MAX_REQUEST) + + } else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ else { + nb = pad_request(bytes); if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); goto postaction; + } + } if (nb <= ms->dvsize) { - size_t rsize = ms->dvsize - nb; + + size_t rsize = ms->dvsize - nb; mchunkptr p = ms->dv; - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ mchunkptr r = ms->dv = chunk_plus_offset(p, nb); ms->dvsize = rsize; set_size_and_pinuse_of_free_chunk(r, rsize); set_size_and_pinuse_of_inuse_chunk(ms, p, nb); - } - else { /* exhaust dv */ + + } else { /* exhaust dv */ + size_t dvs = ms->dvsize; ms->dvsize = 0; ms->dv = 0; set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); check_malloced_chunk(ms, mem, nb); goto postaction; + } - else if (nb < ms->topsize) { /* Split top */ - size_t rsize = ms->topsize -= nb; + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; mchunkptr p = ms->top; mchunkptr r = ms->top = chunk_plus_offset(p, nb); r->head = rsize | PINUSE_BIT; @@ -5650,6 +6470,7 @@ void* mspace_malloc(mspace msp, size_t bytes) { check_top_chunk(ms, ms->top); check_malloced_chunk(ms, mem, nb); goto postaction; + } mem = sys_alloc(ms, nb); @@ -5657,372 +6478,519 @@ void* mspace_malloc(mspace msp, size_t bytes) { postaction: POSTACTION(ms); return mem; + } return 0; + } -void mspace_free(mspace msp, void* mem) { +void mspace_free(mspace msp, void *mem) { + if (mem != 0) { - mchunkptr p = mem2chunk(mem); -#if FOOTERS + + mchunkptr p = mem2chunk(mem); + #if FOOTERS mstate fm = get_mstate_for(p); - (void)msp; /* placate people compiling -Wunused */ -#else /* FOOTERS */ + (void)msp; /* placate people compiling -Wunused */ + #else /* FOOTERS */ mstate fm = (mstate)msp; -#endif /* FOOTERS */ + #endif /* FOOTERS */ if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { - size_t psize = 
chunksize(p); + + size_t psize = chunksize(p); mchunkptr next = chunk_plus_offset(p, psize); if (!pinuse(p)) { + size_t prevsize = p->prev_foot; if (is_mmapped(p)) { + psize += prevsize + MMAP_FOOT_PAD; - if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + if (CALL_MUNMAP((char *)p - prevsize, psize) == 0) fm->footprint -= psize; goto postaction; - } - else { + + } else { + mchunkptr prev = chunk_minus_offset(p, prevsize); psize += prevsize; p = prev; - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); - } - else if ((next->head & INUSE_BITS) == INUSE_BITS) { + + } else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; set_free_with_pinuse(p, psize, next); goto postaction; + } - } - else + + } else + goto erroraction; + } + } if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { - if (!cinuse(next)) { /* consolidate forward */ + + if (!cinuse(next)) { /* consolidate forward */ if (next == fm->top) { + size_t tsize = fm->topsize += psize; fm->top = p; p->head = tsize | PINUSE_BIT; if (p == fm->dv) { + fm->dv = 0; fm->dvsize = 0; + } - if (should_trim(fm, tsize)) - sys_trim(fm, 0); + + if (should_trim(fm, tsize)) sys_trim(fm, 0); goto postaction; - } - else if (next == fm->dv) { + + } else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; fm->dv = p; set_size_and_pinuse_of_free_chunk(p, dsize); goto postaction; - } - else { + + } else { + size_t nsize = chunksize(next); psize += nsize; unlink_chunk(fm, next, nsize); set_size_and_pinuse_of_free_chunk(p, psize); if (p == fm->dv) { + fm->dvsize = psize; goto postaction; + } + } - } - else + + } else + set_free_with_pinuse(p, psize, next); if (is_small(psize)) { + insert_small_chunk(fm, p, psize); check_free_chunk(fm, p); - } - else { + + } else { + tchunkptr tp = (tchunkptr)p; insert_large_chunk(fm, tp, psize); check_free_chunk(fm, p); - if (--fm->release_checks == 0) - release_unused_segments(fm); + if (--fm->release_checks == 0) release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: USAGE_ERROR_ACTION(fm, p); postaction: POSTACTION(fm); + } + } + } -void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { - void* mem; +void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + + void * mem; size_t req = 0; mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; if (((n_elements | elem_size) & ~(size_t)0xffff) && (req / n_elements != elem_size)) - req = MAX_SIZE_T; /* force downstream failure on overflow */ + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); if (mem != 0 && calloc_must_clear(mem2chunk(mem))) __builtin_memset(mem, 0, req); return mem; + } -void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; +void *mspace_realloc(mspace msp, void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem == 0) { + mem = mspace_malloc(msp, bytes); - } - else if (bytes >= MAX_REQUEST) { + + } else if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + } -#ifdef REALLOC_ZERO_BYTES_FREES + + #ifdef REALLOC_ZERO_BYTES_FREES else if (bytes == 0) { + mspace_free(msp, oldmem); + } -#endif /* REALLOC_ZERO_BYTES_FREES */ + + #endif /* REALLOC_ZERO_BYTES_FREES */ else { - size_t nb = request2size(bytes); + + size_t nb = request2size(bytes); mchunkptr oldp = 
mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = (mstate)msp; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); POSTACTION(m); if (newp != 0) { + check_inuse_chunk(m, newp); mem = chunk2mem(newp); - } - else { + + } else { + mem = mspace_malloc(m, bytes); if (mem != 0) { + size_t oc = chunksize(oldp) - overhead_for(oldp); - __builtin_memcpy(mem, oldmem, (oc < bytes)? oc : bytes); + __builtin_memcpy(mem, oldmem, (oc < bytes) ? oc : bytes); mspace_free(m, oldmem); + } + } + } + } + return mem; + } -void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) { - void* mem = 0; +void *mspace_realloc_in_place(mspace msp, void *oldmem, size_t bytes) { + + void *mem = 0; if (oldmem != 0) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; - } - else { - size_t nb = request2size(bytes); + + } else { + + size_t nb = request2size(bytes); mchunkptr oldp = mem2chunk(oldmem); -#if ! FOOTERS + #if !FOOTERS mstate m = (mstate)msp; -#else /* FOOTERS */ + #else /* FOOTERS */ mstate m = get_mstate_for(oldp); - (void)msp; /* placate people compiling -Wunused */ + (void)msp; /* placate people compiling -Wunused */ if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); return 0; + } -#endif /* FOOTERS */ + + #endif /* FOOTERS */ if (!PREACTION(m)) { + mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); POSTACTION(m); if (newp == oldp) { + check_inuse_chunk(m, newp); mem = oldmem; + } + } + } + } + return mem; + } -void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { +void *mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } - if (alignment <= MALLOC_ALIGNMENT) - return mspace_malloc(msp, bytes); + + if (alignment <= MALLOC_ALIGNMENT) return mspace_malloc(msp, bytes); return internal_memalign(ms, alignment, bytes); + } -void** mspace_independent_calloc(mspace msp, size_t n_elements, - size_t elem_size, void* chunks[]) { - size_t sz = elem_size; /* serves as 1-element array */ +void **mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void *chunks[]) { + + size_t sz = elem_size; /* serves as 1-element array */ mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); + } -void** mspace_independent_comalloc(mspace msp, size_t n_elements, - size_t sizes[], void* chunks[]) { +void **mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void *chunks[]) { + mstate ms = (mstate)msp; if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); + + USAGE_ERROR_ACTION(ms, ms); return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); + } -size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) { +size_t mspace_bulk_free(mspace msp, void *array[], size_t nelem) { + return internal_bulk_free((mstate)msp, array, nelem); + } -#if MALLOC_INSPECT_ALL + #if MALLOC_INSPECT_ALL void mspace_inspect_all(mspace msp, - void(*handler)(void *start, - void *end, - size_t used_bytes, - void* callback_arg), - void* arg) { + void (*handler)(void *start, void *end, + size_t used_bytes, void *callback_arg), + void *arg) { + mstate ms = (mstate)msp; if (ok_magic(ms)) { + if (!PREACTION(ms)) { + 
internal_inspect_all(ms, handler, arg); POSTACTION(ms); + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + } -#endif /* MALLOC_INSPECT_ALL */ + + #endif /* MALLOC_INSPECT_ALL */ int mspace_trim(mspace msp, size_t pad) { - int result = 0; + + int result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); POSTACTION(ms); + } + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } -#if !NO_MALLOC_STATS + #if !NO_MALLOC_STATS void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; if (ok_magic(ms)) { + internal_malloc_stats(ms); + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + } -#endif /* NO_MALLOC_STATS */ + + #endif /* NO_MALLOC_STATS */ size_t mspace_footprint(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + result = ms->footprint; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_max_footprint(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + result = ms->max_footprint; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_footprint_limit(mspace msp) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { + size_t maf = ms->footprint_limit; result = (maf == 0) ? MAX_SIZE_T : maf; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } size_t mspace_set_footprint_limit(mspace msp, size_t bytes) { + size_t result = 0; mstate ms = (mstate)msp; if (ok_magic(ms)) { - if (bytes == 0) - result = granularity_align(1); /* Use minimal size */ + + if (bytes == 0) result = granularity_align(1); /* Use minimal size */ if (bytes == MAX_SIZE_T) - result = 0; /* disable */ + result = 0; /* disable */ else result = granularity_align(bytes); ms->footprint_limit = result; + + } else { + + USAGE_ERROR_ACTION(ms, ms); + } - else { - USAGE_ERROR_ACTION(ms,ms); - } + return result; + } -#if !NO_MALLINFO + #if !NO_MALLINFO struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; - if (!ok_magic(ms)) { - USAGE_ERROR_ACTION(ms,ms); - } + if (!ok_magic(ms)) { USAGE_ERROR_ACTION(ms, ms); } return internal_mallinfo(ms); + } -#endif /* NO_MALLINFO */ -size_t mspace_usable_size(const void* mem) { + #endif /* NO_MALLINFO */ + +size_t mspace_usable_size(const void *mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); - if (is_inuse(p)) - return chunksize(p) - overhead_for(p); + if (is_inuse(p)) return chunksize(p) - overhead_for(p); + } + return 0; + } int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); -} -#endif /* MSPACES */ +} + #endif /* MSPACES */ /* -------------------- Alternative MORECORE functions ------------------- */ @@ -6067,35 +7035,48 @@ int mspace_mallopt(int param_number, int value) { void *osMoreCore(int size) { + void *ptr = 0; static void *sbrk_top = 0; if (size > 0) { + if (size < MINIMUM_MORECORE_SIZE) size = MINIMUM_MORECORE_SIZE; if (CurrentExecutionLevel() == kTaskLevel) ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); if (ptr == 0) { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup our_os_pools[next_os_pool] = ptr; next_os_pool++; ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); sbrk_top = (char *) ptr 
+ size; return ptr; + } + else if (size < 0) { + // we don't currently support shrink behavior return (void *) MFAIL; + } + else { + return sbrk_top; + } + } // cleanup any allocated memory pools @@ -6103,19 +7084,22 @@ int mspace_mallopt(int param_number, int value) { void osCleanupMem(void) { + void **ptr; for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) if (*ptr) { + PoolDeallocate(*ptr); *ptr = 0; + } + } */ - /* ----------------------------------------------------------------------- History: v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea @@ -6335,4 +7319,5 @@ History: */ -#endif // __GLIBC__ +#endif // __GLIBC__ + diff --git a/qemu_mode/libqasan/hooks.c b/qemu_mode/libqasan/hooks.c index 3bb4cc42..405dddae 100644 --- a/qemu_mode/libqasan/hooks.c +++ b/qemu_mode/libqasan/hooks.c @@ -174,7 +174,9 @@ char *fgets(char *s, int size, FILE *stream) { QASAN_DEBUG("%14p: fgets(%p, %d, %p)\n", rtv, s, size, stream); QASAN_STORE(s, size); +#ifndef __ANDROID__ QASAN_LOAD(stream, sizeof(FILE)); +#endif char *r = __lq_libc_fgets(s, size, stream); QASAN_DEBUG("\t\t = %p\n", r); diff --git a/qemu_mode/libqasan/libqasan.c b/qemu_mode/libqasan/libqasan.c index 11b50270..9fc4ef7a 100644 --- a/qemu_mode/libqasan/libqasan.c +++ b/qemu_mode/libqasan/libqasan.c @@ -72,7 +72,7 @@ void __libqasan_print_maps(void) { QASAN_LOG("QEMU-AddressSanitizer (v%s)\n", QASAN_VERSTR); QASAN_LOG( - "Copyright (C) 2019-2020 Andrea Fioraldi \n"); + "Copyright (C) 2019-2021 Andrea Fioraldi \n"); QASAN_LOG("\n"); if (__qasan_log) __libqasan_print_maps(); diff --git a/qemu_mode/libqasan/malloc.c b/qemu_mode/libqasan/malloc.c index 54c1096a..5a2d2a0c 100644 --- a/qemu_mode/libqasan/malloc.c +++ b/qemu_mode/libqasan/malloc.c @@ -51,9 +51,9 @@ typedef struct { struct chunk_begin { size_t requested_size; - void* aligned_orig; // NULL if not aligned - struct chunk_begin* next; - struct chunk_begin* prev; + void * aligned_orig; // NULL if not aligned + struct chunk_begin *next; + struct chunk_begin *prev; char redzone[REDZONE_SIZE]; }; @@ -68,45 +68,45 @@ struct chunk_struct { #ifdef __GLIBC__ -void* (*__lq_libc_malloc)(size_t); -void (*__lq_libc_free)(void*); -#define backend_malloc __lq_libc_malloc -#define backend_free __lq_libc_free +void *(*__lq_libc_malloc)(size_t); +void (*__lq_libc_free)(void *); + #define backend_malloc __lq_libc_malloc + #define backend_free __lq_libc_free -#define TMP_ZONE_SIZE 4096 + #define TMP_ZONE_SIZE 4096 static int __tmp_alloc_zone_idx; static unsigned char __tmp_alloc_zone[TMP_ZONE_SIZE]; #else // From dlmalloc.c -void* dlmalloc(size_t); -void dlfree(void*); -#define backend_malloc dlmalloc -#define backend_free dlfree +void * dlmalloc(size_t); +void dlfree(void *); + #define backend_malloc dlmalloc + #define backend_free dlfree #endif int __libqasan_malloc_initialized; -static struct chunk_begin* quarantine_top; -static struct chunk_begin* quarantine_end; +static struct chunk_begin *quarantine_top; +static struct chunk_begin *quarantine_end; static size_t quarantine_bytes; #ifdef __BIONIC__ -static pthread_mutex_t quarantine_lock; -#define LOCK_TRY pthread_mutex_trylock -#define LOCK_INIT pthread_mutex_init -#define LOCK_UNLOCK pthread_mutex_unlock +static pthread_mutex_t quarantine_lock; + #define LOCK_TRY pthread_mutex_trylock + #define LOCK_INIT pthread_mutex_init + #define LOCK_UNLOCK pthread_mutex_unlock #else -static pthread_spinlock_t quarantine_lock; -#define LOCK_TRY pthread_spin_trylock -#define LOCK_INIT pthread_spin_init -#define LOCK_UNLOCK pthread_spin_unlock +static 
pthread_spinlock_t quarantine_lock; + #define LOCK_TRY pthread_spin_trylock + #define LOCK_INIT pthread_spin_init + #define LOCK_UNLOCK pthread_spin_unlock #endif // need qasan disabled -static int quanratine_push(struct chunk_begin* ck) { +static int quanratine_push(struct chunk_begin *ck) { if (ck->requested_size >= QUARANTINE_MAX_BYTES) return 0; @@ -114,7 +114,7 @@ static int quanratine_push(struct chunk_begin* ck) { while (ck->requested_size + quarantine_bytes >= QUARANTINE_MAX_BYTES) { - struct chunk_begin* tmp = quarantine_end; + struct chunk_begin *tmp = quarantine_end; quarantine_end = tmp->prev; quarantine_bytes -= tmp->requested_size; @@ -154,23 +154,23 @@ void __libqasan_init_malloc(void) { } -size_t __libqasan_malloc_usable_size(void* ptr) { +size_t __libqasan_malloc_usable_size(void *ptr) { - char* p = ptr; + char *p = ptr; p -= sizeof(struct chunk_begin); - return ((struct chunk_begin*)p)->requested_size; + return ((struct chunk_begin *)p)->requested_size; } -void* __libqasan_malloc(size_t size) { +void *__libqasan_malloc(size_t size) { if (!__libqasan_malloc_initialized) { - + __libqasan_init_malloc(); #ifdef __GLIBC__ - void* r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; + void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; if (size & (ALLOC_ALIGN_SIZE - 1)) __tmp_alloc_zone_idx += @@ -185,7 +185,7 @@ void* __libqasan_malloc(size_t size) { int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread - struct chunk_begin* p = backend_malloc(sizeof(struct chunk_struct) + size); + struct chunk_begin *p = backend_malloc(sizeof(struct chunk_struct) + size); QASAN_SWAP(state); @@ -197,14 +197,14 @@ void* __libqasan_malloc(size_t size) { p->aligned_orig = NULL; p->next = p->prev = NULL; - QASAN_ALLOC(&p[1], (char*)&p[1] + size); + QASAN_ALLOC(&p[1], (char *)&p[1] + size); QASAN_POISON(p->redzone, REDZONE_SIZE, ASAN_HEAP_LEFT_RZ); if (size & (ALLOC_ALIGN_SIZE - 1)) - QASAN_POISON((char*)&p[1] + size, + QASAN_POISON((char *)&p[1] + size, (size & ~(ALLOC_ALIGN_SIZE - 1)) + 8 - size + REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); else - QASAN_POISON((char*)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); + QASAN_POISON((char *)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ); __builtin_memset(&p[1], 0xff, size); @@ -212,17 +212,17 @@ void* __libqasan_malloc(size_t size) { } -void __libqasan_free(void* ptr) { +void __libqasan_free(void *ptr) { if (!ptr) return; - + #ifdef __GLIBC__ - if (ptr >= (void*)__tmp_alloc_zone && - ptr < ((void*)__tmp_alloc_zone + TMP_ZONE_SIZE)) + if (ptr >= (void *)__tmp_alloc_zone && + ptr < ((void *)__tmp_alloc_zone + TMP_ZONE_SIZE)) return; #endif - struct chunk_begin* p = ptr; + struct chunk_begin *p = ptr; p -= 1; size_t n = p->requested_size; @@ -249,21 +249,22 @@ void __libqasan_free(void* ptr) { } -void* __libqasan_calloc(size_t nmemb, size_t size) { +void *__libqasan_calloc(size_t nmemb, size_t size) { size *= nmemb; #ifdef __GLIBC__ if (!__libqasan_malloc_initialized) { - void* r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; + void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx]; __tmp_alloc_zone_idx += size; return r; } + #endif - char* p = __libqasan_malloc(size); + char *p = __libqasan_malloc(size); if (!p) return NULL; __builtin_memset(p, 0, size); @@ -272,14 +273,14 @@ void* __libqasan_calloc(size_t nmemb, size_t size) { } -void* __libqasan_realloc(void* ptr, size_t size) { +void *__libqasan_realloc(void *ptr, size_t size) { - char* p = __libqasan_malloc(size); + char *p = __libqasan_malloc(size); if (!p) return NULL; if (!ptr) return p; - size_t n = 
((struct chunk_begin*)ptr)[-1].requested_size; + size_t n = ((struct chunk_begin *)ptr)[-1].requested_size; if (size < n) n = size; __builtin_memcpy(p, ptr, n); @@ -289,9 +290,9 @@ void* __libqasan_realloc(void* ptr, size_t size) { } -int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { +int __libqasan_posix_memalign(void **ptr, size_t align, size_t len) { - if ((align % 2) || (align % sizeof(void*))) return EINVAL; + if ((align % 2) || (align % sizeof(void *))) return EINVAL; if (len == 0) { *ptr = NULL; @@ -305,7 +306,7 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread - char* orig = backend_malloc(sizeof(struct chunk_struct) + size); + char *orig = backend_malloc(sizeof(struct chunk_struct) + size); QASAN_SWAP(state); @@ -313,10 +314,10 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { QASAN_UNPOISON(orig, sizeof(struct chunk_struct) + size); - char* data = orig + sizeof(struct chunk_begin); + char *data = orig + sizeof(struct chunk_begin); data += align - ((uintptr_t)data % align); - struct chunk_begin* p = (struct chunk_begin*)data - 1; + struct chunk_begin *p = (struct chunk_begin *)data - 1; p->requested_size = len; p->aligned_orig = orig; @@ -339,9 +340,9 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) { } -void* __libqasan_memalign(size_t align, size_t len) { +void *__libqasan_memalign(size_t align, size_t len) { - void* ret = NULL; + void *ret = NULL; __libqasan_posix_memalign(&ret, align, len); @@ -349,9 +350,9 @@ void* __libqasan_memalign(size_t align, size_t len) { } -void* __libqasan_aligned_alloc(size_t align, size_t len) { +void *__libqasan_aligned_alloc(size_t align, size_t len) { - void* ret = NULL; + void *ret = NULL; if ((len % align)) return NULL; diff --git a/qemu_mode/libqasan/string.c b/qemu_mode/libqasan/string.c index 4be01279..c850463b 100644 --- a/qemu_mode/libqasan/string.c +++ b/qemu_mode/libqasan/string.c @@ -271,7 +271,7 @@ void *__libqasan_memmem(const void *haystack, size_t haystack_len, } - } while (++h <= end); + } while (h++ <= end); return 0; diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl index 47722f64..9a258d5b 160000 --- a/qemu_mode/qemuafl +++ b/qemu_mode/qemuafl @@ -1 +1 @@ -Subproject commit 47722f64e4c1662bad97dc25f3e4cc63959ff5f3 +Subproject commit 9a258d5b7a38c045a6e385fcfcf80a746a60e557 diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 7844eedf..3ce4148d 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -1303,7 +1303,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, } -#endif /* CMPLOG_SOLVE_ARITHMETIC */ +#endif /* CMPLOG_SOLVE_ARITHMETIC */ return 0; @@ -2670,3 +2670,4 @@ exit_its: return r; } + -- cgit 1.4.1 From d827bc458061bc4320e25a27fd77cdbc4bba47ba Mon Sep 17 00:00:00 2001 From: aflpp Date: Sat, 13 Feb 2021 09:12:36 +0100 Subject: dont break on llvm 13 --- GNUmakefile.llvm | 7 ++++++- qemu_mode/qemuafl | 2 +- unicorn_mode/unicornafl | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) (limited to 'qemu_mode/qemuafl') diff --git a/GNUmakefile.llvm b/GNUmakefile.llvm index d3691658..c23af200 100644 --- a/GNUmakefile.llvm +++ b/GNUmakefile.llvm @@ -43,7 +43,8 @@ endif LLVMVER = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/git//' | sed 's/svn//' ) LLVM_MAJOR = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/\..*//' ) LLVM_MINOR = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/.*\.//' | sed 
's/git//' | sed 's/svn//' | sed 's/ .*//' ) -LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^3\.[0-3]|^1[3-9]' && echo 1 || echo 0 ) +LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^[0-2]\.|3\.[0-3]' && echo 1 || echo 0 ) +LLVM_TOO_NEW = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[3-9]' && echo 1 || echo 0 ) LLVM_NEW_API = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[0-9]' && echo 1 || echo 0 ) LLVM_10_OK = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[1-9]|^10\.[1-9]|^10\.0.[1-9]' && echo 1 || echo 0 ) LLVM_HAVE_LTO = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[1-9]' && echo 1 || echo 0 ) @@ -61,6 +62,10 @@ ifeq "$(LLVM_UNSUPPORTED)" "1" $(warning llvm_mode only supports llvm versions 3.4 up to 12) endif +ifeq "$(LLVM_TOO_NEW)" "1" + $(warning you are using an in-development llvm version - this might break llvm_mode!) +endif + LLVM_TOO_OLD=1 ifeq "$(LLVM_MAJOR)" "9" diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl index 9a258d5b..246c1777 160000 --- a/qemu_mode/qemuafl +++ b/qemu_mode/qemuafl @@ -1 +1 @@ -Subproject commit 9a258d5b7a38c045a6e385fcfcf80a746a60e557 +Subproject commit 246c1777f453a280cbafc57f92742147ffc72818 diff --git a/unicorn_mode/unicornafl b/unicorn_mode/unicornafl index fb2fc9f2..80d31ef3 160000 --- a/unicorn_mode/unicornafl +++ b/unicorn_mode/unicornafl @@ -1 +1 @@ -Subproject commit fb2fc9f25df32f17f6b6b859e4dbd70f9a857e0c +Subproject commit 80d31ef367f7a1a75fc48e08e129d10f2ffa0498 -- cgit 1.4.1 From fe9da707058b3b2cb1812c3d635d3bb43fe33d13 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sun, 14 Feb 2021 18:43:43 +0100 Subject: disabling march=native due problems on intel platforms --- Dockerfile | 4 ++-- GNUmakefile | 18 +++++++++--------- qemu_mode/qemuafl | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'qemu_mode/qemuafl') diff --git a/Dockerfile b/Dockerfile index 8779fee5..0c4556ca 100644 --- a/Dockerfile +++ b/Dockerfile @@ -55,9 +55,9 @@ RUN cd /afl-cov && make install && cd .. COPY . 
/AFLplusplus WORKDIR /AFLplusplus -RUN export REAL_CXX=g++-10 && export CC=gcc-10 && \ +RUN export export CC=gcc-10 && \ export CXX=g++-10 && make clean && \ - make distrib CFLAGS="-O3 -funroll-loops -D_FORTIFY_SOURCE=2" && make install && make clean + make distrib && make install && make clean RUN echo 'alias joe="jupp --wordwrap"' >> ~/.bashrc RUN echo 'export PS1="[afl++]$PS1"' >> ~/.bashrc diff --git a/GNUmakefile b/GNUmakefile index 4ba5d3b3..45fa3ba1 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -77,17 +77,17 @@ ifeq "$(shell echo 'int main() {return 0; }' | $(CC) -fno-move-loop-invariants - SPECIAL_PERFORMANCE += -fno-move-loop-invariants -fdisable-tree-cunrolli endif -ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -march=native -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" - ifndef SOURCE_DATE_EPOCH - HAVE_MARCHNATIVE = 1 - CFLAGS_OPT += -march=native - endif -endif +#ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -march=native -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" +# ifndef SOURCE_DATE_EPOCH +# HAVE_MARCHNATIVE = 1 +# CFLAGS_OPT += -march=native +# endif +#endif ifneq "$(shell uname)" "Darwin" - ifeq "$(HAVE_MARCHNATIVE)" "1" - SPECIAL_PERFORMANCE += -march=native - endif + #ifeq "$(HAVE_MARCHNATIVE)" "1" + # SPECIAL_PERFORMANCE += -march=native + #endif # OS X does not like _FORTIFY_SOURCE=2 ifndef DEBUG CFLAGS_OPT += -D_FORTIFY_SOURCE=2 diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl index 246c1777..9a258d5b 160000 --- a/qemu_mode/qemuafl +++ b/qemu_mode/qemuafl @@ -1 +1 @@ -Subproject commit 246c1777f453a280cbafc57f92742147ffc72818 +Subproject commit 9a258d5b7a38c045a6e385fcfcf80a746a60e557 -- cgit 1.4.1 From ffc1fc655f24aa6532a6feadf805a852aeb644e8 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Mon, 15 Feb 2021 10:27:44 +0100 Subject: qenuafl --- qemu_mode/QEMUAFL_VERSION | 2 +- qemu_mode/qemuafl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'qemu_mode/qemuafl') diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION index e73a9588..b0d4fd45 100644 --- a/qemu_mode/QEMUAFL_VERSION +++ b/qemu_mode/QEMUAFL_VERSION @@ -1 +1 @@ -9a258d5b7a +213f3b27dd diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl index 9a258d5b..213f3b27 160000 --- a/qemu_mode/qemuafl +++ b/qemu_mode/qemuafl @@ -1 +1 @@ -Subproject commit 9a258d5b7a38c045a6e385fcfcf80a746a60e557 +Subproject commit 213f3b27dd099ef352181c48cd75c0f20a73e3f0 -- cgit 1.4.1 From dd2fd8027454acaa5c12beea6f7b721fc8794715 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 15 Feb 2021 12:40:10 +0100 Subject: doc updates --- README.md | 5 +++-- docs/env_variables.md | 1 + qemu_mode/libqasan/README.md | 23 ++++++++++++++++------- qemu_mode/qemuafl | 2 +- 4 files changed, 21 insertions(+), 10 deletions(-) (limited to 'qemu_mode/qemuafl') diff --git a/README.md b/README.md index 0778026c..00095390 100644 --- a/README.md +++ b/README.md @@ -751,6 +751,8 @@ campaigns as these are much shorter runnings. * for CMPLOG targets, 60% for `-l 2`, 40% for `-l 3` 4. Do *not* run any `-M` modes, just running `-S` modes is better for CI fuzzing. + `-M` enables deterministic fuzzing, old queue handling etc. which is good for + a fuzzing campaign but not good for short CI runs. 
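As a rough sketch of the secondary-only CI setup recommended above (binary names, corpus paths, and the time budget are illustrative placeholders, not taken from this repository):

```sh
# Two short-lived -S instances for a CI run; no -M instance is started.
# -V caps the run time in seconds; -c/-l apply only if a CMPLOG binary was built.
afl-fuzz -V 1800 -S ci01 -i seeds/ -o out/ -- ./target @@ &
afl-fuzz -V 1800 -S ci02 -c ./target.cmplog -l 2 -i seeds/ -o out/ -- ./target @@
```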
## Fuzzing binary-only targets @@ -788,8 +790,7 @@ If [afl-dyninst](https://github.com/vanhauser-thc/afl-dyninst) works for your binary, then you can use afl-fuzz normally and it will have twice the speed compared to qemu_mode (but slower than persistent mode). Note that several other binary rewriters exist, all with their advantages and -caveats. As rewriting a binary is much faster than Qemu this is a highly -recommended approach! +caveats. ### Unicorn diff --git a/docs/env_variables.md b/docs/env_variables.md index f7745247..f6ed12d0 100644 --- a/docs/env_variables.md +++ b/docs/env_variables.md @@ -451,6 +451,7 @@ checks or alter some of the more exotic semantics of the tool: `banner` corresponds to the name of the fuzzer provided through `-M/-S`. `afl_version` corresponds to the currently running afl version (e.g `++3.0c`). Default (empty/non present) will add no tags to the metrics. + See [rpc_statsd.md](rpc_statsd.md) for more information. - Setting `AFL_CRASH_EXITCODE` sets the exit code afl treats as crash. For example, if `AFL_CRASH_EXITCODE='-1'` is set, each input resulting diff --git a/qemu_mode/libqasan/README.md b/qemu_mode/libqasan/README.md index 83fb2442..4a241233 100644 --- a/qemu_mode/libqasan/README.md +++ b/qemu_mode/libqasan/README.md @@ -4,16 +4,25 @@ This library is the injected runtime used by QEMU AddressSanitizer (QASan). The original repository is [here](https://github.com/andreafioraldi/qasan). -The version embedded in qemuafl is an updated version of just the usermode part and this runtime is injected via LD_PRELOAD (so works just for dynamically linked binaries). +The version embedded in qemuafl is an updated version of just the usermode part +and this runtime is injected via LD_PRELOAD (so works just for dynamically +linked binaries). -The usage is super simple, just set the env var `AFL_USE_QASAN=1` when fuzzing in qemu mode (-Q). afl-fuzz will automatically set AFL_PRELOAD to load this library and enable the QASan instrumentation in afl-qemu-trace. +The usage is super simple, just set the env var `AFL_USE_QASAN=1` when fuzzing +in qemu mode (-Q). afl-fuzz will automatically set AFL_PRELOAD to load this +library and enable the QASan instrumentation in afl-qemu-trace. -For debugging purposes, we still suggest to run the original QASan as the stacktrace support for ARM (just a debug feature, it does not affect the bug finding capabilities during fuzzing) is WIP. +For debugging purposes, we still suggest to run the original QASan as the +stacktrace support for ARM (just a debug feature, it does not affect the bug +finding capabilities during fuzzing) is WIP. -### When I should use QASan? +### When should I use QASan? -If your target binary is PIC x86_64, you should also give a try to [retrowrite](https://github.com/HexHive/retrowrite) for static rewriting. +If your target binary is PIC x86_64, you should also give a try to +[retrowrite](https://github.com/HexHive/retrowrite) for static rewriting. -If it fails, or if your binary is for another architecture, or you want to use persistent and snapshot mode, AFL++ QASan mode is what you want/have to use. +If it fails, or if your binary is for another architecture, or you want to use +persistent and snapshot mode, AFL++ QASan mode is what you want/have to use. -Note that the overhead of libdislocator when combined with QEMU mode is much lower but it can catch less bugs. This is a short blanket, take your choice. 
+Note that the overhead of libdislocator when combined with QEMU mode is much +lower but it can catch less bugs. This is a short blanket, take your choice. diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl index 213f3b27..9a258d5b 160000 --- a/qemu_mode/qemuafl +++ b/qemu_mode/qemuafl @@ -1 +1 @@ -Subproject commit 213f3b27dd099ef352181c48cd75c0f20a73e3f0 +Subproject commit 9a258d5b7a38c045a6e385fcfcf80a746a60e557 -- cgit 1.4.1 From 938512a6b9451000f40491b2554b5d360840cfe5 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 17 Feb 2021 09:48:04 +0100 Subject: minor fixes --- include/config.h | 4 ++-- include/envs.h | 1 + instrumentation/afl-compiler-rt.o.c | 2 +- qemu_mode/README.md | 22 ++++++++++------------ qemu_mode/qemuafl | 2 +- 5 files changed, 15 insertions(+), 16 deletions(-) (limited to 'qemu_mode/qemuafl') diff --git a/include/config.h b/include/config.h index 181285cd..9f7db04d 100644 --- a/include/config.h +++ b/include/config.h @@ -42,7 +42,7 @@ * */ -/* Enable arithmetic compare solving for both path */ +/* Enable arithmetic compare solving for both branches */ #define CMPLOG_SOLVE_ARITHMETIC /* Enable transform following (XOR/ADD/SUB manipulations, hex en/decoding) */ @@ -51,7 +51,7 @@ /* if TRANSFORM is enabled, this additionally enables base64 en/decoding */ // #define CMPLOG_SOLVE_TRANSFORM_BASE64 -/* If a redqueen pass finds more than one solve, try to combine them? */ +/* If a redqueen pass finds more than one solution, try to combine them? */ #define CMPLOG_COMBINE /* Minimum % of the corpus to perform cmplog on. Default: 20% */ diff --git a/include/envs.h b/include/envs.h index 4313e053..143979c6 100644 --- a/include/envs.h +++ b/include/envs.h @@ -131,6 +131,7 @@ static char *afl_environment_variables[] = { "AFL_QEMU_DEBUG_MAPS", "AFL_QEMU_DISABLE_CACHE", "AFL_QEMU_DRIVER_NO_HOOK", + "AFL_QEMU_FORCE_DFL", "AFL_QEMU_PERSISTENT_ADDR", "AFL_QEMU_PERSISTENT_CNT", "AFL_QEMU_PERSISTENT_GPR", diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c index 5fb715e2..dba4dc65 100644 --- a/instrumentation/afl-compiler-rt.o.c +++ b/instrumentation/afl-compiler-rt.o.c @@ -1090,7 +1090,7 @@ __attribute__((constructor(0))) void __afl_auto_first(void) { if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return; u8 *ptr; - ptr = (u8 *)malloc(2097152); + ptr = (u8 *)malloc(MAP_INITIAL_SIZE); if (ptr && (ssize_t)ptr != -1) { diff --git a/qemu_mode/README.md b/qemu_mode/README.md index 9818846d..bc4c1d2c 100644 --- a/qemu_mode/README.md +++ b/qemu_mode/README.md @@ -17,7 +17,7 @@ The idea and much of the initial implementation comes from Andrew Griffiths. The actual implementation on current QEMU (shipped as qemuafl) is from Andrea Fioraldi. Special thanks to abiondo that re-enabled TCG chaining. -## 2) How to use +## 2) How to use qemu_mode The feature is implemented with a patched QEMU. The simplest way to build it is to run ./build_qemu_support.sh. The script will download, @@ -176,7 +176,12 @@ Comparative measurements of execution speed or instrumentation coverage will be fairly meaningless if the optimization levels or instrumentation scopes don't match. -## 12) Gotchas, feedback, bugs +## 12) Other features + +With `AFL_QEMU_FORCE_DFL` you force QEMU to ignore the registered signal +handlers of the target. + +## 13) Gotchas, feedback, bugs If you need to fix up checksums or do other cleanup on mutated test cases, see utils/custom_mutators/ for a viable solution. @@ -197,19 +202,12 @@ with -march=core2, can help. 
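As a hedged usage sketch for the `AFL_QEMU_FORCE_DFL` knob introduced in section 12 above, combined with the QASan runtime described earlier in this patch (the target path is a placeholder; per the notes above, afl-fuzz sets AFL_PRELOAD for libqasan.so on its own when `AFL_USE_QASAN=1`):

```sh
# Ignore the target's own signal handlers and enable QASan in QEMU mode (-Q).
AFL_QEMU_FORCE_DFL=1 AFL_USE_QASAN=1 afl-fuzz -Q -i seeds/ -o out/ -- ./target @@
```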
From d6fe6b953758de193815ebcdd3d9ce825e3ffd27 Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Fri, 19 Feb 2021 10:49:36 +0100
Subject: qemuafl

---
 qemu_mode/QEMUAFL_VERSION | 2 +-
 qemu_mode/qemuafl         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION
index b0d4fd45..b9e8fe7e 100644
--- a/qemu_mode/QEMUAFL_VERSION
+++ b/qemu_mode/QEMUAFL_VERSION
@@ -1 +1 @@
-213f3b27dd
+3e13e0ed44

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index 213f3b27..3e13e0ed 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit 213f3b27dd099ef352181c48cd75c0f20a73e3f0
+Subproject commit 3e13e0ed4437de0ff62385420504414bd7b14a96
-- cgit 1.4.1

From d941da33aebd04d4552e9d4313b946e41234aa52 Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Fri, 19 Feb 2021 21:20:33 +0100
Subject: qemuafl

---
 qemu_mode/QEMUAFL_VERSION | 2 +-
 qemu_mode/qemuafl         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION
index b9e8fe7e..1152380c 100644
--- a/qemu_mode/QEMUAFL_VERSION
+++ b/qemu_mode/QEMUAFL_VERSION
@@ -1 +1 @@
-3e13e0ed44
+e36a30ebca

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index 3e13e0ed..e36a30eb 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit 3e13e0ed4437de0ff62385420504414bd7b14a96
+Subproject commit e36a30ebca57ca433a5d6e20b1a32975aabb761b
-- cgit 1.4.1

From 100aac4dd39012750036b2fd71eed5b21959f693 Mon Sep 17 00:00:00 2001
From: vanhauser-thc
Date: Sat, 20 Feb 2021 14:15:38 +0100
Subject: -t help

---
 qemu_mode/qemuafl | 2 +-
 src/afl-fuzz.c    | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index e36a30eb..213f3b27 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit e36a30ebca57ca433a5d6e20b1a32975aabb761b
+Subproject commit 213f3b27dd099ef352181c48cd75c0f20a73e3f0

diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index 8eb3625b..e3e9007d 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -103,7 +103,8 @@ static void usage(u8 *argv0, int more_help) {
      "  quad -- see docs/power_schedules.md\n"
      "  -f file  - location read by the fuzzed program (default: stdin "
      "or @@)\n"
-     "  -t msec  - timeout for each run (auto-scaled, 50-%u ms)\n"
+     "  -t msec  - timeout for each run (auto-scaled, 50-... ms, default %u ms)\n"
+     "             add a '+' to skip seeds that run longer.\n"
      "  -m megs  - memory limit for child process (%u MB, 0 = no limit "
      "[default])\n"
      "  -Q       - use binary-only instrumentation (QEMU mode)\n"
-- cgit 1.4.1
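The amended `-t` help text above maps to a common invocation pattern. A sketch; the timeout value, target name, and directory names are illustrative:

```sh
# Cap each run at 2000 ms; the trailing '+' makes afl-fuzz skip seeds
# that keep exceeding the timeout instead of aborting the whole run.
afl-fuzz -Q -t 2000+ -i input_corpus -o findings -- ./target @@
```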
From 79f1a44a01775ab28ad39f21f09e084fcd773c98 Mon Sep 17 00:00:00 2001
From: vanhauser-thc
Date: Sat, 27 Feb 2021 18:14:50 +0100
Subject: fix qasan search path

---
 qemu_mode/qemuafl | 2 +-
 src/afl-common.c  | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index 213f3b27..e36a30eb 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit 213f3b27dd099ef352181c48cd75c0f20a73e3f0
+Subproject commit e36a30ebca57ca433a5d6e20b1a32975aabb761b

diff --git a/src/afl-common.c b/src/afl-common.c
index 078ffb9d..c341bb97 100644
--- a/src/afl-common.c
+++ b/src/afl-common.c
@@ -372,11 +372,11 @@ u8 *get_libqasan_path(u8 *own_loc) {

   }

-  if (!access(BIN_PATH "/libqasan.so", X_OK)) {
+  if (!access(AFL_PATH "/libqasan.so", X_OK)) {

     if (cp) { ck_free(cp); }

-    return ck_strdup(BIN_PATH "/libqasan.so");
+    return ck_strdup(AFL_PATH "/libqasan.so");

   }
-- cgit 1.4.1

From ad7a7fcf075c617e09cb516da000b244be161093 Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Mon, 1 Mar 2021 15:30:55 +0100
Subject: ASan-compatible area_is_mapped()

---
 instrumentation/afl-compiler-rt.o.c | 10 +++++++++-
 qemu_mode/qemuafl                   |  2 +-
 2 files changed, 10 insertions(+), 2 deletions(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c
index ecb94cab..dab06177 100644
--- a/instrumentation/afl-compiler-rt.o.c
+++ b/instrumentation/afl-compiler-rt.o.c
@@ -34,6 +34,7 @@
 #include
 #include
+#include <sys/syscall.h>
 #ifndef __HAIKU__
 #include
 #endif

@@ -1551,15 +1552,22 @@ void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases) {

 }

+__attribute__((weak)) void *__asan_region_is_poisoned(void *beg, size_t size) {
+
+  return NULL;
+
+}
+
 // POSIX shenanigan to see if an area is mapped.
 // If it is mapped as X-only, we have a problem, so maybe we should add a check
 // to avoid calling it on .text addresses
 static int area_is_mapped(void *ptr, size_t len) {

+  if (__asan_region_is_poisoned(ptr, len) == NULL)
+    return 1;
+
   char *p = (char *)ptr;
   char *page = (char *)((uintptr_t)p & ~(sysconf(_SC_PAGE_SIZE) - 1));

-  int r = msync(page, (p - page) + len, MS_ASYNC);
+  int r = syscall(SYS_msync, page, (p - page) + len, MS_ASYNC);
   if (r < 0) return errno != ENOMEM;
   return 1;

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index e36a30eb..213f3b27 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit e36a30ebca57ca433a5d6e20b1a32975aabb761b
+Subproject commit 213f3b27dd099ef352181c48cd75c0f20a73e3f0
-- cgit 1.4.1
From 3977d50b5538e8097eb5d9329c6df5eaa147374b Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Thu, 4 Mar 2021 10:52:22 +0100
Subject: update qemuafl and remove git stash

---
 qemu_mode/QEMUAFL_VERSION       | 2 +-
 qemu_mode/build_qemu_support.sh | 2 +-
 qemu_mode/qemuafl               | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION
index 1152380c..a7f25da3 100644
--- a/qemu_mode/QEMUAFL_VERSION
+++ b/qemu_mode/QEMUAFL_VERSION
@@ -1 +1 @@
-e36a30ebca
+d1ca56b84e

diff --git a/qemu_mode/build_qemu_support.sh b/qemu_mode/build_qemu_support.sh
index 815e77d6..97a05800 100755
--- a/qemu_mode/build_qemu_support.sh
+++ b/qemu_mode/build_qemu_support.sh
@@ -132,7 +132,7 @@ echo "[+] Got qemuafl."
 cd "qemuafl" || exit 1

 echo "[*] Checking out $QEMUAFL_VERSION"
-sh -c 'git stash && git stash drop' 1>/dev/null 2>/dev/null
+# sh -c 'git stash && git stash drop' 1>/dev/null 2>/dev/null
 git checkout "$QEMUAFL_VERSION" || echo Warning: could not check out to commit $QEMUAFL_VERSION

 echo "[*] Making sure imported headers matches"

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index 213f3b27..d1ca56b8 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit 213f3b27dd099ef352181c48cd75c0f20a73e3f0
+Subproject commit d1ca56b84e78f821406eef28d836918edfc8d610
-- cgit 1.4.1

From 4e567d3f5d22ae14bffc17cc8d475959d5fcfc21 Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Tue, 16 Mar 2021 15:38:45 +0100
Subject: update qemuafl

---
 qemu_mode/QEMUAFL_VERSION | 2 +-
 qemu_mode/qemuafl         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION
index a7f25da3..68290650 100644
--- a/qemu_mode/QEMUAFL_VERSION
+++ b/qemu_mode/QEMUAFL_VERSION
@@ -1 +1 @@
-d1ca56b84e
+0fb212daab

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index d1ca56b8..0fb212da 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit d1ca56b84e78f821406eef28d836918edfc8d610
+Subproject commit 0fb212daab492411b3e323bc18a3074c1aecfd37
-- cgit 1.4.1

From f5420e737a1ed1dbeb81783836d0449c06aa0fcc Mon Sep 17 00:00:00 2001
From: vanhauser-thc
Date: Tue, 16 Mar 2021 16:15:29 +0100
Subject: rtn fix

---
 docs/Changelog.md                   |  2 +-
 instrumentation/afl-compiler-rt.o.c | 23 ++++++++++++-----------
 qemu_mode/qemuafl                   |  2 +-
 3 files changed, 14 insertions(+), 13 deletions(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/docs/Changelog.md b/docs/Changelog.md
index 8222f942..9aea3638 100644
--- a/docs/Changelog.md
+++ b/docs/Changelog.md
@@ -9,7 +9,7 @@ Want to stay in the loop on major new features? Join our mailing list by
 sending a mail to .
 ### Version ++3.12a (dev)
-  - ...
+  - fix cmplog rtn (rare crash and not being able to gather ptr data)

 ### Version ++3.11c (release)

diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c
index 50b4e2c5..892118fb 100644
--- a/instrumentation/afl-compiler-rt.o.c
+++ b/instrumentation/afl-compiler-rt.o.c
@@ -1734,25 +1734,26 @@ static int area_is_valid(void *ptr, size_t len) {

   long r = syscall(SYS_write, __afl_dummy_fd[1], ptr, len);

-  if (unlikely(r <= 0 || r > len)) {  // fail - maybe hitting asan boundary?
+  if (r <= 0 || r > len) return 0;

-    char *p = (char *)ptr;
-    long page_size = sysconf(_SC_PAGE_SIZE);
-    char *page = (char *)((uintptr_t)p & ~(page_size - 1)) + page_size;
-    if (page >= p + len) { return 0; }  // no isnt, return fail
-    len = page - p - len;
-    r = syscall(SYS_write, __afl_dummy_fd[1], page, len);
+  // even if the write succeeds this can be a false positive if we cross
+  // a page boundary. who knows why.

-  }
+  char *p = (char *)ptr;
+  long page_size = sysconf(_SC_PAGE_SIZE);
+  char *page = (char *)((uintptr_t)p & ~(page_size - 1)) + page_size;

-  // partial writes - we return what was written.
-  if (likely(r >= 0 && r <= len)) {
+  if (page > p + len) {

+    // no, not crossing a page boundary
     return (int)r;

   } else {

-    return 0;
+    // yes it crosses a boundary, hence we can only return the length of
+    // the rest of the first page; we cannot detect if the next page is
+    // valid or not, neither by SYS_write nor msync() :-(
+    return (int)(page - p);

   }

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index 0fb212da..d1ca56b8 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit 0fb212daab492411b3e323bc18a3074c1aecfd37
+Subproject commit d1ca56b84e78f821406eef28d836918edfc8d610
-- cgit 1.4.1

From 65e3770badc154788a8c9a9c16c1c2a0ebed833f Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Tue, 16 Mar 2021 16:32:35 +0100
Subject: qemuafl

---
 qemu_mode/qemuafl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index d1ca56b8..0fb212da 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit d1ca56b84e78f821406eef28d836918edfc8d610
+Subproject commit 0fb212daab492411b3e323bc18a3074c1aecfd37
-- cgit 1.4.1

From 82554677a812470ccebae7f1e7c76e11aed82eaf Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Wed, 24 Mar 2021 11:00:13 +0100
Subject: update qemuafl

---
 qemu_mode/QEMUAFL_VERSION | 2 +-
 qemu_mode/qemuafl         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION
index 68290650..b541116b 100644
--- a/qemu_mode/QEMUAFL_VERSION
+++ b/qemu_mode/QEMUAFL_VERSION
@@ -1 +1 @@
-0fb212daab
+d6ff420165

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index 0fb212da..d6ff4201 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit 0fb212daab492411b3e323bc18a3074c1aecfd37
+Subproject commit d6ff420165aca12996d4b307b4641445048f0e71
-- cgit 1.4.1

From a908a982254305777a3ea4dadf70120089b4ddf4 Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Wed, 24 Mar 2021 11:06:02 +0100
Subject: update qemuafl

---
 qemu_mode/QEMUAFL_VERSION | 2 +-
 qemu_mode/qemuafl         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION
index b541116b..8d95c359 100644
--- a/qemu_mode/QEMUAFL_VERSION
+++ b/qemu_mode/QEMUAFL_VERSION
@@ -1 +1 @@
-d6ff420165
+ddc4a9748d

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index d6ff4201..ddc4a974 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit d6ff420165aca12996d4b307b4641445048f0e71
+Subproject commit ddc4a9748d59857753fb33c30a356f354595f36d
-- cgit 1.4.1

From 2b3642aa39fc79b5fd394120f0fadf4476d4476e Mon Sep 17 00:00:00 2001
From: vanhauser-thc
Date: Wed, 24 Mar 2021 11:13:16 +0100
Subject: v3.12c ready to go

---
 docs/Changelog.md | 1 +
 qemu_mode/qemuafl | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

(limited to 'qemu_mode/qemuafl')

diff --git a/docs/Changelog.md b/docs/Changelog.md
index 476c6f4e..5b7d6ab6 100644
--- a/docs/Changelog.md
+++ b/docs/Changelog.md
@@ -13,6 +13,7 @@ sending a mail to .
   - added AFL_TARGET_ENV variable to pass extra env vars to the target
     (for things like LD_LIBRARY_PATH)
   - fix map detection, AFL_MAP_SIZE not needed anymore for most cases
+  - fix counting favorites (just a display thing)
   - afl-cc:
     - fix cmplog rtn (rare crash and not being able to gather ptr data)
     - fix our own PCGUARD implementation to compile with llvm 10.0.1

diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl
index ddc4a974..0fb212da 160000
--- a/qemu_mode/qemuafl
+++ b/qemu_mode/qemuafl
@@ -1 +1 @@
-Subproject commit ddc4a9748d59857753fb33c30a356f354595f36d
+Subproject commit 0fb212daab492411b3e323bc18a3074c1aecfd37
-- cgit 1.4.1
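The AFL_TARGET_ENV entry in the changelog above lets you hand environment variables to the fuzzed binary only, without them also affecting afl-fuzz itself. A sketch, assuming space-separated `VAR=value` assignments and an illustrative library path (see docs/env_variables.md for the authoritative syntax):

```sh
# Pass LD_LIBRARY_PATH to the target process only, not to afl-fuzz.
AFL_TARGET_ENV="LD_LIBRARY_PATH=/opt/target/libs" \
  afl-fuzz -Q -i input_corpus -o findings -- ./target @@
```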